prompt
stringlengths 15
655k
| completion
stringlengths 3
32.4k
| api
stringlengths 8
52
|
---|---|---|
import numpy as np
import scipy.signal as sp
from .common import *
from . import adaptivestft
from . import cheaptrick
from . import hnm_qfft
from . import hnm_qhm
def synthSinusoid(hFreq, hAmp, hPhase, r, sr):
# constant
nOut = len(r)
nHar = len(hFreq)
nyq = sr / 2
# check input
assert(nHar == len(hAmp))
assert(nHar == len(hPhase))
assert(hFreq.ndim == 1)
assert(hAmp.ndim == 1)
assert(hPhase.ndim == 1)
assert(sr > 0.0)
# compute
out = np.zeros(nOut)
for iHar in range(nHar):
freq = hFreq[iHar]
amp = hAmp[iHar]
phase = hPhase[iHar] if(not hPhase is None) else 0.0
if(freq <= 0.0 or freq >= nyq):
break
if(amp <= 0.0):
continue
out[:] += np.cos(2.0 * np.pi / sr * freq * r + phase) * amp
return out
def filterNoise(x, noiseEnvList, hopSize):
olaFac = 2
windowFac = 4
nHop, noiseEnvBin = noiseEnvList.shape
windowSize = hopSize * windowFac
nBin = windowSize // 2 + 1
nX = len(x)
assert(getNFrame(nX, hopSize) == nHop)
assert(hopSize % olaFac == 0)
window, windowMean = np.hanning(windowSize), 0.5
analyzeNormFac = 0.5 * windowMean * windowSize
synthNormScale = windowFac // 2 * olaFac
window = np.sqrt(window)
buff = np.zeros(nBin, dtype = np.complex128)
out = np.zeros(nX)
for iFrame in range(nHop * olaFac):
iHop = iFrame // olaFac
iCenter = iFrame * hopSize // olaFac
frame = getFrame(x, iCenter, windowSize)
if(np.max(frame) == np.min(frame)):
continue
ffted = np.fft.rfft(frame * window)
phase = np.angle(ffted)
env = ipl.interp1d(np.linspace(0, nBin, noiseEnvBin), noiseEnvList[iHop], kind = "linear")(np.arange(nBin))
magn = np.exp(env) * analyzeNormFac
buff.real = magn * np.cos(phase)
buff.imag = magn * np.sin(phase)
synthed = np.fft.irfft(buff) * window
ob, oe, ib, ie = getFrameRange(nX, iCenter, windowSize)
out[ib:ie] += synthed[ob:oe] / synthNormScale
return out
class Analyzer:
supoortedHarmonicAnalysisMethod = {
"qfft": hnm_qfft.Processor,
"qhmair": hnm_qhm.Processor,
}
def __init__(self, sr, **kwargs):
self.samprate = float(sr)
self.hopSize = kwargs.get("hopSize", roundUpToPowerOf2(self.samprate * 0.0025))
self.fftSize = kwargs.get("fftSize", roundUpToPowerOf2(self.samprate * 0.05))
self.mvf = kwargs.get("mvf", min(sr / 2 - 1e3, 20e3))
self.harmonicAnalysisMethod = kwargs.get("harmonicAnalysisMethod", 'qfft')
self.harmonicAnalysisParameter = kwargs.get("harmonicAnalysisParameter", {})
self.noiseEnergyThreshold = kwargs.get("noiseEnergyThreshold", 1e-8)
assert(self.mvf <= self.samprate / 2)
def __call__(self, x, f0List):
# constant
nX = len(x)
nBin = self.fftSize // 2 + 1
nHop = getNFrame(nX, self.hopSize)
minF0 = np.min(f0List[f0List > 0.0])
maxHar = max(0, int(self.mvf / minF0))
# check input
assert(nHop == len(f0List))
assert(f0List.ndim == 1)
# dc adjustment
x = simpleDCRemove(x)
# (quasi)harmonic analysis
harProc = self.supoortedHarmonicAnalysisMethod[self.harmonicAnalysisMethod](self.mvf, self.samprate, **self.harmonicAnalysisParameter, hopSize = self.hopSize)
f0List, hFreqList, hAmpList, hPhaseList = harProc(x, f0List, maxHar)
# resynth & ola & record sinusoid energy
sinusoid = np.zeros(nX, dtype = np.float64)
sinusoidEnergyList = np.zeros(nHop, dtype = np.float64)
olaWindow = np.hanning(2 * self.hopSize)
energyAnalysisWindowNormFac = 1.0 / np.sqrt(0.375)
for iHop, f0 in enumerate(f0List):
if(f0 <= 0.0):
continue
energyAnalysisRadius = int(round(self.samprate / f0)) * 2
synthLeft = max(energyAnalysisRadius, self.hopSize)
synthRight = max(energyAnalysisRadius + 1, self.hopSize)
synthRange = np.arange(-synthLeft, synthRight)
ob, oe, ib, ie = getFrameRange(nX, iHop * self.hopSize, 2 * self.hopSize)
synthed = synthSinusoid(hFreqList[iHop], hAmpList[iHop], hPhaseList[iHop], synthRange, self.samprate)
# integrate energy
energyBegin = synthLeft - energyAnalysisRadius
energyAnalysisWindow = np.hanning(energyAnalysisRadius * 2 + 1)
sinusoidEnergyList[iHop] = np.mean((synthed[energyBegin:energyBegin + energyAnalysisRadius * 2 + 1] * energyAnalysisWindow * energyAnalysisWindowNormFac) ** 2)
# ola
olaBegin = synthLeft - self.hopSize
sinusoid[ib:ie] += synthed[olaBegin + ob:olaBegin + oe] * olaWindow[ob:oe]
noise = x - sinusoid # extract noise
# build noise envelope
envProc = cheaptrick.Processor(self.samprate, hopSize = self.hopSize, fftSize = self.fftSize)
noiseEnvList = envProc(noise, f0List)
# record noise energy
energyAnalysisWindowNormFac = 1.0 / np.sqrt(0.375)
noiseEnergyList = np.zeros(nHop, dtype = np.float64)
for iHop, f0 in enumerate(f0List):
if(f0 > 0.0):
frame = getFrame(noise, iHop * self.hopSize, 4 * int(round(self.samprate / f0)) + 1)
else:
frame = getFrame(noise, iHop * self.hopSize, 2 * self.hopSize)
energyAnalysisWindow = np.hanning(len(frame))
noiseEnergyList[iHop] = np.mean((frame * energyAnalysisWindow * energyAnalysisWindowNormFac) ** 2)
noiseEnergyList[noiseEnergyList < self.noiseEnergyThreshold] = 0.0
# relative phase shift
need = f0List > 0.0
f0Need = f0List[need]
nNeed = len(f0Need)
f0Need = f0Need.reshape(nNeed, 1)
base = hPhaseList[need].T[0].reshape(nNeed, 1)
hPhaseList[need] = wrap(hPhaseList[need] - (hFreqList[need] / f0Need) * base)
# relative harmonic shift
voiced = f0List > 0.0
nVoiced = np.sum(voiced)
hFreqList[voiced] /= f0List[voiced].reshape(nVoiced, 1) * np.arange(1, maxHar + 1)
hFreqList[hFreqList <= 0.0] = 1.0
# debug
saveWav("sin.wav", sinusoid, self.samprate)
saveWav("noise.wav", noise, self.samprate)
return f0List, hFreqList, hAmpList, hPhaseList, sinusoidEnergyList, noiseEnvList, noiseEnergyList
class Synther:
def __init__(self, sr, **kwargs):
self.samprate = float(sr)
self.hopSize = kwargs.get("hopSize", roundUpToPowerOf2(self.samprate * 0.0025))
self.fftSize = kwargs.get("fftSize", roundUpToPowerOf2(self.samprate * 0.05))
self.olaFac = kwargs.get("olaFac", 2)
self.mvf = kwargs.get("mvf", min(sr / 2 - 1e3, 20e3))
self.maxNoiseEnvHarmonic = kwargs.get("maxNoiseEnvHarmonic", 4)
self.maxNoiseEnvDCAdjustment = kwargs.get("maxNoiseEnvDCAdjustment", 10.0)
assert(self.mvf <= self.samprate / 2)
assert(self.hopSize % self.olaFac == 0)
def __call__(self, f0List, hFreqList, hAmpList, hPhaseList, sinusoidEnergyList, noiseEnvList, noiseEnergyList, enableSinusoid = True, enableNoise = True):
# constant
nHop = len(f0List)
nOut = nHop * self.hopSize
nHar = hFreqList.shape[1]
nBin = self.fftSize // 2 + 1
energyAnalysisWindowNormFac = 1.0 / np.sqrt(0.375)
hFreqList = hFreqList.copy()
# check input
assert(f0List.ndim == 1)
assert(hFreqList.ndim == 2)
assert(hAmpList.ndim == 2)
assert(hPhaseList.ndim == 2)
assert(sinusoidEnergyList.ndim == 1)
assert(len(hFreqList) == nHop)
assert(len(hAmpList) == nHop)
assert(len(hPhaseList) == nHop)
assert(len(sinusoidEnergyList) == nHop)
assert(noiseEnvList.ndim == 2)
assert(noiseEnergyList.ndim == 1)
assert(len(noiseEnvList) == nHop)
assert(len(noiseEnergyList) == nHop)
assert(noiseEnvList.shape[1] == nBin)
# relative harmonic shift
voiced = f0List > 0.0
nVoiced = np.sum(voiced)
hFreqList[voiced] *= f0List[voiced].reshape(nVoiced, 1) * np.arange(1, nHar + 1)
hFreqList[np.logical_or(hFreqList <= 0.0, hFreqList > self.mvf)] = 0.0
# relative phase shift & olaFac
syncedHPhaseList = np.zeros((nHop * self.olaFac, nHar))
basePhase = 0.0
syncedHPhaseList[0] = hPhaseList[0]
for iFrame in range(1, nHop * self.olaFac):
iHop = iFrame // self.olaFac
f0 = f0List[iHop]
if(f0 <= 0.0):
continue
basePhase += f0 * 2 * np.pi * (self.hopSize / self.olaFac / self.samprate)
syncedHPhaseList[iFrame] = wrap(hPhaseList[iHop] + (hFreqList[iHop] / f0) * wrap(basePhase))
if(enableSinusoid):
sinusoid = np.zeros(nOut)
synthWindow = np.hanning(2 * self.hopSize)
for iFrame in range(nHop * self.olaFac):
iHop = iFrame // self.olaFac
f0 = f0List[iHop]
if(f0 <= 0.0 or sinusoidEnergyList[iHop] <= 0.0):
continue
energyAnalysisRadius = int(round(self.samprate / f0)) * 2
synthLeft = max(energyAnalysisRadius, self.hopSize)
synthRight = max(energyAnalysisRadius + 1, self.hopSize)
synthRange = np.arange(-synthLeft, synthRight)
energyAnalysisWindow =
|
np.hanning(energyAnalysisRadius * 2 + 1)
|
numpy.hanning
|
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 ai :
"""This script compares sources common to two user-specified sourcelists and displays various measures of their
differences. The differences can be calculated using one of three user-selectable methods:
* Absolute: Just a simple difference e.g. Comparison - Reference.
* Overall mean percent difference: Percent difference based on the **overall mean** reference value e.g. ((Comparison-Reference)/mean_overall(Reference)) x 100.
* Dynamic percent difference: Percent difference values are calculated discretely for each pair of comparison/reference values e.g. ((Comparison[n]-Reference[n])/Reference[n]) x 100.
3x3-sigma clipped mean, median and standard deviation, and non-sigma clipped min and max values are
computed for the following:
* Image X position (in pixels)
* Image Y position (in pixels)
* Right Ascension (in arcseconds)
* Declination (in arcseconds)
* Flux (Outer Aperture)
* Magnitude (Inner Aperture)
* Magnitude (Outer Aperture)
* Magnitude Error (Inner Aperture)
* Magnitude Error (Outer Aperture)
* MSKY
* STDDEV
* CI (Concentration Index)
Absolute bit-wise comparisons are also performed for the following item:
* Flag Value
.. note::
Not all sourcelist types compatible with this comparison script contain all of the columns listed above.
Statistics (and optionally plots) can only be generated for columns common to both user-specified sourcelists.
Thus, it is to be expected that not all runs will yield comparisons for all columns listed above.
Regression Testing
------------------
**All** of the following criteria must be met for the test to be declared "successful":
* All non-flag (linear) column comparisons: Less than 5% of all comparison - reference difference values are greater than 3 sigma from the sigma-clipped mean
* X position:
* Y position:
* Right Ascension:
* Declination:
* Flux (Inner Aperture):
* Flux (Outer Aperture):
* Magnitude (Inner Aperture):
* Magnitude (Outer Aperture):
* Magnitude error (Inner Aperture):
* Magnitude error (Outer Aperture):
* Flag Value: Less than 5% of all matched sources have differing flag values.
.. note::
Sigma-clipped values for mean, sigma, and median are computed using the astropy.stats.sigma_clipped_stats() routine with three rounds of three-sigma clipping.
Plots
-----
Plots will be generated if the optional plot input is set to 'screen' or 'file'. If set to 'screen', the plots will be
simply displayed on-screen, and not written to disk. If set to 'file', a plot and statistical summary (on a second
page) is generated for each valid comparison. All plots and summaries saved to a single multiple-page pdf file. The
default name of the combined plot pdf file is 'combined_plot.pdf'. If a user-specified filename prefix string was
specified, it will be prepended to the default name.
The code generates up to three different types of plots:
* Difference histogram plots: These are the most commonly generated plot products are are produced for all column
comparisons except for the bit-wise comparison.
* Bit value barcharts
* X-Y absolute difference vector plot
Path
----
drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py
Dependencies
------------
- drizzlepac/drizzlepac/devutils/comparison_tools/starmatch_hist.py
- The `PyPDF2 <https://pypi.org/project/PyPDF2/>`_ python library
Inputs
------
* Required input
1. *sourcelistNames*
* A space-separated pair of sourcelists to compare. The first sourcelist is assumed to be the reference sourcelist that the second is being compared to.
* Optional inputs:
#. -d *debugMode*
* Perform additional match quality diagnostics
* Input choices: "True" or "False"
* Default value: False
#. -i *imageNames*
* A space-separated list of the fits images that were used to generate the input sourcelists. The first image corresponds to the first listed sourcelist, and so in. These will be used to improve the sourcelist alignment and matching.
#. -m *diffMode*
* How should the comp-ref difference be calculated? "absolute" is simply the straight comp-ref difference. "pmean" is the mean percent difference ((C-R)/avg(R)) x 100. "pdynamic" is the dynamic percent difference ((C-R)/R) x 100
* Input choices: "absolute", "pmean" or "pdynamic"
* Default value: "pmean"
#. -p *plotGen*
* Generate plots?
* Input choices: "True" or "False"
* Default value: False
#. -s *plotfile_prefix_string*
* Text string that will prepend the plot files generated if plots are written to files ***REQUIRES the -p option set to 'file'***
* Default value: blank text string ('')
#. -v *verbose*
* Display verbose output?
* Input choices: "True" or "False"
* Default value: True
Classes and Functions
---------------------
"""
import argparse
import collections
from datetime import datetime
import os
import pdb
import random
import sys
from astropy.io import fits
from astropy.stats import sigma_clipped_stats
from astropy.coordinates import SkyCoord
from astropy.table import Table
import matplotlib.pyplot as plt
import numpy as np
from PyPDF2 import PdfFileMerger
from drizzlepac.haputils import diagnostic_utils
from drizzlepac.devutils.comparison_tools import starmatch_hist
from stsci.tools import logutil
from stwcs import wcsutil
__taskname__ = 'compare_sourcelists'
MSG_DATEFMT = '%Y%j%H%M%S'
SPLUNK_MSG_FORMAT = '%(asctime)s %(levelname)s src=%(name)s- %(message)s'
log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, format=SPLUNK_MSG_FORMAT,
datefmt=MSG_DATEFMT)
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def check_match_quality(matched_x_list, matched_y_list):
"""Creates region file to check quality of source matching.
Parameters
----------
matched_x_list : list
list of ref and comp x coords for matched sources
matched_y_list : list
list of ref and comp y coords for matched sources
Returns
-------
Nothing.
"""
out_filename = "match_check.reg"
num_display = 5000 # Number of pairs to plot
list_length = len(matched_x_list[0])
if num_display > list_length: # if the list of matched sources is smaller than num_display, just use all matched pairs, rather than a randomly selected subset.
index_list = np.arange(list_length)
else:
index_list = random.sample(range(1, list_length), num_display)
with open(out_filename, "w") as fout:
for index_no in index_list:
fout.write("circle({},{},10) # color=green\n".format(matched_x_list[0][index_no], matched_y_list[0][
index_no])) # write ref source circle
fout.write("circle({},{},10) # color=red\n".format(matched_x_list[1][index_no],
matched_y_list[1][index_no])) # write comp source circle
fout.write(
"line({},{},{},{}) # color=blue\n".format(matched_x_list[0][index_no], matched_y_list[0][index_no],
matched_x_list[1][index_no],
matched_y_list[1][index_no])) # write line connecting the two
log.info("Wrote region file {}".format(out_filename))
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def computeFlagStats(matchedRA, max_diff, plotGen, plot_title, plotfile_prefix, catalog_names, verbose):
"""Compute and report statistics on the differences in flagging.
Parameters
----------
matchedRA : numpy.ndarray
A 2 x len(refLines) sized numpy array. Column 1: matched reference values.
Column 2: The corresponding matched comparison values
max_diff : float
Maximum allowable percentage of all matched sources with differences in their flag values for comparison to be
declared a success
plotGen : bool
Generate plots and display them to the screen (True/False)?
plot_title : str
text string that will be used in plot title.
plotfile_prefix : str
text string that will prepend the plot files generated if plots are written to files
catalog_names : list
list of the sourcelist filenames used as the comparison and the reference
verbose : bool
display verbose output?
Returns
-------
regTestStatus : str
overall test result and statistics
"""
pdf_file_list = []
log.info(">>>>>> Comparison - reference sourcelist {} differences <<<<<<".format(plot_title))
# compute overall percentage of matched sources with flagging differences
flag_diff_list = list(matchedRA[0] - matchedRA[1])
n_total = len(matchedRA[0])
n_unchanged = flag_diff_list.count(0)
n_changed = n_total - n_unchanged
pct_changed = (float(n_changed) / float(n_total)) * 100.0
# set up arrays to count stuff up
bit_list = [0, 1, 2, 4, 8, 16, 32, 64, 128]
refFlagBreakdown = np.zeros(9, dtype=int)
compFlagBreakdown = np.zeros(9, dtype=int)
unchangedFlagBreakdown = np.zeros(9, dtype=int)
on_off_FlagFlips = np.zeros(9, dtype=int)
off_on_FlagFlips = np.zeros(9, dtype=int)
for refFlag, compFlag in zip(matchedRA[0], matchedRA[1]):
# break down each flag value into component bit values, add values to totals
refFlagRA = deconstruct_flag(refFlag)
compFlagRA = deconstruct_flag(compFlag)
refFlagBreakdown += refFlagRA
compFlagBreakdown += compFlagRA
# find differences in flagging, total up which bits were turned on, which were turned off.
diffFlagRA = compFlagRA - refFlagRA
if not np.array_equal(refFlagRA, compFlagRA):
off_on_FlagFlips[np.where(diffFlagRA == 1)] += 1 # bits that are off in ref but on in comp
on_off_FlagFlips[np.where(diffFlagRA == -1)] += 1 # bits that are on in ref but off in comp
unchangedFlagBreakdown[np.where((refFlagRA == 1) & (
compFlagRA == 1))] += 1 # takes care of the case were comp and ref have differing bits, but also have additional bits that are unchanged.
if
|
np.array_equal(refFlagRA, compFlagRA)
|
numpy.array_equal
|
# coding: utf8
import numpy as np
import pybullet as pyb
class FootstepPlanner:
"""A footstep planner that handles the choice of future
footsteps location depending on the current and reference
velocities of the quadruped.
Args:
dt (float): Duration of one time step of the MPC
n_periods (int): Number of gait periods in one gait cycle
"""
def __init__(self, dt, n_periods):
# Feedback gain for the feedback term of the planner
self.k_feedback = 0.03
# Position of shoulders in local frame
self.shoulders = np.array(
[[0.19, 0.19, -0.19, -0.19], [0.15005, -0.15005, 0.15005, -0.15005]])
# Time step of the contact sequence
self.dt = dt
# Value of the gravity acceleartion
self.g = 9.81
# Value of the maximum allowed deviation due to leg length
self.L = 0.12
# The desired (x,y) position of footsteps
# If a foot is in swing phase it is where it should land
# If a foot is in stance phase is is where it should land at the end of its next swing phase
self.footsteps = self.shoulders.copy()
# Previous variable but in world frame for visualisation purpose
self.footsteps_world = self.footsteps.copy()
# To store the result of the get_prediction function
self.footsteps_prediction = np.zeros((3, 4))
# To store the result of the update_footsteps_tsid function
self.footsteps_tsid = np.zeros((3, 4))
self.t_remaining_tsid = np.zeros((1, 4))
# Gait duration
self.n_periods = n_periods
self.T_gait = 0.32
# Number of time steps in the prediction horizon
self.n_steps = np.int(n_periods*self.T_gait/self.dt)
# Reference trajectory matrix of size 12 by (1 + N) with the current state of
# the robot in column 0 and the N steps of the prediction horizon in the others
self.xref = np.zeros((12, 1 + self.n_steps))
# Gait matrix
self.gait = np.zeros((20, 5))
self.fsteps = np.full((self.gait.shape[0], 13), np.nan)
self.gait_invdyn = self.gait.copy()
self.fsteps_invdyn = self.fsteps.copy()
self.flag_rotation_command = int(0)
self.h_rotation_command = 0.20
# Create gait matrix
self.create_walking_trot()
# self.create_bounding()
# self.create_side_walking()
# self.create_static()
def getRefStates(self, k, T_gait, lC, abg, lV, lW, v_ref, h_ref=0.2027682):
"""Compute the reference trajectory of the CoM for each time step of the
predition horizon. The ouput is a matrix of size 12 by (N+1) with N the number
of time steps in the gait cycle (T_gait/dt) and 12 the position, orientation,
linear velocity and angular velocity vertically stacked. The first column contains
the current state while the remaining N columns contains the desired future states.
Args:
k (int): the number of MPC iterations since the start of the simulation
T_gait (float): duration of one period of gait
lC (3x0 array): position of the center of mass in local frame
abg (3x0 array): orientation of the trunk in local frame
lV (3x0 array): linear velocity of the CoM in local frame
lW (3x0 array): angular velocity of the trunk in local frame
v_ref (6x1 array): desired velocity vector of the flying base in local frame (linear and angular stacked)
h_ref (float): reference height for the trunk
"""
# Update x and y velocities taking into account the rotation of the base over the prediction horizon
yaw = np.linspace(0, T_gait-self.dt, self.n_steps) * v_ref[5, 0]
self.xref[6, 1:] = v_ref[0, 0] * np.cos(yaw) - v_ref[1, 0] * np.sin(yaw)
self.xref[7, 1:] = v_ref[0, 0] * np.sin(yaw) + v_ref[1, 0] * np.cos(yaw)
# Update x and y depending on x and y velocities (cumulative sum)
self.xref[0, 1:] = self.dt * np.cumsum(self.xref[6, 1:])
self.xref[1, 1:] = self.dt * np.cumsum(self.xref[7, 1:])
# Start from position of the CoM in local frame
self.xref[0, 1:] += lC[0, 0]
self.xref[1, 1:] += lC[1, 0]
# Desired height is supposed constant so we only need to set it once
if k == 0:
self.xref[2, 1:] = h_ref
# No need to update Z velocity as the reference is always 0
# No need to update roll and roll velocity as the reference is always 0 for those
# No need to update pitch and pitch velocity as the reference is always 0 for those
# Update yaw and yaw velocity
dt_vector = np.linspace(self.dt, T_gait, self.n_steps)
self.xref[5, 1:] = v_ref[5, 0] * dt_vector
self.xref[11, 1:] = v_ref[5, 0]
# Update the current state
self.xref[0:3, 0:1] = lC
self.xref[3:6, 0:1] = abg
self.xref[6:9, 0:1] = lV
self.xref[9:12, 0:1] = lW
# Time steps [0, dt, 2*dt, ...]
to = np.linspace(0, T_gait-self.dt, self.n_steps)
# Threshold for gamepad command (since even if you do not touch the joystick it's not 0.0)
step = 0.05
# Detect if command is above threshold
if (np.abs(v_ref[2, 0]) > step) and (self.flag_rotation_command != 1):
self.flag_rotation_command = 1
# State machine
if (np.abs(v_ref[2, 0]) > step) and (self.flag_rotation_command == 1): # Command with joystick
self.h_rotation_command += v_ref[2, 0] * self.dt
self.xref[2, 1:] = self.h_rotation_command
self.xref[8, 1:] = v_ref[2, 0]
self.flag_rotation_command = 1
elif (np.abs(v_ref[2, 0]) < step) and (self.flag_rotation_command == 1): # No command with joystick
self.xref[8, 1:] = 0.0
self.xref[9, 1:] = 0.0
self.xref[10, 1:] = 0.0
self.flag_rotation_command = 2
elif self.flag_rotation_command == 0: # Starting state of state machine
self.xref[2, 1:] = h_ref
self.xref[8, 1:] = 0.0
if self.flag_rotation_command != 0:
# Applying command to pitch and roll components
self.xref[3, 1:] = self.xref[3, 0].copy() + v_ref[3, 0].copy() * to
self.xref[4, 1:] = self.xref[4, 0].copy() + v_ref[4, 0].copy() * to
self.xref[9, 1:] = v_ref[3, 0].copy()
self.xref[10, 1:] = v_ref[4, 0].copy()
# Current state vector of the robot
self.x0 = self.xref[:, 0:1]
return 0
def update_viewer(self, viewer, initialisation):
"""Update display for visualization purpose
Create sphere objects during the first iteration of the main loop then only
update their location
Args:
viewer (gepetto-viewer): A gepetto viewer object
initialisation (bool): true if it is the first iteration of the main loop
"""
# Display non-locked target footholds with green spheres (gepetto gui)
rgbt = [0.0, 1.0, 0.0, 0.5]
for i in range(4):
if initialisation:
viewer.gui.addSphere("world/sphere"+str(i)+"_nolock", .02, rgbt) # .1 is the radius
viewer.gui.applyConfiguration(
"world/sphere"+str(i)+"_nolock", (self.footsteps_world[0, i],
self.footsteps_world[1, i], 0.0, 1., 0., 0., 0.))
return 0
def create_static(self):
"""Create the matrices used to handle the gait and initialize them to keep the 4 feet in contact
self.gait and self.fsteps matrices contains information about the gait
"""
# Number of timesteps in a half period of gait
N = np.int(0.5 * self.T_gait/self.dt)
# Starting status of the gait
# 4-stance phase, 2-stance phase, 4-stance phase, 2-stance phase
self.gait = np.zeros((6, 5))
self.gait[0:4, 0] = np.array([2*N, 0, 0, 0])
self.fsteps[0:4, 0] = self.gait[0:4, 0]
# Set stance and swing phases
# Coefficient (i, j) is equal to 0.0 if the j-th feet is in swing phase during the i-th phase
# Coefficient (i, j) is equal to 1.0 if the j-th feet is in stance phase during the i-th phase
self.gait[0, 1:] = np.ones((4,))
return 0
def create_walking_trot(self):
"""Create the matrices used to handle the gait and initialize them to perform a walking trot
self.gait and self.fsteps matrices contains information about the walking trot
"""
# Number of timesteps in a half period of gait
N = np.int(0.5 * self.T_gait/self.dt)
# Starting status of the gait
# 4-stance phase, 2-stance phase, 4-stance phase, 2-stance phase
self.gait = np.zeros((self.fsteps.shape[0], 5))
for i in range(self.n_periods):
self.gait[(4*i):(4*(i+1)), 0] = np.array([1, N-1, 1, N-1])
self.fsteps[(4*i):(4*(i+1)), 0] = self.gait[(4*i):(4*(i+1)), 0]
# Set stance and swing phases
# Coefficient (i, j) is equal to 0.0 if the j-th feet is in swing phase during the i-th phase
# Coefficient (i, j) is equal to 1.0 if the j-th feet is in stance phase during the i-th phase
self.gait[4*i+0, 1:] = np.ones((4,))
self.gait[4*i+1, [1, 4]] = np.ones((2,))
self.gait[4*i+2, 1:] = np.ones((4,))
self.gait[4*i+3, [2, 3]] = np.ones((2,))
return 0
def create_bounding(self):
"""Create the matrices used to handle the gait and initialize them to perform a bounding gait
self.gait and self.fsteps matrices contains information about the gait
"""
# Number of timesteps in a half period of gait
N = np.int(0.5 * self.T_gait/self.dt)
# Starting status of the gait
# 4-stance phase, 2-stance phase, 4-stance phase, 2-stance phase
self.gait = np.zeros((6, 5))
self.gait[0:4, 0] = np.array([1, N-1, 1, N-1])
self.fsteps[0:4, 0] = self.gait[0:4, 0]
# Set stance and swing phases
# Coefficient (i, j) is equal to 0.0 if the j-th feet is in swing phase during the i-th phase
# Coefficient (i, j) is equal to 1.0 if the j-th feet is in stance phase during the i-th phase
self.gait[0, 1:] = np.ones((4,))
self.gait[1, [1, 2]] = np.ones((2,))
self.gait[2, 1:] = np.ones((4,))
self.gait[3, [3, 4]] = np.ones((2,))
return 0
def create_side_walking(self):
"""Create the matrices used to handle the gait and initialize them to perform a walking gait
with feet on the same side in contact
self.gait and self.fsteps matrices contains information about the gait
"""
# Number of timesteps in a half period of gait
N = np.int(0.5 * self.T_gait/self.dt)
# Starting status of the gait
# 4-stance phase, 2-stance phase, 4-stance phase, 2-stance phase
self.gait = np.zeros((6, 5))
self.gait[0:4, 0] =
|
np.array([1, N-1, 1, N-1])
|
numpy.array
|
import argparse
import pickle
import time
from concurrent.futures import ProcessPoolExecutor
from functools import wraps
import numpy as np
import torch
from sklearn.decomposition import PCA
from sklearn.utils import shuffle
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
import cy_heuristics as heu
from linearsolver import LinearSolver
from linearsolver import sample_gumbel, get_rank
from sched_solver import Solver
test_module = heu.test_RTA_LC
parser = argparse.ArgumentParser()
parser.add_argument("--num_tasks", type=int, default=32)
parser.add_argument("--num_procs", type=int, default=4)
parser.add_argument("--num_test_dataset", type=int, default=2)
parser.add_argument("--embedding_size", type=int, default=128)
parser.add_argument("--hidden_size", type=int, default=128)
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--use_deadline", action="store_true")
parser.add_argument("--range_l", type=str, default="3.10")
parser.add_argument("--range_r", type=str, default="3.10")
parser.add_argument("--use_cuda", action="store_true")
confidence = 0.05
args = parser.parse_args()
use_deadline = args.use_deadline
SAVING_FILE_NAME = "p%d-t%d-d%d-l[%s, %s]" \
% (args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r)
NUM_TEST = 100
NET_COMPARE = False
def wrap(x):
_sample, num_proc, use_deadline = x
return heu.OPA(_sample, num_proc, None, use_deadline)
def dm_wrap(x):
_sample, num_proc, use_deadline = x
return heu.test_RTA_LC(_sample, num_proc, 1, use_deadline)
def timer(func):
@wraps(func)
def wrapper(*args):
start = time.time()
result = func(*args)
end = time.time()
print("Function {name}, Time : {time:.3f} with result {result}"
.format(name=func.__name__, time=end-start, result=result))
return result
return wrapper
def get_util_range(num_proc):
util = [str(x) for x in range(10, num_proc * 100, 10)]
ret = []
for x in util:
if len(x) == 2:
ret.append('0.' + x)
else:
ret.append(x[:len(x) - 2] + '.' + x[len(x) - 2:])
return ret
class Datasets(Dataset):
def __init__(self, l):
super(Datasets, self).__init__()
ret = []
for dd in l:
ret.append(dd.data_set)
self.data_set = np.vstack(ret)
def setlen(self, newlen):
self.data_set = shuffle(self.data_set)
self.data_set = self.data_set[:newlen]
def __len__(self):
return self.data_set.shape[0]
def __getitem__(self, idx):
return idx, self.data_set[idx]
# @timer
def test_heu(eval_dataset, mode="OPA", ignore=False):
if ignore:
return 0
with ProcessPoolExecutor(max_workers=10) as executor:
inputs = []
res_opa = np.zeros(len(eval_dataset), dtype=int).tolist()
for i, sample in eval_dataset:
inputs.append((sample, args.num_procs, use_deadline))
for i, ret in tqdm(enumerate(executor.map(wrap, inputs))):
res_opa[i] = ret
opares = np.sum(res_opa)
return opares
# @timer
def test_dm(eval_dataset):
# with ProcessPoolExecutor(max_workers=1) as executor:
inputs = []
res_dm = np.zeros(len(eval_dataset), dtype=int).tolist()
for i, sample in eval_dataset:
inputs.append((sample, args.num_procs, use_deadline))
# print(sample)
# print(inputs[0])
dm_wrap(inputs[0])
# print("run")
# for i, ret in tqdm(enumerate(executor.map(dm_wrap, inputs))):
# res_dm[i] = ret
# operas = np.sum(res_dm)
# return operas
# @timer
def test_gumbel(model, eval_loader, gumbel_number):
ret = []
val = 0
for i, batch in eval_loader:
with torch.no_grad():
linear_score = model(batch, normalize=True)
gumbel_score = sample_gumbel(linear_score, sampling_number=gumbel_number)
gumbel_rank = get_rank(gumbel_score) # [batch_size x num_gumbel_sample x num_tasks]
for j, order in enumerate(gumbel_rank): # j : ~batch size
for k, orderd in enumerate(gumbel_rank[j]): # k : ~ num_gumbel_sample
x = test_module(batch[j].numpy(), args.num_procs, orderd, False, False)
if x == 1:
val += 1
break
else:
continue
return val
# @timer
def test_global_reinforce(model, eval_loader):
ret = []
for i, batch in eval_loader:
with torch.no_grad():
_, _, actions = model(batch, argmax=True)
for j, chosen in enumerate(actions.cpu().numpy()):
order = np.zeros_like(chosen)
for p in range(args.num_tasks):
order[chosen[p]] = args.num_tasks - p - 1
ret.append(test_module(batch[j].numpy(), args.num_procs, order, use_deadline, False))
return sum(ret)
# @timer
def test_reinforce(model, eval_loader, ignore=False):
if ignore:
return 0
ret = []
for i, batch in eval_loader:
with torch.no_grad():
_, _, actions = model(batch, argmax=True)
for j, chosen in enumerate(actions.cpu().numpy()):
order = np.zeros_like(chosen)
for p in range(args.num_tasks):
order[chosen[p]] = args.num_tasks - p - 1
ret.append(test_module(batch[j].numpy(), args.num_procs, order, use_deadline, False))
return sum(ret)
def test_reinforce_sampling(model, eval_loader, ignore=False):
if ignore:
return 0
ret = 0
for i, batch in eval_loader:
with torch.no_grad():
actions = model(batch, argmax=False, multisampling=True) # [batch_size x Num_sampling x seq_len]
for idx in range(actions.size(0)): # idx ~ batch_size
for j, chosen in enumerate(actions[idx]): # j ~ Num_sampling
order = np.zeros_like(chosen)
for p in range(args.num_tasks):
order[chosen[p]] = args.num_tasks - p - 1
success = test_module(batch[idx].numpy(), args.num_procs, order, use_deadline, False)
if success:
ret += 1
break
return ret
# @timer
def test_distillation(model, eval_loader):
ret = []
for i, batch in eval_loader:
with torch.no_grad():
score = model(batch).detach().numpy()
argsort = np.argsort(-score)
for j, chosen in enumerate(argsort):
order = np.zeros_like(chosen).squeeze()
for p in range(args.num_tasks):
order[chosen[p]] = args.num_tasks - p - 1
ret.append(test_module(batch[j].numpy(), args.num_procs, order, use_deadline, False))
return sum(ret)
util_range = get_util_range(args.num_procs)
tesets = []
on = False
for util in util_range:
on = False
if util == args.range_l:
on = True
if on:
with open("../Pandadata/te/%d-%d/%s" % (args.num_procs, args.num_tasks, util), 'rb') as f:
ts = pickle.load(f)
tesets.append(ts)
if util == args.range_r:
break
def main(netcompare=False):
test_dataset = Datasets(tesets)
test_dataset.setlen(args.num_test_dataset)
test_loader = DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=True,
pin_memory=False
)
if netcompare:
"""SRD"""
srd_results = []
print("LINEAR")
dist_file_name = "LIN-p%d-t%d-d%d-l[%s, %s]" % (
args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r)
Distillation = LinearSolver(args.num_procs, args.num_tasks,
args.use_deadline, False)
with open("../Pandamodels/linearmodels/" + dist_file_name + ".torchmodel", "rb") as f:
tmp = torch.load(f, map_location=torch.device("cpu"))
Distillation.load_state_dict(tmp.state_dict())
Distillation.cpu()
Distillation.eval()
srd_value = test_distillation(Distillation, test_loader)
srd_results.append(srd_value)
print()
"""Ranknet model"""
rknet_results = []
print("RANKNET")
rknet_file_name = "RKNET-p%d-t%d-d%d-l[%s, %s]" % (
args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r
)
RanknetModel = LinearSolver(args.num_procs, args.num_tasks,
args.use_deadline, False)
with open("../Pandamodels/rknetmodels/" + rknet_file_name + ".torchmodel", "rb") as f:
tmp = torch.load(f, map_location=torch.device("cpu"))
RanknetModel.load_state_dict(tmp.state_dict())
RanknetModel.eval()
rknet_value = test_distillation(RanknetModel, test_loader)
rknet_results.append(rknet_value)
print()
"""Listnet model"""
listnet_results = []
print("LISTNET")
listnet_file_name = "LISTNET-p%d-t%d-d%d-l[%s, %s]" % (
args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r
)
listnet_model = LinearSolver(args.num_procs, args.num_tasks,
args.use_deadline, False)
with open("../Pandamodels/listnetmodels/" + listnet_file_name + ".torchmodel", "rb") as f:
tmp = torch.load(f, map_location=torch.device("cpu"))
listnet_model.load_state_dict(tmp.state_dict())
listnet_model.eval()
listnet_value = test_distillation(listnet_model, test_loader)
listnet_results.append(listnet_value)
print()
return srd_value, rknet_value, listnet_value
if not netcompare:
opa_time, rl_time, rl_sampling_time, srd_time, g3_time, g5_time, g7_time, g10_time = 0, 0, 0, 0, 0, 0, 0, 0
# """Heuristic Test"""
heu_results = []
print("OPA")
start = time.time()
heu_val = test_heu(test_dataset, "OPA", ignore=True)
end = time.time()
opa_time = end - start
heu_results.append(heu_val)
print()
"""Reinforcement Learning Model Test"""
print("Local REINFORCE")
rl_results = []
rl_file_name = "localRL-p%d-t%d-d%d-l[%s, %s]" % (
args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r)
RLModel = Solver(args.num_procs, args.embedding_size, args.hidden_size,
args.num_tasks, use_deadline=False, use_cuda=False).cpu()
with open("../Pandamodels/localrlmodels/" + rl_file_name + ".torchmodel", "rb") as f:
tmp = torch.load(f)
RLModel.load_state_dict(tmp.state_dict())
RLModel.eval()
start = time.time()
rl_value = test_reinforce(RLModel, test_loader, ignore=True)
end = time.time()
rl_time = end-start
rl_results.append(rl_value)
print()
"""RL - Sampling Test"""
print("RL-Sampling")
rl_sampling_results = []
start = time.time()
rl_sampling_value = test_reinforce_sampling(RLModel, test_loader, ignore=True)
print(rl_sampling_value)
end = time.time()
rl_sampling_time = end-start
rl_sampling_results.append(rl_sampling_value)
print()
# """SRD Model Test"""
srd_results = []
print("LINEAR")
dist_file_name = "LIN-p%d-t%d-d%d-l[%s, %s]" % (
args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r)
Distillation = LinearSolver(args.num_procs, args.num_tasks,
args.use_deadline, False)
with open("../Pandamodels/linearmodels/" + dist_file_name + ".torchmodel", "rb") as f:
tmp = torch.load(f)
Distillation.load_state_dict(tmp.state_dict())
Distillation.cpu()
Distillation.eval()
start = time.time()
srd_value = test_distillation(Distillation, test_loader)
end=time.time()
srd_time = end-start
srd_results.append(srd_value)
print()
"""Ranknet Model Test"""
# print("RANKNET")
# rknet_file_name = "RKNET-p%d-t%d-d%d-l[%s, %s]" % (
# args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r
# )
# RKModel = LinearSolver(args.num_procs, args.num_tasks,
# args.use_deadline, False)
# with open("../Pandamodels/rknetmodels/" + rknet_file_name + ".torchmodel", "rb") as f:
# tmp = torch.load(f)
# RKModel.load_state_dict(tmp.state_dict())
# RKModel.eval()
# test_distillation(RKModel, test_loader)
# print()
"""GumbelSearch Model Test"""
print("GUMBELSEARCH")
start = time.time()
gum_3_value = test_gumbel(Distillation, test_loader, 3)
end = time.time()
g3_time = end-start
start = time.time()
gum5_value = test_gumbel(Distillation, test_loader, 5)
end = time.time()
g5_time = end-start
start = time.time()
gum7_value = test_gumbel(Distillation, test_loader, 7)
end = time.time()
g7_time = end-start
gum_results = []
start = time.time()
gum_10_value = test_gumbel(Distillation, test_loader, 10)
end = time.time()
g10_time = end-start
gum_results.append(gum_10_value)
print()
start = time.time()
gum_15_value = test_gumbel(Distillation, test_loader, 15)
end=time.time()
g15_time = end-start
start = time.time()
gum_20_value = test_gumbel(Distillation, test_loader, 20)
end = time.time()
g20_time = end-start
start = time.time()
gum_30_value = test_gumbel(Distillation, test_loader, 30)
end = time.time()
g30_time = end-start
# return opa_time, rl_time, rl_sampling_time, srd_time, g3_time, g5_time, g7_time, g10_time
values = (heu_val, rl_value, rl_sampling_value, srd_value, gum_3_value, gum5_value, gum7_value, gum_10_value, gum_15_value, gum_20_value, gum_30_value)
times = (opa_time, rl_time, rl_sampling_time, srd_time, g3_time, g5_time, g7_time, g10_time, g15_time, g20_time, g30_time)
return values, times
def pca():
test_dataset = Datasets(tesets)
test_dataset.setlen(args.num_test_dataset)
test_loader = DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=True,
pin_memory=False
)
rl_file_name = "localRL-p%d-t%d-d%d-l[%s, %s]" % (
args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r)
RLModel = Solver(args.num_procs, args.embedding_size, args.hidden_size,
args.num_tasks, use_deadline=False, use_cuda=False, ret_embedded_vector=True).cpu()
with open("../Pandamodels/localrlmodels/" + rl_file_name + ".torchmodel", "rb") as f:
tmp = torch.load(f)
RLModel.load_state_dict(tmp.state_dict())
RLModel.eval()
for i, batch in test_loader:
with torch.no_grad():
_, actions, embedded_vectors = RLModel(batch)
break
embedded_vectors = embedded_vectors.numpy()
actions = actions.detach().cpu().numpy()
with open("pca/panda_emb", "wb") as f:
pickle.dump(embedded_vectors, f)
with open("pca/panda_actions", "wb") as f:
pickle.dump(actions, f)
if __name__ == "__main__":
if NET_COMPARE:
srd_results, ranknet_results, listnet_results = [], [], []
for i in range(NUM_TEST):
srd_value, ranknet_value, listnet_value = main(NET_COMPARE)
srd_results.append(srd_value)
ranknet_results.append(ranknet_value)
listnet_results.append(listnet_value)
srd_results = np.array(srd_results)
srd_avg = np.mean(srd_results)
srd_std = np.sqrt(np.var(srd_results)) / np.sqrt(NUM_TEST)
ranknet_results = np.array(ranknet_results)
ranknet_avg = np.mean(ranknet_results)
ranknet_std = np.sqrt(np.var(ranknet_results)) / np.sqrt(NUM_TEST)
listnet_results = np.array(listnet_results)
listnet_avg = np.mean(listnet_results)
listnet_std = np.sqrt(np.var(listnet_results)) / np.sqrt(NUM_TEST)
print("SRD : {}, STD : {:.2f}".format(srd_avg, srd_std))
print("RANKNET : {}, STD : {:.2f}".format(ranknet_avg, ranknet_std))
print("LISTNET : {}, STD : {:.2f}".format(listnet_avg, listnet_std))
with open("netcompare/" + SAVING_FILE_NAME, "a") as f:
print("SRD : {}, STD : {:.2f}".format(srd_avg, srd_std), file=f)
print("RANKNET : {}, STD : {:.2f}".format(ranknet_avg, ranknet_std), file=f)
print("LISTNET : {}, STD : {:.2f}".format(listnet_avg, listnet_std), file=f)
else:
OPA_v, RL_v, RLS_v, SRD_v, G3_v, G5_v, G7_v, G10_v, G15_v, G20_v, G30_v = [], [], [], [], [], [], [], [], [], [], []
OPA_t, RL_t, RLS_t, SRD_t, G3_t, G5_t, G7_t, G10_t, G15_t, G20_t, G30_t = [], [], [], [], [], [], [], [], [], [], []
for i in range(NUM_TEST):
values, times = main()
opav, rlv, rlsv, srdv, g3v, g5v, g7v, g10v, g15v, g20v, g30v = values
opat, rlt, rlst, srdt, g3t, g5t, g7t, g10t, g15t, g20t, g30t = times
OPA_v.append(opav)
OPA_t.append(opat)
RL_v.append(rlv)
RL_t.append(rlt)
RLS_v.append(rlsv)
RLS_t.append(rlst)
SRD_v.append(srdv)
SRD_t.append(srdt)
G3_v.append(g3v)
G3_t.append(g3t)
G5_v.append(g5v)
G5_t.append(g5t)
G7_v.append(g7v)
G7_t.append(g7t)
G10_v.append(g10v)
G10_t.append(g10t)
G15_v.append(g15v)
G15_t.append(g15t)
G20_v.append(g20v)
G20_t.append(g20t)
G30_v.append(g30v)
G30_t.append(g30t)
OPA_v_std = np.sqrt(np.var(np.array(OPA_v)))
OPA_t_std = np.sqrt(np.var(np.array(OPA_t)))
RL_v_std = np.sqrt(np.var(np.array(RL_v)))
RL_t_std = np.sqrt(np.var(np.array(RL_t)))
RLS_v_std = np.sqrt(np.var(np.array(RLS_v)))
RLS_t_std = np.sqrt(np.var(np.array(RLS_t)))
SRD_v_std = np.sqrt(np.var(np.array(SRD_v)))
SRD_t_std = np.sqrt(np.var(np.array(SRD_t)))
G3_v_std = np.sqrt(np.var(np.array(G3_v)))
G3_t_std = np.sqrt(np.var(np.array(G3_t)))
G5_v_std = np.sqrt(np.var(np.array(G5_v)))
G5_t_std = np.sqrt(np.var(np.array(G5_t)))
G7_v_std = np.sqrt(np.var(np.array(G7_v)))
G7_t_std = np.sqrt(np.var(np.array(G7_t)))
G10_v_std = np.sqrt(np.var(np.array(G10_v)))
G10_t_std = np.sqrt(np.var(np.array(G10_t)))
G15_v_std = np.sqrt(np.var(np.array(G15_v)))
G15_t_std = np.sqrt(np.var(np.array(G15_t)))
G20_v_std = np.sqrt(np.var(np.array(G20_v)))
G20_t_std = np.sqrt(np.var(np.array(G20_t)))
G30_v_std = np.sqrt(np.var(np.array(G30_v)))
G30_t_std = np.sqrt(np.var(
|
np.array(G30_t)
|
numpy.array
|
# --------------------------------------------------------
# P2ORM: Formulation, Inference & Application
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import cv2
import sys
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import itertools
import time
from PIL import Image
from scipy import pi, ndimage
from .edge_nms import *
sys.path.append('../..')
from math import atan, tan
PI = 3.1416
# ===================================== functions for dataset generation ============================================= #
def gen_occ_order(K, depth, label_map, invalid_mask, ROI_sz, thr_depth, normal=None, lbl_type='mask',
depth_avg=False, dataset='interiornet', thr_pix=False, fast_abs_diff=False):
"""
convert depth to pixel-wise occ edge and pairwise occ order with givendepth , then corrected
by normal map and instance mask edge(optional)
:param K: current image camera intrinsic
:param depth: Euclidean distance between camera center and relevant pixel's 3D point
:param label_map: instance mask or edge mask which indicates image edge
:param invalid_mask: invalid raw data mask; [valid:0, invalid:1]
:param ROI_sz: size of region to determine occlusion order, default=3
:param lbl_type: ['edge'|'mask']: labeled occlusion edge or semantic mask
:param thr_depth: neighbor pixels depth difference rate (depth_diff / pixel_dist) thresh to detect occlusion
:param depth_avg: whether use average depth over one pixel's neighbor as pixel depth
:param dataset: dataset name, for dataset-specific pre-processing
:param thr_pix: whether use pixel-wise discontinuity threshold
:return occ_label: [H, W, (1(edge) + 8(order))]
"""
# pre-process depth, normal, label
H, W = depth.shape
padding = 2 # padding for depth
depth_pad_2 = cv2.copyMakeBorder(depth, padding, padding, padding, padding, cv2.BORDER_REPLICATE) # H+2,W+2
invalid_mask_pad = cv2.copyMakeBorder(invalid_mask, padding, padding, padding, padding, cv2.BORDER_REPLICATE) # H+2,W+2
if normal is not None:
if normal.dtype == np.uint16: # [0,65535] => [-1, 1]
normal = normal.astype(np.float32) / 65535.0 * 2 - 1.0 # H,W,3
normal[:, :, 1] = -normal[:, :, 1] # y-down => y-up, interiornet case
normal[:, :, 2] = -normal[:, :, 2] # z-in => z-out
normal_pad = cv2.copyMakeBorder(normal, 1, 1, 1, 1, cv2.BORDER_REPLICATE)
if lbl_type == 'edge': # occ edge map
edge_mask = label_map
edge_mask_pad = cv2.copyMakeBorder(edge_mask, 1, 1, 1, 1, cv2.BORDER_REPLICATE)
# init for ray-casting method
occ_edge = np.zeros(depth.shape[:2]) # 2-pix width occ edge (fg+bg)
occ_label = np.zeros((depth.shape[0], depth.shape[1], 9)) # occ edge + occ order (-1,0,1) w.r.t. 8 neighbors
occ_label_tmp = np.zeros((depth.shape[0], depth.shape[1], 9))
diff_abs_depth = np.zeros((depth.shape[0], depth.shape[1], 8)) # depth diff w.r.t 8 neighbors
diff_adj_depth = np.zeros((depth.shape[0], depth.shape[1], 8)) # adjusted depth diff (mid-pix ray) w.r.t 8 neighbors
shifts_pix = [[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 1],
[1, -1], [1, 0], [1, 1]]
shift_midpix = np.array([[-0.5, -0.5], [-0.5, 0.0], [-0.5, 0.5],
[0.0, -0.5], [0.0, 0.5],
[0.5, -0.5], [0.5, 0.0], [0.5, 0.5]]) # shift from center pix to mid pix
origin = np.zeros((8, 3)) # I0 of rays, namely camera optical center
depth_err_map = np.zeros(depth.shape[:2]) # estimated GT depth error (only for real dataset)
# thr_depth_const = thr_depth
# firstly check absolute depth diff (avoid ROI op, check)
depth_pad_1 = depth_pad_2[1:-1, 1:-1] # H+1,W+1
for idx, shift_pix in enumerate(shifts_pix):
shift_h, shift_w = shift_pix
pix_dist = 1.414 if idx in [0, 2, 5, 7] else 1. # distance between neighbor pixels
depth_diff = (depth_pad_1[1 + shift_h:H + 1 + shift_h, 1 + shift_w:W + 1 + shift_w] - depth) / pix_dist # H,W
diff_abs_depth[:, :, idx] = depth_diff
occ_label_tmp[depth_diff > thr_depth, idx + 1] = 1. # fg
occ_label_tmp[depth_diff < -thr_depth, idx + 1] = -1. # bg
occ_exist_bool = np.any((occ_label_tmp != 0), axis=2) # H,W
if fast_abs_diff: # fast mode using only absolute depth difference as ablation study
occ_edge[occ_exist_bool] = 1.0
occ_label = occ_label_tmp
occ_label[occ_exist_bool, 0] = 1.0
return occ_edge, occ_label, diff_abs_depth, diff_adj_depth, depth_err_map
# gen occ order for each pixel over the image
for y_idx in range(0, depth.shape[0]):
for x_idx in range(0, depth.shape[1]):
if invalid_mask[y_idx, x_idx] == 1: continue # skip pixel
if occ_exist_bool[y_idx, x_idx] != 1: continue
ROI_depth_L = np.copy(depth_pad_2[y_idx:(y_idx + ROI_sz + padding), x_idx:(x_idx + ROI_sz + padding)])
ROI_invalid_L = np.copy(invalid_mask_pad[y_idx:(y_idx + ROI_sz + padding), x_idx:(x_idx + ROI_sz + padding)])
# ============================= special pre-processing for dataset ======================================= #
if dataset in ['interiornet', 'scenenet']:
if ROI_depth_L.min() == 0.0: # inf depth
ROI_depth_L[ROI_depth_L != 0.] = ROI_depth_L.max() # rm depth edge problem
ROI_depth_L[ROI_depth_L == 0.] = 65535.0 # max depth for inf depth
elif dataset == 'ibims':
if ROI_depth_L[2, 2] == 0:
continue # invalid center pixel, skip
else:
if thr_pix: # cal curr pixel depth discontinuity thresh
eta_d_ibims = 0.002 # depth angular err for ibims-1 dataset
err_d_ibims = 1. # depth translational err for ibims-1 dataset
center_2D = np.array([y_idx + 0.5, x_idx + 0.5], dtype=np.float32) # 2,
neighbors_2D = center_2D + 2. * shift_midpix # 8,2
ROI_2D = np.insert(neighbors_2D, int((ROI_sz ** 2 - 1) / 2),
center_2D, axis=0).astype(np.float32) # 9,2
center_ray = np.array([center_2D[1] - K[0, 2], K[1, 2] - center_2D[0], -K[0, 0]]) # 3,
# center_ray_unit = center_ray / np.linalg.norm(center_ray) # 3,
ROI_rays = np.stack((ROI_2D[:, 1] - K[0, 2],
K[1, 2] - ROI_2D[:, 0],
-K[0, 0].repeat(9)), axis=1) # 9,3
ROI_rays_unit = ROI_rays / np.linalg.norm(ROI_rays, axis=1).reshape(-1, 1) # 9,3
ROI_normal = np.copy(normal_pad[y_idx:(y_idx + ROI_sz), x_idx:(x_idx + ROI_sz), :]).reshape(-1, 3) # 3,3,3 => 9,3
ROI_normal_unit = ROI_normal / np.linalg.norm(ROI_normal, axis=1).reshape(-1, 1) # 9,3
center_normal = np.copy(normal_pad[y_idx+1, x_idx+1, :]) # 3,
center_normal_unit = center_normal / np.linalg.norm(center_normal)
# gazing angle between surface and line of sight
# gamma = np.arccos(np.sum(center_ray_unit * center_normal_unit)) - PI / 2
gamma_roi = np.arccos(np.sum(ROI_rays_unit * ROI_normal_unit, axis=1)) - PI / 2 # 9,
# if np.any(gamma_roi <= eta_d_ibims): continue # condition for depth err caused by angular err
tan_gamma = np.minimum(np.tan(gamma_roi), 1.) # consider possible normal estimation err
tan_gamma = np.maximum(tan_gamma, 0.0001) # required: tan(gamma) >> tan(err_d_ibims)
depth_err = eta_d_ibims / tan_gamma * ROI_depth_L[2, 2] + err_d_ibims
thr_depth = 25. + depth_err[4] + np.delete(depth_err, 4) # 8,
depth_err_map[y_idx, x_idx] = depth_err[4]
# guess zero-value neighbor depth by 3x3 average depth
if np.any(ROI_depth_L[1:-1, 1:-1] == 0):
for y in range(0, ROI_sz):
for x in range(0, ROI_sz):
if ROI_depth_L[y+1, x+1] == 0.:
ROI_depth_valid = ROI_depth_L[y:y + ROI_sz, x:x + ROI_sz]
ROI_depth_valid = ROI_depth_valid[ROI_depth_valid != 0]
ROI_depth_L[y+1, x+1] = ROI_depth_valid.mean()
# ======================================================================================================== #
ROI_depth = np.zeros((ROI_sz, ROI_sz))
if depth_avg: # avg each pixel depth in ROI
for y in range(0, ROI_sz):
for x in range(0, ROI_sz):
ROI_depth[y, x] = np.mean(ROI_depth_L[y:y + ROI_sz, x:x + ROI_sz])
else:
ROI_depth = ROI_depth_L[1:-1, 1:-1] # 3x3
ROI_invalid = ROI_invalid_L[1:-1, 1:-1] # 3x3
# pixel idx in flat vector and its relevant location in connectivity-8 neighborhood
# 0 1 2
# 3 4
# 5 6 7
center_depth = ROI_depth[int((ROI_sz - 1) / 2), int((ROI_sz - 1) / 2)]
ROI_depth_flat = ROI_depth.flatten()
neighbors_depth_flat = np.delete(ROI_depth_flat, (ROI_sz * ROI_sz - 1) / 2) # 8,
ROI_invalid_flat = ROI_invalid.flatten()
neighbors_invalid_flat = np.delete(ROI_invalid_flat, (ROI_sz * ROI_sz - 1) / 2) # 8,
ROI_depth_diff = ROI_depth - center_depth # cal abs depth diff
ROI_depth_diff_flat = ROI_depth_diff.flatten() # row-wise flatten
ROI_depth_diff_flat = np.delete(ROI_depth_diff_flat, (ROI_sz * ROI_sz - 1) / 2) # 8,
ROI_depth_diff_flat[[0, 2, 5, 7]] = ROI_depth_diff_flat[[0, 2, 5, 7]] / 1.414 # for diagonal neighbors
gen_occ_lbl = False
if lbl_type == 'edge' and edge_mask[y_idx, x_idx] == 1:
gen_occ_lbl = True
elif lbl_type == 'mask' and np.any(np.abs(ROI_depth_diff).max() > thr_depth):
gen_occ_lbl = True
if gen_occ_lbl: # gen occ edge/order
# ======================= cal relevant discontinuities if normal is available ======================== #
if normal is not None:
ROI_normal = np.copy(normal_pad[y_idx:(y_idx + ROI_sz), x_idx:(x_idx + ROI_sz), :]).reshape(-1, 3) # 3,3,3 => 9,3
center_normal = ROI_normal[int((ROI_sz ** 2 - 1) / 2), :] # 3,
neighbors_normal = np.delete(ROI_normal, int((ROI_sz ** 2 - 1) / 2), axis=0) # 8,3
# gen relevant pixels coordinates on image plane
center_2D =
|
np.array([y_idx + 0.5, x_idx + 0.5], dtype=np.float32)
|
numpy.array
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the weighted_blend.PercentileBlendingAggregator class."""
import unittest
import numpy as np
from iris.tests import IrisTest
from improver.blending.weighted_blend import PercentileBlendingAggregator
# The PERCENTILE_DATA below were generated using a call to np.random.rand
# The numbers were then scaled between 12 and 18, envisaged as Spring or
# Autumn temperatures in Celsius.
PERCENTILE_DATA = np.array(
[
17.458706,
13.732982,
15.138694,
13.966815,
16.187801,
15.125104,
12.560181,
14.662473,
13.505879,
14.229357,
16.645939,
16.348572,
17.298779,
17.408989,
14.526242,
17.002329,
17.33035,
16.923946,
16.454231,
16.48794,
15.292369,
14.879623,
16.818222,
16.288244,
14.501231,
15.792644,
14.74469,
13.747394,
16.2813,
15.025502,
16.620153,
15.497392,
14.028551,
16.490143,
12.824328,
16.97861,
17.247797,
15.923066,
16.534174,
14.043188,
15.108195,
15.579895,
16.051695,
16.475237,
13.344669,
15.433237,
13.313879,
15.678431,
17.403114,
13.770423,
17.443968,
17.0385,
15.021733,
16.863739,
15.647017,
16.435345,
12.968588,
13.497512,
14.2414055,
14.173083,
14.522574,
14.454596,
13.354028,
13.807901,
13.009074,
12.984587,
15.867088,
12.503394,
14.164387,
16.018044,
17.481287,
12.66411,
],
dtype=np.float32,
)
WEIGHTS = np.array(
[[[0.8, 0.8], [0.8, 0.8]], [[0.5, 0.5], [0.5, 0.5]], [[0.2, 0.2], [0.2, 0.2]]],
dtype=np.float32,
)
BLENDED_PERCENTILE_DATA = np.array(
[
[[12.968588, 12.984587], [12.560181, 12.503394]],
[[12.990671, 12.984587], [14.356173, 12.503394]],
[[14.164387, 13.835985], [14.607758, 12.66411]],
[[14.855347, 14.404217], [14.736798, 13.913844]],
[[16.250134, 15.728171], [16.480879, 15.219085]],
[[17.458706, 17.408989], [17.481287, 17.0385]],
],
dtype=np.float32,
)
BLENDED_PERCENTILE_DATA_EQUAL_WEIGHTS = np.array(
[
[[12.968588, 12.984587], [12.560181, 12.503394]],
[[12.968588, 12.984587], [14.439088, 12.503394]],
[[13.425274, 13.764813], [15.138694, 12.535538]],
[[14.096469, 14.454596], [16.454231, 12.631967]],
[[16.187801, 16.018042], [17.027607, 15.497392]],
[[17.458706, 17.408989], [17.481287, 17.0385]],
],
dtype=np.float32,
)
BLENDED_PERCENTILE_DATA_SPATIAL_WEIGHTS = np.array(
[
[[12.968588, 12.984587], [12.560181, 12.503394]],
[[13.138149, 12.984587], [14.172956, 12.503394]],
[[13.452143, 13.801561], [16.620153, 12.503394]],
[[14.07383, 14.909795], [16.723688, 13.807901]],
[[14.3716755, 15.994956], [17.06419, 15.497392]],
[[17.458706, 17.408989], [17.481287, 17.0385]],
],
dtype=np.float32,
)
PERCENTILE_VALUES = np.array(
[
[
12.70237152,
14.83664335,
16.23242317,
17.42014139,
18.42036664,
19.10276753,
19.61048008,
20.27459352,
20.886425,
21.41928051,
22.60297787,
],
[
17.4934137,
20.56739689,
20.96798405,
21.4865958,
21.53586395,
21.55643557,
22.31650746,
23.26993755,
23.62817599,
23.6783294,
24.64542338,
],
[
16.24727652,
17.57784376,
17.9637658,
18.52589225,
18.99357526,
20.50915582,
21.82791334,
21.90645982,
21.95860878,
23.52203933,
23.71409191,
],
]
)
def generate_matching_weights_array(weights, shape):
"""Create an array of weights that matches the shape of the cube.
Args:
weights (numpy.ndarray):
An array of weights that needs to be broadcast to match the
specified shape.
shape (tuple):
A tuple that specifies the shape to which weights should be
broadcast. If broadcasting to this shape is not possible numpy will
raise a broadcast error.
"""
weights_array = np.broadcast_to(weights, shape)
return weights_array.astype(np.float32)
class Test__repr__(IrisTest):
"""Test the repr method."""
def test_basic(self):
"""Test that the __repr__ returns the expected string."""
result = str(PercentileBlendingAggregator())
msg = "<PercentileBlendingAggregator>"
self.assertEqual(result, msg)
class Test_aggregate(IrisTest):
"""Test the aggregate method"""
def test_blend_percentile_aggregate(self):
"""Test blend_percentile_aggregate function works"""
weights = np.array([0.6, 0.3, 0.1])
weights = generate_matching_weights_array(weights, (4, 6, 3))
weights = np.moveaxis(weights, (0, 1, 2), (2, 1, 0))
percentiles = np.array([0, 20, 40, 60, 80, 100]).astype(np.float32)
result = PercentileBlendingAggregator.aggregate(
|
np.reshape(PERCENTILE_DATA, (6, 3, 2, 2))
|
numpy.reshape
|
import numpy as np
from .base_agent import BaseAgent
from cs285.policies.MLP_policy import MLPPolicyPG
from cs285.infrastructure.replay_buffer import ReplayBuffer
from cs285.infrastructure.utils import normalize
class PGAgent(BaseAgent):
def __init__(self, env, agent_params):
super(PGAgent, self).__init__()
# init vars
self.env = env
self.agent_params = agent_params
self.gamma = self.agent_params['gamma']
self.standardize_advantages = self.agent_params['standardize_advantages']
self.nn_baseline = self.agent_params['nn_baseline']
self.reward_to_go = self.agent_params['reward_to_go']
self.gae_lambda = self.agent_params['gae_lambda']
# actor/policy
self.actor = MLPPolicyPG(
self.agent_params['ac_dim'],
self.agent_params['ob_dim'],
self.agent_params['n_layers'],
self.agent_params['size'],
discrete=self.agent_params['discrete'],
learning_rate=self.agent_params['learning_rate'],
nn_baseline=self.agent_params['nn_baseline']
)
# replay buffer
self.replay_buffer = ReplayBuffer(1000000)
def train(self, observations, actions, rewards_list, next_observations, terminals):
"""
Training a PG agent refers to updating its actor using the given observations/actions
and the calculated qvals/advantages that come from the seen rewards.
"""
"""
TODO #1 ✅ : update the PG actor/policy using the given batch of data
---------------------------------------------------------------------
d[J_θ] = E[ Σ d[log(π(a|s))] * (Σ r(𝛕) -b)]
"""
""" step ⓵ : get Q(s_t, a_t) using rewards (r_0, ..., r_t, ..., r_{T-1}) """
q_values = self.calculate_q_vals(rewards_list)
""" step ⓶ : get A(s_t, a_t) = Q(s_t,a_t) - V(s_t) """
advantages = self.estimate_advantage(observations,rewards_list,q_values,terminals)
""" step ⓷ : update PG """
train_log = self.actor.update(observations, actions, advantages,q_values)
return train_log
def calculate_q_vals(self, rewards_list):
"""
Monte Carlo estimation of the Q function.
"""
"""
TODO #2 ✅ : return the estimated qvals based on the given rewards, using
full trajectory-based estimator or RTG estimator
---------------------------------------------------------------------
Return : Q^{π}(s_t, a_t)
"""
# Case 1: trajectory-based PG
# Estimate Q^{pi}(s_t, a_t) by the total discounted reward summed over entire trajectory
if not self.reward_to_go:
q_values = np.concatenate([self._discounted_return(r) for r in rewards_list])
# Case 2: reward-to-go PG
# Estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting from t
else:
q_values = np.concatenate([self._discounted_cumsum(r) for r in rewards_list])
return q_values
def estimate_advantage(self, obs, rews_list, q_values, terminals):
"""
Computes advantages by (possibly) using GAE, or subtracting a baseline from the estimated Q values
"""
# Estimate the advantage when nn_baseline is True,
# by querying the neural network that you're using to learn the value function
if self.nn_baseline:
values_unnormalized = self.actor.run_baseline_prediction(obs)
## ensure that the value predictions and q_values have the same dimensionality
## to prevent silent broadcasting errors
assert values_unnormalized.ndim == q_values.ndim
"""
TODO #3 ✅ : values were trained with standardized q_values
----------------------------------------------------------------------
The predictions have the same mean and standard deviation as
the current batch of q_values
"""
values = values_unnormalized * np.std(q_values) + np.mean(q_values)
if self.gae_lambda is not None:
## append a dummy T+1 value for simpler recursive calculation
values = np.append(values, [0])
## combine rews_list into a single array
rews = np.concatenate(rews_list)
## create empty numpy array to populate with GAE advantage
## estimates, with dummy T+1 value for simpler recursive calculation
batch_size = obs.shape[0]
advantages = np.zeros(batch_size + 1)
"""
TODO #4 ✅ : recursively compute advantage
-----------------------------------------
estimates starting from timestep T.
"""
for i in reversed(range(batch_size)):
if terminals[i] != 1:
advantages[i] = rews[i]+self.gamma*values[i+1]-values[i]
advantages[i] += self.gamma*self.gae_lambda*advantages[i+1]
else:
advantages[i] = rews[i]-values[i]
advantages = advantages[:-1]
else:
""" TODO #5 ✅ : compute advantage estimates using q_values, and values as baselines """
advantages = q_values - values
# Else, just set the advantage to [Q]
else:
advantages = q_values.copy()
# Normalize the resulting advantages
if self.standardize_advantages:
""" TODO #6 ✅ : standardize the advantages to have a mean of zero and a standard deviation of one """
advantages = normalize(advantages, np.mean(advantages), np.std(advantages))
return advantages
#####################################################
#####################################################
def add_to_replay_buffer(self, paths):
self.replay_buffer.add_rollouts(paths)
def sample(self, batch_size):
return self.replay_buffer.sample_recent_data(batch_size, concat_rew=False)
#####################################################
################## HELPER FUNCTIONS #################
#####################################################
def _discounted_return(self, rewards):
"""
Helper function
Input: list of rewards {r_0, r_1, ..., r_t', ... r_T} from a single rollout of length T
Output: list where each index t contains sum_{t'=0}^T gamma^t' r_{t'}
"""
"""
TODO #7 ✅: create list_of_discounted_returns
---------------------------------------------
"""
discounted_returns = np.array([(self.gamma**t)*rewards[t] for t in range(len(rewards))])
return [np.sum(discounted_returns)]*len(rewards)
def _discounted_cumsum(self, rewards):
"""
Helper function which
-takes a list of rewards {r_0, r_1, ..., r_t', ... r_T},
-and returns a list where the entry in each index t' is sum_{t'=t}^T gamma^(t'-t) * r_{t'}
"""
"""
TODO #8 ✅ : create `list_of_discounted_returns`
"""
T = len(rewards)
gammas = self.gamma ** np.arange(0, T)
list_of_discounted_cumsums = np.zeros(T)
for t in range(T):
list_of_discounted_cumsums[t] =
|
np.sum(gammas[:T-t] * rewards[t:])
|
numpy.sum
|
from __future__ import print_function
import errno
import gzip
import math
import os
import numpy as np
import skimage.color
from scipy import ndimage
from six.moves import zip_longest
from six.moves import cPickle as pickle
def split(image):
"""Split the image data into the top and bottom half."""
split_height = image.shape[0] // 2
return image[:split_height], image[split_height:]
BYTE_MAX = 255
CHANNEL_MAX = np.float32(8)
MAX_RED_VALUE = BYTE_MAX - CHANNEL_MAX
CHANNELS_MAX = CHANNEL_MAX * CHANNEL_MAX
MAX_DEPTH = MAX_RED_VALUE * CHANNELS_MAX
COLOR_CHANNELS = 3
def byteToUnit(value):
return ((2.0 * value) / BYTE_MAX) - 1
def decode_depth(image):
"""~14 bits of depth in millimeters is encoded with 8 bits in red and 3 bits in each of green and blue."""
orientation = [1, 0, 0, 0] # default orientation if not present in image.
attitude = {
"quaternion": [1, 0, 0, 0],
"euler": [0, 0, 0],
"matrix": [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
}
leading_nans = 0
if np.array_equal(image[0, 0], [BYTE_MAX, 0, 0, BYTE_MAX]):
# Orientation quaternion is present.
pixel = image[0, 1]
orientation = attitude["quaternion"]
for c in range(len(orientation)):
orientation[c] = byteToUnit(pixel[c])
leading_nans += 2
if np.array_equal(image[0, 2], [BYTE_MAX, 0, 0, BYTE_MAX]):
# Euler angles and 3x3 rotation matrix are present.
pixel = image[0, 3]
orientation = attitude["euler"]
for c in range(len(orientation)):
orientation[c] = math.pi * byteToUnit(pixel[c])
matrix = attitude["matrix"]
for r in range(len(matrix)):
pixel = image[0, 4 + r]
row = matrix[r]
for c in range(len(row)):
row[c] = byteToUnit(pixel[c])
leading_nans += 5
red = image[:, :, 0]
green = image[:, :, 1]
blue = image[:, :, 2]
depth = ((MAX_RED_VALUE - red) * CHANNELS_MAX) + ((green - red) * CHANNEL_MAX) + (blue - red)
# Zero in the red channel indicates the sensor provided no data.
depth[np.where(red == 0)] = np.nan
# Zero out garbage values from encoded attitude
depth[0, :leading_nans] = np.nan
return depth, attitude
def encode_normalized_depth(depth):
"""Given a single normalized depth value, encode it into an RGBA pixel."""
depth_in_mm = int(depth * MAX_DEPTH)
red_bits = depth_in_mm // int(CHANNELS_MAX)
lower_bits = depth_in_mm % int(CHANNELS_MAX)
green_bits = lower_bits // int(CHANNEL_MAX)
blue_bits = lower_bits % int(CHANNEL_MAX)
red = int(MAX_RED_VALUE) - red_bits
return [red, red + green_bits, red + blue_bits, BYTE_MAX]
def encode_normalized_depths(depths):
"""Given a normalized depth image, encode it into an RGBA image."""
channel_max = np.int32(CHANNEL_MAX)
channels_max = np.int32(CHANNELS_MAX)
int_depths = (depths * MAX_DEPTH).astype(np.int32)
red_bits = int_depths // channels_max
lower_bits = np.mod(int_depths, channels_max)
red = np.uint8(MAX_RED_VALUE) - red_bits.astype(np.uint8)
green_bits = (lower_bits // channel_max).astype(np.uint8)
blue_bits = np.mod(lower_bits, channel_max).astype(np.uint8)
alpha_bits = np.ones_like(red) * np.uint8(BYTE_MAX)
return np.concatenate(
[red, red + green_bits, red + blue_bits, alpha_bits],
axis=len(depths.shape) - 1
)
def load_image(image_path):
"""Load, split and decode an image."""
combined_image = ndimage.imread(image_path).astype(np.float32)
color_image, depth_image = split(combined_image)
color_image = color_image[:, :, 0 : COLOR_CHANNELS] / BYTE_MAX # Discard alpha and normalize
depths, attitude = decode_depth(depth_image)
return (color_image, depths, attitude)
def ascending_factors(number):
"""Calculate the prime factors of a number in ascending order."""
factor = 2
while number > 1:
if number % factor == 0:
yield factor
number = number // factor
else:
factor += 1
def compute_scales(height, width):
"""Compute the prime factors of a the specified image dimensions, padding with 1 if neccesary."""
height_scales = reversed(list(ascending_factors(height)))
width_scales = reversed(list(ascending_factors(width)))
return list(zip_longest(height_scales, width_scales, fillvalue=1))
def mipmap_imputer(image, strategy=np.mean, smooth=False, scales=None):
"""Fill NaNs with localized aggregate values using mipmaps"""
# Combination of: http://stackoverflow.com/questions/14549696/mipmap-of-image-in-numpy
# and: http://stackoverflow.com/questions/5480694/numpy-calculate-averages-with-nans-removed
# If we weren't provided with scale values, compute them.
scales = scales if scales else compute_scales(image.shape[0], image.shape[1])
# Calculate the mipmaps by averaging around NaNs.
mipmaps = []
mipmap = image
for y, x in scales:
mipmap = mipmap.copy()
size = mipmap.shape
reshaped = mipmap.reshape(size[0] // y, y, size[1] // x, x)
masked = np.ma.masked_array(reshaped, np.isnan(reshaped))
mipmap = strategy(strategy(masked, axis=3), axis=1).filled(np.nan)
mipmaps.append(mipmap)
# Progresively fill in holes in each mipmap scale from the next smaller one.
for index in reversed(range(len(mipmaps))):
y, x = scales[index]
if x > 1:
mipmap = np.repeat(mipmap, x, axis=1).reshape(mipmap.shape[0], mipmap.shape[1] * x)
if y > 1:
mipmap = np.repeat(mipmap, y, axis=0).reshape(mipmap.shape[0] * y, mipmap.shape[1])
target = mipmaps[index - 1] if index > 0 else image.copy()
nans = np.where(np.isnan(target))
target[nans] = mipmap[nans]
if index > 0 and smooth:
target = ndimage.filters.gaussian_filter(target, max(y, x))
mipmap = target
return target
def compute_mean_depth(files):
"""Given a set of image files, compute the mean of all the depth values."""
# NOTE: The original version of this function computed the mean of the image means.
# Since the images have different numbers of missing pixels, this skewed the result slightly.
depth_sum = np.int64(0)
depth_count =
|
np.int64(0)
|
numpy.int64
|
# License: BSD 3-clause
# Authors: <NAME>
# LTSD routine from jfsantos (<NAME>)
# Harvest, Cheaptrick, D4C, WORLD routines based on MATLAB code from <NAME>
# http://ml.cs.yamanashi.ac.jp/world/english/
# MGC code based on r9y9 (Ryuichi Yamamoto) MelGeneralizedCepstrums.jl
# Pieces also adapted from SPTK
from __future__ import division
import numpy as np
import scipy as sp
from numpy.lib.stride_tricks import as_strided
import scipy.signal as sg
from scipy.interpolate import interp1d
import wave
from scipy.cluster.vq import vq
from scipy import linalg, fftpack
from numpy.testing import assert_almost_equal
from scipy.linalg import svd
from scipy.io import wavfile
from scipy.signal import firwin
import zipfile
import tarfile
import os
import copy
import multiprocessing
from multiprocessing import Pool
import functools
import time
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib2 as urllib
def download(url, server_fname, local_fname=None, progress_update_percentage=5,
bypass_certificate_check=False):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
if bypass_certificate_check:
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
u = urllib.urlopen(url, context=ctx)
else:
u = urllib.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except TypeError:
print("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
print("Downloading: %s Bytes: %s" % (server_fname, file_size))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
print(status)
p += progress_update_percentage
def fetch_sample_speech_tapestry():
url = "https://www.dropbox.com/s/qte66a7haqspq2g/tapestry.wav?dl=1"
wav_path = "tapestry.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo? - just choose one channel
return fs, d
def fetch_sample_file(wav_path):
if not os.path.exists(wav_path):
raise ValueError("Unable to find file at path %s" % wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
if len(d.shape) > 1:
d = d[:, 0]
return fs, d
def fetch_sample_music():
url = "http://www.music.helsinki.fi/tmt/opetus/uusmedia/esim/"
url += "a2002011001-e02-16kHz.wav"
wav_path = "test.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
d = d[:, 0]
return fs, d
def fetch_sample_speech_fruit(n_samples=None):
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
wav_path = "audio.tar.gz"
if not os.path.exists(wav_path):
download(url, wav_path)
tf = tarfile.open(wav_path)
wav_names = [fname for fname in tf.getnames()
if ".wav" in fname.split(os.sep)[-1]]
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
f = tf.extractfile(wav_name)
fs, d = wavfile.read(f)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
return fs, speech
def fetch_sample_speech_eustace(n_samples=None):
"""
http://www.cstr.ed.ac.uk/projects/eustace/download.html
"""
# data
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_wav.zip"
wav_path = "eustace_wav.zip"
if not os.path.exists(wav_path):
download(url, wav_path)
# labels
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_labels.zip"
labels_path = "eustace_labels.zip"
if not os.path.exists(labels_path):
download(url, labels_path)
# Read wavfiles
# 16 kHz wav
zf = zipfile.ZipFile(wav_path, 'r')
wav_names = [fname for fname in zf.namelist()
if ".wav" in fname.split(os.sep)[-1]]
fs = 16000
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
wav_str = zf.read(wav_name)
d = np.frombuffer(wav_str, dtype=np.int16)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
zf = zipfile.ZipFile(labels_path, 'r')
label_names = [fname for fname in zf.namelist()
if ".lab" in fname.split(os.sep)[-1]]
labels = []
print("Loading label files...")
for label_name in label_names[:n_samples]:
label_file_str = zf.read(label_name)
labels.append(label_file_str)
return fs, speech
def stft(X, fftsize=128, step="half", mean_normalize=True, real=False,
compute_onesided=True):
"""
Compute STFT for 1D real valued input X
"""
if real:
local_fft = fftpack.rfft
cut = -1
else:
local_fft = fftpack.fft
cut = None
if compute_onesided:
cut = fftsize // 2 + 1
if mean_normalize:
X -= X.mean()
if step == "half":
X = halfoverlap(X, fftsize)
else:
X = overlap(X, fftsize, step)
size = fftsize
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
X = X * win[None]
X = local_fft(X)[:, :cut]
return X
def istft(X, fftsize=128, step="half", wsola=False, mean_normalize=True,
real=False, compute_onesided=True):
"""
Compute ISTFT for STFT transformed X
"""
if real:
local_ifft = fftpack.irfft
X_pad = np.zeros((X.shape[0], X.shape[1] + 1)) + 0j
X_pad[:, :-1] = X
X = X_pad
else:
local_ifft = fftpack.ifft
if compute_onesided:
X_pad = np.zeros((X.shape[0], 2 * X.shape[1])) + 0j
X_pad[:, :fftsize // 2 + 1] = X
X_pad[:, fftsize // 2 + 1:] = 0
X = X_pad
X = local_ifft(X).astype("float64")
if step == "half":
X = invert_halfoverlap(X)
else:
X = overlap_add(X, step, wsola=wsola)
if mean_normalize:
X -= np.mean(X)
return X
def mdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
X = halfoverlap(X, N)
X = sine_window(X)
n, k = np.meshgrid(np.arange(N), np.arange(M))
# Use transpose due to "samples as rows" convention
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M).T
return np.dot(X, tf)
def imdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
N_4 = N / 4
n, k = np.meshgrid(np.arange(N), np.arange(M))
# inverse *is not* transposed
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M)
X_r = np.dot(X, tf) / N_4
X_r = sine_window(X_r)
X = invert_halfoverlap(X_r)
return X
def nsgcwin(fmin, fmax, n_bins, fs, signal_len, gamma):
"""
Nonstationary Gabor window calculation
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
# use a hanning window
# no fractional shifts
fftres = fs / signal_len
fmin = float(fmin)
fmax = float(fmax)
gamma = float(gamma)
nyq = fs / 2.
b = np.floor(n_bins * np.log2(fmax / fmin))
fbas = fmin * 2 ** (np.arange(b + 1) / float(n_bins))
Q = 2 ** (1. / n_bins) - 2 ** (-1. / n_bins)
cqtbw = Q * fbas + gamma
cqtbw = cqtbw.ravel()
maxidx = np.where(fbas + cqtbw / 2. > nyq)[0]
if len(maxidx) > 0:
# replicate bug in MATLAB version...
# or is it a feature
if sum(maxidx) == 0:
first = len(cqtbw) - 1
else:
first = maxidx[0]
fbas = fbas[:first]
cqtbw = cqtbw[:first]
minidx = np.where(fbas - cqtbw / 2. < 0)[0]
if len(minidx) > 0:
fbas = fbas[minidx[-1]+1:]
cqtbw = cqtbw[minidx[-1]+1:]
fbas_len = len(fbas)
fbas_new = np.zeros((2 * (len(fbas) + 1)))
fbas_new[1:len(fbas) + 1] = fbas
fbas = fbas_new
fbas[fbas_len + 1] = nyq
fbas[fbas_len + 2:] = fs - fbas[1:fbas_len + 1][::-1]
bw = np.zeros_like(fbas)
bw[0] = 2 * fmin
bw[1:len(cqtbw) + 1] = cqtbw
bw[len(cqtbw) + 1] = fbas[fbas_len + 2] - fbas[fbas_len]
bw[-len(cqtbw):] = cqtbw[::-1]
bw = bw / fftres
fbas = fbas / fftres
posit = np.zeros_like(fbas)
posit[:fbas_len + 2] = np.floor(fbas[:fbas_len + 2])
posit[fbas_len + 2:] = np.ceil(fbas[fbas_len + 2:])
base_shift = -posit[-1] % signal_len
shift = np.zeros_like(posit).astype("int32")
shift[1:] = (posit[1:] - posit[:-1]).astype("int32")
shift[0] = base_shift
bw = np.round(bw)
bwfac = 1
M = bw
min_win = 4
for ii in range(len(bw)):
if bw[ii] < min_win:
bw[ii] = min_win
M[ii] = bw[ii]
def _win(numel):
if numel % 2 == 0:
s1 = np.arange(0, .5, 1. / numel)
if len(s1) != numel // 2:
# edge case with small floating point numbers...
s1 = s1[:-1]
s2 = np.arange(-.5, 0, 1. / numel)
if len(s2) != numel // 2:
# edge case with small floating point numbers...
s2 = s2[:-1]
x = np.concatenate((s1, s2))
else:
s1 = np.arange(0, .5, 1. / numel)
s2 = np.arange(-.5 + .5 / numel, 0, 1. / numel)
if len(s2) != numel // 2: # assume integer truncate 27 // 2 = 13
s2 = s2[:-1]
x = np.concatenate((s1, s2))
assert len(x) == numel
g = .5 + .5 * np.cos(2 * np.pi * x)
return g
multiscale = [_win(bi) for bi in bw]
bw = bwfac * np.ceil(M / bwfac)
for kk in [0, fbas_len + 1]:
if M[kk] > M[kk + 1]:
multiscale[kk] = np.ones(M[kk]).astype(multiscale[0].dtype)
i1 = np.floor(M[kk] / 2) - np.floor(M[kk + 1] / 2)
i2 = np.floor(M[kk] / 2) + np.ceil(M[kk + 1] / 2)
# Very rarely, gets an off by 1 error? Seems to be at the end...
# for now, slice
multiscale[kk][i1:i2] = _win(M[kk + 1])
multiscale[kk] = multiscale[kk] / np.sqrt(M[kk])
return multiscale, shift, M
def nsgtf_real(X, multiscale, shift, window_lens):
"""
Nonstationary Gabor Transform for real values
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
# This will break with multchannel input
signal_len = len(X)
N = len(shift)
X_fft = np.fft.fft(X)
fill = np.sum(shift) - signal_len
if fill > 0:
X_fft_tmp = np.zeros((signal_len + shift))
X_fft_tmp[:len(X_fft)] = X_fft
X_fft = X_fft_tmp
posit = np.cumsum(shift) - shift[0]
scale_lens = np.array([len(m) for m in multiscale])
N = np.where(posit - np.floor(scale_lens) <= (signal_len + fill) / 2)[0][-1]
c = []
# c[0] is almost exact
for ii in range(N):
idx_l = np.arange(np.ceil(scale_lens[ii] / 2), scale_lens[ii])
idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
idx = np.concatenate((idx_l, idx_r))
idx = idx.astype("int32")
subwin_range = posit[ii] + np.arange(-np.floor(scale_lens[ii] / 2),
np.ceil(scale_lens[ii] / 2))
win_range = subwin_range % (signal_len + fill)
win_range = win_range.astype("int32")
if window_lens[ii] < scale_lens[ii]:
raise ValueError("Not handling 'not enough channels' case")
else:
temp = np.zeros((window_lens[ii],)).astype(X_fft.dtype)
temp_idx_l = np.arange(len(temp) - np.floor(scale_lens[ii] / 2),
len(temp))
temp_idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
temp_idx = np.concatenate((temp_idx_l, temp_idx_r))
temp_idx = temp_idx.astype("int32")
temp[temp_idx] = X_fft[win_range] * multiscale[ii][idx]
fs_new_bins = window_lens[ii]
fk_bins = posit[ii]
displace = fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins
displace = displace.astype("int32")
temp = np.roll(temp, displace)
c.append(np.fft.ifft(temp))
if 0:
# cell2mat concatenation
c = np.concatenate(c)
return c
def nsdual(multiscale, shift, window_lens):
"""
Calculation of nonstationary inverse gabor filters
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
N = len(shift)
posit = np.cumsum(shift)
seq_len = posit[-1]
posit = posit - shift[0]
diagonal = np.zeros((seq_len,))
win_range = []
for ii in range(N):
filt_len = len(multiscale[ii])
idx = np.arange(-np.floor(filt_len / 2), np.ceil(filt_len / 2))
win_range.append((posit[ii] + idx) % seq_len)
subdiag = window_lens[ii] * np.fft.fftshift(multiscale[ii]) ** 2
ind = win_range[ii].astype(np.int)
diagonal[ind] = diagonal[ind] + subdiag
dual_multiscale = multiscale
for ii in range(N):
ind = win_range[ii].astype(np.int)
dual_multiscale[ii] = np.fft.ifftshift(
np.fft.fftshift(dual_multiscale[ii]) / diagonal[ind])
return dual_multiscale
def nsgitf_real(c, c_dc, c_nyq, multiscale, shift):
"""
Nonstationary Inverse Gabor Transform on real valued signal
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
c_l = []
c_l.append(c_dc)
c_l.extend([ci for ci in c])
c_l.append(c_nyq)
posit = np.cumsum(shift)
seq_len = posit[-1]
posit -= shift[0]
out = np.zeros((seq_len,)).astype(c_l[1].dtype)
for ii in range(len(c_l)):
filt_len = len(multiscale[ii])
win_range = posit[ii] + np.arange(-np.floor(filt_len / 2),
np.ceil(filt_len / 2))
win_range = (win_range % seq_len).astype(np.int)
temp = np.fft.fft(c_l[ii]) * len(c_l[ii])
fs_new_bins = len(c_l[ii])
fk_bins = posit[ii]
displace = int(fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins)
temp = np.roll(temp, -displace)
l = np.arange(len(temp) - np.floor(filt_len / 2), len(temp))
r = np.arange(np.ceil(filt_len / 2))
temp_idx = (np.concatenate((l, r)) % len(temp)).astype(np.int)
temp = temp[temp_idx]
lf = np.arange(filt_len - np.floor(filt_len / 2), filt_len)
rf = np.arange(np.ceil(filt_len / 2))
filt_idx = np.concatenate((lf, rf)).astype(np.int)
m = multiscale[ii][filt_idx]
out[win_range] = out[win_range] + m * temp
nyq_bin = np.floor(seq_len / 2) + 1
out_idx = np.arange(
nyq_bin - np.abs(1 - seq_len % 2) - 1, 0, -1).astype(np.int)
out[nyq_bin:] = np.conj(out[out_idx])
t_out = np.real(np.fft.ifft(out)).astype(np.float64)
return t_out
def cqt(X, fs, n_bins=48, fmin=27.5, fmax="nyq", gamma=20):
"""
Constant Q Transform
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
if fmax == "nyq":
fmax = fs / 2.
multiscale, shift, window_lens = nsgcwin(fmin, fmax, n_bins, fs,
len(X), gamma)
fbas = fs * np.cumsum(shift[1:]) / len(X)
fbas = fbas[:len(window_lens) // 2 - 1]
bins = window_lens.shape[0] // 2 - 1
window_lens[1:bins + 1] = window_lens[bins + 2]
window_lens[bins + 2:] = window_lens[1:bins + 1][::-1]
norm = 2. * window_lens[:bins + 2] / float(len(X))
norm = np.concatenate((norm, norm[1:-1][::-1]))
multiscale = [norm[ii] * multiscale[ii] for ii in range(2 * (bins + 1))]
c = nsgtf_real(X, multiscale, shift, window_lens)
c_dc = c[0]
c_nyq = c[-1]
c_sub = c[1:-1]
c = np.vstack(c_sub)
return c, c_dc, c_nyq, multiscale, shift, window_lens
def icqt(X_cq, c_dc, c_nyq, multiscale, shift, window_lens):
"""
Inverse constant Q Transform
References
----------
<NAME>., <NAME>., <NAME>., <NAME>.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
<NAME>., <NAME>., <NAME>. and <NAME>.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : <NAME>, <NAME>, <NAME>, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
new_multiscale = nsdual(multiscale, shift, window_lens)
X = nsgitf_real(X_cq, c_dc, c_nyq, new_multiscale, shift)
return X
def rolling_mean(X, window_size):
w = 1.0 / window_size * np.ones((window_size))
return np.correlate(X, w, 'valid')
def rolling_window(X, window_size):
# for 1d data
shape = X.shape[:-1] + (X.shape[-1] - window_size + 1, window_size)
strides = X.strides + (X.strides[-1],)
return np.lib.stride_tricks.as_strided(X, shape=shape, strides=strides)
def voiced_unvoiced(X, window_size=256, window_step=128, copy=True):
"""
Voiced unvoiced detection from a raw signal
Based on code from:
https://www.clear.rice.edu/elec532/PROJECTS96/lpc/code.html
Other references:
http://www.seas.ucla.edu/spapl/code/harmfreq_MOLRT_VAD.m
Parameters
----------
X : ndarray
Raw input signal
window_size : int, optional (default=256)
The window size to use, in samples.
window_step : int, optional (default=128)
How far the window steps after each calculation, in samples.
copy : bool, optional (default=True)
Whether to make a copy of the input array or allow in place changes.
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = n_points // window_step
# Padding
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
np.zeros((X.shape[0], pad_sizes[1]))))
clipping_factor = 0.6
b, a = sg.butter(10, np.pi * 9 / 40)
voiced_unvoiced = np.zeros((n_windows, 1))
period = np.zeros((n_windows, 1))
for window in range(max(n_windows - 1, 1)):
XX = X.ravel()[window * window_step + np.arange(window_size)]
XX *= sg.hamming(len(XX))
XX = sg.lfilter(b, a, XX)
left_max = np.max(np.abs(XX[:len(XX) // 3]))
right_max = np.max(np.abs(XX[-len(XX) // 3:]))
clip_value = clipping_factor * np.min([left_max, right_max])
XX_clip = np.clip(XX, clip_value, -clip_value)
XX_corr = np.correlate(XX_clip, XX_clip, mode='full')
center = np.argmax(XX_corr)
right_XX_corr = XX_corr[center:]
prev_window = max([window - 1, 0])
if voiced_unvoiced[prev_window] > 0:
# Want it to be harder to turn off than turn on
strength_factor = .29
else:
strength_factor = .3
start = np.where(right_XX_corr < .3 * XX_corr[center])[0]
# 20 is hardcoded but should depend on samplerate?
try:
start = np.max([20, start[0]])
except IndexError:
start = 20
search_corr = right_XX_corr[start:]
index = np.argmax(search_corr)
second_max = search_corr[index]
if (second_max > strength_factor * XX_corr[center]):
voiced_unvoiced[window] = 1
period[window] = start + index - 1
else:
voiced_unvoiced[window] = 0
period[window] = 0
return np.array(voiced_unvoiced), np.array(period)
def lpc_analysis(X, order=8, window_step=128, window_size=2 * 128,
emphasis=0.9, voiced_start_threshold=.9,
voiced_stop_threshold=.6, truncate=False, copy=True):
"""
Extract LPC coefficients from a signal
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
_rParameters
----------
X : ndarray
Signals to extract LPC coefficients from
order : int, optional (default=8)
Order of the LPC coefficients. For speech, use the general rule that the
order is two times the expected number of formants plus 2.
This can be formulated as 2 + 2 * (fs // 2000). For approx. signals
with fs = 7000, this is 8 coefficients - 2 + 2 * (7000 // 2000).
window_step : int, optional (default=128)
The size (in samples) of the space between each window
window_size : int, optional (default=2 * 128)
The size of each window (in samples) to extract coefficients over
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
voiced_start_threshold : float, optional (default=0.9)
Upper power threshold for estimating when speech has started
voiced_stop_threshold : float, optional (default=0.6)
Lower power threshold for estimating when speech has stopped
truncate : bool, optional (default=False)
Whether to cut the data at the last window or do zero padding.
copy : bool, optional (default=True)
Whether to copy the input X or modify in place
Returns
-------
lp_coefficients : ndarray
lp coefficients to describe the frame
per_frame_gain : ndarray
calculated gain for each frame
residual_excitation : ndarray
leftover energy which is not described by lp coefficents and gain
voiced_frames : ndarray
array of [0, 1] values which holds voiced/unvoiced decision for each
frame.
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = int(n_points // window_step)
if not truncate:
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], int(pad_sizes[0]))), X,
np.zeros((X.shape[0], int(pad_sizes[1])))))
else:
pad_sizes = [0, 0]
X = X[0, :n_windows * window_step]
lp_coefficients = np.zeros((n_windows, order + 1))
per_frame_gain = np.zeros((n_windows, 1))
residual_excitation = np.zeros(
int(((n_windows - 1) * window_step + window_size)))
# Pre-emphasis high-pass filter
X = sg.lfilter([1, -emphasis], 1, X)
# stride_tricks.as_strided?
autocorr_X = np.zeros((n_windows, int(2 * window_size - 1)))
for window in range(max(n_windows - 1, 1)):
wtws = int(window * window_step)
XX = X.ravel()[wtws + np.arange(window_size, dtype="int32")]
WXX = XX * sg.hanning(window_size)
autocorr_X[window] = np.correlate(WXX, WXX, mode='full')
center = np.argmax(autocorr_X[window])
RXX = autocorr_X[window,
np.arange(center, window_size + order, dtype="int32")]
R = linalg.toeplitz(RXX[:-1])
solved_R = linalg.pinv(R).dot(RXX[1:])
filter_coefs = np.hstack((1, -solved_R))
residual_signal = sg.lfilter(filter_coefs, 1, WXX)
gain = np.sqrt(np.mean(residual_signal ** 2))
lp_coefficients[window] = filter_coefs
per_frame_gain[window] = gain
assign_range = wtws + np.arange(window_size, dtype="int32")
residual_excitation[assign_range] += residual_signal / gain
# Throw away first part in overlap mode for proper synthesis
residual_excitation = residual_excitation[int(pad_sizes[0]):]
return lp_coefficients, per_frame_gain, residual_excitation
def lpc_to_frequency(lp_coefficients, per_frame_gain):
"""
Extract resonant frequencies and magnitudes from LPC coefficients and gains.
Parameters
----------
lp_coefficients : ndarray
LPC coefficients, such as those calculated by ``lpc_analysis``
per_frame_gain : ndarray
Gain calculated for each frame, such as those calculated
by ``lpc_analysis``
Returns
-------
frequencies : ndarray
Resonant frequencies calculated from LPC coefficients and gain. Returned
frequencies are from 0 to 2 * pi
magnitudes : ndarray
Magnitudes of resonant frequencies
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
n_windows, order = lp_coefficients.shape
frame_frequencies = np.zeros((n_windows, (order - 1) // 2))
frame_magnitudes = np.zeros_like(frame_frequencies)
for window in range(n_windows):
w_coefs = lp_coefficients[window]
g_coefs = per_frame_gain[window]
roots = np.roots(np.hstack(([1], w_coefs[1:])))
# Roots doesn't return the same thing as MATLAB... agh
frequencies, index = np.unique(
np.abs(np.angle(roots)), return_index=True)
# Make sure 0 doesn't show up...
gtz = np.where(frequencies > 0)[0]
frequencies = frequencies[gtz]
index = index[gtz]
magnitudes = g_coefs / (1. - np.abs(roots))
sort_index = np.argsort(frequencies)
frame_frequencies[window, :len(sort_index)] = frequencies[sort_index]
frame_magnitudes[window, :len(sort_index)] = magnitudes[sort_index]
return frame_frequencies, frame_magnitudes
def lpc_to_lsf(all_lpc):
if len(all_lpc.shape) < 2:
all_lpc = all_lpc[None]
order = all_lpc.shape[1] - 1
all_lsf = np.zeros((len(all_lpc), order))
for i in range(len(all_lpc)):
lpc = all_lpc[i]
lpc1 = np.append(lpc, 0)
lpc2 = lpc1[::-1]
sum_filt = lpc1 + lpc2
diff_filt = lpc1 - lpc2
if order % 2 != 0:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, -1])
deconv_sum, _ = sg.deconvolve(sum_filt, [1, 1])
roots_diff = np.roots(deconv_diff)
roots_sum = np.roots(deconv_sum)
angle_diff = np.angle(roots_diff[::2])
angle_sum = np.angle(roots_sum[::2])
lsf = np.sort(np.hstack((angle_diff, angle_sum)))
if len(lsf) != 0:
all_lsf[i] = lsf
return np.squeeze(all_lsf)
def lsf_to_lpc(all_lsf):
if len(all_lsf.shape) < 2:
all_lsf = all_lsf[None]
order = all_lsf.shape[1]
all_lpc = np.zeros((len(all_lsf), order + 1))
for i in range(len(all_lsf)):
lsf = all_lsf[i]
zeros = np.exp(1j * lsf)
sum_zeros = zeros[::2]
diff_zeros = zeros[1::2]
sum_zeros = np.hstack((sum_zeros, np.conj(sum_zeros)))
diff_zeros = np.hstack((diff_zeros, np.conj(diff_zeros)))
sum_filt = np.poly(sum_zeros)
diff_filt = np.poly(diff_zeros)
if order % 2 != 0:
deconv_diff = sg.convolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff = sg.convolve(diff_filt, [1, -1])
deconv_sum = sg.convolve(sum_filt, [1, 1])
lpc = .5 * (deconv_sum + deconv_diff)
# Last coefficient is 0 and not returned
all_lpc[i] = lpc[:-1]
return np.squeeze(all_lpc)
def lpc_synthesis(lp_coefficients, per_frame_gain, residual_excitation=None,
voiced_frames=None, window_step=128, emphasis=0.9):
"""
Synthesize a signal from LPC coefficients
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
http://web.uvic.ca/~tyoon/resource/auditorytoolbox/auditorytoolbox/synlpc.html
Parameters
----------
lp_coefficients : ndarray
Linear prediction coefficients
per_frame_gain : ndarray
Gain coefficients
residual_excitation : ndarray or None, optional (default=None)
Residual excitations. If None, this will be synthesized with white noise
voiced_frames : ndarray or None, optional (default=None)
Voiced frames. If None, all frames assumed to be voiced.
window_step : int, optional (default=128)
The size (in samples) of the space between each window
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
overlap_add : bool, optional (default=True)
What type of processing to use when joining windows
copy : bool, optional (default=True)
Whether to copy the input X or modify in place
Returns
-------
synthesized : ndarray
Sound vector synthesized from input arguments
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
# TODO: Incorporate better synthesis from
# http://eecs.oregonstate.edu/education/docs/ece352/CompleteManual.pdf
window_size = 2 * window_step
[n_windows, order] = lp_coefficients.shape
n_points = (n_windows + 1) * window_step
n_excitation_points = n_points + window_step + window_step // 2
random_state = np.random.RandomState(1999)
if residual_excitation is None:
# Need to generate excitation
if voiced_frames is None:
# No voiced/unvoiced info
voiced_frames = np.ones((lp_coefficients.shape[0], 1))
residual_excitation = np.zeros((n_excitation_points))
f, m = lpc_to_frequency(lp_coefficients, per_frame_gain)
t = np.linspace(0, 1, window_size, endpoint=False)
hanning = sg.hanning(window_size)
for window in range(n_windows):
window_base = window * window_step
index = window_base + np.arange(window_size)
if voiced_frames[window]:
sig = np.zeros_like(t)
cycles = np.cumsum(f[window][0] * t)
sig += sg.sawtooth(cycles, 0.001)
residual_excitation[index] += hanning * sig
residual_excitation[index] += hanning * 0.01 * random_state.randn(
window_size)
else:
n_excitation_points = residual_excitation.shape[0]
n_points = n_excitation_points + window_step + window_step // 2
residual_excitation = np.hstack((residual_excitation,
np.zeros(window_size)))
if voiced_frames is None:
voiced_frames = np.ones_like(per_frame_gain)
synthesized = np.zeros((n_points))
for window in range(n_windows):
window_base = window * window_step
oldbit = synthesized[window_base + np.arange(window_step)]
w_coefs = lp_coefficients[window]
if not np.all(w_coefs):
# Hack to make lfilter avoid
# ValueError: BUG: filter coefficient a[0] == 0 not supported yet
# when all coeffs are 0
w_coefs = [1]
g_coefs = voiced_frames[window] * per_frame_gain[window]
index = window_base + np.arange(window_size)
newbit = g_coefs * sg.lfilter([1], w_coefs,
residual_excitation[index])
synthesized[index] = np.hstack((oldbit, np.zeros(
(window_size - window_step))))
synthesized[index] += sg.hanning(window_size) * newbit
synthesized = sg.lfilter([1], [1, -emphasis], synthesized)
return synthesized
def soundsc(X, gain_scale=.9, copy=True):
"""
Approximate implementation of soundsc from MATLAB without the audio playing.
Parameters
----------
X : ndarray
Signal to be rescaled
gain_scale : float
Gain multipler, default .9 (90% of maximum representation)
copy : bool, optional (default=True)
Whether to make a copy of input signal or operate in place.
Returns
-------
X_sc : ndarray
(-32767, 32767) scaled version of X as int16, suitable for writing
with scipy.io.wavfile
"""
X = np.array(X, copy=copy)
X = (X - X.min()) / (X.max() - X.min())
X = 2 * X - 1
X = gain_scale * X
X = X * 2 ** 15
return X.astype('int16')
def _wav2array(nchannels, sampwidth, data):
# wavio.py
# Author: <NAME>
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
"""data must be the string containing the bytes from the wav file."""
num_samples, remainder = divmod(len(data), sampwidth * nchannels)
if remainder > 0:
raise ValueError('The length of data is not a multiple of '
'sampwidth * num_channels.')
if sampwidth > 4:
raise ValueError("sampwidth must not be greater than 4.")
if sampwidth == 3:
a = np.empty((num_samples, nchannels, 4), dtype=np.uint8)
raw_bytes = np.fromstring(data, dtype=np.uint8)
a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
result = a.view('<i4').reshape(a.shape[:-1])
else:
# 8 bit samples are stored as unsigned ints; others as signed ints.
dt_char = 'u' if sampwidth == 1 else 'i'
a = np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
result = a.reshape(-1, nchannels)
return result
def readwav(file):
# wavio.py
# Author: <NAME>
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
"""
Read a wav file.
Returns the frame rate, sample width (in bytes) and a numpy array
containing the data.
This function does not read compressed wav files.
"""
wav = wave.open(file)
rate = wav.getframerate()
nchannels = wav.getnchannels()
sampwidth = wav.getsampwidth()
nframes = wav.getnframes()
data = wav.readframes(nframes)
wav.close()
array = _wav2array(nchannels, sampwidth, data)
return rate, sampwidth, array
def csvd(arr):
"""
Do the complex SVD of a 2D array, returning real valued U, S, VT
http://stemblab.github.io/complex-svd/
"""
C_r = arr.real
C_i = arr.imag
block_x = C_r.shape[0]
block_y = C_r.shape[1]
K = np.zeros((2 * block_x, 2 * block_y))
# Upper left
K[:block_x, :block_y] = C_r
# Lower left
K[:block_x, block_y:] = C_i
# Upper right
K[block_x:, :block_y] = -C_i
# Lower right
K[block_x:, block_y:] = C_r
return svd(K, full_matrices=False)
def icsvd(U, S, VT):
"""
Invert back to complex values from the output of csvd
U, S, VT = csvd(X)
X_rec = inv_csvd(U, S, VT)
"""
K = U.dot(np.diag(S)).dot(VT)
block_x = U.shape[0] // 2
block_y = U.shape[1] // 2
arr_rec = np.zeros((block_x, block_y)) + 0j
arr_rec.real = K[:block_x, :block_y]
arr_rec.imag = K[:block_x, block_y:]
return arr_rec
def sinusoid_analysis(X, input_sample_rate, resample_block=128, copy=True):
"""
Contruct a sinusoidal model for the input signal.
Parameters
----------
X : ndarray
Input signal to model
input_sample_rate : int
The sample rate of the input signal
resample_block : int, optional (default=128)
Controls the step size of the sinusoidal model
Returns
-------
frequencies_hz : ndarray
Frequencies for the sinusoids, in Hz.
magnitudes : ndarray
Magnitudes of sinusoids returned in ``frequencies``
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
resample_to = 8000
if input_sample_rate != resample_to:
if input_sample_rate % resample_to != 0:
raise ValueError("Input sample rate must be a multiple of 8k!")
# Should be able to use resample... ?
# resampled_count = round(len(X) * resample_to / input_sample_rate)
# X = sg.resample(X, resampled_count, window=sg.hanning(len(X)))
X = sg.decimate(X, input_sample_rate // resample_to, zero_phase=True)
step_size = 2 * round(resample_block / input_sample_rate * resample_to / 2.)
a, g, e = lpc_analysis(X, order=8, window_step=step_size,
window_size=2 * step_size)
f, m = lpc_to_frequency(a, g)
f_hz = f * resample_to / (2 * np.pi)
return f_hz, m
def slinterp(X, factor, copy=True):
"""
Slow-ish linear interpolation of a 1D numpy array. There must be some
better function to do this in numpy.
Parameters
----------
X : ndarray
1D input array to interpolate
factor : int
Integer factor to interpolate by
Return
------
X_r : ndarray
"""
sz = np.product(X.shape)
X = np.array(X, copy=copy)
X_s = np.hstack((X[1:], [0]))
X_r = np.zeros((factor, sz))
for i in range(factor):
X_r[i, :] = (factor - i) / float(factor) * X + (i / float(factor)) * X_s
return X_r.T.ravel()[:(sz - 1) * factor + 1]
def sinusoid_synthesis(frequencies_hz, magnitudes, input_sample_rate=16000,
resample_block=128):
"""
Create a time series based on input frequencies and magnitudes.
Parameters
----------
frequencies_hz : ndarray
Input signal to model
magnitudes : int
The sample rate of the input signal
input_sample_rate : int, optional (default=16000)
The sample rate parameter that the sinusoid analysis was run with
resample_block : int, optional (default=128)
Controls the step size of the sinusoidal model
Returns
-------
synthesized : ndarray
Sound vector synthesized from input arguments
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
rows, cols = frequencies_hz.shape
synthesized = np.zeros((1 + ((rows - 1) * resample_block),))
for col in range(cols):
mags = slinterp(magnitudes[:, col], resample_block)
freqs = slinterp(frequencies_hz[:, col], resample_block)
cycles = np.cumsum(2 * np.pi * freqs / float(input_sample_rate))
sines = mags * np.cos(cycles)
synthesized += sines
return synthesized
def dct_compress(X, n_components, window_size=128):
"""
Compress using the DCT
Parameters
----------
X : ndarray, shape=(n_samples,)
The input signal to compress. Should be 1-dimensional
n_components : int
The number of DCT components to keep. Setting n_components to about
.5 * window_size can give compression with fairly good reconstruction.
window_size : int
The input X is broken into windows of window_size, each of which are
then compressed with the DCT.
Returns
-------
X_compressed : ndarray, shape=(num_windows, window_size)
A 2D array of non-overlapping DCT coefficients. For use with uncompress
Reference
---------
http://nbviewer.ipython.org/github/craffel/crucialpython/blob/master/week3/stride_tricks.ipynb
"""
if len(X) % window_size != 0:
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_size
X_strided = X.reshape((num_frames, window_size))
X_dct = fftpack.dct(X_strided, norm='ortho')
if n_components is not None:
X_dct = X_dct[:, :n_components]
return X_dct
def dct_uncompress(X_compressed, window_size=128):
"""
Uncompress a DCT compressed signal (such as returned by ``compress``).
Parameters
----------
X_compressed : ndarray, shape=(n_samples, n_features)
Windowed and compressed array.
window_size : int, optional (default=128)
Size of the window used when ``compress`` was called.
Returns
-------
X_reconstructed : ndarray, shape=(n_samples)
Reconstructed version of X.
"""
if X_compressed.shape[1] % window_size != 0:
append = np.zeros((X_compressed.shape[0],
window_size - X_compressed.shape[1] % window_size))
X_compressed = np.hstack((X_compressed, append))
X_r = fftpack.idct(X_compressed, norm='ortho')
return X_r.ravel()
def sine_window(X):
"""
Apply a sinusoid window to X.
Parameters
----------
X : ndarray, shape=(n_samples, n_features)
Input array of samples
Returns
-------
X_windowed : ndarray, shape=(n_samples, n_features)
Windowed version of X.
"""
i = np.arange(X.shape[1])
win = np.sin(np.pi * (i + 0.5) / X.shape[1])
row_stride = 0
col_stride = win.itemsize
strided_win = as_strided(win, shape=X.shape,
strides=(row_stride, col_stride))
return X * strided_win
def kaiserbessel_window(X, alpha=6.5):
"""
Apply a Kaiser-Bessel window to X.
Parameters
----------
X : ndarray, shape=(n_samples, n_features)
Input array of samples
alpha : float, optional (default=6.5)
Tuning parameter for Kaiser-Bessel function. alpha=6.5 should make
perfect reconstruction possible for DCT.
Returns
-------
X_windowed : ndarray, shape=(n_samples, n_features)
Windowed version of X.
"""
beta = np.pi * alpha
win = sg.kaiser(X.shape[1], beta)
row_stride = 0
col_stride = win.itemsize
strided_win = as_strided(win, shape=X.shape,
strides=(row_stride, col_stride))
return X * strided_win
def overlap(X, window_size, window_step):
"""
Create an overlapped version of X
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
window_step : int
Step size between windows
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError("Window size must be even!")
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
overlap_sz = window_size - window_step
new_shape = X.shape[:-1] + ((X.shape[-1] - overlap_sz) // window_step, window_size)
new_strides = X.strides[:-1] + (window_step * X.strides[-1],) + X.strides[-1:]
X_strided = as_strided(X, shape=new_shape, strides=new_strides)
return X_strided
def halfoverlap(X, window_size):
"""
Create an overlapped version of X using 50% of window_size as overlap.
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError("Window size must be even!")
window_step = window_size // 2
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_step - 1
row_stride = X.itemsize * window_step
col_stride = X.itemsize
X_strided = as_strided(X, shape=(num_frames, window_size),
strides=(row_stride, col_stride))
return X_strided
def invert_halfoverlap(X_strided):
"""
Invert ``halfoverlap`` function to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
# Hardcoded 50% overlap! Can generalize later...
n_rows, n_cols = X_strided.shape
X = np.zeros((((int(n_rows // 2) + 1) * n_cols),)).astype(X_strided.dtype)
start_index = 0
end_index = n_cols
window_step = n_cols // 2
for row in range(X_strided.shape[0]):
X[start_index:end_index] += X_strided[row]
start_index += window_step
end_index += window_step
return X
def overlap_add(X_strided, window_step, wsola=False):
"""
overlap add to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
window_step : int
step size for overlap add
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
n_rows, window_size = X_strided.shape
# Start with largest size (no overlap) then truncate after we finish
# +2 for one window on each side
X = np.zeros(((n_rows + 2) * window_size,)).astype(X_strided.dtype)
start_index = 0
total_windowing_sum = np.zeros((X.shape[0]))
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(window_size) / (
window_size - 1))
for i in range(n_rows):
end_index = start_index + window_size
if wsola:
offset_size = window_size - window_step
offset = xcorr_offset(X[start_index:start_index + offset_size],
X_strided[i, :offset_size])
ss = start_index - offset
st = end_index - offset
if start_index - offset < 0:
ss = 0
st = 0 + (end_index - start_index)
X[ss:st] += X_strided[i]
total_windowing_sum[ss:st] += win
start_index = start_index + window_step
else:
X[start_index:end_index] += X_strided[i]
total_windowing_sum[start_index:end_index] += win
start_index += window_step
# Not using this right now
#X = np.real(X) / (total_windowing_sum + 1)
X = X[:end_index]
return X
def overlap_dct_compress(X, n_components, window_size):
"""
Overlap (at 50% of window_size) and compress X.
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to compress
n_components : int
number of DCT components to keep
window_size : int
Size of windows to take
Returns
-------
X_dct : ndarray, shape=(n_windows, n_components)
Windowed and compressed version of X
"""
X_strided = halfoverlap(X, window_size)
X_dct = fftpack.dct(X_strided, norm='ortho')
if n_components is not None:
X_dct = X_dct[:, :n_components]
return X_dct
# Evil voice is caused by adding double the zeros before inverse DCT...
# Very cool bug but makes sense
def overlap_dct_uncompress(X_compressed, window_size):
"""
Uncompress X as returned from ``overlap_compress``.
Parameters
----------
X_compressed : ndarray, shape=(n_windows, n_components)
Windowed and compressed version of X
window_size : int
Size of windows originally used when compressing X
Returns
-------
X_reconstructed : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
if X_compressed.shape[1] % window_size != 0:
append = np.zeros((X_compressed.shape[0], window_size -
X_compressed.shape[1] % window_size))
X_compressed = np.hstack((X_compressed, append))
X_r = fftpack.idct(X_compressed, norm='ortho')
return invert_halfoverlap(X_r)
def herz_to_mel(freqs):
"""
Based on code by <NAME>
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
f_0 = 0 # 133.33333
f_sp = 200 / 3. # 66.66667
bark_freq = 1000.
bark_pt = (bark_freq - f_0) / f_sp
# The magic 1.0711703 which is the ratio needed to get from 1000 Hz
# to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz
# and the preceding linear filter center at 933.33333 Hz
# (actually 1000/933.33333 = 1.07142857142857 and
# exp(log(6.4)/27) = 1.07117028749447)
if not isinstance(freqs, np.ndarray):
freqs = np.array(freqs)[None]
log_step = np.exp(np.log(6.4) / 27)
lin_pts = (freqs < bark_freq)
mel = 0. * freqs
mel[lin_pts] = (freqs[lin_pts] - f_0) / f_sp
mel[~lin_pts] = bark_pt + np.log(freqs[~lin_pts] / bark_freq) / np.log(
log_step)
return mel
def mel_to_herz(mel):
"""
Based on code by <NAME>
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
f_0 = 0 # 133.33333
f_sp = 200 / 3. # 66.66667
bark_freq = 1000.
bark_pt = (bark_freq - f_0) / f_sp
# The magic 1.0711703 which is the ratio needed to get from 1000 Hz
# to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz
# and the preceding linear filter center at 933.33333 Hz
# (actually 1000/933.33333 = 1.07142857142857 and
# exp(log(6.4)/27) = 1.07117028749447)
if not isinstance(mel, np.ndarray):
mel = np.array(mel)[None]
log_step = np.exp(np.log(6.4) / 27)
lin_pts = (mel < bark_pt)
freqs = 0. * mel
freqs[lin_pts] = f_0 + f_sp * mel[lin_pts]
freqs[~lin_pts] = bark_freq * np.exp(np.log(log_step) * (
mel[~lin_pts] - bark_pt))
return freqs
def mel_freq_weights(n_fft, fs, n_filts=None, width=None):
"""
Based on code by <NAME>
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
min_freq = 0
max_freq = fs // 2
if width is None:
width = 1.
if n_filts is None:
n_filts = int(herz_to_mel(max_freq) / 2) + 1
else:
n_filts = int(n_filts)
assert n_filts > 0
weights = np.zeros((n_filts, n_fft))
fft_freqs = np.arange(n_fft // 2) / n_fft * fs
min_mel = herz_to_mel(min_freq)
max_mel = herz_to_mel(max_freq)
partial = np.arange(n_filts + 2) / (n_filts + 1.) * (max_mel - min_mel)
bin_freqs = mel_to_herz(min_mel + partial)
bin_bin = np.round(bin_freqs / fs * (n_fft - 1))
for i in range(n_filts):
fs_i = bin_freqs[i + np.arange(3)]
fs_i = fs_i[1] + width * (fs_i - fs_i[1])
lo_slope = (fft_freqs - fs_i[0]) / float(fs_i[1] - fs_i[0])
hi_slope = (fs_i[2] - fft_freqs) / float(fs_i[2] - fs_i[1])
weights[i, :n_fft // 2] = np.maximum(
0, np.minimum(lo_slope, hi_slope))
# Constant amplitude multiplier
weights = np.diag(2. / (bin_freqs[2:n_filts + 2]
- bin_freqs[:n_filts])).dot(weights)
weights[:, n_fft // 2:] = 0
return weights
def time_attack_agc(X, fs, t_scale=0.5, f_scale=1.):
"""
AGC based on code by <NAME>
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
# 32 ms grid for FFT
n_fft = 2 ** int(np.log(0.032 * fs) / np.log(2))
f_scale = float(f_scale)
window_size = n_fft
window_step = window_size // 2
X_freq = stft(X, window_size, mean_normalize=False)
fft_fs = fs / window_step
n_bands = max(10, 20 / f_scale)
mel_width = f_scale * n_bands / 10.
f_to_a = mel_freq_weights(n_fft, fs, n_bands, mel_width)
f_to_a = f_to_a[:, :n_fft // 2 + 1]
audiogram = np.abs(X_freq).dot(f_to_a.T)
fbg = np.zeros_like(audiogram)
state = np.zeros((audiogram.shape[1],))
alpha = np.exp(-(1. / fft_fs) / t_scale)
for i in range(len(audiogram)):
state = np.maximum(alpha * state, audiogram[i])
fbg[i] = state
sf_to_a = np.sum(f_to_a, axis=0)
E = np.diag(1. / (sf_to_a + (sf_to_a == 0)))
E = E.dot(f_to_a.T)
E = fbg.dot(E.T)
E[E <= 0] = np.min(E[E > 0])
ts = istft(X_freq / E, window_size, mean_normalize=False)
return ts, X_freq, E
def hebbian_kmeans(X, n_clusters=10, n_epochs=10, W=None, learning_rate=0.01,
batch_size=100, random_state=None, verbose=True):
"""
Modified from existing code from <NAME>
See http://www.cs.toronto.edu/~rfm/code/hebbian_kmeans.py
"""
if W is None:
if random_state is None:
random_state = np.random.RandomState()
W = 0.1 * random_state.randn(n_clusters, X.shape[1])
else:
assert n_clusters == W.shape[0]
X2 = (X ** 2).sum(axis=1, keepdims=True)
last_print = 0
for e in range(n_epochs):
for i in range(0, X.shape[0], batch_size):
X_i = X[i: i + batch_size]
X2_i = X2[i: i + batch_size]
D = -2 * np.dot(W, X_i.T)
D += (W ** 2).sum(axis=1, keepdims=True)
D += X2_i.T
S = (D == D.min(axis=0)[None, :]).astype("float").T
W += learning_rate * (
np.dot(S.T, X_i) - S.sum(axis=0)[:, None] * W)
if verbose:
if e == 0 or e > (.05 * n_epochs + last_print):
last_print = e
print("Epoch %i of %i, cost %.4f" % (
e + 1, n_epochs, D.min(axis=0).sum()))
return W
def complex_to_real_view(arr_c):
# Inplace view from complex to r, i as separate columns
assert arr_c.dtype in [np.complex64, np.complex128]
shp = arr_c.shape
dtype = np.float64 if arr_c.dtype == np.complex128 else np.float32
arr_r = arr_c.ravel().view(dtype=dtype).reshape(shp[0], 2 * shp[1])
return arr_r
def real_to_complex_view(arr_r):
# Inplace view from real, image as columns to complex
assert arr_r.dtype not in [np.complex64, np.complex128]
shp = arr_r.shape
dtype = np.complex128 if arr_r.dtype == np.float64 else np.complex64
arr_c = arr_r.ravel().view(dtype=dtype).reshape(shp[0], shp[1] // 2)
return arr_c
def complex_to_abs(arr_c):
return np.abs(arr_c)
def complex_to_angle(arr_c):
return np.angle(arr_c)
def abs_and_angle_to_complex(arr_abs, arr_angle):
# abs(f_c2 - f_c) < 1E-15
return arr_abs * np.exp(1j * arr_angle)
def angle_to_sin_cos(arr_angle):
return np.hstack((np.sin(arr_angle), np.cos(arr_angle)))
def sin_cos_to_angle(arr_sin, arr_cos):
return np.arctan2(arr_sin, arr_cos)
def polyphase_core(x, m, f):
# x = input data
# m = decimation rate
# f = filter
# Hack job - append zeros to match decimation rate
if x.shape[0] % m != 0:
x = np.append(x, np.zeros((m - x.shape[0] % m,)))
if f.shape[0] % m != 0:
f = np.append(f, np.zeros((m - f.shape[0] % m,)))
polyphase = p = np.zeros((m, (x.shape[0] + f.shape[0]) / m), dtype=x.dtype)
p[0, :-1] = np.convolve(x[::m], f[::m])
# Invert the x values when applying filters
for i in range(1, m):
p[i, 1:] = np.convolve(x[m - i::m], f[i::m])
return p
def polyphase_single_filter(x, m, f):
return np.sum(polyphase_core(x, m, f), axis=0)
def polyphase_lowpass(arr, downsample=2, n_taps=50, filter_pad=1.1):
filt = firwin(downsample * n_taps, 1 / (downsample * filter_pad))
filtered = polyphase_single_filter(arr, downsample, filt)
return filtered
def window(arr, window_size, window_step=1, axis=0):
"""
Directly taken from <NAME> post to numpy-discussion.
<http://www.mail-archive.com/<EMAIL>/msg29450.html>
<http://stackoverflow.com/questions/4936620/using-strides-for-an-efficient-moving-average-filter>
"""
if window_size < 1:
raise ValueError("`window_size` must be at least 1.")
if window_size > arr.shape[-1]:
raise ValueError("`window_size` is too long.")
orig = list(range(len(arr.shape)))
trans = list(range(len(arr.shape)))
trans[axis] = orig[-1]
trans[-1] = orig[axis]
arr = arr.transpose(trans)
shape = arr.shape[:-1] + (arr.shape[-1] - window_size + 1, window_size)
strides = arr.strides + (arr.strides[-1],)
strided = as_strided(arr, shape=shape, strides=strides)
if window_step > 1:
strided = strided[..., ::window_step, :]
orig = list(range(len(strided.shape)))
trans = list(range(len(strided.shape)))
trans[-2] = orig[-1]
trans[-1] = orig[-2]
trans = trans[::-1]
strided = strided.transpose(trans)
return strided
def unwindow(arr, window_size, window_step=1, axis=0):
# undo windows by broadcast
if axis != 0:
raise ValueError("axis != 0 currently unsupported")
shp = arr.shape
unwindowed = np.tile(arr[:, None, ...], (1, window_step, 1, 1))
unwindowed = unwindowed.reshape(shp[0] * window_step, *shp[1:])
return unwindowed.mean(axis=1)
def xcorr_offset(x1, x2):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
<NAME> and <NAME>. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
<NAME>, <NAME> and <NAME>. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
<NAME>, <NAME>, <NAME>. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
x1 = x1 - x1.mean()
x2 = x2 - x2.mean()
frame_size = len(x2)
half = frame_size // 2
corrs = np.convolve(x1.astype('float32'), x2[::-1].astype('float32'))
corrs[:half] = -1E30
corrs[-half:] = -1E30
offset = corrs.argmax() - len(x1)
return offset
def invert_spectrogram(X_s, step, calculate_offset=True, set_zero_phase=True):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
<NAME> and <NAME>. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
<NAME>, <NAME> and <NAME>. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
<NAME>, <NAME>, <NAME>. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
size = int(X_s.shape[1] // 2)
wave = np.zeros((X_s.shape[0] * step + size))
# Getting overflow warnings with 32 bit...
wave = wave.astype('float64')
total_windowing_sum = np.zeros((X_s.shape[0] * step + size))
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
est_start = int(size // 2) - 1
est_end = est_start + size
for i in range(X_s.shape[0]):
wave_start = int(step * i)
wave_end = wave_start + size
if set_zero_phase:
spectral_slice = X_s[i].real + 0j
else:
# already complex
spectral_slice = X_s[i]
# Don't need fftshift due to different impl.
wave_est = np.real(np.fft.ifft(spectral_slice))[::-1]
if calculate_offset and i > 0:
offset_size = size - step
if offset_size <= 0:
print("WARNING: Large step size >50\% detected! "
"This code works best with high overlap - try "
"with 75% or greater")
offset_size = step
offset = xcorr_offset(wave[wave_start:wave_start + offset_size],
wave_est[est_start:est_start + offset_size])
else:
offset = 0
wave[wave_start:wave_end] += win * wave_est[
est_start - offset:est_end - offset]
total_windowing_sum[wave_start:wave_end] += win
wave = np.real(wave) / (total_windowing_sum + 1E-6)
return wave
def iterate_invert_spectrogram(X_s, fftsize, step, n_iter=10, verbose=False,
complex_input=False):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
<NAME> and <NAME>. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
<NAME>, <NAME> and <NAME>. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
<NAME>, <NAME>, <NAME>. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
reg = np.max(X_s) / 1E8
X_best = copy.deepcopy(X_s)
try:
for i in range(n_iter):
if verbose:
print("Runnning iter %i" % i)
if i == 0 and not complex_input:
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=True)
else:
# Calculate offset was False in the MATLAB version
# but in mine it massively improves the result
# Possible bug in my impl?
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
est = stft(X_t, fftsize=fftsize, step=step, compute_onesided=False)
phase = est / np.maximum(reg, np.abs(est))
phase = phase[:len(X_s)]
X_s = X_s[:len(phase)]
X_best = X_s * phase
except ValueError:
raise ValueError("The iterate_invert_spectrogram algorithm requires"
" stft(..., compute_onesided=False),",
" be sure you have calculated stft with this argument")
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
return np.real(X_t)
def harvest_get_downsampled_signal(x, fs, target_fs):
decimation_ratio = np.round(fs / target_fs)
offset = np.ceil(140. / decimation_ratio) * decimation_ratio
start_pad = x[0] * np.ones(int(offset), dtype=np.float32)
end_pad = x[-1] * np.ones(int(offset), dtype=np.float32)
x = np.concatenate((start_pad, x, end_pad), axis=0)
if fs < target_fs:
raise ValueError("CASE NOT HANDLED IN harvest_get_downsampled_signal")
else:
try:
y0 = sg.decimate(x, int(decimation_ratio), 3, zero_phase=True)
except:
y0 = sg.decimate(x, int(decimation_ratio), 3)
actual_fs = fs / decimation_ratio
y = y0[int(offset / decimation_ratio):-int(offset / decimation_ratio)]
y = y - np.mean(y)
return y, actual_fs
def harvest_get_raw_f0_candidates(number_of_frames, boundary_f0_list,
y_length, temporal_positions, actual_fs, y_spectrum, f0_floor,
f0_ceil):
raw_f0_candidates = np.zeros((len(boundary_f0_list), number_of_frames), dtype=np.float32)
for i in range(len(boundary_f0_list)):
raw_f0_candidates[i, :] = harvest_get_f0_candidate_from_raw_event(
boundary_f0_list[i], actual_fs, y_spectrum, y_length,
temporal_positions, f0_floor, f0_ceil)
return raw_f0_candidates
def harvest_nuttall(N):
t = np.arange(0, N) * 2 * np.pi / (N - 1)
coefs = np.array([0.355768, -0.487396, 0.144232, -0.012604])
window = np.cos(t[:, None].dot(np.array([0., 1., 2., 3.])[None])).dot( coefs[:, None])
# 1D window...
return window.ravel()
def harvest_get_f0_candidate_from_raw_event(boundary_f0,
fs, y_spectrum, y_length, temporal_positions, f0_floor,
f0_ceil):
filter_length_half = int(np.round(fs / boundary_f0 * 2))
band_pass_filter_base = harvest_nuttall(filter_length_half * 2 + 1)
shifter = np.cos(2 * np.pi * boundary_f0 * np.arange(-filter_length_half, filter_length_half + 1) / float(fs))
band_pass_filter = band_pass_filter_base * shifter
index_bias = filter_length_half
# possible numerical issues if 32 bit
spectrum_low_pass_filter = np.fft.fft(band_pass_filter.astype("float64"), len(y_spectrum))
filtered_signal = np.real(np.fft.ifft(spectrum_low_pass_filter * y_spectrum))
index_bias = filter_length_half + 1
filtered_signal = filtered_signal[index_bias + np.arange(y_length).astype("int32")]
negative_zero_cross = harvest_zero_crossing_engine(filtered_signal, fs)
positive_zero_cross = harvest_zero_crossing_engine(-filtered_signal, fs)
d_filtered_signal = filtered_signal[1:] - filtered_signal[:-1]
peak = harvest_zero_crossing_engine(d_filtered_signal, fs)
dip = harvest_zero_crossing_engine(-d_filtered_signal, fs)
f0_candidate = harvest_get_f0_candidate_contour(negative_zero_cross,
positive_zero_cross, peak, dip, temporal_positions)
f0_candidate[f0_candidate > (boundary_f0 * 1.1)] = 0.
f0_candidate[f0_candidate < (boundary_f0 * .9)] = 0.
f0_candidate[f0_candidate > f0_ceil] = 0.
f0_candidate[f0_candidate < f0_floor] = 0.
return f0_candidate
def harvest_get_f0_candidate_contour(negative_zero_cross_tup,
positive_zero_cross_tup, peak_tup, dip_tup, temporal_positions):
# 0 is inteval locations
# 1 is interval based f0
usable_channel = max(0, len(negative_zero_cross_tup[0]) - 2)
usable_channel *= max(0, len(positive_zero_cross_tup[0]) - 2)
usable_channel *= max(0, len(peak_tup[0]) - 2)
usable_channel *= max(0, len(dip_tup[0]) - 2)
if usable_channel > 0:
interpolated_f0_list = np.zeros((4, len(temporal_positions)))
nz = interp1d(negative_zero_cross_tup[0], negative_zero_cross_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
pz = interp1d(positive_zero_cross_tup[0], positive_zero_cross_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
pkz = interp1d(peak_tup[0], peak_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
dz = interp1d(dip_tup[0], dip_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
interpolated_f0_list[0, :] = nz(temporal_positions)
interpolated_f0_list[1, :] = pz(temporal_positions)
interpolated_f0_list[2, :] = pkz(temporal_positions)
interpolated_f0_list[3, :] = dz(temporal_positions)
f0_candidate = np.mean(interpolated_f0_list, axis=0)
else:
f0_candidate = temporal_positions * 0
return f0_candidate
def harvest_zero_crossing_engine(x, fs, debug=False):
# negative zero crossing, going from positive to negative
x_shift = x.copy()
x_shift[:-1] = x_shift[1:]
x_shift[-1] = x[-1]
# +1 here to avoid edge case at 0
points = np.arange(len(x)) + 1
negative_going_points = points * ((x_shift * x < 0) * (x_shift < x))
edge_list = negative_going_points[negative_going_points > 0]
# -1 to correct index
fine_edge_list = edge_list - x[edge_list - 1] / (x[edge_list] - x[edge_list - 1]).astype("float32")
interval_locations = (fine_edge_list[:-1] + fine_edge_list[1:]) / float(2) / fs
interval_based_f0 = float(fs) / (fine_edge_list[1:] - fine_edge_list[:-1])
return interval_locations, interval_based_f0
def harvest_detect_official_f0_candidates(raw_f0_candidates):
number_of_channels, number_of_frames = raw_f0_candidates.shape
f0_candidates = np.zeros((int(np.round(number_of_channels / 10.)), number_of_frames))
number_of_candidates = 0
threshold = 10
for i in range(number_of_frames):
tmp = raw_f0_candidates[:, i].copy()
tmp[tmp > 0] = 1.
tmp[0] = 0
tmp[-1] = 0
tmp = tmp[1:] - tmp[:-1]
st = np.where(tmp == 1)[0]
ed = np.where(tmp == -1)[0]
count = 0
for j in range(len(st)):
dif = ed[j] - st[j]
if dif >= threshold:
tmp_f0 = raw_f0_candidates[st[j] + 1: ed[j] + 1, i]
f0_candidates[count, i] = np.mean(tmp_f0)
count = count + 1
number_of_candidates = max(number_of_candidates, count)
return f0_candidates, number_of_candidates
def harvest_overlap_f0_candidates(f0_candidates, max_number_of_f0_candidates):
n = 3 # this is the optimized parameter... apparently
number_of_candidates = n * 2 + 1
new_f0_candidates = f0_candidates[number_of_candidates, :].copy()
new_f0_candidates = new_f0_candidates[None]
# hack to bypass magic matlab-isms of allocating when indexing OOB
new_f0_candidates = np.vstack([new_f0_candidates] + (new_f0_candidates.shape[-1] - 1) * [np.zeros_like(new_f0_candidates)])
# this indexing is megagross, possible source for bugs!
all_nonzero = []
for i in range(number_of_candidates):
st = max(-(i - n), 0)
ed = min(-(i - n), 0)
f1_b = np.arange(max_number_of_f0_candidates).astype("int32")
f1 = f1_b + int(i * max_number_of_f0_candidates)
all_nonzero = list(set(all_nonzero + list(f1)))
f2 = None if ed == 0 else ed
f3 = -ed
f4 = None if st == 0 else -st
new_f0_candidates[f1, st:f2] = f0_candidates[f1_b, f3:f4]
new_f0_candidates = new_f0_candidates[all_nonzero, :]
return new_f0_candidates
def harvest_refine_candidates(x, fs, temporal_positions, f0_candidates,
f0_floor, f0_ceil):
new_f0_candidates = f0_candidates.copy()
f0_scores = f0_candidates * 0.
for i in range(len(temporal_positions)):
for j in range(len(f0_candidates)):
tmp_f0 = f0_candidates[j, i]
if tmp_f0 == 0:
continue
res = harvest_get_refined_f0(x, fs, temporal_positions[i],
tmp_f0, f0_floor, f0_ceil)
new_f0_candidates[j, i] = res[0]
f0_scores[j, i] = res[1]
return new_f0_candidates, f0_scores
def harvest_get_refined_f0(x, fs, current_time, current_f0, f0_floor,
f0_ceil):
half_window_length = np.ceil(3. * fs / current_f0 / 2.)
window_length_in_time = (2. * half_window_length + 1) / float(fs)
base_time = np.arange(-half_window_length, half_window_length + 1) / float(fs)
fft_size = int(2 ** np.ceil(np.log2((half_window_length * 2 + 1)) + 1))
frequency_axis = np.arange(fft_size) / fft_size * float(fs)
base_index = np.round((current_time + base_time) * fs + 0.001)
index_time = (base_index - 1) / float(fs)
window_time = index_time - current_time
part1 = np.cos(2 * np.pi * window_time / window_length_in_time)
part2 = np.cos(4 * np.pi * window_time / window_length_in_time)
main_window = 0.42 + 0.5 * part1 + 0.08 * part2
ext = np.zeros((len(main_window) + 2))
ext[1:-1] = main_window
diff_window = -((ext[1:-1] - ext[:-2]) + (ext[2:] - ext[1:-1])) / float(2)
safe_index = np.maximum(1, np.minimum(len(x), base_index)).astype("int32") - 1
spectrum = np.fft.fft(x[safe_index] * main_window, fft_size)
diff_spectrum = np.fft.fft(x[safe_index] * diff_window, fft_size)
numerator_i = np.real(spectrum) * np.imag(diff_spectrum) - np.imag(spectrum) * np.real(diff_spectrum)
power_spectrum = np.abs(spectrum) ** 2
instantaneous_frequency = frequency_axis + numerator_i / power_spectrum * float(fs) / 2. / np.pi
number_of_harmonics = int(min(np.floor(float(fs) / 2. / current_f0), 6.))
harmonics_index = np.arange(number_of_harmonics) + 1
index_list = np.round(current_f0 * fft_size / fs * harmonics_index).astype("int32")
instantaneous_frequency_list = instantaneous_frequency[index_list]
amplitude_list = np.sqrt(power_spectrum[index_list])
refined_f0 = np.sum(amplitude_list * instantaneous_frequency_list)
refined_f0 /= np.sum(amplitude_list * harmonics_index.astype("float32"))
variation = np.abs(((instantaneous_frequency_list / harmonics_index.astype("float32")) - current_f0) / float(current_f0))
refined_score = 1. / (0.000000000001 + np.mean(variation))
if (refined_f0 < f0_floor) or (refined_f0 > f0_ceil) or (refined_score < 2.5):
refined_f0 = 0.
redined_score = 0.
return refined_f0, refined_score
def harvest_select_best_f0(reference_f0, f0_candidates, allowed_range):
best_f0 = 0
best_error = allowed_range
for i in range(len(f0_candidates)):
tmp = np.abs(reference_f0 - f0_candidates[i]) / reference_f0
if tmp > best_error:
continue
best_f0 = f0_candidates[i]
best_error = tmp
return best_f0, best_error
def harvest_remove_unreliable_candidates(f0_candidates, f0_scores):
new_f0_candidates = f0_candidates.copy()
new_f0_scores = f0_scores.copy()
threshold = 0.05
f0_length = f0_candidates.shape[1]
number_of_candidates = len(f0_candidates)
for i in range(1, f0_length - 1):
for j in range(number_of_candidates):
reference_f0 = f0_candidates[j, i]
if reference_f0 == 0:
continue
_, min_error1 = harvest_select_best_f0(reference_f0, f0_candidates[:, i + 1], 1)
_, min_error2 = harvest_select_best_f0(reference_f0, f0_candidates[:, i - 1], 1)
min_error = min([min_error1, min_error2])
if min_error > threshold:
new_f0_candidates[j, i] = 0
new_f0_scores[j, i] = 0
return new_f0_candidates, new_f0_scores
def harvest_search_f0_base(f0_candidates, f0_scores):
f0_base = f0_candidates[0, :] * 0.
for i in range(len(f0_base)):
max_index = np.argmax(f0_scores[:, i])
f0_base[i] = f0_candidates[max_index, i]
return f0_base
def harvest_fix_step_1(f0_base, allowed_range):
# Step 1: Rapid change of f0 contour is replaced by 0
f0_step1 = f0_base.copy()
f0_step1[0] = 0.
f0_step1[1] = 0.
for i in range(2, len(f0_base)):
if f0_base[i] == 0:
continue
reference_f0 = f0_base[i - 1] * 2 - f0_base[i - 2]
c1 = np.abs((f0_base[i] - reference_f0) / reference_f0) > allowed_range
c2 = np.abs((f0_base[i] - f0_base[i - 1]) / f0_base[i - 1]) > allowed_range
if c1 and c2:
f0_step1[i] = 0.
return f0_step1
def harvest_fix_step_2(f0_step1, voice_range_minimum):
f0_step2 = f0_step1.copy()
boundary_list = harvest_get_boundary_list(f0_step1)
for i in range(1, int(len(boundary_list) / 2.) + 1):
distance = boundary_list[(2 * i) - 1] - boundary_list[(2 * i) - 2]
if distance < voice_range_minimum:
# need one more due to range not including last index
lb = boundary_list[(2 * i) - 2]
ub = boundary_list[(2 * i) - 1] + 1
f0_step2[lb:ub] = 0.
return f0_step2
def harvest_fix_step_3(f0_step2, f0_candidates, allowed_range, f0_scores):
f0_step3 = f0_step2.copy()
boundary_list = harvest_get_boundary_list(f0_step2)
multichannel_f0 = harvest_get_multichannel_f0(f0_step2, boundary_list)
rrange = np.zeros((int(len(boundary_list) / 2), 2))
threshold1 = 100
threshold2 = 2200
count = 0
for i in range(1, int(len(boundary_list) / 2) + 1):
# changed to 2 * i - 2
extended_f0, tmp_range_1 = harvest_extend_f0(multichannel_f0[i - 1, :],
boundary_list[(2 * i) - 1],
min([len(f0_step2) - 1, boundary_list[(2 * i) - 1] + threshold1]),
1, f0_candidates, allowed_range)
tmp_f0_sequence, tmp_range_0 = harvest_extend_f0(extended_f0,
boundary_list[(2 * i) - 2],
max([2, boundary_list[(2 * i) - 2] - threshold1]), -1,
f0_candidates, allowed_range)
mean_f0 = np.mean(tmp_f0_sequence[tmp_range_0 : tmp_range_1 + 1])
if threshold2 / mean_f0 < (tmp_range_1 - tmp_range_0):
multichannel_f0[count, :] = tmp_f0_sequence
rrange[count, :] = np.array([tmp_range_0, tmp_range_1])
count = count + 1
if count > 0:
multichannel_f0 = multichannel_f0[:count, :]
rrange = rrange[:count, :]
f0_step3 = harvest_merge_f0(multichannel_f0, rrange, f0_candidates,
f0_scores)
return f0_step3
def harvest_merge_f0(multichannel_f0, rrange, f0_candidates, f0_scores):
number_of_channels = len(multichannel_f0)
sorted_order = np.argsort(rrange[:, 0])
f0 = multichannel_f0[sorted_order[0], :]
for i in range(1, number_of_channels):
if rrange[sorted_order[i], 0] - rrange[sorted_order[0], 1] > 0:
# no overlapping
f0[int(rrange[sorted_order[i], 0]):int(rrange[sorted_order[i], 1])] = multichannel_f0[sorted_order[i], int(rrange[sorted_order[i], 0]):int(rrange[sorted_order[i], 1])]
cp = rrange.copy()
rrange[sorted_order[0], 0] = cp[sorted_order[i], 0]
rrange[sorted_order[0], 1] = cp[sorted_order[i], 1]
else:
cp = rrange.copy()
res = harvest_merge_f0_sub(f0, cp[sorted_order[0], 0],
cp[sorted_order[0], 1],
multichannel_f0[sorted_order[i], :],
cp[sorted_order[i], 0],
cp[sorted_order[i], 1], f0_candidates, f0_scores)
f0 = res[0]
rrange[sorted_order[0], 1] = res[1]
return f0
def harvest_merge_f0_sub(f0_1, st1, ed1, f0_2, st2, ed2, f0_candidates,
f0_scores):
merged_f0 = f0_1
if (st1 <= st2) and (ed1 >= ed2):
new_ed = ed1
return merged_f0, new_ed
new_ed = ed2
score1 = 0.
score2 = 0.
for i in range(int(st2), int(ed1) + 1):
score1 = score1 + harvest_serach_score(f0_1[i], f0_candidates[:, i], f0_scores[:, i])
score2 = score2 + harvest_serach_score(f0_2[i], f0_candidates[:, i], f0_scores[:, i])
if score1 > score2:
merged_f0[int(ed1):int(ed2) + 1] = f0_2[int(ed1):int(ed2) + 1]
else:
merged_f0[int(st2):int(ed2) + 1] = f0_2[int(st2):int(ed2) + 1]
return merged_f0, new_ed
def harvest_serach_score(f0, f0_candidates, f0_scores):
score = 0
for i in range(len(f0_candidates)):
if (f0 == f0_candidates[i]) and (score < f0_scores[i]):
score = f0_scores[i]
return score
def harvest_extend_f0(f0, origin, last_point, shift, f0_candidates,
allowed_range):
threshold = 4
extended_f0 = f0.copy()
tmp_f0 = extended_f0[origin]
shifted_origin = origin
count = 0
for i in np.arange(origin, last_point + shift, shift):
# off by 1 issues
if (i + shift) >= f0_candidates.shape[1]:
continue
bf0, bs = harvest_select_best_f0(tmp_f0,
f0_candidates[:, i + shift], allowed_range)
extended_f0[i + shift] = bf0
if extended_f0[i + shift] != 0:
tmp_f0 = extended_f0[i + shift]
count = 0
shifted_origin = i + shift
else:
count = count + 1
if count == threshold:
break
return extended_f0, shifted_origin
def harvest_get_multichannel_f0(f0, boundary_list):
multichannel_f0 = np.zeros((int(len(boundary_list) / 2), len(f0)))
for i in range(1, int(len(boundary_list) / 2) + 1):
sl = boundary_list[(2 * i) - 2]
el = boundary_list[(2 * i) - 1] + 1
multichannel_f0[i - 1, sl:el] = f0[sl:el]
return multichannel_f0
def harvest_get_boundary_list(f0):
vuv = f0.copy()
vuv[vuv != 0] = 1.
vuv[0] = 0
vuv[-1] = 0
diff_vuv = vuv[1:] - vuv[:-1]
boundary_list = np.where(diff_vuv != 0)[0]
boundary_list[::2] = boundary_list[::2] + 1
return boundary_list
def harvest_fix_step_4(f0_step3, threshold):
f0_step4 = f0_step3.copy()
boundary_list = harvest_get_boundary_list(f0_step3)
for i in range(1, int(len(boundary_list) / 2.)):
distance = boundary_list[(2 * i)] - boundary_list[(2 * i) - 1] - 1
if distance >= threshold:
continue
boundary0 = f0_step3[boundary_list[(2 * i) - 1]] + 1
boundary1 = f0_step3[boundary_list[(2 * i)]] - 1
coefficient = (boundary1 - boundary0) / float((distance + 1))
count = 1
st = boundary_list[(2 * i) - 1] + 1
ed = boundary_list[(2 * i)]
for j in range(st, ed):
f0_step4[j] = boundary0 + coefficient * count
count = count + 1
return f0_step4
def harvest_fix_f0_contour(f0_candidates, f0_scores):
f0_base = harvest_search_f0_base(f0_candidates, f0_scores)
f0_step1 = harvest_fix_step_1(f0_base, 0.008) # optimized?
f0_step2 = harvest_fix_step_2(f0_step1, 6) # optimized?
f0_step3 = harvest_fix_step_3(f0_step2, f0_candidates, 0.18, f0_scores) # optimized?
f0 = harvest_fix_step_4(f0_step3, 9) # optimized
vuv = f0.copy()
vuv[vuv != 0] = 1.
return f0, vuv
def harvest_filter_f0_contour(f0, st, ed, b, a):
smoothed_f0 = f0.copy()
smoothed_f0[:st] = smoothed_f0[st]
smoothed_f0[ed + 1:] = smoothed_f0[ed]
aaa = sg.lfilter(b, a, smoothed_f0)
bbb = sg.lfilter(b, a, aaa[::-1])
smoothed_f0 = bbb[::-1].copy()
smoothed_f0[:st] = 0.
smoothed_f0[ed + 1:] = 0.
return smoothed_f0
def harvest_smooth_f0_contour(f0):
b = np.array([0.0078202080334971724, 0.015640416066994345, 0.0078202080334971724])
a = np.array([1.0, -1.7347257688092754, 0.76600660094326412])
smoothed_f0 = np.concatenate([np.zeros(300,), f0, np.zeros(300,)])
boundary_list = harvest_get_boundary_list(smoothed_f0)
multichannel_f0 = harvest_get_multichannel_f0(smoothed_f0, boundary_list)
for i in range(1, int(len(boundary_list) / 2) + 1):
tmp_f0_contour = harvest_filter_f0_contour(multichannel_f0[i - 1, :],
boundary_list[(2 * i) - 2], boundary_list[(2 * i) - 1], b, a)
st = boundary_list[(2 * i) - 2]
ed = boundary_list[(2 * i) - 1] + 1
smoothed_f0[st:ed] = tmp_f0_contour[st:ed]
smoothed_f0 = smoothed_f0[300:-300]
return smoothed_f0
def _world_get_temporal_positions(x_len, fs):
frame_period = 5
basic_frame_period = 1
basic_temporal_positions = np.arange(0, x_len / float(fs), basic_frame_period / float(1000))
temporal_positions = np.arange(0,
x_len / float(fs),
frame_period / float(1000))
return basic_temporal_positions, temporal_positions
def harvest(x, fs):
f0_floor = 71
f0_ceil = 800
target_fs = 8000
channels_in_octave = 40.
basic_temporal_positions, temporal_positions = _world_get_temporal_positions(len(x), fs)
adjusted_f0_floor = f0_floor * 0.9
adjusted_f0_ceil = f0_ceil * 1.1
boundary_f0_list = np.arange(1, np.ceil(np.log2(adjusted_f0_ceil / adjusted_f0_floor) * channels_in_octave) + 1) / float(channels_in_octave)
boundary_f0_list = adjusted_f0_floor * 2.0 ** boundary_f0_list
y, actual_fs = harvest_get_downsampled_signal(x, fs, target_fs)
fft_size = 2. ** np.ceil(np.log2(len(y) + np.round(fs / f0_floor * 4) + 1))
y_spectrum = np.fft.fft(y, int(fft_size))
raw_f0_candidates = harvest_get_raw_f0_candidates(
len(basic_temporal_positions),
boundary_f0_list, len(y), basic_temporal_positions, actual_fs,
y_spectrum, f0_floor, f0_ceil)
f0_candidates, number_of_candidates = harvest_detect_official_f0_candidates(raw_f0_candidates)
f0_candidates = harvest_overlap_f0_candidates(f0_candidates, number_of_candidates)
f0_candidates, f0_scores = harvest_refine_candidates(y, actual_fs,
basic_temporal_positions, f0_candidates, f0_floor, f0_ceil)
f0_candidates, f0_scores = harvest_remove_unreliable_candidates(f0_candidates, f0_scores)
connected_f0, vuv = harvest_fix_f0_contour(f0_candidates, f0_scores)
smoothed_f0 = harvest_smooth_f0_contour(connected_f0)
idx = np.minimum(len(smoothed_f0) - 1, np.round(temporal_positions * 1000)).astype("int32")
f0 = smoothed_f0[idx]
vuv = vuv[idx]
f0_candidates = f0_candidates
return temporal_positions, f0, vuv, f0_candidates
def cheaptrick_get_windowed_waveform(x, fs, current_f0, current_position):
half_window_length = np.round(1.5 * fs / float(current_f0))
base_index = np.arange(-half_window_length, half_window_length + 1)
index = np.round(current_position * fs + 0.001) + base_index + 1
safe_index = np.minimum(len(x), np.maximum(1, np.round(index))).astype("int32")
safe_index = safe_index - 1
segment = x[safe_index]
time_axis = base_index / float(fs) / 1.5
window1 = 0.5 * np.cos(np.pi * time_axis * float(current_f0)) + 0.5
window1 = window1 / np.sqrt(np.sum(window1 ** 2))
waveform = segment * window1 - window1 * np.mean(segment * window1) / np.mean(window1)
return waveform
def cheaptrick_get_power_spectrum(waveform, fs, fft_size, f0):
power_spectrum = np.abs(np.fft.fft(waveform, fft_size)) ** 2
frequency_axis = np.arange(fft_size) / float(fft_size) * float(fs)
ind = frequency_axis < (f0 + fs / fft_size)
low_frequency_axis = frequency_axis[ind]
low_frequency_replica = interp1d(f0 - low_frequency_axis,
power_spectrum[ind], kind="linear",
fill_value="extrapolate")(low_frequency_axis)
p1 = low_frequency_replica[(frequency_axis < f0)[:len(low_frequency_replica)]]
p2 = power_spectrum[(frequency_axis < f0)[:len(power_spectrum)]]
power_spectrum[frequency_axis < f0] = p1 + p2
lb1 = int(fft_size / 2) + 1
lb2 = 1
ub2 = int(fft_size / 2)
power_spectrum[lb1:] = power_spectrum[lb2:ub2][::-1]
return power_spectrum
def cheaptrick_linear_smoothing(power_spectrum, f0, fs, fft_size):
double_frequency_axis = np.arange(2 * fft_size) / float(fft_size ) * fs - fs
double_spectrum = np.concatenate([power_spectrum, power_spectrum])
double_segment = np.cumsum(double_spectrum * (fs / float(fft_size)))
center_frequency = np.arange(int(fft_size / 2) + 1) / float(fft_size ) * fs
low_levels = cheaptrick_interp1h(double_frequency_axis + fs / float(fft_size) / 2.,
double_segment, center_frequency - f0 / 3.)
high_levels = cheaptrick_interp1h(double_frequency_axis + fs / float(fft_size) / 2.,
double_segment, center_frequency + f0 / 3.)
smoothed_spectrum = (high_levels - low_levels) * 1.5 / f0
return smoothed_spectrum
def cheaptrick_interp1h(x, y, xi):
delta_x = float(x[1] - x[0])
xi = np.maximum(x[0], np.minimum(x[-1], xi))
xi_base = (np.floor((xi - x[0]) / delta_x)).astype("int32")
xi_fraction = (xi - x[0]) / delta_x - xi_base
delta_y = np.zeros_like(y)
delta_y[:-1] = y[1:] - y[:-1]
yi = y[xi_base] + delta_y[xi_base] * xi_fraction
return yi
def cheaptrick_smoothing_with_recovery(smoothed_spectrum, f0, fs, fft_size, q1):
quefrency_axis = np.arange(fft_size) / float(fs)
# 0 is NaN
smoothing_lifter = np.sin(np.pi * f0 * quefrency_axis) / (np.pi * f0 * quefrency_axis)
p = smoothing_lifter[1:int(fft_size / 2)][::-1].copy()
smoothing_lifter[int(fft_size / 2) + 1:] = p
smoothing_lifter[0] = 1.
compensation_lifter = (1 - 2. * q1) + 2. * q1 * np.cos(2 * np.pi * quefrency_axis * f0)
p = compensation_lifter[1:int(fft_size / 2)][::-1].copy()
compensation_lifter[int(fft_size / 2) + 1:] = p
tandem_cepstrum = np.fft.fft(np.log(smoothed_spectrum))
tmp_spectral_envelope = np.exp(np.real(np.fft.ifft(tandem_cepstrum * smoothing_lifter * compensation_lifter)))
spectral_envelope = tmp_spectral_envelope[:int(fft_size / 2) + 1]
return spectral_envelope
def cheaptrick_estimate_one_slice(x, fs, current_f0,
current_position, fft_size, q1):
waveform = cheaptrick_get_windowed_waveform(x, fs, current_f0,
current_position)
power_spectrum = cheaptrick_get_power_spectrum(waveform, fs, fft_size,
current_f0)
smoothed_spectrum = cheaptrick_linear_smoothing(power_spectrum, current_f0,
fs, fft_size)
comb_spectrum = np.concatenate([smoothed_spectrum, smoothed_spectrum[1:-1][::-1]])
spectral_envelope = cheaptrick_smoothing_with_recovery(comb_spectrum,
current_f0, fs, fft_size, q1)
return spectral_envelope
def cheaptrick(x, fs, temporal_positions, f0_sequence,
vuv, fftlen="auto", q1=-0.15):
f0_sequence = f0_sequence.copy()
f0_low_limit = 71
default_f0 = 500
if fftlen == "auto":
fftlen = int(2 ** np.ceil(np.log2(3. * float(fs) / f0_low_limit + 1)))
#raise ValueError("Only fftlen auto currently supported")
fft_size = fftlen
f0_low_limit = fs * 3.0 / (fft_size - 3.0)
f0_sequence[vuv == 0] = default_f0
spectrogram = np.zeros((int(fft_size / 2.) + 1, len(f0_sequence)))
for i in range(len(f0_sequence)):
if f0_sequence[i] < f0_low_limit:
f0_sequence[i] = default_f0
spectrogram[:, i] = cheaptrick_estimate_one_slice(x, fs, f0_sequence[i],
temporal_positions[i], fft_size, q1)
return temporal_positions, spectrogram.T, fs
def d4c_love_train(x, fs, current_f0, current_position, threshold):
vuv = 0
if current_f0 == 0:
return vuv
lowest_f0 = 40
current_f0 = max([current_f0, lowest_f0])
fft_size = int(2 ** np.ceil(np.log2(3. * fs / lowest_f0 + 1)))
boundary0 = int(np.ceil(100 / (float(fs) / fft_size)))
boundary1 = int(np.ceil(4000 / (float(fs) / fft_size)))
boundary2 = int(np.ceil(7900 / (float(fs) / fft_size)))
waveform = d4c_get_windowed_waveform(x, fs, current_f0, current_position,
1.5, 2)
power_spectrum = np.abs(np.fft.fft(waveform, int(fft_size)) ** 2)
power_spectrum[0:boundary0 + 1] = 0.
cumulative_spectrum = np.cumsum(power_spectrum)
if (cumulative_spectrum[boundary1] / cumulative_spectrum[boundary2]) > threshold:
vuv = 1
return vuv
def d4c_get_windowed_waveform(x, fs, current_f0, current_position, half_length,
window_type):
half_window_length = int(np.round(half_length * fs / current_f0))
base_index = np.arange(-half_window_length, half_window_length + 1)
index =
|
np.round(current_position * fs + 0.001)
|
numpy.round
|
import numpy as np
import pytest
from pyqecc.qecc import *
DIFF_THRESHOLD = 1e-4
TEST_FIVE_DATA = {
"T_ind": [[1, 1, 0, 0], [1, 0, 0, 1]],
"T": [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]],
"S_ind": [3, 0, 15],
"S": [
[1, 1, 1, 1, 0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 1, 0, 0, 0],
],
"L_ind": [0, 2, 1, [0, 0], [1, 0], [0, 1]],
"L": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
],
}
TEST_SET_ERROR_PROBABILITY_DATA = {
"NUM_OF_CASE": 3,
"P_BITWISE": np.array([[1, 0, 0, 0], [0.9, 0.2, 0.1, 0], [1, 0, 0, 0]]),
"TEST_CASE_P_BITWISE": [[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 0]],
"RESULT_P_BITWISE": [0.9, 0, 0.1],
"P_IID":
|
np.array([0.7, 0.1, 0.1, 0.1])
|
numpy.array
|
from sklearn.neighbors import KernelDensity as kde
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
class gaussian_kde(object):
def __init__(self, data, bandwidth=0.03):
self.training_data = data
self.bandwidth = bandwidth
self.kde = kde(kernel='gaussian', bandwidth=self.bandwidth).fit(self.training_data)
def update(self, new_data):
self.training_data = np.concatenate([self.training_data, new_data], axis = 0)
self.kde.fit(self.training_data)
return self
def comp_prob(self, x):
if isinstance(x, (float, np.float, np.float32, np.float64)):
x = np.array([[x]])
elif isinstance(x, (list, np.ndarray)):
x = np.expand_dims(np.array(x), axis=-1)
x = np.exp(self.kde.score_samples(x))
return x.squeeze()
class object_belief(object):
def __init__(self):
self.belief = np.array([0.5, 0.5])
def update(self, score, kde):
neg_prob = kde[0].comp_prob(score)
pos_prob = kde[1].comp_prob(score)
self.belief *= [neg_prob, pos_prob]
self.belief /= self.belief.sum()
return self.belief
def reset(self):
self.belief =
|
np.array([0.5, 0.5])
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 4 12:56:40 2019
@author: andrew
Original by <NAME> (modified by <NAME>, converted to python by <NAME>)
https://www.cs.cmu.edu/afs/cs/academic/class/15782-f06/matlab/
"""
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
data = pd.read_csv("sunspot.csv", header = None)
year = data.iloc[:, 0]
relNums = data.iloc[:, 1]
year = year.to_numpy()
relNums = relNums.to_numpy()
nrmY = relNums
ymin = np.amin(nrmY)
ymax = np.amax(nrmY)
relNums = 2.0* ((nrmY - ymin) / (ymax - ymin) -0.5)
Ss = np.transpose(relNums)
idim=10
odim=len(Ss)-idim
y =[]
for i in range(0, odim, 1):
y.append(Ss[i+idim])
y = np.asarray(y)
x = np.zeros((278, 10))
for i in range(0, odim, 1):
for j in range(0,idim, 1):
x[i][j] = Ss[i -j + idim]
# Problem here for some reason x(2,1) is switched with x(1,1)
Patterns = np.transpose(x)
NINPUTS = idim; NPATS = odim; NOUTPUTS = 1; NP = odim;
Desired = y; NHIDDENS = 5; prnout=Desired;
LR = 0.001; Momentum = 0; DerivIncr = 0; deltaW1 = 0; deltaW2 = 0;
Inputs1 = np.concatenate((Patterns, np.ones((1, NPATS))))
Weights1 = 0.5 * (np.random.rand(NHIDDENS, 1+NINPUTS)-0.5)
Weights2 = 0.5 * (np.random.rand(1, 1+NHIDDENS)-0.5)
TSS_Limit = 0.02;
epochs = 100
for epoch in range(1, epochs+1, 1):
# Feedforward
NetIn1 = np.dot(Weights1, Inputs1)
Hidden=np.divide(1-2, (np.exp(2*NetIn1)+1));
Inputs2 = np.concatenate((Hidden, np.ones((1,NPATS))))
NetIn2 = np.dot(Weights2, Inputs2)
Out = NetIn2; prnout=Out;
Error = Desired - Out
TSS = sum(sum((Error**2)))
# Backpropogation
Beta = Error
bperr = np.dot(np.transpose(Weights2), Beta );
HiddenBeta = np.multiply((1.0 - Hidden**2), bperr[0:-1,:]);
dW2 = np.dot(Beta,
|
np.transpose(Inputs2)
|
numpy.transpose
|
import numpy as np
import pandas as pd
import networkx as nx
from abc import ABCMeta, abstractmethod
from sklearn.neighbors import KDTree, BallTree
class BaseConstructor(metaclass=ABCMeta):
"""
This class allows to transform a dataset into a networkx
complex network by using the several different transformation
methods
Do not use this abstract class, use the derived classes instead
"""
def __init__(self, k, epsilon, metric, leaf_size=40, sep_comp=True):
self.k = k
self.epsilon = epsilon
self.metric = metric
self.leaf_size = leaf_size
self.sep_comp = sep_comp
self.X_ = None
self.y_ = None
@abstractmethod
def add_nodes(self, X, y=None):
"""Add nodes to an existing network inside a fitted transformer
object
Parameters
----------
X : {array-like, pandas dataframe} of shape (n_samples, n_features)
The input data.
y : {ndarray, pandas series}, shape (n_samples,) or
(n_samples, n_classes), default=None
The true classes.
Notes
-----
If y is set, then the class of each node will be inserted into
the node information under the label 'class'. If sep_comp is true
then each class will be a separated component of the network.
If by some reason the transformer is not fitted, this will generate
an error.
After the new nodes are added, one should use the get_network
function to retrieve the network with the new nodes.
"""
pass
def fit(self, X, y=None):
"""Fit the constructor creating the NetworkX graph
Parameters
----------
X : {array-like, pandas dataframe} of shape (n_samples, n_features)
The input data.
y : {ndarray, pandas series}, shape (n_samples,) or
(n_samples, n_classes), default=None
The true classes.
Notes
-----
If y is set, then the class of each node will be inserted into
the node information under the label 'class' and each class will
be a separated component of the network
"""
self.G_ = nx.Graph()
self.node_count_ = 0
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Series):
X = np.array(X)
self.X_ = X
self.y_ = y
self.fitting = True
self.add_nodes(self.X_, self.y_)
self.fitting = False
return self
def transform(self):
"""Returns the networkX graph after the constructor is fitted
Returns
-----
G : NetworkX graph
The network version of the inserted tabular data
"""
try:
return self.G_
except AttributeError:
raise Exception("Transformer is not fitted")
def fit_transform(self, X, y=None):
"""Fit the constructor creating the NetworkX graph and returns the graph
Parameters
----------
X : {array-like, pandas dataframe} of shape (n_samples, n_features)
The input data.
y : {ndarray, pandas series}, shape (n_samples,) or
(n_samples, n_classes), default=None
The predicted classes.
Returns
-------
G : NetworkX graph
The network version of the inserted tabular data
Notes
-----
If y is set, then the class of each node will be inserted
into the node information under the label 'class'
"""
self.fit(X, y)
return self.G_
def get_network(self):
"""Retrieves the network generated in the constructor class
"""
return self.G_
def set_sep_comp(self, sep_comp):
self.sep_comp = sep_comp
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def get_params(self, deep=True):
return {"k": self.k, "epsilon": self.epsilon,
"metric": self.metric, "leaf_size": self.leaf_size,
"sep_comp": self.sep_comp}
class KNNConstructor(BaseConstructor):
"""
Using a k-nearest neighbors algorithm, defines an
networkx complex network
Parameters
----------
k : int, default=5
The number of neighbors to be connected to any given node
of the network.
metric : str or DistanceMetric object, default='minkowski'
The distance metric to use for the neighborhood tree. Refer
to the DistanceMetric class documentation from sklearn for a list
of available metrics
leaf_size : int, default=40
Number of points to switch to brute-force search of neighbors
sep_comp : boolean, default=True
If True and if y is not None, then each class of the dataset
will be a separated component, so nodes from one class will only
be connected to those of the same class. If False then this
restriction is not applied.
Attributes
----------
k : int
The k being used to construct the network
metric : str or DistanceMetric object
The distance metric being used
leaf_size : int
The leaf_size being used
G : NetworkX graph
The network version of the inserted tabular data
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from dataset_constructors import KNNConstructor
>>> X, y = load_iris(return_X_y = True)
>>> knn_c = KNNConstructor(k=3)
>>> knn_c.fit(X, y)
>>> G = knn_c.transform()
>>> print(len(G.nodes))
150
Notes
-----
References
----------
<NAME> & <NAME>. (2016). Machine Learning in
Complex Networks. 10.1007/978-3-319-17290-3.
"""
def __init__(self, k=5, metric='minkowski', leaf_size=40, sep_comp=True):
super().__init__(k, None, metric, leaf_size, sep_comp)
def add_nodes(self, X, y=None):
"""Add nodes to an existing network inside a fitted transformer
object
Parameters
----------
X : {array-like, pandas dataframe} of shape (n_samples, n_features)
The input data.
y : {ndarray, pandas series}, shape (n_samples,) or
(n_samples, n_classes), default=None
The true classes.
Notes
-----
If y is set, then the class of each node will be inserted into
the node information under the label 'class'. If sep_comp is true
then each class will be a separated component of the network.
If by some reason the transformer is not fitted, this will generate
an error.
After the new nodes are added, one should use the get_network
function to retrieve the network with the new nodes.
"""
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Series):
X = np.array(X)
# Each class will be a separated component
if self.y_ is None:
classes = [0]
else:
classes = np.unique(self.y_)
for class_ in classes:
if self.y_ is None:
nodes = [node for node in range(self.node_count_, len(X) + self.node_count_)] # noqa: E501
X_ = X
self.tree_ = _tree_selector(self.X_, self.leaf_size)
label_ind = [i for i in range(len(X))]
else:
if self.sep_comp:
# Verifies if someone to be added is from class
X_component = np.take(X, np.where(y == class_), axis=0)[0]
if len(X_component) == 0:
continue
# Calculating the distances for guys on the same component
if self.fitting:
total_y = self.y_
total_X = self.X_
else:
total_y = np.append(self.y_, y)
total_X = np.vstack((self.X_, X))
label_ind = np.where(total_y == class_)
X_ = np.take(total_X, label_ind, axis=0)[0]
nodes = [(node, {'class': class_}) for node in range(self.node_count_, len(X_component) + self.node_count_)] # noqa: E501
label_ind = label_ind[0].tolist()
else:
X_ = X
label_ind = [i for i in range(len(X))]
nodes = [(node, {'class': y[node - self.node_count_]}) for node in range(self.node_count_, len(X_) + self.node_count_)] # noqa: E501
self.tree_ = _tree_selector(X_, self.leaf_size)
neighbors = [self.tree_.query(x.reshape(1, -1), k=self.k+1, return_distance=True) for x in X_] # noqa: E501
distances_aux = [neigh[0] for neigh in neighbors]
indexes_aux = [neigh[1] for neigh in neighbors]
indexes = [node[0] for node in indexes_aux]
distances = [node[0] for node in distances_aux]
edges = [(label_ind[node[0]], label_ind[node[j]], distances[i][j]) for i, node in enumerate(indexes) for j in range(1, self.k+1)] # noqa: E501
self.G_.add_nodes_from(nodes)
self.G_.add_weighted_edges_from(edges)
self.node_count_ += len(nodes)
if self.sep_comp is False:
break
if not np.array_equal(self.X_, X):
self.X_ = np.vstack((self.X_, X))
if self.y_ is not None:
self.y_ = np.append(self.y_, y)
class EpsilonRadiusConstructor(BaseConstructor):
"""
Using an epsilon-radius algorithm, defines an
networkx complex network
Parameters
----------
epsilon : float
The radius to define which neighbors should be connected.
metric : str or DistanceMetric object, default='minkowski'
The distance metric to use for the neighborhood tree. Refer
to the DistanceMetric class documentation from sklearn for a list
of available metrics
leaf_size : int, default=40
Number of points to switch to brute-force search of neighbors
sep_comp : boolean, default=True
If True and if y is not None, then each class of the dataset
will be a separated component, so nodes from one class will only
be connected to those of the same class. If False then this
restriction is not applied.
Attributes
----------
epsilon : float
The epsilon being used to construct the network
metric : str or DistanceMetric object
The distance metric being used
leaf_size : int
The leaf_size being used
G : NetworkX graph
The network version of the inserted tabular data
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from dataset_constructors import EpsilonRadiusConstructor
>>> X, y = load_iris(return_X_y = True)
>>> eps_c = EpsilonRadiusConstructor(epsilon=3)
>>> eps_c.fit(X, y)
>>> G = eps_c.transform()
>>> print(len(G.nodes))
150
Notes
-----
References
----------
<NAME> & <NAME>. (2016). Machine Learning in
Complex Networks. 10.1007/978-3-319-17290-3.
"""
def __init__(self, epsilon=0.1, metric='minkowski', leaf_size=40,
sep_comp=True):
super().__init__(None, epsilon, metric, leaf_size, sep_comp)
def add_nodes(self, X, y=None):
"""Add nodes to an existing network inside a fitted transformer
object
Parameters
----------
X : {array-like, pandas dataframe} of shape (n_samples, n_features)
The input data.
y : {ndarray, pandas series}, shape (n_samples,) or
(n_samples, n_classes), default=None
The true classes.
Notes
-----
If y is set, then the class of each node will be inserted into
the node information under the label 'class'. If sep_comp is true
then each class will be a separated component of the network.
If by some reason the transformer is not fitted, this will generate
an error.
After the new nodes are added, one should use the get_network
function to retrieve the network with the new nodes.
"""
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Series):
X = np.array(X)
# Each class will be a separated component
if self.y_ is None:
classes = [0]
else:
classes = np.unique(self.y_)
for class_ in classes:
if self.y_ is None:
nodes = [node for node in range(self.node_count_, len(X) + self.node_count_)] # noqa: E501
X_ = X
self.tree_ = _tree_selector(self.X_, self.leaf_size)
label_ind = [i for i in range(len(X))]
else:
if self.sep_comp:
# Verifies if someone to be added is from class
X_component = np.take(X, np.where(y == class_), axis=0)[0]
if len(X_component) == 0:
continue
# Calculating the distances for guys on the same component
if self.fitting:
total_y = self.y_
total_X = self.X_
else:
total_y = np.append(self.y_, y)
total_X = np.vstack((self.X_, X))
label_ind = np.where(total_y == class_)
X_ = np.take(total_X, label_ind, axis=0)[0]
nodes = [(node, {'class': class_}) for node in range(self.node_count_, len(X_component) + self.node_count_)] # noqa: E501
label_ind = label_ind[0].tolist()
else:
X_ = X
label_ind = [i for i in range(len(X))]
nodes = [(node, {'class': y[node - self.node_count_]}) for node in range(self.node_count_, len(X_) + self.node_count_)] # noqa: E501
self.tree_ = _tree_selector(X_, self.leaf_size)
neighbors = [self.tree_.query_radius(x.reshape(1, -1), r=self.epsilon, return_distance=True, sort_results=True) for x in X_] # noqa: E501
indexes_aux = [neigh[0] for neigh in neighbors]
distances_aux = [neigh[1] for neigh in neighbors]
distances = [node[0] for node in distances_aux]
indexes = [node[0] for node in indexes_aux]
edges = [(label_ind[node[0]], label_ind[node[j]], distances[i][j]) for i, node in enumerate(indexes) for j in range(1, len(node))] # noqa: E501
self.G_.add_nodes_from(nodes)
self.G_.add_weighted_edges_from(edges)
# Removing self-loops
self.G_.remove_edges_from(nx.selfloop_edges(self.G_))
self.node_count_ += len(nodes) + 1
if self.sep_comp is False:
break
if not np.array_equal(self.X_, X):
self.X_ = np.vstack((self.X_, X))
if self.y_ is not None:
self.y_ = np.vstack((self.y_, y))
class KNNEpislonRadiusConstructor(BaseConstructor):
"""
Using a k-nearest neighbors algorithm, defines an
networkx complex network
Parameters
----------
k : int, default=5
The number of neighbors to be connected to any given node
of the network.
epsilon : float, default=0.1
The radius to define which neighbors should be connected.
metric : str or DistanceMetric object, default='minkowski'
The distance metric to use for the neighborhood tree. Refer
to the DistanceMetric class documentation from sklearn for a list
of available metrics
leaf_size : int, default=40
Number of points to switch to brute-force search of neighbors
sep_comp : boolean, default=True
If True and if y is not None, then each class of the dataset
will be a separated component, so nodes from one class will only
be connected to those of the same class. If False then this
restriction is not applied.
Attributes
----------
k : int
The k being used to construct the network
epsilon : float
The epsilon being used to construct the network
metric : str or DistanceMetric object
The distance metric being used
leaf_size : int
The leaf_size being used
G : NetworkX graph
The network version of the inserted tabular data
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from dataset_constructors import KNNEpislonRadiusConstructor
>>> X, y = load_iris(return_X_y = True)
>>> ke_c = KNNEpislonRadiusConstructor(k=3, epsilon=0.3)
>>> ke_c.fit(X, y)
>>> G = ke_c.transform()
>>> print(len(G.nodes))
150
Notes
-----
The KNN is used for sparse regions while the Epsilon-Radius is used for
dense regions. This approach hopes to overcome the limitations of the
individual components, allowing for a better network construction. The
equation that runs this method is defined as:
``neighbor(v_i) = epsilon-radius(v_i) if |epsilon-radius(v_i)| >
k else knn(v_i)``
References
----------
<NAME>.; <NAME> (2012). Network-Based High Level Data
Classification., 23(6), –. doi:10.1109/tnnls.2012.2195027
<NAME> & <NAME>. (2016). Machine Learning in Complex Networks.
10.1007/978-3-319-17290-3.
"""
def __init__(self, k=5, epsilon=0.1, metric='minkowski', leaf_size=40,
sep_comp=True):
super().__init__(k, epsilon, metric, leaf_size, sep_comp)
def add_nodes(self, X, y=None):
"""Add nodes to an existing network inside a fitted transformer
object
Parameters
----------
X : {array-like, pandas dataframe} of shape (n_samples, n_features)
The input data.
y : {ndarray, pandas series}, shape (n_samples,) or
(n_samples, n_classes), default=None
The true classes.
Notes
-----
If y is set, then the class of each node will be inserted into
the node information under the label 'class'. If sep_comp is true
then each class will be a separated component of the network.
If by some reason the transformer is not fitted, this will generate
an error.
After the new nodes are added, one should use the get_network
function to retrieve the network with the new nodes.
"""
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Series):
X = np.array(X)
# Each class will be a separated component
if self.y_ is None:
classes = [0]
else:
classes = np.unique(self.y_)
for class_ in classes:
if self.y_ is None:
nodes = [node for node in range(self.node_count_, len(X) + self.node_count_)] # noqa: E501
X_ = X
self.tree_ = _tree_selector(self.X_, self.leaf_size)
label_ind = [i for i in range(len(X))]
else:
if self.sep_comp:
# Verifies if someone to be added is from class
X_component = np.take(X, np.where(y == class_), axis=0)[0]
if len(X_component) == 0:
continue
# Calculating the distances for guys on the same component
if self.fitting:
total_y = self.y_
total_X = self.X_
else:
total_y = np.append(self.y_, y)
total_X = np.vstack((self.X_, X))
label_ind = np.where(total_y == class_)
X_ = np.take(total_X, label_ind, axis=0)[0]
nodes = [(node, {'class': class_}) for node in range(self.node_count_, len(X_component) + self.node_count_)] # noqa: E501
label_ind = label_ind[0].tolist()
else:
X_ = X
label_ind = [i for i in range(len(X))]
nodes = [(node, {'class': y[node - self.node_count_]}) for node in range(self.node_count_, len(X_) + self.node_count_)] # noqa: E501
self.tree_ = _tree_selector(X_, self.leaf_size)
radius_neighbors = [self.tree_.query_radius(x.reshape(1, -1), r=self.epsilon, return_distance=True, sort_results=True) for x in X_] # noqa: E501
k_neighbors = [self.tree_.query(x.reshape(1, -1), k=self.k+1, return_distance=True) for x in X_] # noqa: E501
# Auxiliar lists
indexes_radius_aux = [neigh[0] for neigh in radius_neighbors]
distances_radius_aux = [neigh[1] for neigh in radius_neighbors] # noqa: E501
distances_radius = [node[0] for node in distances_radius_aux]
indexes_radius = [node[0] for node in indexes_radius_aux]
distances_k_aux = [neigh[0] for neigh in k_neighbors]
indexes_k_aux = [neigh[1] for neigh in k_neighbors] # noqa: E501
indexes_k = [node[0] for node in indexes_k_aux]
distances_k = [node[0] for node in distances_k_aux]
# Nodes with neighbors inside radius greater than k
greater_than_k_indices = [index for index, neighbors in enumerate(indexes_radius) if len(neighbors) - 1 > self.k] # noqa: E501
final_k = [neighbors for index, neighbors in enumerate(indexes_k) if index not in greater_than_k_indices] # noqa: E501
final_radius = [neighbors for index, neighbors in enumerate(indexes_radius) if index in greater_than_k_indices] # noqa: E501
final_k_distances = [dist for index, dist in enumerate(distances_k) if index not in greater_than_k_indices] # noqa: E501
final_radius_distances = [distance for index, distance in enumerate(distances_radius) if index in greater_than_k_indices] # noqa: E501
assert len(final_k) + len(final_radius) == len(nodes)
edges_radius = [(label_ind[node[0]], label_ind[node[j]], final_radius_distances[i][j]) for i, node in enumerate(final_radius) for j in range(1, len(node))] # noqa: E501
edges_k = [(label_ind[node[0]], label_ind[node[j]], final_k_distances[i][j]) for i, node in enumerate(final_k) for j in range(1, self.k+1)] # noqa: E501
self.G_ = nx.Graph()
self.G_.add_nodes_from(nodes)
self.G_.add_weighted_edges_from(edges_radius)
self.G_.add_weighted_edges_from(edges_k)
# Removing self-loops
self.G_.remove_edges_from(nx.selfloop_edges(self.G_))
self.node_count_ += len(nodes) + 1
if self.sep_comp is False:
break
if not np.array_equal(self.X_, X):
self.X_ =
|
np.vstack((self.X_, X))
|
numpy.vstack
|
#- This simulation with gpu (with the below parameters) took 14h
#- In this experiment we also set lr from 0.01 to 0.0025
# but here with masking is like the no masking case (exp2a-d) with 0.03 to 0.0075
# thefactor of corecction is approx 3.
# So: probably we should set the next time for masking case: lr=0.005-0.001
# ssh no100
# screen -S exp1d
# cd /export/lv4/user/jfajardourbina/dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/notebooks/convlstm/predict_displacement
# conda activate phd_parcelsv221
# python3 convlstm_dws_exp1d_wind_bathymetry_to_pred_displacement_standarized_3std_train_test_adaptive_lr_masking_loss_batch_size_continuous_lstm_states_gpu.py &
# to comeback: screen -r exp1d
import os
import sys
import numpy as np
import torch
import torch.nn.functional
from torch.autograd import Variable
import matplotlib.pyplot as plt
from copy import deepcopy
import matplotlib as mpl
import glob
import xarray as xr
import dask as da
from tqdm import tqdm
# import convlstm---
home_dir = "/export/lv4/user/jfajardourbina/"
ml_dir=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/"
convlstm_model_dir=f"{ml_dir}src"
sys.path.append(convlstm_model_dir)
import convlstm
import convlstm_continuous_states
#path to files---
home_dir = "/export/lv4/user/jfajardourbina/"
ml_dir=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/"
dir_post_proc_data=f"{ml_dir}post_proc_data/"
#
dir_displacement="net_displacement/"
dir_interp_wind="wind/"
dir_interp_bathymetry="bathymetry/"
file_interp_bathymetry="bathymetry_interp_to_particle_grid_for_convlstm.nc"
#for output after train and test---
exp="exp1d"
dir_convlstm_model_out="ouput_convlstm_model_data/"
case_train="training"; file_out_train=f"{exp}_train.nc"
case_test="testing"; file_out_test=f"{exp}_test.nc"
#for plotting---
#dir_wind="{home_dir}dws_ulf_getm_2D_depth_avg/data/atmosphere/" #winds
dir_dws_bound=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_dws_boundaries/" #DWS boundarie with contour0
file_dws_bound0="dws_boundaries_contour0.nc"
dir_topo=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_bathy_grid/" #topo data
file_topo="DWS200m.2012.v03.nc"
#
#parameters
#npa_per_dep=12967 #number of particles per deployment
m2=int(12.42*3600+2) #period in seconds
#dx=400/1e3;dy=400/1e3 #particle grid reso
#
#open DWS contours
dsb0=xr.open_dataset(dir_dws_bound+file_dws_bound0)
bdr_dws0=dsb0.bdr_dws.values #points that define DWS with contour0
#
#open topo file
dsto=xr.open_dataset(dir_topo+file_topo)
xct0=dsto.xc.min().values/1e3; yct0=dsto.yc.min().values/1e3 #=(0,0)
mask_topo=dsto.bathymetry.copy(); mask_topo=xr.where(np.isfinite(mask_topo),1,0) #mask ocean=1, land=0
#Hyper-parameter of neural network---
input_channels = 3 # number of input channels: u10,v10 wind
output_channels = 2 #number of output channels: dx, dy displacement
#hidden_channels = [6, 3, output_channels] # the last digit is the output channel of each ConvLSTMCell (so we are using 3 layers)
hidden_channels = [4, output_channels] # the last digit is the output channel of each ConvLSTMCell (so we are using 2 layers)
kernel_size = 3 #3, does not work with kernel=2
mini_batch_size = 25 #Amount of samples for performing forward-backward propagation during 1 iteration (total iterations per epoch = train samples / mini_batch_size)
#mini_batch_size = 706 #aproox 1year. Amount of samples for performing forward-backward propagation during 1 iteration (total iterations per epoch = train samples / mini_batch_size)
#mini_batch_size = -1 #use all data for performing forward-backward propagation at once during 1 epoch. Memory issues for large samples during training.
num_epochs = 200 #3000
#learning parameters:
adaptive_learning=False #False: lr=learning_rate; True: lr=[learning_rate - learning_rate_end]
#learning_rate = 0.0025 #too slow convergence if used since the beginning of simulation
learning_rate = 0.01 #initial lr
learning_rate_end=0.0025 #final lr
save_data_from_model = True #save some outputs from model in NetCDF Format
#
std_fac_dis=3 #standarize using "std_fac_dis" times the standard deviation
std_fac_wind=3 #standarize using "std_fac_wind" times the standard deviation
#
#if: hidden_channels = [6, 3, output_channels]
#the model will create 6GB of data in GPU memory after 400 training time steps
#so, after nt_steps=2000 (around 3y) we will exceed the mem limit of GPU (around 30GB)
#2.5years for training needs approx 26GB for the above model and with: input_channels = 2; output_channels = 2; kernel_size = 3
#this is because in every time step the graph of computations is stored in the cummulative lost (after calling the model), to perform then a backpropagation
#for this reason is sometimes important to use mini_batches and perform backpropagation after finish with 1.
#then use the next mini_batch and so on until using all the data and finishes 1 eppoch.
#open files----
#open net displacement files---
files_displacement=sorted(glob.glob(f'{dir_post_proc_data}{dir_displacement}/*.nc',recursive=True))
#files_displacement=files_displacement[29:31] #2009-2010
#concat all the files
dsdis=xr.open_mfdataset(files_displacement,concat_dim="time",parallel='True',chunks={'time': -1},
decode_cf=True, decode_times=True)#.load() #this are default decodes
#data_vars='minimal', coords='minimal', compat='override') #takes 1second more with this, see https://xarray.pydata.org/en/stable/io.html#reading-multi-file-datasets
#open interp files for wind---
files_interp_wind=sorted(glob.glob(f'{dir_post_proc_data}{dir_interp_wind}/*.nc',recursive=True))
#files_interp_wind=files_interp_wind[29:31]
#concat all the files
dswi=xr.open_mfdataset(files_interp_wind,concat_dim="time",parallel='True',chunks={'time': -1},
decode_cf=True, decode_times=True)#.load() #this are default decodes
#data_vars='minimal', coords='minimal', compat='override') #takes 1second more with this, see https://xarray.pydata.org/en/stable/io.html#reading-multi-file-datasets
#open interp bathymetry---
dsh=xr.open_dataset(dir_post_proc_data+dir_interp_bathymetry+file_interp_bathymetry).load()
#set bathymetry as input data---
in_h=dsh.bathymetry.values
#set training data---
#
#inputs---
#in_tini_train="2004-01-01"; in_tend_train="2009-12-31"
in_tini_train="2009-11-01"; in_tend_train="2009-12-31"
#u10,v10 wind in model coordinates---
#dswi_train=dswi.sel(time=slice("2009-06-01","2011-12-31"))#,x=slice(70000,80000),y=slice(60000,70000))
dswi_train=dswi.sel(time=slice(in_tini_train,in_tend_train))#,x=slice(60000,80000),y=slice(60000,70000))
in_u10_train,in_v10_train=da.compute(dswi_train.u10.values.astype('float32'),dswi_train.v10.values.astype('float32'))
#
#outputs---
#out_tini_train="2004-01-01"; out_tend_train="2009-12-31"
out_tini_train="2009-11-01"; out_tend_train="2009-12-31"
#dx,dy displacement in model coordinates---
#dsdis_train=dsdis.sel(time=slice("2009-06-01","2011-12-31"))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_train=dsdis_train.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
dsdis_train=dsdis.sel(time=slice(out_tini_train,out_tend_train))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_train=dsdis_train.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
out_dx_train,out_dy_train=da.compute(dsdis_train.dx.values.astype('float32'),dsdis_train.dy.values.astype('float32'))
#
times_train=dsdis_train.time.values
nt_train,ny,nx=out_dx_train.shape
print(times_train[[0,-1]],out_dx_train.shape)
#set testing data---
#
#inputs---
in_tini_test="2010-01-01"; in_tend_test="2010-02-28"
#u10,v10 wind in model coordinates---
#dswi_test=dswi.sel(time=slice("2012-01-01",None))#,x=slice(70000,80000),y=slice(60000,70000))
dswi_test=dswi.sel(time=slice(in_tini_test,in_tend_test))#,x=slice(60000,80000),y=slice(60000,70000))
in_u10_test,in_v10_test=da.compute(dswi_test.u10.values.astype('float32'),dswi_test.v10.values.astype('float32'))
#
#outputs---
out_tini_test="2010-01-01"; out_tend_test="2010-02-28"
#dx,dy displacement in model coordinates---
#dsdis_test=dsdis.sel(time=slice("2012-01-01",None))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_test=dsdis_test.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
dsdis_test=dsdis.sel(time=slice(out_tini_test,out_tend_test))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_test=dsdis_test.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
out_dx_test,out_dy_test=da.compute(dsdis_test.dx.values.astype('float32'),dsdis_test.dy.values.astype('float32'))
#
times_test=dsdis_test.time.values
nt_test,ny,nx=out_dx_test.shape
print(times_test[[0,-1]],out_dx_test.shape)
#for plotting maps of predictions---
#mask: ocean=1, land=nan
mask=out_dx_train[0,...]*1.; mask[np.isfinite(mask)]=1.; mask[np.isnan(mask)]=np.nan
xx=dsdis_train.x/1e3; yy=dsdis_train.y/1e3; xx,yy=np.meshgrid(xx,yy)
#for masking values on land when computing loss---
mask_torch=torch.tensor(np.where(np.isnan(mask),0,1)[np.newaxis,np.newaxis,...]*np.ones((output_channels,ny,nx)))*1.
mask_numpy=mask_torch.numpy()*1.
def standarization(var,fac=3):
mean=np.nanmean(var)
std=np.nanstd(var)*fac #using 3 times std (seems to works better than just 1std)
var[np.isnan(var)]=0. #fill with 0 in case of nan. This is modifing our input array
return ((var-mean)/std),mean,std #.astype('float32')
def de_standarization(var,mean,std):
return (var*std+mean) #.astype('float32')
def min_max_normalization(var):
minn=np.nanmin(var); maxx=np.nanmax(var)
var[np.isnan(var)]=0. #fill with 0 in case of nan. This is modifing our input array
return (var-minn)/(maxx-minn),minn,maxx #.astype('float32')
def de_min_max_normalization(var,minn,maxx):
return var*(maxx-minn)+minn #.astype('float32')
#min-max normalization of data---
#input: bathymetry
in_h, in_h_min, in_h_max = min_max_normalization(in_h)
#standarization of data---
#training---
#inputs
in_u10_train, in_u10_mean_train, in_u10_std_train = standarization(in_u10_train,std_fac_wind)
in_v10_train, in_v10_mean_train, in_v10_std_train = standarization(in_v10_train,std_fac_wind)
#outputs
out_dx_train, out_dx_mean_train, out_dx_std_train = standarization(out_dx_train,std_fac_dis)
out_dy_train, out_dy_mean_train, out_dy_std_train = standarization(out_dy_train,std_fac_dis)
print("train info:")
print(f"steps={nt_train}; (ny,nx)=({ny},{nx})")
print("input")
print(f"u10_mean, u10_std*{std_fac_wind}, v10_mean, v10_std*{std_fac_wind}:")
print(in_u10_mean_train, in_u10_std_train, in_v10_mean_train, in_v10_std_train)
print("output")
print(f"dx_mean, dx_std*{std_fac_dis}, dy_mean, dy_std*{std_fac_dis}:")
print(out_dx_mean_train, out_dx_std_train, out_dy_mean_train, out_dy_std_train)
print()
#testing---
#inputs
in_u10_test, in_u10_mean_test, in_u10_std_test = standarization(in_u10_test,std_fac_wind)
in_v10_test, in_v10_mean_test, in_v10_std_test = standarization(in_v10_test,std_fac_wind)
#outputs
out_dx_test, out_dx_mean_test, out_dx_std_test = standarization(out_dx_test,std_fac_dis)
out_dy_test, out_dy_mean_test, out_dy_std_test = standarization(out_dy_test,std_fac_dis)
print("test info:")
print(f"steps={nt_test}; (ny,nx)=({ny},{nx})")
print("input")
print(f"u10_mean, u10_std*{std_fac_wind}, v10_mean, v10_std*{std_fac_wind}:")
print(in_u10_mean_test, in_u10_std_test, in_v10_mean_test, in_v10_std_test)
print("output")
print(f"dx_mean, dx_std*{std_fac_dis}, dy_mean, dy_std*{std_fac_dis}:")
print(out_dx_mean_test, out_dx_std_test, out_dy_mean_test, out_dy_std_test)
print()
#MODEL configuration and helper functions---
#loss functions with and without masking---
class initialization:
def __init__(self, masking=False, mask=None):
self.masking=masking
self.mask=mask
class loss_function:
class mse(initialization):
#we call this function without using its name
def __call__(self, predict=torch.zeros(1), target=torch.zeros(1)):
if self.masking:
#masking land points---
#
#- the masking affect:
# the value of the total loss (that only includes points inside DWS) and hence the last gradient of the backpropagation
# loss=sum(prediction-output)**2/N; dlos/dpred=2*sum(prediction-output)/N,
# with masking N is smaller because we dont consider land points, so seems that its like increasing the overall lr
#- similar effect to masking without using it:
# if we use another custom loss like torch.nn.MSELoss(reduction='sum')
# masking is irrelevant since we dont divide with N
#
#disregard land points (=0) for the mean, so the loss value will increase
#mask_torch: 0=land, 1=ocean
#however, because we only have particles inside DWS, mask_torch=0 for the land and all points outside DWS
loss_val = torch.mean(((predict-target)[self.mask==1])**2)
else:
#original---
loss_val = torch.mean((predict-target)**2) #=torch.nn.MSELoss()
#
return loss_val
class mse_numpy(initialization):
#we call this function without using its name
def __call__(self, predict=np.zeros(1), target=np.zeros(1)):
if self.masking:
#masking land points---
#disregard land points (=0) for the mean, so the loss value will increase
#probably because land points decrease the loss, the model don't perform so well
#mask_torch: 0=land, 1=ocean
#however, because we only have particles inside DWS, mask_torch=0 all points except inside it
loss_val = np.mean(((predict-target)[self.mask==1])**2)
else:
#original---
loss_val = np.mean((predict-target)**2) #=torch.nn.MSELoss()
#
return loss_val
#get times for backward propagation when using mini-batch---
def get_times_for_backward(nt,mini_batch_size=30):
#times relative to t=0
if nt < mini_batch_size: mini_batch_size = nt
t_last = np.mod(nt,mini_batch_size) #remainder of nt
t_backward=np.arange(mini_batch_size,nt+1,mini_batch_size)-1
#iterations = int(nt/mini_batch_size)
#t_backward=np.arange(iterations)*mini_batch_size+mini_batch_size-1
if t_backward[-1]!=nt-1: t_backward[-1]+=t_last
return t_backward
#training---
def training(epoch,num_epochs,nt,t_backward,model):
# Clear stored gradient
model.zero_grad()
optimizer.zero_grad()
# loop through all timesteps
predict=[]; loss0=0. #; pred_bug=[]
for t in range(nt):
#stack data---
#
#old method using torch.autograd.Variable and .view()---
#data_in=np.stack((in_u10_train[t,...],in_v10_train[t,...]))
#data_out=np.stack((out_dx_train[t,...],out_dy_train[t,...]))
#data_in = torch.autograd.Variable(torch.Tensor(data_in).view(-1,input_channels,ny,nx)).to(device)
#data_out = torch.autograd.Variable(torch.Tensor(data_out).view(-1,input_channels,ny,nx)).to(device)
#
#new method using torch.tensor and np.newaxis (the same results as above)---
data_in = torch.tensor(np.stack((in_u10_train[t,...],
in_v10_train[t,...],
in_h),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
data_out = torch.tensor(np.stack((out_dx_train[t,...],
out_dy_train[t,...]),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
# Forward process and loss for:---
# - the entire batch (all the samples). Problems with memory.
# - mini-batch (subset of the full samples).
#
if t==0 or t in t_backward+1:
if t==0: # start hidden and cell states from a normal distribution
predict0, _ = model(data_in, 0)
mae0 = np.mean(abs(predict0-data_out).detach().cpu().numpy())
#mape0 = np.mean( abs((predict0-data_out)/data_out).detach().numpy() ) #problems with mape if denominator = 0
else: #use the last state of the previous mini-batch
if epoch == num_epochs-1: print(f"give init states to model at time-step: {t}")
#print(f"give init states to model at time-step: {t}")
predict0, _ = model(data_in, 0, states) #data_in=(1,input_channels,ny,nx) #predict0=(1,output_channels,ny,nx)
#loss
lossbp0 = loss_fn(predict0, data_out) #data_out=(1,output_channels,ny,nx)
tt0=t
#check if prediction uses random-init states after a backward propgation of a mini-batch
#if epoch == num_epochs-1: pred_bug.append(np.squeeze(predict0.detach().cpu().numpy()))
else:
if t in t_backward:
if epoch == num_epochs-1: print(f"getting states from model at time-step: {t}")
#print(f"getting states from model at time-step: {t}")
predict0, states = model(data_in, t-tt0)
else:
predict0, _ = model(data_in, t-tt0)
#loss
lossbp0 += loss_fn(predict0, data_out)
mae0 += np.mean(abs(predict0-data_out).detach().cpu().numpy())
#mape0 += np.mean( abs((predict0-data_out)/data_out).detach().numpy() )
#Backward propagation for:---
# - the entire batch (all the samples). Problems with memory.
# - mini-batch (subset of the full samples).
if t in t_backward:
if epoch == num_epochs-1:
print(f"performing backward propagation at time-step: {t}")
# Zero out gradient, else they will accumulate between epochs---
model.zero_grad()
optimizer.zero_grad()
# Backward pass---
lossbp0.backward()
# Update parameters---
optimizer.step() #to initiate gradient descent
# Zero out gradient again, in case starting the model for the next mini-batch
model.zero_grad()
optimizer.zero_grad()
#
loss0 += lossbp0.item(); del lossbp0
#cumulative loss from all the time steps (the loss we use for backward propagation)---
if epoch % 50 == 0:
print("Train epoch ", epoch, "; mean(MSE(t)) = ", loss0/nt*std_fac_dis**2, "; mean(MAE(t)) = ", mae0/nt*std_fac_dis)
#print(np.sum(abs((states[-1][0]-predict0).detach().cpu().numpy())))
# save lr
lr0=optimizer.param_groups[0]["lr"]
#predict train data for the last epoch, after updating model parameters
if epoch == num_epochs-1:
with torch.no_grad():
for t in range(nt):
data_in = torch.from_numpy(np.stack((in_u10_train[t,...],
in_v10_train[t,...],
in_h),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
predict0, _ = model(data_in, t) #data_in=(1,input_channels,ny,nx) predict=(1,output_channels,ny,nx)
predict0 = np.squeeze(predict0.detach().cpu().numpy()) #delete the first dim=1
predict.append(predict0) #save the predictions for the last epoch
predict=np.array(predict) #(nt,output_channels,ny,nx)
#predict=np.reshape(predict,(nt,output_channels,ny,nx)) #(nt,output_channels,ny,nx)
return loss0, mae0, predict, model, lr0 #,np.array(pred_bug)
#testing---
def testing(epoch,num_epochs,nt,model):
#this function avoid gradient storage (memory increases with time despite setting requires_grad=False)
#https://discuss.pytorch.org/t/requires-grad-or-no-grad-in-prediction-phase/35759/2
with torch.no_grad():
predict=[]
# loop through all timesteps
for t in range(nt):
# Forward process---
#by default torch tensor: requires_grad=False---
data_in = torch.tensor(np.stack((in_u10_test[t,...],
in_v10_test[t,...],
in_h),axis=0)[np.newaxis,...],requires_grad=False).to(device) #(1,input_channels,ny,nx)
#
predict0, _ = model(data_in, t) #data_in=(1,input_channels,ny,nx) pred_y=(1,output_channels,ny,nx)
predict0 = predict0.detach().cpu().numpy()
# Compute loss (and the cumulative loss from all the time steps)---
data_out = np.stack((out_dx_test[t,...],
out_dy_test[t,...]),axis=0)[np.newaxis,...] #(1,input_channels,ny,nx)
if t == 0:
#loss0 = np.mean((predict0-data_out)**2)
loss0 = loss_fn_np(predict0, data_out) #MSE numpy loss with mask on land points
mae0 = np.mean(abs(predict0-data_out))
#mape0=np.mean( abs((predict-data_out)/data_out).detach().numpy() ) #problems with mape if denominator = 0
else:
#loss0 += np.mean((predict0-data_out)**2)
loss0 += loss_fn_np(predict0, data_out) #MSE numpy loss with mask on land points
mae0 += np.mean(abs(predict0-data_out))
if epoch == num_epochs-1:
predict.append(np.squeeze(predict0)) #save the predictions for the last epoch
if epoch % 50 == 0: print("Test epoch ", epoch, "; mean(MSE(t)) = ", loss0/nt*std_fac_dis**2, "; mean(MAE(t)) = ", mae0/nt*std_fac_dis)
#predict test data for the last epoch
#if epoch == num_epochs-1:
# for t in range(nt):
# data_in = torch.tensor(np.stack((in_u10_test[t,...],
# in_v10_test[t,...],
# in_h),axis=0)[np.newaxis,...],requires_grad=False).to(device) #(1,input_channels,ny,nx)
# predict0, _ = model(data_in, t) #data_in=(1,input_channels,ny,nx) predict=(1,output_channels,ny,nx)
# predict0 = np.squeeze(predict0.detach().cpu().numpy()) #delete the first dim=1
# predict.append(predict0) #save the predictions for the last epoch
# predict=np.array(predict) #(nt,output_channels,ny,nx)
# #predict=np.reshape(predict,(nt,output_channels,ny,nx)) #(nt,output_channels,ny,nx)
return loss0, mae0, np.array(predict)
#run simulation----
#use cuda if possible---
print ("Pytorch version {}".format(torch.__version__))
use_cuda = torch.cuda.is_available() # check if CUDA is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # use GPU if possible
print("Device to be used for computation: {}".format(device))
print(f"{torch.cuda.get_device_name(0)}")
print()
#initialize model---
model = convlstm_continuous_states.ConvLSTM(input_channels, hidden_channels, kernel_size).to(device)
#model = convlstm.ConvLSTM(input_channels, hidden_channels, kernel_size).to(device)
#choose loss function---
#loss_fn = torch.nn.MSELoss()
#loss_fn = loss_function.mse() #for training (the same as above)
masking=True
loss_fn = loss_function.mse(masking=masking,mask=mask_torch) #for training (masking land points)
#loss_fn_np = loss_function.mse_numpy() #for testing
loss_fn_np = loss_function.mse_numpy(masking=masking,mask=mask_numpy) #for testing
#choose optimizer---
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
#check the model and loss function and optimizer---
print(model)
print(loss_fn.__class__.__name__) #this works for pytorch, but also for our custom class
#print(loss_fn) #only works for pytorch
print(optimizer)
print()
#mini-batch size to perform backward propagation---
#mini_batch_size=25
if mini_batch_size==-1: mini_batch_size = nt_train
nt_backward = get_times_for_backward(nt_train,mini_batch_size)
print(f"time steps to activate backward propagation: {nt_backward} (len={len(nt_backward)})")
print()
#training output data---
lr = np.zeros(num_epochs)
loss_train =
|
np.zeros(num_epochs)
|
numpy.zeros
|
# munk.py Implementation of MUNK
# Load required modules
import argparse
import json
import random
import time
import networkx as nx
import numpy as np
import scipy as sp
from sklearn.externals import joblib
import munk.util as util
from munk.io import get_logger
###############################################################################
# utility to seperate scores
###############################################################################
def separate_scores(scores, landmark_pair_idxs, homolog_pair_idxs):
'''
Separate scores into the following 5 categories:
1. Landmark - Landmark pair scores
2. Off diagonal entries in the landmark-landmark submatrix
3. Landmark- (non-landmark) entries in rows and columns that correspond
to one landmark
4. Homolog-homolog pairs
5. Other pairs
'''
source_landmark_idxs, target_landmark_idxs = zip(*landmark_pair_idxs)
source_homolog_idxs, target_homolog_idxs = zip(*homolog_pair_idxs)
landmark_mask = np.zeros_like(scores, dtype=bool)
source_landmark_target_all_mask = np.zeros_like(scores, dtype=bool)
source_landmark_target_all_mask[source_landmark_idxs, :] = True
source_all_target_landmark_mask = np.zeros_like(scores, dtype=bool)
source_all_target_landmark_mask[:, target_landmark_idxs] = True
landmark_landmark_mask = np.zeros_like(scores, dtype=bool)
landmark_landmark_mask[source_landmark_idxs, target_landmark_idxs] = True
# Obtain landmark-landmark pairs
L_L_diag_scores = scores[source_landmark_idxs, target_landmark_idxs]
# Obtain landmark-landmark off diag pairs
L_L_off_diag_mask = np.logical_and(source_landmark_target_all_mask, source_all_target_landmark_mask)
L_L_off_diag_mask &= ~landmark_landmark_mask
L_L_off_diag_scores = scores[L_L_off_diag_mask]
# Landmark - Non-landmark pairs
L_non_L_mask = source_landmark_target_all_mask ^ source_all_target_landmark_mask
L_non_L_scores = scores[L_non_L_mask]
# Hom - Hom pairs
H_H_mask = np.zeros_like(scores, dtype=bool)
H_H_mask[source_homolog_idxs, target_homolog_idxs] = True
H_H_mask[source_landmark_idxs, target_landmark_idxs] = False
H_H_scores = scores[H_H_mask]
# Obtain other scores
other_mask = np.ones_like(scores, dtype=np.bool)
other_mask &= ~(source_landmark_target_all_mask | source_all_target_landmark_mask | H_H_mask )
other_scores = scores[other_mask]
return L_L_diag_scores,\
L_L_off_diag_scores,\
L_non_L_scores,\
H_H_scores,\
other_scores
###############################################################################
# DIFFUSION MEASURES
###############################################################################
def regularized_laplacian(G, nodes, lam):
'''
Computes regularized laplacian from networkx graph corresponding to
given node ordering
Note: index of regularized laplacian rows/columns correspond to indices of
the sorted list of nodes in the given graph.
'''
L = np.array(nx.laplacian_matrix(G, nodelist=nodes).todense())
return np.linalg.inv(np.eye( *np.shape(L) ) + (lam * L))
def rkhs_factor(D):
''' Computes RKHS embeddings for given kernel matrix '''
e, v = sp.linalg.eigh(D)
return v.dot(np.diag(np.sqrt(e)))
def non_landmark_idxs(n, landmark_idxs):
return [i for i in range(n) if i not in set(landmark_idxs)]
###############################################################################
# MUNK embedding
###############################################################################
def embed_matrices(source_C, target_D, landmark_idxs):
'''
Computes MUNK embeddings of source and target matrices given corresponding
indices of landmarks
:param source_C: 2D array of rkhs vectors from source species
:param source_D: 2D array of diffusion values from target species
:param landmark_idxs: list of landmark tuples like (source_idx, target_idx)
j
:return: tuple of MUNK embeddings for source and target species
'''
source_idxs, target_idxs = zip(*landmark_idxs)
target_C_hat =
|
np.linalg.pinv(source_C[source_idxs,:])
|
numpy.linalg.pinv
|
import numpy as np
from scipy.special import logsumexp
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAS= actionAngleStaeckel(delta=0.4,pot=MWPotential2014,c=True)
_R0 = 8.
_z0 = 0.025
def gaussian_1d(v_R, v_z, R, z, med_z, params=[30.]):
sigmaR = params[0]
vo = 0.
A_R = (1./(sigmaR*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigmaR**2)
p = A_R*np.exp(E_R)
logp = np.log(p)
logp = np.sum(logp[np.isfinite(logp)])
return logp
def gaussian_fixedv0(v_R, v_z, R, z, med_z, params=[np.log10(30.),np.log10(30.),0.1]):
sigmaR, sigmaz, contfrac = params
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
vo = 0.
contA = (contfrac)*(1./(100*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*100**2))
contE_z = (-(v_z-vo)**2/(2*100**2))
A_R = (1-contfrac)*(1./(sigmaR*np.sqrt(2*np.pi)))
A_z = (1-contfrac)*(1./(sigmaz*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigmaR**2)
E_z = (-(v_z-vo)**2)/(2*sigmaz**2)
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
As = np.dstack([A_R, A_z, contA, contA])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def gaussian_expR_quadz_fixedv0(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.01], return_each=False):
vo = 0.
sigmacont = 100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
sigmacontR, sigmacontz = np.sqrt(sigmacont**2+cov_vRvTvz[:,0,0]), np.sqrt(sigmacont**2+cov_vRvTvz[:,2,2])
z = np.fabs(z)
sigma_fRz_R = np.sqrt(((a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR)))**2+cov_vRvTvz[:,0,0])
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigma_fRz_R**2)
sigma_fRz_z = np.sqrt(((a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz)))**2+cov_vRvTvz[:,2,2])
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-vo)**2)/(2*sigma_fRz_z**2)
contA_R = (contfrac)*(1./(sigmacontR*np.sqrt(2*np.pi)))
contA_z = (contfrac)*(1./(sigmacontz*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*sigmacontR**2))
contE_z = (-(v_z-vo)**2/(2*sigmacontz**2))
As = np.dstack([A_R, A_z, contA_R, contA_z])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
if return_each:
return logp
logp = np.sum(logp)
return logp
def gaussian_expR_quadz(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.,0.,0.01]):
vo = 0.
sigmacont=100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, v_Ro, v_zo, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
sigmacontR, sigmacontz = np.sqrt(sigmacont**2+cov_vRvTvz[:,0,0]), np.sqrt(sigmacont**2+cov_vRvTvz[:,2,2])
z = np.fabs(z)
sigma_fRz_R = np.sqrt(((a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR)))**2+cov_vRvTvz[:,0,0])
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-v_Ro)**2)/(2*sigma_fRz_R**2)
sigma_fRz_z = np.sqrt(((a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz)))**2+cov_vRvTvz[:,2,2])
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-v_zo)**2)/(2*sigma_fRz_z**2)
contA_R = (contfrac)*(1./(sigmacontR*np.sqrt(2*np.pi)))
contA_z = (contfrac)*(1./(sigmacontz*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*sigmacontR**2))
contE_z = (-(v_z-vo)**2/(2*sigmacontz**2))
As = np.dstack([A_R, A_z, contA_R, contA_z])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def gaussian_expR_expz_fixedv0(v_R, v_z, R, z, med_z, params=[1/8.,1/8.,np.log10(50.),1/8.,1/8.,np.log10(50.),0.01]):
vo = 0.
hRsigmaR, hzsigmaR, sigmaR, hRsigmaz, hzsigmaz, sigmaz, contfrac = params
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
sigmacont = 200.
z = np.fabs(z)
sigma_fRz_R = sigmaR*np.exp(-hRsigmaR*(R-_R0)-hzsigmaR*(z-med_z))
sigma_fRz_z = sigmaz*np.exp(-hRsigmaz*(R-_R0)-hzsigmaz*(z-med_z))
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-vo)**2)/(2*sigma_fRz_R**2)
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-vo)**2)/(2*sigma_fRz_z**2)
contA = (contfrac)*(1./(sigmacont*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*sigmacont**2))
contE_z = (-(v_z-vo)**2/(2*sigmacont**2))
As = np.dstack([A_R, A_z, np.ones(len(v_R))*contA, np.ones(len(v_R))*contA])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def gaussian_expR_expz(v_R, v_z, R, z, med_z, params=[1/8.,1/8.,np.log10(50.),1/8.,1/8.,np.log10(50.),0.,0.,0.01]):
vo = 0.
hRsigmaR, hzsigmaR, sigmaR, hRsigmaz, hzsigmaz, sigmaz, v_Ro, v_zo, contfrac = params
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
z = np.fabs(z)
sigma_fRz_R = sigmaR*np.exp(-hRsigmaR*(R-_R0)-hzsigmaR*(z-med_z))
sigma_fRz_z = sigmaz*np.exp(-hRsigmaz*(R-_R0)-hzsigmaz*(z-med_z))
A_R = (1-contfrac)*(1./(sigma_fRz_R*np.sqrt(2*np.pi)))
E_R = (-(v_R-v_Ro)**2)/(2*sigma_fRz_R**2)
A_z = (1-contfrac)*(1./(sigma_fRz_z*np.sqrt(2*np.pi)))
E_z = (-(v_z-v_zo)**2)/(2*sigma_fRz_z**2)
contA = (contfrac)*(1./(100*np.sqrt(2*np.pi)))
contE_R = (-(v_R-vo)**2/(2*100**2))
contE_z = (-(v_z-vo)**2/(2*100**2))
As = np.dstack([A_R, A_z, np.ones(len(v_R))*contA, np.ones(len(v_R))*contA])[0]
Es = np.dstack([E_R, E_z, contE_R, contE_z])[0]
logp = logsumexp(Es, b=As, axis=1)
logp = np.sum(logp)
return logp
def ellipsoid_old(v_R, v_z, R, z,
cov_vRvTvz, med_z,
params=[1/8.,np.log10(50.),1.,1.,1/8.,np.log10(50.),1.,1.,0.,0.,0.01]):
vo = 0.
sigmacont = 100.
h_sigmaR, sigmaR, a_R, b_R, h_sigmaz, sigmaz, a_z, b_z, alpha_0,alpha_1, contfrac = params
h_sigmaR, h_sigmaz = 1/h_sigmaR, 1/h_sigmaz
sigmaR, sigmaz = 10**sigmaR, 10**sigmaz
z = np.fabs(z)
sigma_fRz_R = (a_R*(z-med_z)**2+b_R*(z-med_z)+sigmaR)*(np.exp(-1*(R-_R0)/h_sigmaR))
sigma_fRz_z = (a_z*(z-med_z)**2+b_z*(z-med_z)+sigmaz)*(np.exp(-1*(R-_R0)/h_sigmaz))
tana= alpha_0+alpha_1*z/R #+params[11]*(z/R)**2.
sig2rz= (sigma_fRz_R**2.-sigma_fRz_z**2.)*tana/(1.-tana**2.)
#Do likelihood
out= 0.
for ii in range(len(v_R)):
vv= np.array([v_R[ii],v_z[ii]])
VV= np.array([[sigma_fRz_R[ii]**2.+cov_vRvTvz[ii,0,0],
sig2rz[ii]+cov_vRvTvz[ii,0,2]],
[sig2rz[ii]+cov_vRvTvz[ii,0,2],
sigma_fRz_z[ii]**2.+cov_vRvTvz[ii,2,2]]])
outVV= np.array([[sigmacont**2.+cov_vRvTvz[ii,0,0],
cov_vRvTvz[ii,0,2]],
[cov_vRvTvz[ii,0,2],
sigmacont**2.+cov_vRvTvz[ii,2,2]]])
#print VV, outVV, numpy.linalg.det(VV), numpy.linalg.det(outVV)
detVV=
|
np.linalg.det(VV)
|
numpy.linalg.det
|
#Copyright 2009 <NAME>, <<EMAIL>>
"""
This module supplies functions used to implement graph theory.
"""
__docformat__ = 'restructuredtext'
import numpy as np
from dfs import dfs
from general import find, issubset
def parents(adj_mat, i):
"""
Returns the indices of the parent nodes of the input node, i, in the
given adjacency matrix.
Parameters
----------
adj_mat: Numpy ndarray
Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
edge from node i to node j.
i: Int
The index of the node whose parents are to be found.
"""
"""Check if this is perhaps a sparse matrix"""
if type(adj_mat) != np.ndarray:
posi = np.array((adj_mat[:, i].todense() == 1)).squeeze()
else:
posi = np.array((adj_mat[:, i] == 1))
vals = []
while np.sum(posi)!=0:
t_pos = np.argmax(posi)
posi[t_pos]=0
vals.append(t_pos)
return np.array(vals,dtype=np.int32)
def children(adj_mat, i):
"""
Returns the indices of the children nodes of the input node, i, in the
given adjacency matrix.
Parameters
----------
adj_mat: Numpy ndarray
Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
edge from node i to node j.
i: Int
The index of the node whose parents are to be found.
"""
"""Check if this is perhaps a sparse matrix"""
if type(adj_mat) != np.ndarray:
adj_mat = adj_mat.tocsr()
posi = np.array((adj_mat[i, :].todense() == 1)).squeeze()
adj_mat = adj_mat.tocsc()
else:
posi = np.array((adj_mat[i, :] == 1))
vals = []
while np.sum(posi)!=0:
t_pos = np.argmax(posi)
posi[t_pos]=0
vals.append(t_pos)
return vals
def neighbours(adj_mat, i):
"""
Returns the indices of the neighbours nodes of the input node, i, in the
given adjacency matrix.
Parameters
----------
adj_mat: Numpy ndarray
Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
edge from node i to node j.
i: Int
The index of the node whose parents are to be found.
"""
kids = np.array(children(adj_mat, i))
folks = np.array(parents(adj_mat,i))
if issubset(kids, folks) and issubset(folks, kids):
nbrs = kids
else:
nbrs = np.hstack((kids, folks)).tolist()
return nbrs
def family(adj_mat, i):
"""
Returns the indices of the family nodes of the input node, i, in the
given adjacency matrix.
Parameters
----------
adj_mat: Numpy ndarray
Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
edge from node i to node j.
i: Int
The index of the node whose parents are to be found.
"""
f = parents(adj_mat, i)
f = np.hstack([f,i])
return f
def topological_sort(A):
"""
Returns the indices of the nodes in the graph defined by the adjacency
matrix A in topological order.
Parameters
----------
A: Scipy sparse csc_matrix
Adjacency matrix. If A[i, j] = 1, there exists a directed edge from
node i to node j.
"""
n = A.shape[0]
indeg = []
zero_indeg = []
for i in range(0,n):
indeg.append(len(parents(A,i)))
if indeg[i] == 0:
zero_indeg.append(i)
zero_indeg.reverse()
t = 1
order = []
while len(zero_indeg)!=0:
v = zero_indeg.pop()
order.append(v)
t = t + 1
cs = children(A, v)
for j in range(0,len(cs)):
c = cs[j]
indeg[c] = indeg[c] - 1
if indeg[c] == 0:
zero_indeg.insert(0,c)
return order
def moralize(G):
"""
Converts a directed graph to an undirected graph, by connecting the
parents of every node together.
Parameters
----------
G: Numpy ndarray
Adjacency matrix. If A[i, j] = 1, there exists a directed edge from
node i to node j.
"""
M = G.copy()
n = M.shape[0]
for i in range(0,n):
fam = family(G,i)
for j in fam:
M[j, fam] = 1
"""Make sure no node has an edge to itself"""
M = setdiag(M, 0)
moral_edges = np.triu(M-G,0)
return [M, moral_edges]
def setdiag(G, val):
"""
Sets the diagonal elements of a matrix to a specified value.
Parameters
----------
G: A 2D matrix or array.
The matrix to modify.
val: Int or float
The value to which the diagonal of 'G' will be set.
"""
n = G.shape[0]
for i in range(0,n):
G[i,i] = val
return G
def graph_to_jtree(model_graph, ns):
"""
This function triangulates a moral graph and obtains a junction tree
from the cliques of the triangulated graph by computing the maximum
spanning tree for those cliques.
Parameters
----------
model_graph: Numpy ndarray
MG[i,j] = 1 iff there is an edge between node i and node j.
ns: List
The node sizes, where ns[i] = the number of discrete values node i
can take on [1 if observed].
Output
------
jtree: Numpy ndarray
A matrix reprsenting the edges in the junction tree. jtree(i,j)=1
iff there is an edge between clique i and clique j.
root: Int
The index of the root clique.
cliques: List
A list of lists of the indices of each clique. cliques[i] = the
indices of the nodes in clique i.
B: Numpy ndarray
A map of which clique each node appears in, B[i,j] = 1 iff node j
occurs in clique i.
w: List
The weights of the cliques, w[i] = weight of clique i.
"""
"""Make sure that no node has a edge connecting to itself."""
model_graph = setdiag(model_graph, 0)
"""Determine the elimination order"""
elim_order = best_first_elim_order(model_graph.copy(), ns)
"""
Using the elimination order and the moral graph, obtain the new cliques
using triangulation.
"""
[triangulated_graph, cliques] = triangulate(model_graph.copy(), elim_order)
"""
Obtain a junction tree from the set of cliques.
"""
[jtree, root, B, w] = cliques_to_jtree(cliques, ns)
return [jtree, root, cliques, B, w]
def best_first_elim_order(G, node_sizes, stage=[]):
"""
This function greedily searches for an optimal elimination order.
Find an order in which to eliminate nodes from the graph in such a way
as to try and minimize the weight of the resulting triangulated graph.
The weight of a graph is the sum of the weights of each of its cliques;
the weight of a clique is the product of the weights of each of its
members; the weight of a node is the number of values it can take on.
Since this is an NP-hard problem, we use the following greedy heuristic:
At each step, eliminate that node which will result in the addition of
the least number of fill-in edges, breaking ties by choosing the node
that induces the lighest clique.
For details, see
- Kjaerulff, "Triangulation of graphs -- algorithms giving small total
state space", Univ. Aalborg tech report, 1990 (www.cs.auc.dk/~uk)
- <NAME> and <NAME>, "Inference in Belief Networks: A procedural
guide", Intl. J. Approx. Reasoning, 11, 1994
Parameters
----------
G: Numpy ndarray
G[i,j] = 1 iff there is an edge between node i and node j.
node_sizes: List
The node sizes, where ns[i] = the number of discrete values
node i can take on [1 if observed].
stage: List
stage[i] is a list of the nodes that must be eliminated at i'th
stage.
"""
"""Obtain the number of nodes in the graph"""
n = G.shape[0]
if stage == []:
stage = [range(0, n)]
MG = G.copy()
uneliminated = np.ones((1, n))
order =
|
np.zeros((1, n))
|
numpy.zeros
|
import copy
import functools as ft
import sys
import numpy as np
import chemex.containers.helper as cch
import chemex.containers.noise as ccn
import chemex.containers.plot as ccp
@ft.total_ordering
class RelaxationProfile:
def __init__(self, name, data, pulse_seq, pnames, params, params_mf):
self.name = name
self.data = data
self._pulse_seq = pulse_seq
self._pnames = pnames
self.params = params
self.params_mf = params_mf
self._plot = ccp.relaxation
@classmethod
def from_file(cls, path, config, pulse_seq, pnames, params, params_mf):
name = config["spin_system"]
data = RelaxationData.from_file(
path, filter_planes=config["data"]["filter_planes"]
)
return cls(name, data, pulse_seq, pnames, params, params_mf)
def residuals(self, params):
data = self.data.points[self.data.mask]
return (self.calculate(params) - data["intensities"]) / data["errors"]
def calculate(self, params, times=None):
data = self.data.points[self.data.mask]
par_values = self._get_parvals(params)
calculated = self._pulse_seq.calculate(tuple(data["times"]), par_values)
scale = cch.get_scale(data["intensities"], data["errors"], calculated)
if times is not None:
calculated = self._pulse_seq.calculate(tuple(times), par_values)
return scale * calculated
def estimate_noise_variance(self, kind):
return self.data.estimate_noise_variance(kind)
def set_noise(self, value):
self.data.points["errors"] = value
def print(self, params):
output = f"[{self.name}]\n"
output += (
f"# {'TIMES (S)':>12s} "
f"{'INTENSITY (EXP)':>17s} "
f"{'ERROR (EXP)':>17s} "
f"{'INTENSITY (CALC)':>17s}\n"
)
values = self.calculate(params, self.data.points["times"])
for point, mask, value in zip(self.data.points, self.data.mask, values):
offset, intensity, error = point
output += "#" if not mask else " "
output += (
f" {offset: 12.2f} {intensity: 17.8e} {error: 17.8e} {value: 17.8e}"
)
output += " # NOT USED IN THE FIT\n" if not mask else "\n"
return output + "\n\n"
def filter(self, params):
pass
def plot(self, params, file_pdf, file_exp, file_fit, simulation=False):
data_exp = self._get_plot_data_exp(simulation)
data_fit = self._get_plot_data_fit(params, simulation)
self._plot(file_pdf, self.name, data_exp, data_fit)
output_fit = self._format_data_fit(data_fit)
file_fit.write(output_fit + "\n\n")
if not simulation:
output_exp = self._format_data_exp(data_exp)
file_exp.write(output_exp + "\n\n")
def monte_carlo(self, params):
intensities_ref = self.calculate(params)
profile = copy.copy(self)
profile.data = profile.data.monte_carlo(intensities_ref)
return profile
def bootstrap(self):
"""Make a profile for bootstrap analysis."""
profile = copy.copy(self)
profile.data = profile.data.bootstrap()
return profile
def _get_parvals(self, params):
return tuple(
(name1, params[name2].value) for name1, name2 in self._pnames.items()
)
def _get_plot_data_exp(self, simulation=False):
dtype = [
("times", "f8"),
("intensities", "f8"),
("errors", "f8", (2,)),
("mask", "?"),
]
if simulation:
return np.rec.array([[], [], [], []], dtype=dtype)
points = self.data.points
times = points["times"]
intst_ref = points["intensities"][np.argmax(np.abs(points["intensities"]))]
intensities = points["intensities"] / intst_ref
errors = points["errors"] / abs(intst_ref)
errors = np.array([-errors, errors]).transpose()
mask = self.data.mask
data_exp = np.rec.array([times, intensities, errors, mask], dtype=dtype)
return np.sort(data_exp, order="times")
def _get_plot_data_fit(self, params, simulation=False):
data = self.data.points
if simulation:
intst_calc = self.calculate(params)
intst_ref = intst_calc[np.argmax(np.abs(intst_calc))]
else:
points = self.data.points
intst_ref = points["intensities"][np.argmax(
|
np.abs(points["intensities"])
|
numpy.abs
|
import numpy as np
def windowing(image_size, window_size, sliding_strides, mode='fill'):
assert len(image_size) == len(window_size) == len(sliding_strides) == 3
window_size = np.array(window_size)
sliding_strides = np.array(sliding_strides)
if mode == 'fill' and (window_size < sliding_strides).all():
raise ValueError('invalid arguments with "fill" mode window_size {} < side_strides {}'.format(window_size, sliding_strides))
ia = np.arange(0, image_size[0] - window_size[0], sliding_strides[0])
ib = np.arange(0, image_size[1] - window_size[1], sliding_strides[1])
ic = np.arange(0, image_size[2] - window_size[2], sliding_strides[2])
if mode == 'fill':
ia = np.append(ia, image_size[0] - window_size[0])
ib = np.append(ib, image_size[1] - window_size[1])
ic =
|
np.append(ic, image_size[2] - window_size[2])
|
numpy.append
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import unittest
import numpy as np
import pandas as pd
from os.path import join, dirname
from climateeconomics.sos_processes.iam.witness_wo_energy.datacase_witness_wo_energy import DataStudy
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from tempfile import gettempdir
from copy import deepcopy
from gemseo.utils.compare_data_manager_tooling import delete_keys_from_dict,\
compare_dict
from energy_models.core.stream_type.resources_models.resource_glossary import ResourceGlossary
from scipy.interpolate.interpolate import interp1d
class DICEParallelTest(unittest.TestCase):
RESSOURCE_CO2 = ResourceGlossary.CO2['name']
def setUp(self):
self.name = 'Test'
self.root_dir = gettempdir()
self.ee = ExecutionEngine(self.name)
def test_01_exec_parallel(self):
"""
1 proc
"""
n_proc = 1
repo = 'climateeconomics.sos_processes.iam'
builder = self.ee.factory.get_builder_from_process(
repo, 'witness_wo_energy')
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
self.ee.display_treeview_nodes()
usecase = DataStudy()
usecase.study_name = self.name
values_dict = {}
years =
|
np.arange(2020, 2101, 1)
|
numpy.arange
|
#https://qiita.com/yu4u/items/70aa007346ec73b7ff05
import numpy as np
class MixupGenerator():
def __init__(self, x, y, batch_size=32, mix_num=2, alpha=0.2):
self.x = x
self.y = y
self.batch_size = batch_size
self.alpha = alpha
self.mix_num = mix_num
#
self.__sample_num = len(self.x)
self.__dirichlet_alpha = np.ones(self.mix_num) * self.alpha
return
def flow(self):
while True:
indexes = self.get_indexes()
itr_num = int(np.ceil(self.__sample_num / self.batch_size))
for i in range(itr_num):
batch_indxs = indexes[:, i*self.batch_size : (i+1)*self.batch_size]
x, y = self.mixup(self.x[batch_idxs], self.y[batch_idxs])
yield x, y
def mixup(self, batch_x, batch_y):
'''
return mixuped_x, mixuped_y.
batch_x = self.x[batch_idxs], batch_y = self.y[batch_idxs]
batch_idxs =
[
[idx(0), idx(1), ..., idx(batch_size)], # indexes of mixed no. 1
[idx(0), idx(1), ..., idx(batch_size)], # indexes of mixed no. 2
...,
[idx(0), idx(1), ..., idx(batch_size)] # indexes of mixed no. mix_num
].
idx(k)s of mixed no.1, 2, ..., mix_num are mixed.
'''
mix_num = batch_x.shape[0]
batch_size = batch_x.shape[1]
#mixed_x[k,:,:,...] = batch_x[0,k,:,:,...] * mixup_rate[0,k] + batch_x[1,k,:,:,...] * mixup_rate[1,k] + ... + batch_x[mix_num,k,:,:,...] * mixup_rate[mix_num,k]
#mixed_y[k,:,:,...] = batch_y[0,k,:,:,...] * mixup_rate[0,k] + batch_y[1,k,:,:,...] * mixup_rate[1,k] + ... + batch_y[mix_num,k,:,:,...] * mixup_rate[mix_num,k]
mixup_rate = np.random.dirichlet(alpha=self.__dirichlet_alpha, size=(batch_size))
mixup_rate_tr = np.transpose(mixup_rate)
reshapelist__mix_rate_tr_x = [mix_num, batch_size] + [1]*(len(batch_x.shape) - 2)
reshapelist__mix_rate_tr_y = [mix_num, batch_size] + [1]*(len(batch_y.shape) - 2)
mixup_rate_tr_x = np.reshape(mixup_rate_tr, reshapelist__mix_rate_tr_x)
mixup_rate_tr_y = np.reshape(mixup_rate_tr, reshapelist__mix_rate_tr_y)
#
mixuped_x = np.sum(batch_x * mixup_rate_tr_x, axis=0)
mixuped_y = np.sum(batch_y * mixup_rate_tr_y, axis=0)
return mixuped_x, mixuped_y
def get_indexes(self):
'''
return indexes.
indexes =
[
[shuffled [0, 1,.., sample_num]], #indexes of mixed no. 1
[shuffled [0, 1,.., sample_num]], #indexes of mixed no. 2
...,
[shuffled [0, 1,.., sample_num]], #indexes of mixed no. mix_num
]
'''
indexes = np.ones((self.mix_num, self.__sample_num), dtype='int') * np.arange(self.__sample_num)
for i in range(self.mix_num):
np.random.shuffle(indexes[i,:])
return indexes
from keras.utils import Sequence
from keras.preprocessing.image import ImageDataGenerator
import scipy.stats as scst
class MixupSequence(Sequence):
def __init__(self, x, y, batch_size=32, mix_num=2, alpha=0.2):
self.x = x
self.y = y
self.batch_size = batch_size
self.alpha = alpha
self.mix_num = mix_num
#
self.__sample_num = len(self.x)
self.__dirichlet_alpha = np.ones(self.mix_num) * self.alpha
#
self.__shuffuled_idxes = self.get_indexes()
return
def __len__(self):
return int(np.ceil(len(self.x) / self.batch_size))
def __getitem__(self, idx):
batch_x, batch_y = self.get_next_batch(idx)
mixed_x, mixed_y = self.mixup(batch_x, batch_y)
return mixed_x, mixed_y
def on_epoch_end(self):
self.__shuffuled_idxes = self.get_indexes()
return
def mixup(self, batch_x, batch_y):
'''
return mixuped_x, mixuped_y.
batch_x = self.x[batch_idxs], batch_y = self.y[batch_idxs]
batch_idxs =
[
[idx(0), idx(1), ..., idx(batch_size)], # indexes of mixed no. 1
[idx(0), idx(1), ..., idx(batch_size)], # indexes of mixed no. 2
...,
[idx(0), idx(1), ..., idx(batch_size)] # indexes of mixed no. mix_num
].
idx(k)s of mixed no.1, 2, ..., mix_num are mixed.
'''
mix_num = batch_x.shape[0]
batch_size = batch_x.shape[1]
#mixed_x[k,:,:,...] = batch_x[0,k,:,:,...] * mixup_rate[0,k] + batch_x[1,k,:,:,...] * mixup_rate[1,k] + ... + batch_x[mix_num,k,:,:,...] * mixup_rate[mix_num,k]
#mixed_y[k,:,:,...] = batch_y[0,k,:,:,...] * mixup_rate[0,k] + batch_y[1,k,:,:,...] * mixup_rate[1,k] + ... + batch_y[mix_num,k,:,:,...] * mixup_rate[mix_num,k]
#np.random.dirichlet has error with small alpha.
#mixup_rate = np.random.dirichlet(alpha=self.__dirichlet_alpha, size=(batch_size))
mixup_rate = scst.dirichlet.rvs(alpha=self.__dirichlet_alpha, size=batch_size)
mixup_rate_tr = np.transpose(mixup_rate)
reshapelist__mix_rate_tr_x = [mix_num, batch_size] + [1]*(len(batch_x.shape) - 2)
reshapelist__mix_rate_tr_y = [mix_num, batch_size] + [1]*(len(batch_y.shape) - 2)
mixup_rate_tr_x = np.reshape(mixup_rate_tr, reshapelist__mix_rate_tr_x)
mixup_rate_tr_y = np.reshape(mixup_rate_tr, reshapelist__mix_rate_tr_y)
#
mixuped_x = np.sum(batch_x * mixup_rate_tr_x, axis=0)
mixuped_y = np.sum(batch_y * mixup_rate_tr_y, axis=0)
return mixuped_x, mixuped_y
def get_indexes(self):
'''
return indexes.
indexes =
[
[shuffled [0, 1,.., sample_num]], #indexes of mixed no. 1
[shuffled [0, 1,.., sample_num]], #indexes of mixed no. 2
...,
[shuffled [0, 1,.., sample_num]], #indexes of mixed no. mix_num
]
'''
indexes = np.ones((self.mix_num, self.__sample_num), dtype='int') *
|
np.arange(self.__sample_num)
|
numpy.arange
|
#Actor Critic method using softmax parametrization
import numpy as np
import gym
env=gym.make('CartPole-v1')
num_actions=2#number of available actions
num_episodes=500
fourier_order=3#change order as desired, set to 0 to use raw observations
basealpha=1e-2
observations_dim=np.shape(env.observation_space.high)[0] #the observations in the environment
rewardthresh=300 # succcesful if it balances for 300 steps
if fourier_order==0:
w=np.zeros([observations_dim,num_actions])
theta=np.random.normal(0,0.1,[pow(fourier_order+1,observations_dim),num_actions])
if fourier_order==0:
theta=np.random.rand(observations_dim,num_actions)
w=np.random.normal(0,0.1,[pow(fourier_order+1,observations_dim),num_actions])
stepcount=np.zeros([num_episodes,1])
gamma=0.99 #discount factor
zeta=0.9#bootstrapping parameter, note that lambda is keyword in python
visualize_after_steps=num_episodes-5 #visualize the last 5 runs
def createalphas(): #different alpha for different order terms of fourier
if fourier_order==0:
return np.ones([observations_dim,num_actions])*basealpha
temp=tuple([np.arange(fourier_order+1)]*observations_dim)
b=np.array(np.meshgrid(*temp)).T.reshape(-1,observations_dim)
c=np.linalg.norm(b,axis=1)
d=basealpha/c
d[0]=basealpha
d = np.expand_dims(d, axis=1)
alphavec=np.tile(d,num_actions)
alphavec=np.reshape(alphavec,(-1,num_actions))
return alphavec
alphavec=createalphas() #create the learning rate matrix
betavec=alphavec*1
def translate(value, leftMin, leftMax, rightMin, rightMax):
leftrange = leftMax - leftMin
rightrange = rightMax - rightMin
#Convert the left range into a 0-1 range (float)
valueScaled = float(value - leftMin) / leftrange
#Convert the 0-1 range into a value in the right range.
return rightMin + (valueScaled * rightrange)
def normalize(state):
normstate=np.empty(np.shape(state))
val1=np.array([2.5, 3.6, 0.28, 3.7]) #in case you want to set manually the limits
val=-val1
for i in range(np.shape(state)[0]):
normstate[i]=translate(state[i],val[i],val1[i],0,1)
#print (normstate)
return normstate
def computeFourierBasis(state):
if fourier_order==0: #no cosine terms at all
return normalize(state)
normstate=normalize(state)
temp=tuple([np.arange(fourier_order+1)]*observations_dim)
b=np.array(np.meshgrid(*temp)).T.reshape(-1,observations_dim)
return np.cos(np.pi*np.dot(b,normstate))
def computevalue(w,state,action): #compute value of taking some state in some state
statefeature=computeFourierBasis(curstate)
statefeaturetemp=np.tile(statefeature[:,np.newaxis],[1,num_actions])
f=np.dot(theta.T,statefeature)
f=f-np.max(f) #numerical stability
denominator=np.sum(np.exp(f))
numerator=np.exp(f)
softmax=numerator/denominator
statefeaturetemp=np.tile(statefeature[:,np.newaxis],[1,num_actions])
temp=-softmax*statefeaturetemp
temp[:,action]+=statefeature
x=np.dot(np.ravel(temp,order='F'),np.ravel(w,order='F'))
return x
def pick_action(state,theta):
state=computeFourierBasis(state)
f=np.dot(theta.T,state)
f=f-np.max(f) #for numerical stability
denominator= np.sum(np.exp(f))
numerator=np.exp(f)
val=numerator/denominator
#print(np.max(val)*100)
distribution=np.cumsum(val)# we have obtained the cumulative distribution
temp=np.random.rand(1)
action=np.digitize(temp,distribution) #picking from the probability distro'''
return int(action)
def update_critic(w,delta,e):
w= w+ delta*betavec*e
return w
def update_actor(theta,w,curstate,action):
statefeature=computeFourierBasis(curstate)
statefeaturetemp=np.tile(statefeature[:,np.newaxis],[1,num_actions])
f=np.dot(theta.T,statefeature)
f=f-np.max(f) #numerical stability
denominator=np.sum(
|
np.exp(f)
|
numpy.exp
|
import numpy as np
np.set_printoptions(suppress=True)
def compute_iou(bboxes1, bboxes2, bbox_format='cxcywh'):
if bbox_format == 'cxcywh':
# if bboxes are composed of center point xy and wh/2.
cx1, cy1, w1, h1 = np.split(bboxes1, 4, axis=1)
cx2, cy2, w2, h2 = np.split(bboxes2, 4, axis=1)
x11 = cx1 - w1 / 2
y11 = cy1 - h1 / 2
x12 = cx1 + w1 / 2
y12 = cy1 + h1 / 2
x21 = cx2 - w2 /2
y21 = cy2 - h2 /2
x22 = cx2 + w2 /2
y22 = cy2 + h2 /2
elif bbox_format == 'tlxtlywh':
# if bboxes are composed of top left point xy and wh.
tlx1, tly1, w1, h1 = np.split(bboxes1, 4, axis=1)
tlx2, tly2, w2, h2 = np.split(bboxes2, 4, axis=1)
x11 = tlx1
y11 = tly1
x12 = tlx1 + w1
y12 = tly1 + h1
x21 = tlx2
y21 = tly2
x22 = tlx2 + w2
y22 = tly2 + h2
else:
# if bboxes are composed of top left point xy and bottom right xy.
x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1)
x21, y21, x22, y22 = np.split(bboxes2, 4, axis=1)
# determine the (x, y)-coordinates of the intersection rectangle
inter_x1 = np.maximum(x11, x21.T)
inter_y1 = np.maximum(y11, y21.T)
inter_x2 = np.minimum(x12, x22.T)
inter_y2 = np.minimum(y12, y22.T)
# # compute the area of intersection rectangle
inter_area = np.maximum(inter_x2 - inter_x1 + 1 , 1e-6) * np.maximum(inter_y2 - inter_y1 + 1 , 1e-6)
# # compute the area of both the prediction and ground-truth rectangles
bboxes1_area = (x12 - x11 + 1 ) * (y12 - y11 + 1)
bboxes2_area = (x22 - x21 + 1 ) * (y22 - y21 + 1)
# compute the area of intersection rectangle
# inter_area = np.maximum(inter_x2 - inter_x1, 0) * np.maximum(inter_y2 - inter_y1, 0)
# # compute the area of both the prediction and ground-truth rectangles
# bboxes1_area = (x12 - x11) * (y12 - y11)
# bboxes2_area = (x22 - x21) * (y22 - y21)
iou = inter_area / ((bboxes1_area + bboxes2_area.T - inter_area) + 1e-5)
assert np.min(iou) + 1e-5 >= 0
return iou
def measure_tpfp(pred_bboxes, gt_bboxes, iou_threshold=0.5, bbox_format='cxcywh'):
'''
https://github.com/rafaelpadilla/Object-Detection-Metrics
위 프로젝트의 https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/master/pascalvoc.py
코드 및 voc 평가방법 참고
pred_bboxes: shape:[num_pred_bboxes, 6], class, x, y, w, h, confidence score
gt_bboxes: shape:[num_gt_bboxes, 5], class, x, y, w, h
iou_threshold: 예측된 바운딩 박스를 tp/fp로 분류하기위한 iou thereshold
return class_tp_fp_score
class_tp_fp_score: shape:[num_pred_bboxes, 4], class, tp, fp, confidence score
예측된 바운딩 박스별로 class tp/fp, confidence score 값을 기록한 table을 return 함
'''
pred_bboxes = pred_bboxes[np.argsort(pred_bboxes[:, 5])[::-1]]
# print(pred_bboxes)
# exit()
iou_per_box = np.zeros((len(pred_bboxes), 1))
tp_per_box = np.zeros((len(pred_bboxes), 1))
for c in np.unique(gt_bboxes[:, 0]):
gt_mask = gt_bboxes[:, 0] == c
pred_mask = pred_bboxes[:, 0] == c
filtered_gt_bboxes = gt_bboxes[gt_mask] # (M, )
filtered_pred_bboxes = pred_bboxes[pred_mask] # (N, )
if len(filtered_gt_bboxes) == 0 or len(filtered_pred_bboxes) == 0:
continue
filtered_iou_per_box = iou_per_box[pred_mask]
filtered_tp_per_box = tp_per_box[pred_mask]
iou_matrix = compute_iou(filtered_pred_bboxes[:, 1:5], filtered_gt_bboxes[:, 1:5], bbox_format) # (N, M)
#for iou in iou_matrix:
#print("np.sum(iou_matrix): ", np.sum(iou_matrix))
# TP 여부를 판단할때, 어떤 한 Predicted Box가 두 Ground Truth Box에 대한 검출 결과로 중복적으로 매칭될 수 있다.
# 이 경우 다른 Predicted Box가 두 Ground Truth Box중 하나의 Box에 대한 검출 결과로 매칭되고 TP가 될 가능성이 생긴다.
# 다만 VOC 는 이런 경우를 신경쓰지않음...
for i in range(len(filtered_gt_bboxes)):
iou = iou_matrix[:, i] # filtered_pred_bboxes와 i번째 filtered_gt_bbox간 IoU
matched = False
for j in range(len(iou)):
if filtered_tp_per_box[j] == 1.: # already matched
continue
if iou[j] >= iou_threshold:
if not matched:
matched = True
filtered_iou_per_box[j] = iou[j]
filtered_tp_per_box[j] = 1.
iou_per_box[pred_mask] = filtered_iou_per_box
tp_per_box[pred_mask] = filtered_tp_per_box
fp_per_box = 1 - tp_per_box
# print(pred_bboxes.shape)
# print(np.sum(tp_per_box) + np.sum(fp_per_box))
assert np.sum(tp_per_box) <= len(gt_bboxes), "Your code is wrong. The number of TP cases cannot exceed the number of ground truth boxes."
assert np.sum(tp_per_box) +
|
np.sum(fp_per_box)
|
numpy.sum
|
import os
import numpy as np
import pandas as pd
import matplotlib.cm as cm
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from astropy.io import fits
def cumulative_distribution_merra2(site, path, month_list, out_file, npix):
sites_dict = {'atacama':{'lllong':-67.9774138-0.35*npix,'lllat':-23.0755446-0.35*npix,'urlong':-67.9774138+0.35*npix,'urlat':-23.0755446+0.35*npix},
'tenerife':{'lllong':-17.0,'lllat':-27.5,'urlong':-12.9,'urlat':29.5},
'test_atc':{'lllong':-63.9774138-0.35,'lllat':-23.0755446-0.35,'urlong':-63.9774138+0.35,'urlat':-23.0755446+0.35},
'qubic': {'lllong':-66.474650-0.35*npix,'lllat':-24.191996 -0.35*npix,'urlong':-66.474650+0.35*npix,'urlat':-24.191996+0.35*npix},
'strip': {'lllong':-16.5110782-0.35*npix,'lllat':28.3003912-0.35*npix,'urlong':-16.5110782+0.35*npix,'urlat':28.3003912+0.35*npix}}
lllong = sites_dict[site]['lllong']
lllat = sites_dict[site]['lllat']
urlong = sites_dict[site]['urlong']
urlat = sites_dict[site]['urlat']
month_hdul = fits.HDUList()
mese = 0
for month in month_list:
print("Mese:", month)
df_tqi = pd.DataFrame()
df_tql = pd.DataFrame()
df_tqv = pd.DataFrame()
df_qv10m = pd.DataFrame()
df_ps = pd.DataFrame()
df_ts = pd.DataFrame()
df_t10m = pd.DataFrame()
df_u10m = pd.DataFrame()
df_v10m = pd.DataFrame()
file_list = os.listdir(month)
for day in file_list:
# print("GIorno: ", day)
data = Dataset(path+month+"/"+day, mode='r')
lons = data.variables['lon'][:]
lats = data.variables['lat'][:]
lon, lat = np.meshgrid(lons, lats)
idx = np.where((lon[1, :] < urlong) & (lon[1, :] > lllong))
idy = np.where((lat[:, 1] < urlat) & (lat[:, 1] > lllat))
WV_l_tqi = data.variables["TQI"][:, np.amin(idy):np.amax(idy)+1, np.amin(idx):np.amax(idx)+1]
WV_l_tql = data.variables["TQL"][:, np.amin(idy):np.amax(idy)+1, np.amin(idx):np.amax(idx)+1]
WV_l_tqv = data.variables["TQV"][:, np.amin(idy):np.amax(idy)+1, np.amin(idx):np.amax(idx)+1]
WV_l_qv10m = data.variables["QV10M"][:, np.amin(idy):np.amax(idy)+1, np.amin(idx):np.amax(idx)+1]
WV_l_ps = data.variables["PS"][:, np.amin(idy):np.amax(idy)+1, np.amin(idx):np.amax(idx)+1]
WV_l_ts = data.variables["TS"][:,
|
np.amin(idy)
|
numpy.amin
|
import numpy as np
## Polynomial Model from <NAME> (Rix+ 2017) ##
datadir = '/geir_data/scr/price-jones/Code/synspec/data/'
psminfo = np.load('{0}/kurucz_quadratic_psm.npz'.format(datadir))
coeff_array = psminfo['coeff_array']
# a set of training labels
training_labels = psminfo['training_labels']
wavelength = psminfo['wavelength']
# auxiliary arrays to reconstruct the spectrum (because we need to choose a reference point to "Taylor-expand"
inds = psminfo['indices']
reference_flux = psminfo['reference_flux']
reference_point = psminfo['reference_point']
Teff,logg,vturb,ch,nh,oh,nah,mgh,alh,sih,sh,kh,cah,tih,vh,mnh,nih,feh,c12c13 = reference_point
#LABEL ORDER Teff [1000K], logg, vturb [km/s] (micro), ch, nh, oh, nah, mgh, alh, sih, sh, kh, cah,
#tih, vh, mnh, nih, feh, log10(c12c13)
#==================================================
# make generate APOGEE spectrum
def generate_spectrum(labels=None,Teff=Teff,logg=logg,vturb=vturb,ch=ch,nh=nh,
oh=oh,nah=nah,mgh=mgh,alh=alh,sih=sih,sh=sh,kh=kh,
cah=cah,tih=tih,vh=vh,mnh=mnh,nih=nih,feh=feh,
c12c13=c12c13,order=2):
if not isinstance(labels,(list,np.ndarray)):
labels = np.array([Teff,logg,vturb,ch,nh,oh,nah,mgh,alh,sih,sh,kh,cah,
tih,vh,mnh,nih,feh,c12c13])
# make quadratic labels
linear_terms = np.array(labels) - reference_point
if order == 1:
lvec = np.hstack((linear_terms))
# generate spectrum
lin_coeff = coeff_array.T[:len(linear_terms)].T
spec_generate = np.dot(lin_coeff,lvec) + reference_flux
if order == 1.5:
linind = 19
t = linear_terms[0]
g = linear_terms[1]
f = linear_terms[17]
fit_terms = np.array([t**2,t*g,t*f,g**2,g*f,f**2])
lvec = np.hstack((linear_terms,fit_terms))
coeffs = np.array([coeff_array[:,0+linind],coeff_array[:,1+linind],
coeff_array[:,17+linind],
coeff_array[:,19+linind],
coeff_array[:,35+linind],
coeff_array[:,187+linind]])
coeffs = np.concatenate((coeff_array.T[:len(linear_terms)],
coeffs)).T
spec_generate = np.dot(coeffs,lvec) + reference_flux
if order == 2:
quadratic_terms = np.einsum('i,j->ij',linear_terms,
linear_terms)[inds[:,0],inds[:,1]]
lvec = np.hstack((linear_terms, quadratic_terms))
# generate spectrum
spec_generate = np.dot(coeff_array,lvec) + reference_flux
return spec_generate
linind = 19
lin_coeff = coeff_array.T[:linind].T
quad_coeff = np.array([coeff_array[:,0+linind],coeff_array[:,19+linind],
coeff_array[:,37+linind],coeff_array[:,54+linind],
coeff_array[:,70+linind],coeff_array[:,85+linind],
coeff_array[:,99+linind],coeff_array[:,112+linind],
coeff_array[:,124+linind],coeff_array[:,135+linind],
coeff_array[:,145+linind],coeff_array[:,154+linind],
coeff_array[:,162+linind],coeff_array[:,169+linind],
coeff_array[:,175+linind],coeff_array[:,180+linind],
coeff_array[:,184+linind],coeff_array[:,187+linind],
coeff_array[:,189+linind]]).T
cross_inds = {0:np.arange(1,19)+linind, #Teff
1:np.append(np.arange(20,37),[1])+linind,#logg
2:np.append(np.arange(38,54),[2,20])+linind, #vturb
3:np.append(np.arange(55,70),[3,21,38])+linind, #ch
4:np.append(np.arange(71,85),[4,22,39,55])+linind, #nh
5:np.append(np.arange(86,99),[5,23,40,56,71])+linind, #oh
6:np.append(np.arange(100,112),[6,24,41,57,72,86])+linind, #nah
7:np.append(np.arange(113,124),[7,25,42,58,73,87,100])+linind, #mgh
8:np.append(np.arange(125,135),[8,26,43,59,74,88,101,113])+linind, #alh
9:np.append(np.arange(136,145),[9,27,44,60,75,89,102,114,125])+linind, #sih
10:np.append(np.arange(146,154),[10,28,45,61,76,90,103,115,126,136])+linind, #sh
11:np.append(np.arange(155,162),[11,29,46,62,77,91,104,116,127,137,146])+linind, #kh
12:np.append(np.arange(163,169),[12,30,47,63,78,92,105,117,128,138,147,155])+linind, #cah
13:np.append(np.arange(170,175),[13,31,48,64,79,93,106,118,129,139,148,156,163])+linind, # tih
14:np.append(np.arange(176,180),[14,32,49,65,80,94,107,119,130,140,149,157,164,170])+linind, #vh
15:np.append(np.arange(181,184),[15,33,50,66,81,95,108,118,131,141,150,158,165,171,176])+linind, #mnh
16:np.append(np.arange(185,187),[16,34,51,67,82,96,109,119,132,142,151,159,166,172,177,181])+linind, #nih
17:np.append(
|
np.arange(188,189)
|
numpy.arange
|
import cv2
import cv2.ml
import numpy as np
from sklearn import cross_validation
import pandas as pd
import os
DATA_DIR = r'C:\Users\hawker\Dropbox\Public\data_intake'
# DATA_DIR = 'C:/users/Wojtek/Dropbox/data_intake/'
def load_data(train_folder):
image_array = []
df = pd.read_csv(os.path.join(train_folder, 'steering_filtered.csv'))
df = df[df['filtered'] == True]
train_labels = df[['w', 's', 'a', 'd']].as_matrix()
train_images = df['filenames'].as_matrix()
for train_image in train_images:
image_path = os.path.join(train_folder,train_image)
pic = cv2.imread(image_path)
small = cv2.resize(pic, (0, 0), fx=0.5, fy=0.5)
#print(small[:,:,1].flatten().shape)
image_array.append(small[:, :, 1].flatten())
train =
|
np.array(image_array)
|
numpy.array
|
import numpy as np
from athena import ndarray
from athena import gpu_links as gpu_op
from athena import gpu_ops as ad
def test_batch_matmul(shape1=(7, 4, 6), shape2=(7, 6, 5), transA=False, transB=False):
executor_ctx = ndarray.gpu(1)
if transA:
shape1 = tuple(list(shape1)[:-2] + [shape1[-1], shape1[-2]])
if transB:
shape2 = tuple(list(shape2)[:-2] + [shape2[-1], shape2[-2]])
data = np.random.normal(0.0, 0.2, shape1).astype(np.float32)
weights = np.random.normal(0.0, 0.1, shape2).astype(np.float32)
ath_data = ad.Variable(name='data')
ath_weights = ad.Variable(name='weights')
ath_output = ad.batch_matmul_op(ath_data, ath_weights, trans_A=transA, trans_B=transB)
ath_grads = ad.gradients(ath_output, [ath_data, ath_weights])
executor = ad.Executor(
[ath_output] + ath_grads,
ctx=executor_ctx
)
ath_results = executor.run(feed_dict={ath_data: data, ath_weights: weights})
ath_results = [res.asnumpy() for res in ath_results]
import tensorflow as tf
tf_data = tf.placeholder(name='data', dtype=tf.float32)
tf_weights = tf.placeholder(name='weights', dtype=tf.float32)
tf_output = tf.matmul(tf_data, tf_weights, transpose_a=transA, transpose_b=transB)
tf_grads = tf.gradients(tf_output, [tf_data, tf_weights])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_results = sess.run([tf_output] + tf_grads, feed_dict={tf_data: data, tf_weights: weights})
np.testing.assert_allclose(ath_results[0], tf_results[0], atol=1e-6)
np.testing.assert_allclose(ath_results[1], tf_results[1], atol=1e-6)
np.testing.assert_allclose(ath_results[2], tf_results[2], atol=1e-6)
print('Pass batch matmul op test with shape ', shape1, shape2)
test_batch_matmul()
test_batch_matmul(transA=True)
test_batch_matmul(transB=True)
test_batch_matmul(transA=True, transB=True)
test_batch_matmul(shape1=(11, 3, 23, 17), shape2=(11, 3, 17, 13))
test_batch_matmul(shape1=(11, 3, 23, 17), shape2=(11, 3, 17, 13), transA=True)
test_batch_matmul(shape1=(11, 3, 23, 17), shape2=(11, 3, 17, 13), transB=True)
test_batch_matmul(shape1=(11, 3, 23, 17), shape2=(11, 3, 17, 13), transA=True, transB=True)
def test_broadcast(shape1=(3, 1), shape2=(2, 3, 4)):
ctx = ndarray.gpu(1)
x = np.random.random(shape1).astype(np.float32)
ath_x = ad.Variable(name='x', value=x)
ath_y = ad.broadcast_shape_op(ath_x, shape2)
ath_grad = ad.gradients(ath_y, [ath_x])[0]
executor = ad.Executor([ath_y, ath_grad], ctx=ctx)
ath_results = [var.asnumpy() for var in executor.run()]
import tensorflow as tf
tf_x = tf.convert_to_tensor(x)
tf_y = tf.broadcast_to(tf_x, shape2)
tf_grad = tf.gradients(tf_y, tf_x)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_results = sess.run([tf_y, tf_grad])
np.testing.assert_allclose(ath_results[0], tf_results[0])
np.testing.assert_allclose(ath_results[1], np.reshape(tf_results[1], ath_results[1].shape))
print('Passed broadcast shape op test with shape ', shape1, shape2)
test_broadcast()
test_broadcast((1,), (2, 3, 4, 5))
test_broadcast((1, 1, 3, 1), (9, 8, 3, 7))
def test_reduce_sum(shape=(2, 3, 4), axes=[2]):
ctx = ndarray.gpu(1)
x = np.random.random(shape).astype(np.float32)
ath_x = ad.Variable(name='x', value=x)
ath_y = ad.reduce_sum_op(ath_x, axes, keepdims=False)
ath_grad = ad.gradients(ath_y, [ath_x])[0]
executor = ad.Executor([ath_y, ath_grad], ctx=ctx)
ath_results = [var.asnumpy() for var in executor.run()]
import tensorflow as tf
tf_x = tf.convert_to_tensor(x)
tf_y = tf.reduce_sum(tf_x, axes)
tf_grad = tf.gradients(tf_y, tf_x)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_results = sess.run([tf_y, tf_grad])
np.testing.assert_allclose(ath_results[0], np.reshape(tf_results[0], ath_results[0].shape), rtol=1e-6)
np.testing.assert_allclose(ath_results[1], np.reshape(tf_results[1], ath_results[1].shape), rtol=1e-6)
print('Passed reduce sum op test with shape and axes ', shape, axes)
test_reduce_sum()
test_reduce_sum((2, 3, 4), [2, 1])
test_reduce_sum((2, 3, 4), [2, 1, 0])
test_reduce_sum((2, 3, 1, 5, 6), [1, 2, 4])
def test_reduce_mean(shape=(2, 3, 4), axes=[2]):
ctx = ndarray.gpu(1)
x = np.random.random(shape).astype(np.float32)
ath_x = ad.Variable(name='x', value=x)
ath_y = ad.reduce_mean_op(ath_x, axes, keepdims=False)
ath_grad = ad.gradients(ath_y, [ath_x])[0]
executor = ad.Executor([ath_y, ath_grad], ctx=ctx)
ath_results = [var.asnumpy() for var in executor.run()]
import tensorflow as tf
tf_x = tf.convert_to_tensor(x)
tf_y = tf.reduce_mean(tf_x, axes)
tf_grad = tf.gradients(tf_y, tf_x)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_results = sess.run([tf_y, tf_grad])
np.testing.assert_allclose(ath_results[0], np.reshape(tf_results[0], ath_results[0].shape), rtol=1e-6)
np.testing.assert_allclose(ath_results[1], np.reshape(tf_results[1], ath_results[1].shape), rtol=1e-6)
print('Passed reduce mean op test with shape and axes ', shape, axes)
test_reduce_mean()
test_reduce_mean((2, 3, 4), [2, 1])
test_reduce_mean((2, 3, 4), [2, 1, 0])
test_reduce_mean((2, 3, 1, 5, 6), [1, 2, 4])
def test_layernorm_forward(shape=(5, 3)):
ctx = ndarray.gpu(1)
# shape = (5, 3)
last_dim = shape[-1]
x = np.random.random(shape).astype(np.float32)
scale = np.random.random((last_dim,)).astype(np.float32)
bias = np.random.random((last_dim,)).astype(np.float32)
arr_x = ndarray.array(x, ctx=ctx)
arr_scale = ndarray.array(scale, ctx=ctx)
arr_bias = ndarray.array(bias, ctx=ctx)
arr_mean = ndarray.empty(list(shape[:-1]) + [1], ctx=ctx)
arr_var = ndarray.empty(list(shape[:-1]) + [1], ctx=ctx)
arr_y = ndarray.empty((shape), ctx=ctx)
gpu_op.layer_normalization(arr_x, arr_scale, arr_bias, arr_mean, arr_var, arr_y, 0.01)
y = arr_y.asnumpy()
np_means = x.mean(axis=-1, dtype=np.float32, keepdims=True)
np_vars = x.var(axis=-1, dtype=np.float32, keepdims=True)
std = np.sqrt(np_vars + 0.01, dtype=np.float32)
centered_input = x - np_means
normed_input = centered_input / std
bc_shape = [1] * len(x.shape)
bc_shape[-1] = x.shape[-1]
y_ = scale.reshape(bc_shape) * normed_input + \
bias.reshape(bc_shape)
np.testing.assert_allclose(np_means, arr_mean.asnumpy(), atol=1e-6)
np.testing.assert_allclose(np_vars, arr_var.asnumpy(), atol=1e-6)
np.testing.assert_allclose(y_, y, atol=1e-6)
print('Pass forward test with shape ', shape)
# test_layernorm_forward()
# test_layernorm_forward(shape=(4, 500, 67))
# test_layernorm_forward(shape=(2, 3, 5, 7, 11))
def test_layernorm_backward(shape=(5, 3)):
ctx = ndarray.gpu(1)
# shape = (5, 3)
last_dim = shape[-1]
grads = np.random.random(shape).astype(np.float32)
x = np.random.random(shape).astype(np.float32)
scale = np.random.random((last_dim,)).astype(np.float32)
mean = np.random.random(list(shape[:-1])+[1]).astype(np.float32)
var = np.random.random(list(shape[:-1])+[1]).astype(np.float32)
arr_grads = ndarray.array(grads, ctx=ctx)
arr_x = ndarray.array(x, ctx=ctx)
arr_scale = ndarray.array(scale, ctx=ctx)
arr_mean = ndarray.array(mean, ctx=ctx)
arr_var = ndarray.array(var, ctx=ctx)
grad_inarr = ndarray.empty(shape, ctx=ctx)
grad_scale = ndarray.empty((last_dim,), ctx=ctx)
grad_bias = ndarray.empty((last_dim,), ctx=ctx)
gpu_op.layer_normalization_gradient(arr_grads, arr_x, arr_scale,
grad_inarr, grad_scale, grad_bias, arr_mean, arr_var, 0.01)
# numpy calculate phase
red_axis = tuple(range(grads.ndim-1))
np_grad_bias = grads.sum(red_axis) # (X,)
std = np.sqrt(var + 0.01) # (N, 1)
x_centered = x - mean # (N, X)
x_norm = x_centered / std # (N, X)
np_grad_scale = (grads * x_norm).sum(red_axis) # (X,)
last_dim = x.shape[-1]
dx_norm = grads * scale.reshape([1] * (grads.ndim - 1) + [-1]) # (N, X)
dvar = (dx_norm * x_centered).sum(axis=-1, keepdims=True) * -0.5 / (var + 0.01) / std # (N, 1)
dx_mu_1 = dx_norm / std # (N, X)
dx_mu_2 = dvar * 2 * x_centered / last_dim # (N, X)
dx_1 = dx_mu_1 + dx_mu_2 # (N, X)
dx_2 = -1 * dx_1.sum(axis=-1, keepdims=True) / last_dim # (N, 1)
np_grad_inarr = dx_1 + dx_2 # (N, X)
np.testing.assert_allclose(np_grad_bias, grad_bias.asnumpy(), rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(np_grad_scale, grad_scale.asnumpy(), rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(np_grad_inarr, grad_inarr.asnumpy(), rtol=1e-4, atol=1e-4)
print('Pass backward test with shape ', shape)
# test_layernorm_backward()
# test_layernorm_backward(shape=(4, 500, 67))
# test_layernorm_backward(shape=(2, 3, 5, 7, 11))
def test_layer_norm_op(shape = (5, 3)):
# scale = np.random.random((shape[-1],)).astype(np.float32)
# bias = np.random.random((shape[-1],)).astype(np.float32)
scale = np.ones((shape[-1], )).astype(np.float32)
bias = np.zeros((shape[-1], )).astype(np.float32)
scale_data = ad.Variable(name='layer_norm_scale', value=scale)
bias_data = ad.Variable(name='layer_norm_biad', value=bias)
input_data = ad.Variable(name='input')
output = ad.layer_normalization_op(input_data, scale_data, bias_data, 1e-12)
grads = ad.gradients(output, [scale_data, bias_data, input_data])
executor_ctx = ndarray.gpu(1)
executor = ad.Executor(
[output]+grads,
ctx=executor_ctx)
x = np.random.normal(loc=0.0, scale=1, size=shape).astype(np.float32)
results = executor.run(feed_dict={input_data: x})
y = results[0].asnumpy()
grad_scale = results[1].asnumpy()
grad_bias = results[2].asnumpy()
grad_input = results[3].asnumpy()
# print(y)
np_means = x.mean(axis=-1, dtype=np.float32, keepdims=True)
np_vars = x.var(axis=-1, dtype=np.float32, keepdims=True)
std = np.sqrt(np_vars + 1e-12, dtype=np.float32)
centered_input = x - np_means
normed_input = centered_input / std
bc_shape = [1] * len(x.shape)
bc_shape[-1] = x.shape[-1]
y_ = scale.reshape(bc_shape) * normed_input + \
bias.reshape(bc_shape)
np.testing.assert_allclose(y_, y, atol=1e-6)
# print(y_)
prev_grad = np.ones(y_.shape).astype(np.float32)
red_axis = tuple(range(prev_grad.ndim-1))
np_grad_bias = prev_grad.sum(red_axis) # (X,)
std =
|
np.sqrt(np_vars + 1e-12)
|
numpy.sqrt
|
import smart
import numpy as np
def _residual(data,model):
"""
Return a residual flux array with the length of the data. (deprecated)
"""
residual = []
# find the region where the model is in the range of the data
data_model_range = np.where(np.logical_and(np.array(model.wave) >= data.wave[0], \
np.array(model.wave) <= data.wave[-1]))[0]
#residual = np.zeros(len(data_model_range))
for i in data_model_range:
model_wave = model.wave[i]
j = np.isclose(
|
np.array(data.wave)
|
numpy.array
|
import functools
import unittest
import numpy as np
import jax.numpy as jnp
from timemachine.lib import custom_ops
from timemachine.potentials import bonded
from timemachine.potentials import nonbonded
import jax
import jax.ops
from jax.config import config; config.update("jax_enable_x64", True)
# testing thermodynamic integration
class ReferenceLangevin():
def __init__(self, dt, ca, cb, cc):
self.dt = dt
self.coeff_a = ca
self.coeff_bs = cb
self.coeff_cs = cc
def step(self, x_t, v_t, dE_dx):
noise = np.random.normal(size=(x_t.shape[0], x_t.shape[1]))
v_t_1 = self.coeff_a*v_t - jnp.expand_dims(self.coeff_bs, axis=-1)*dE_dx + jnp.expand_dims(self.coeff_cs, axis=-1)*noise
x_t_1 = x_t + v_t_1*self.dt
final_X = jnp.concatenate([x_t_1[:, :3], x_t[:, 3:]], axis=1)
final_V = jnp.concatenate([v_t_1[:, :3], v_t[:, 3:]], axis=1)
return final_X, final_V
def symmetrize(a, num_atoms, num_dims):
a = a.reshape(num_atoms*ndims, num_atoms*ndims)
a = np.tril(a)
a = a + a.T - np.diag(a.diagonal())
return a.reshape(num_atoms, ndims, num_atoms, ndims)
def compute_d2u_dldp(energies, params, xs, dx_dps, dp_idxs, num_host_atoms):
assert len(xs.shape) == 2
N = xs.shape[0]
D = xs.shape[1]
assert len(dx_dps.shape) == 3
mixed_partials = []
hessians = []
# we need to compute this separately since the context's sgemm call overwrites
# the values of d2u_dxdp
# batched call
for p in energies:
_, _, ph, _, pmp = p.derivatives(np.expand_dims(xs, axis=0), params, dp_idxs)
mixed_partials.append(pmp)
#DEBUG LOWER/UPPER
np.testing.assert_almost_equal(symmetrize(np.triu(ph), N, D), symmetrize(np.tril(ph), N, D))
hessians.append(ph)
hessians = np.sum(hessians, axis=0)[0]
#DEBUG LOWER/UPPER
np.testing.assert_almost_equal(symmetrize(np.triu(hessians)), symmetrize(np.tril(hessians)))
# assert 0
mixed_part = np.sum(mixed_partials, axis=0)[0]
hess_idxs = jax.ops.index[num_host_atoms:, 3:, :, :3]
dx_dp_idxs = jax.ops.index[:, :, :3]
mp_idxs = jax.ops.index[:, num_host_atoms:, 3:]
lhs = np.einsum('ijkl,mkl->mij', hessians[hess_idxs], dx_dps[dx_dp_idxs]) # correct only up to main hessian
rhs = mixed_part[mp_idxs]
# lhs + rhs has shape [P, num_atoms-num_host_atoms, 1]
d2u_dldp = np.sum(lhs+rhs, axis=(1,2)) # P N 4 -> P
return d2u_dldp
class TestTI(unittest.TestCase):
def test_du_dlamba(self):
param_idxs = np.array([
[0, 3],
[1, 2],
[1, 2],
[1, 2],
[1, 2]], dtype=np.int32)
scale_matrix = np.array([
[ 0, 0, 1,0.5, 0],
[ 0, 0, 0, 1, 1],
[ 1, 0, 0, 0,0.2],
[0.5, 1, 0, 0, 1],
[ 0, 1,0.2, 1, 0],
], dtype=np.float64)
params = np.array([3.0, 2.0, 1.0, 1.4], dtype=np.float64)
energy_fn = functools.partial(nonbonded.lennard_jones,
scale_matrix=scale_matrix,
param_idxs=param_idxs,
box=None,
cutoff=None)
def potential_lambda(l, pp):
# extra_dim = np.zeros(5)
# extra_dim[3:, 4] = l
x0 = jnp.array([
[ 1.0, 0.5, -0.5, 0.0],
[ 0.2, 0.1, -0.3, 0.0],
[ 0.5, 0.4, 0.3, 0.0],
[-1.1, -0.5, -0.3, l],
[ 0.7, -0.2, 2.3, l],
], dtype=np.float64)
return energy_fn(x0, pp)
lamb = 0.1
x0 = jnp.array([
[ 1.0, 0.5, -0.5, 0.0],
[ 0.2, 0.1, -0.3, 0.0],
[ 0.5, 0.4, 0.3, 0.0],
[-1.1, -0.5, -0.3, lamb],
[ 0.7, -0.2, 2.3, lamb],
], dtype=np.float64)
dudl_fn = jax.grad(potential_lambda, argnums=(0,))
ref_val = dudl_fn(lamb, params)
# d2u_dldp = jax.jacfwd(dudl_fn, argnums=(1,))
dudx = jax.grad(energy_fn, argnums=(0,))
test_grads = dudx(x0, params)[0]
test_val = np.sum(test_grads[3:, 3:])
np.testing.assert_almost_equal(ref_val, test_val)
def test_4d_ti(self):
# first three atoms are bonded
# remaining two particle are nonbonded
masses = np.array([1.0, 12.0, 4.0, 5.0, 5.0])
x0 = np.array([
[ 1.0, 0.5, -0.5, 0.0],
[ 0.2, 0.1, -0.3, 0.0],
[ 0.5, 0.4, 0.3, 0.0],
[-1.1, -0.5, -0.3, 0.7],
[ 0.7, -0.2, 2.3, 0.7],
], dtype=np.float64)
x0.setflags(write=False)
np.random.seed(1337)
v0 = np.random.rand(x0.shape[0], x0.shape[1])
v0[:, -1] = 0 # this needs to be set else the velocities will probably a small delta in the 4th dimension
num_atoms = x0.shape[0]
# bond bond angle angle lj lj lj lj q q
params = np.array([100.0, 2.0, 75.0, 1.81, 3.0, 2.0, 1.0, 1.4, 0.21, -0.12], np.float64)
# bond_params = np.array([100.0, 2.0], dtype=np.float64)
bond_idxs =
|
np.array([[0, 1], [1, 2], [3, 4]], dtype=np.int32)
|
numpy.array
|
# t-leNet model: t-leNet + WW
import tensorflow.keras as keras
import tensorflow as tf
import numpy as np
import time
from utils.utils import save_logs_t_leNet as save_logs
from utils.utils import calculate_metrics
class Classifier_TLENET:
def __init__(self, output_directory, input_shape, nb_classes,verbose=0,build=True):
self.output_directory = output_directory
self.verbose = verbose
self.warping_ratios = [0.5,1,2]
self.slice_ratio = 0.1
self.model = self.build_model(input_shape, nb_classes)
def slice_data(self, data_x, data_y, length_sliced):
n = data_x.shape[0]
length = data_x.shape[1]
n_dim = data_x.shape[2] # for MTS
nb_classes = data_y.shape[1]
increase_num = length - length_sliced + 1 #if increase_num =5, it means one ori becomes 5 new instances.
n_sliced = n * increase_num
print((n_sliced, length_sliced,n_dim))
new_x = np.zeros((n_sliced, length_sliced,n_dim))
new_y = np.zeros((n_sliced,nb_classes))
for i in range(n):
for j in range(increase_num):
new_x[i * increase_num + j, :,:] = data_x[i,j : j + length_sliced,:]
new_y[i * increase_num + j] = np.int_(data_y[i].astype(np.float32))
return new_x, new_y, increase_num
def window_warping(self, data_x, warping_ratio):
num_x = data_x.shape[0]
len_x = data_x.shape[1]
dim_x = data_x.shape[2]
x = np.arange(0,len_x,warping_ratio)
xp = np.arange(0,len_x)
new_length = len(np.interp(x,xp,data_x[0,:,0]))
warped_series = np.zeros((num_x,new_length,dim_x),dtype=np.float64)
for i in range(num_x):
for j in range(dim_x):
warped_series[i,:,j] =
|
np.interp(x,xp,data_x[i,:,j])
|
numpy.interp
|
import scipy
import scipy.sparse
import numpy as np
import copy
def util_convert_csr_to_dds(adj_scipy_csr, num_col_partitions):
"""Doing src vertex partitioning (column dimension) by converting csr to dds (dense-dense-sparse).
Parameters
----------
adj_scipy_csr : scipy.sparse.csr_matrix
The input matrix to be partitioned
num_col_partitions : int
Number of partitions along the column dimension
Returns
-------
s1_pos : numpy.array, dtype is int32
1-D with shape [num_col_partitions * (num_rows + 1)]
s1_idx : numpy.array, dtype is int32
1-D with shape [nnz]
vals : numpy.array, dtype is float32
1-D with shape [nnz]
"""
num_rows = adj_scipy_csr.shape[0]
num_cols = adj_scipy_csr.shape[1]
adj_data = adj_scipy_csr.data
adj_indices = adj_scipy_csr.indices
adj_indptr = adj_scipy_csr.indptr
d1_size = num_col_partitions
d2_size = adj_indptr.shape[0]
s1_pos = np.zeros(shape=(d1_size*d2_size), dtype=adj_indptr.dtype)
s1_idx = np.zeros(shape=adj_indices.shape, dtype=adj_indices.dtype)
vals =
|
np.zeros(shape=adj_data.shape, dtype=adj_data.dtype)
|
numpy.zeros
|
# License: BSD 3 clause
import numpy as np
from tick.base_model import ModelFirstOrder, ModelLipschitz
from .build.survival import ModelSCCS as _ModelSCCS
from tick.preprocessing.utils import check_longitudinal_features_consistency, \
check_censoring_consistency
class ModelSCCS(ModelFirstOrder, ModelLipschitz):
"""Discrete-time Self Control Case Series (SCCS) likelihood. This class
provides first order information (gradient and loss) model.
Parameters
----------
n_intervals : `int`
Number of time intervals observed for each sample.
n_lags : `numpy.ndarray`, shape=(n_features,), dtype="uint64"
Number of lags per feature. The model will regress labels on the last
observed values of the features over the corresponding `n_lags` time
intervals. `n_lags` values must be between 0 and `n_intervals` - 1.
Attributes
----------
features : `list` of `numpy.ndarray` or `list` of `scipy.sparse.csr_matrix`,
list of length n_cases, each element of the list of
shape=(n_intervals, n_features)
The list of features matrices.
labels : `list` of `numpy.ndarray`,
list of length n_cases, each element of the list of
shape=(n_intervals,)
The labels vector
censoring : `numpy.ndarray`, shape=(n_cases,), dtype="uint64"
The censoring data. This array should contain integers in
[1, n_intervals]. If the value i is equal to n_intervals, then there
is no censoring for sample i. If censoring = c < n_intervals, then
the observation of sample i is stopped at interval c, that is, the
row c - 1 of the corresponding matrix. The last n_intervals - c rows
are then set to 0.
n_cases : `int` (read-only)
Number of samples
n_features : `int` (read-only)
Number of features
n_coeffs : `int` (read-only)
Total number of coefficients of the model
"""
_const_attr = [
"labels", "features", "censoring", "n_features", "n_cases", "n_lags",
"n_intervals"
]
_attrinfos = {key: {'writable': False} for key in _const_attr}
def __init__(self, n_intervals: int, n_lags: np.array):
ModelFirstOrder.__init__(self)
ModelLipschitz.__init__(self)
self.n_intervals = n_intervals
self.n_features = len(n_lags)
self.n_lags = n_lags
for n_l in n_lags:
if n_l >= n_intervals:
raise ValueError("n_lags should be < n_intervals")
self.labels = None
self.features = None
self.censoring = None
self.n_cases = None
def fit(self, features, labels, censoring=None):
"""Set the data into the model object.
Parameters
----------
features : List[{2d array, csr matrix containing float64
of shape (n_intervals, n_features)}]
The features matrix
labels : List[{1d array, csr matrix of shape (n_intervals,)]
The labels vector
censoring : 1d array of shape (n_cases,)
The censoring vector
Returns
-------
output : `ModelSCCS`
The current instance with given data
"""
ModelFirstOrder.fit(self, features, labels, censoring)
ModelLipschitz.fit(self, features, labels)
self._set(
"_model",
_ModelSCCS(features=self.features, labels=self.labels,
censoring=self.censoring, n_lags=self.n_lags))
self.dtype = features[0].dtype
return self
def _set_data(self, features, labels, censoring):
"""Set the data to the model.
Parameters
----------
features : `list` of `numpy.ndarray` or `list` of `scipy.sparse.csr_matrix`,
list of length n_cases, each element of the list of
shape=(n_intervals, n_features)
The list of features matrices.
labels : `list` of `numpy.ndarray`,
list of length n_cases, each element of the list of
shape=(n_intervals,)
The labels vector
censoring : `numpy.ndarray`, shape=(n_cases,), dtype="uint64"
The censoring data. This array should contain integers in
[1, n_intervals]. If the value i is equal to n_intervals, then there
is no censoring for sample i. If censoring = c < n_intervals, then
the observation of sample i is stopped at interval c, that is, the
row c - 1 of the correponding matrix. The last n_intervals - c rows
are then set to 0.
"""
n_intervals, n_coeffs = features[0].shape
n_lags = self.n_lags
self._set("n_intervals", n_intervals)
self._set("n_coeffs", n_coeffs)
# TODO: implement checker as outside function
# if n_lags > 0 and n_coeffs % (n_lags + 1) != 0:
# raise ValueError("(n_lags + 1) should be a divisor of n_coeffs")
# else:
# self._set("n_features", int(n_coeffs / (n_lags + 1)))
self._set("n_cases", len(features))
if len(labels) != self.n_cases:
raise ValueError("Features and labels lists should have the same\
length.")
if censoring is None:
censoring =
|
np.full(self.n_cases, self.n_intervals, dtype="uint64")
|
numpy.full
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy.io
import sklearn
import sklearn.datasets
def sigmoid(x):
s = 1/(1+np.exp(-x))
return s
def relu(x):
s = np.maximum(0,x)
return s
def softmax(Z):
S = np.exp(Z + np.max(Z)) / np.sum(np.exp(Z + np.max(Z)), axis = 0)
return S
def forward_prop(X, parameters):
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
z1 =
|
np.dot(W1, X)
|
numpy.dot
|
"""Robust Adversarial Reinforcement Learning using Adversarial Populations (RAP)
References papers & code:
* [Robust Adversarial Reinforcement Learning](https://arxiv.org/abs/1703.02702)
* [Robust Reinforcement Learning using Adversarial Populations](https://arxiv.org/abs/2008.01825)
* [robust-adversarial-rl](https://github.com/jerinphilip/robust-adversarial-rl)
* [rllab-adv](https://github.com/lerrel/rllab-adv)
* [Robust Reinforcement Learning via adversary pools](https://github.com/eugenevinitsky/robust_RL_multi_adversary)
Example:
train on cartpole_adversary::
$ python tests/test_main.py --mode train_two_phase --exp_id rap_cartpole_adv \
--algo rap --task cartpole_adversary --num_workers 2 --max_env_steps 2000000 \
--tensorboard --use_gae --num_adversaries 2
Todo:
*
"""
import os
import time
import numpy as np
import torch
from collections import defaultdict
from safe_control_gym.utils.logging import ExperimentLogger
from safe_control_gym.utils.utils import get_random_state, set_random_state, is_wrapped
from safe_control_gym.envs.env_wrappers.vectorized_env import make_vec_envs
from safe_control_gym.envs.env_wrappers.record_episode_statistics import RecordEpisodeStatistics, VecRecordEpisodeStatistics
from safe_control_gym.math_and_models.normalization import BaseNormalizer, MeanStdNormalizer, RewardStdNormalizer
from safe_control_gym.controllers.base_controller import BaseController
from safe_control_gym.controllers.ppo.ppo_utils import PPOAgent, PPOBuffer, compute_returns_and_advantages
from safe_control_gym.controllers.rarl.rarl_utils import split_obs_by_adversary
class RAP(BaseController):
"""rarl via adersarial population with PPO."""
def __init__(self,
env_func,
training=True,
checkpoint_path="model_latest.pt",
output_dir="temp",
use_gpu=False,
seed=0,
**kwargs):
super().__init__(env_func, training, checkpoint_path, output_dir, use_gpu, seed, **kwargs)
# task
if self.training:
# training (+ evaluation)
self.env = make_vec_envs(env_func, None, self.rollout_batch_size, self.num_workers, seed)
self.env = VecRecordEpisodeStatistics(self.env, self.deque_size)
self.eval_env = env_func(seed=seed * 111)
self.eval_env = RecordEpisodeStatistics(self.eval_env, self.deque_size)
else:
# testing only
self.env = env_func()
self.env = RecordEpisodeStatistics(self.env)
# protagonist and adversary agents
shared_agent_args = dict(hidden_dim=self.hidden_dim,
use_clipped_value=self.use_clipped_value,
clip_param=self.clip_param,
target_kl=self.target_kl,
entropy_coef=self.entropy_coef,
actor_lr=self.actor_lr,
critic_lr=self.critic_lr,
opt_epochs=self.opt_epochs,
mini_batch_size=self.mini_batch_size)
self.agent = PPOAgent(self.env.observation_space, self.env.action_space, **shared_agent_args)
self.agent.to(self.device)
# fetch adversary specs from env
if self.training:
self.adv_obs_space = self.env.get_attr("adversary_observation_space")[0]
self.adv_act_space = self.env.get_attr("adversary_action_space")[0]
else:
self.adv_obs_space = self.env.adversary_observation_space
self.adv_act_space = self.env.adversary_action_space
self.adversaries = [PPOAgent(self.adv_obs_space, self.adv_act_space, **shared_agent_args) for _ in range(self.num_adversaries)]
for adv in self.adversaries:
adv.to(self.device)
# pre-/post-processing
self.obs_normalizer = BaseNormalizer()
if self.norm_obs:
self.obs_normalizer = MeanStdNormalizer(shape=self.env.observation_space.shape, clip=self.clip_obs, epsilon=1e-8)
self.reward_normalizer = BaseNormalizer()
if self.norm_reward:
self.reward_normalizer = RewardStdNormalizer(gamma=self.gamma, clip=self.clip_reward, epsilon=1e-8)
# logging
if self.training:
log_file_out = True
use_tensorboard = self.tensorboard
else:
# disable logging to texts and tfboard for evaluation
log_file_out = False
use_tensorboard = False
self.logger = ExperimentLogger(output_dir, log_file_out=log_file_out, use_tensorboard=use_tensorboard)
def reset(self):
"""Do initializations for training or evaluation."""
if self.training:
# Add episodic stats to be tracked.
self.env.add_tracker("constraint_violation", 0)
self.env.add_tracker("constraint_violation", 0, mode="queue")
self.eval_env.add_tracker("constraint_violation", 0, mode="queue")
self.eval_env.add_tracker("mse", 0, mode="queue")
self.total_steps = 0
obs, _ = self.env.reset()
self.obs = self.obs_normalizer(obs)
else:
# Add episodic stats to be tracked.
self.env.add_tracker("constraint_violation", 0, mode="queue")
self.env.add_tracker("constraint_values", 0, mode="queue")
self.env.add_tracker("mse", 0, mode="queue")
def close(self):
"""Shuts down and cleans up lingering resources."""
self.env.close()
if self.training:
self.eval_env.close()
self.logger.close()
def save(self, path):
"""Saves model params and experiment state to checkpoint path."""
path_dir = os.path.dirname(path)
os.makedirs(path_dir, exist_ok=True)
state_dict = {
"agent": self.agent.state_dict(),
"adversary": [adv.state_dict() for adv in self.adversaries],
"obs_normalizer": self.obs_normalizer.state_dict(),
"reward_normalizer": self.reward_normalizer.state_dict(),
}
if self.training:
exp_state = {
"total_steps": self.total_steps,
"obs": self.obs,
"random_state": get_random_state(),
"env_random_state": self.env.get_env_random_state()
}
state_dict.update(exp_state)
torch.save(state_dict, path)
def load(self, path):
"""Restores model and experiment given checkpoint path."""
state = torch.load(path)
# restore pllicy
self.agent.load_state_dict(state["agent"])
for i, adv_state_dict in enumerate(state["adversary"]):
self.adversaries[i].load_state_dict(adv_state_dict)
self.obs_normalizer.load_state_dict(state["obs_normalizer"])
self.reward_normalizer.load_state_dict(state["reward_normalizer"])
# restore experiment state
if self.training:
self.total_steps = state["total_steps"]
self.obs = state["obs"]
set_random_state(state["random_state"])
self.env.set_env_random_state(state["env_random_state"])
self.logger.load(self.total_steps)
def learn(self, env=None, **kwargs):
"""Performs learning (pre-training, training, fine-tuning, etc)."""
while self.total_steps < self.max_env_steps:
results = self.train_step()
# checkpoint
if self.total_steps >= self.max_env_steps or (self.save_interval and self.total_steps % self.save_interval == 0):
# latest/final checkpoint
self.save(self.checkpoint_path)
self.logger.info("Checkpoint | {}".format(self.checkpoint_path))
if self.num_checkpoints and self.total_steps % (self.max_env_steps // self.num_checkpoints) == 0:
# intermediate checkpoint
path = os.path.join(self.output_dir, "checkpoints", "model_{}.pt".format(self.total_steps))
self.save(path)
# eval
if self.eval_interval and self.total_steps % self.eval_interval == 0:
eval_results = self.run(env=self.eval_env, n_episodes=self.eval_batch_size)
results["eval"] = eval_results
self.logger.info("Eval | ep_lengths {:.2f} +/- {:.2f} | ep_return {:.3f} +/- {:.3f}".format(eval_results["ep_lengths"].mean(),
eval_results["ep_lengths"].std(),
eval_results["ep_returns"].mean(),
eval_results["ep_returns"].std()))
# save best model
eval_score = eval_results["ep_returns"].mean()
eval_best_score = getattr(self, "eval_best_score", -np.infty)
if self.eval_save_best and eval_best_score < eval_score:
self.eval_best_score = eval_score
self.save(os.path.join(self.output_dir, "model_best.pt"))
# logging
if self.log_interval and self.total_steps % self.log_interval == 0:
self.log_step(results)
def run(self, env=None, render=False, n_episodes=10, verbose=False, use_adv=False, **kwargs):
"""Runs evaluation with current policy."""
self.agent.eval()
for adv in self.adversaries:
adv.eval()
self.obs_normalizer.set_read_only()
if env is None:
env = self.env
else:
if not is_wrapped(env, RecordEpisodeStatistics):
env = RecordEpisodeStatistics(env, n_episodes)
env.add_tracker("constraint_violation", 0, mode="queue")
env.add_tracker("constraint_values", 0, mode="queue")
env.add_tracker("mse", 0, mode="queue")
obs, _ = env.reset()
obs = self.obs_normalizer(obs)
ep_returns, ep_lengths = [], []
frames = []
while len(ep_returns) < n_episodes:
with torch.no_grad():
obs = torch.FloatTensor(obs).to(self.device)
action = self.agent.ac.act(obs)
# no disturbance during testing
if use_adv:
adv_idx = np.random.choice(self.num_adversaries)
with torch.no_grad():
action_adv = self.adversaries[adv_idx].ac.act(obs)
else:
action_adv = np.zeros(self.adv_act_space.shape[0])
env.set_adversary_control(action_adv)
obs, reward, done, info = env.step(action)
if render:
env.render()
frames.append(env.render("rgb_array"))
if verbose:
print("obs {} | act {}".format(obs, action))
if done:
assert "episode" in info
ep_returns.append(info["episode"]["r"])
ep_lengths.append(info["episode"]["l"])
obs, _ = env.reset()
obs = self.obs_normalizer(obs)
# collect evaluation results
ep_lengths = np.asarray(ep_lengths)
ep_returns = np.asarray(ep_returns)
eval_results = {"ep_returns": ep_returns, "ep_lengths": ep_lengths}
if len(frames) > 0:
eval_results["frames"] = frames
# Other episodic stats from evaluation env.
if len(env.queued_stats) > 0:
queued_stats = {k: np.asarray(v) for k, v in env.queued_stats.items()}
eval_results.update(queued_stats)
return eval_results
def train_step(self):
"""Performs a training/fine-tuning step."""
self.agent.train()
for adv in self.adversaries:
adv.train()
self.obs_normalizer.unset_read_only()
start = time.time()
results = defaultdict(list)
# collect trajectories (with different adversary each time)
rollouts, rollout_splits = self.collect_rollouts()
# perform updates for both agent and adversaries
agent_results = self.agent.update(rollouts)
results.update(agent_results)
for adv_idx, adv_rollouts in rollout_splits:
adv_results = self.adversaries[adv_idx].update(adv_rollouts)
adv_results = {k + "_adv{}".format(adv_idx): v for k, v in adv_results.items()}
results.update(adv_results)
# miscellaneous
results.update({"step": self.total_steps, "elapsed_time": time.time() - start, "adv_indices": [adv_idx for adv_idx, _ in rollout_splits]})
return results
def log_step(self, results):
"""Does logging after a training step."""
step = results["step"]
# runner stats
self.logger.add_scalars(
{
"step": step,
"time": results["elapsed_time"],
"progress": step / self.max_env_steps
},
step,
prefix="time",
write=False,
write_tb=False)
# learning stats
self.logger.add_scalars(
{
k: results[k]
for k in ["policy_loss", "value_loss", "entropy_loss"]
},
step,
prefix="loss")
for adv_idx in results["adv_indices"]:
self.logger.add_scalars(
{
k: results[k + "_adv{}".format(adv_idx)]
for k in ["policy_loss", "value_loss", "entropy_loss"]
},
step,
prefix="loss_adv{}".format(adv_idx))
# performance stats
ep_lengths =
|
np.asarray(self.env.length_queue)
|
numpy.asarray
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 16:02:11 2019
@author: egomez
"""
######################
## Compute the decision analysis of the estimated p-values
#decission_param = decission_data(df_pvalues, combination_dict, data_features, sign_level = alpha, gamma = gamma)
#
#
## Calculate the decision index
#Theta = get_decision_index(decission_param, data_features, combination_dict)
######################
import numpy as np
import pandas as pd
import scipy
def func_exp_pure(x, a, c):
return a*np.exp(-c*(x))
def n_gamma_function(gamma_par, a, c):
gamma_par = np.array(gamma_par, dtype=float)
a = np.array(a, dtype=float)
c = np.array(c, dtype=float)
return np.floor((-1/c)*np.log((gamma_par)/(c*a)))
def distance(alpha_par, n, a, c):
n = np.array(n, dtype=float)
alpha_par = np.array(alpha_par, dtype=float)
a = np.array(a, dtype=float)
c = np.array(c, dtype=float)
A_alpha = alpha_par*n
A = ((1./c)*a)*(1-np.exp(-n*c))
return A_alpha - A
def nalpha_theory(a, c, sign_level = None):
"""
This function solves the equation alpha = aexp(-cn) for n. The solution
might not be a valid solution in the following cases:
- such an n value might not exists as aexp(-cn) is almost constant with
a>alpha
- aexp(-cn) is always smaller than alpha, so we set n = 0
"""
if sign_level is None:
sign_level = 0.05
a = np.array(a, dtype=float)
c = np.array(c, dtype=float)
if a <= sign_level:
nalpha = 0
else:
nalpha = np.floor((-1/c)*np.log(sign_level/a))
if nalpha < 0 and a <= sign_level:
nalpha = 0
elif nalpha < 0 and a > sign_level:
nalpha = np.inf
return nalpha
def nalpha_estimate(df, sign_level = None):
"""
This function provides an estimation of n-alpha taking into account the
bias of the sample, i.e. the distribution of the p-values for each value of
n.
n-alpha = argmin_n(| mean(p(n)) + std(p(n)) | < alpha)
n-alpha might not exist for two different reasons:
- the p-values are uniformly distributed above alpha so this n-value
will never exist.
- There is not enough data, i.e. sample size < n-alpha, so we do not
have enough p-values for the assessment of n-alpha
"""
if sign_level is None:
sign_level = 0.05
# Mean and standard error of the p-values
# df['p_value'] = pd.to_numeric(df['p_value'], downcast='float')
df = df.astype({"p_value": float})
mean = df.groupby('N')['p_value'].mean()
yerr = df.groupby('N')['p_value'].sem()
ymax = mean + yerr
x = np.array(mean.index)
x = x.astype(np.int)
# Obtain the n-value such that all p-values are smaller than 0.05
# (n-alpha in the manuscript)
Nsign = x[(ymax-sign_level) <= 0]
if len(Nsign) > 0:
# This value exists and we have enough samples as to assess it.
Nsign = Nsign[0]
else:
# Either the value does not exist or we do not have enough data to assess it.
Nsign = np.nan
# if Nsign == []:
# Nsign = 'NaN'
return Nsign
def decission_data_exponential(df, combination_dict, data_features, sign_level = None, gamma = None):
if sign_level is None:
sign_level = 0.05
if gamma is None:
gamma = 5e-06
decission_param = pd.DataFrame()
for c in range(len(combination_dict)):
df_comparison = df[df.comparison == combination_dict[np.str(c)]]
aux = pd.DataFrame()
aux['comparison'] = [combination_dict[
|
np.str(c)
|
numpy.str
|
import contextlib
import sys
import warnings
import itertools
import operator
import platform
from distutils.version import LooseVersion as _LooseVersion
import pytest
from hypothesis import given, settings, Verbosity
from hypothesis.strategies import sampled_from
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
class TestTypes:
def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
# a leak would show up in valgrind as still-reachable of ~2.6MB
for i in range(200000):
np.add(1, 1)
class TestBaseMath:
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32:
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
class TestPower:
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t, b))
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
a = t(51)
b = a ** 4
msg = "error with %r: got %r" % (t, b)
if np.issubdtype(t, np.integer):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 1.)
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in 'bhilq']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, -1.)
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, .5)
def test_mixed_types(self):
typelist = [np.int8, np.int16, np.float16,
np.float32, np.float64, np.int8,
np.int16, np.int32, np.int64]
for t1 in typelist:
for t2 in typelist:
a = t1(3)
b = t2(2)
result = a**b
msg = ("error with %r and %r:"
"got %r, expected %r") % (t1, t2, result, 9)
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:
assert_almost_equal(result, 9, err_msg=msg)
def test_modular_power(self):
# modular power is not implemented, so ensure it errors
a = 5
b = 4
c = 10
expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
def floordiv_and_mod(x, y):
return (x // y, x % y)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)[()]
b = np.array(sg2*19, dtype=dt2)[()]
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
# convert exact integer results from Python to float so that
# signed zero can be used, it is checked.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floordiv_and_mod, divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
# use list comprehension so a_ and b_ are scalars
div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_modulus_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)[()]
b = np.array(sg2*6e-8, dtype=dt2)[()]
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
# MSVC 2008 returns NaN here, so disable the check.
#rem = operator.mod(fone, finf)
#assert_(rem == fone, 'dt: %s' % dt)
rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
for op in [floordiv_and_mod, divmod]:
div, mod = op(fone, fzer)
assert_(
|
np.isinf(div)
|
numpy.isinf
|
'''
Includes:
* Function to compute IoU similarity for axis-aligned, rectangular, 2D bounding boxes
* Function to perform greedy non-maximum suppression
* Function to decode raw SSD model output
* Class to encode targets for SSD model training
Copyright (C) 2017 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
def iou(boxes1, boxes2, coords='centroids'):
'''
Compute the intersection-over-union similarity (also known as Jaccard similarity)
of two axis-aligned 2D rectangular boxes or of multiple axis-aligned 2D rectangular
boxes contained in two arrays with broadcast-compatible shapes.
Three common use cases would be to compute the similarities for 1 vs. 1, 1 vs. `n`,
or `n` vs. `n` boxes. The two arguments are symmetric.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
Shape must be broadcast-compatible to `boxes2`.
boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
Shape must be broadcast-compatible to `boxes1`.
coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
`(cx, cy, w, h)` or 'minmax' for the format `(xmin, xmax, ymin, ymax)`. Defaults to 'centroids'.
Returns:
A 1D Numpy array of dtype float containing values in [0,1], the Jaccard similarity of the boxes in `boxes1` and `boxes2`.
0 means there is no overlap between two given boxes, 1 means their coordinates are identical.
'''
if len(boxes1.shape) > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(len(boxes1.shape)))
if len(boxes2.shape) > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(len(boxes2.shape)))
if len(boxes1.shape) == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if len(boxes2.shape) == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("It must be boxes1.shape[1] == boxes2.shape[1] == 4, but it is boxes1.shape[1] == {}, boxes2.shape[1] == {}.".format(boxes1.shape[1], boxes2.shape[1]))
if coords == 'centroids':
# TODO: Implement a version that uses fewer computation steps (that doesn't need conversion)
boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2minmax')
boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2minmax')
elif coords != 'minmax':
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax' and 'centroids'.")
intersection = np.maximum(0, np.minimum(boxes1[:,1], boxes2[:,1]) - np.maximum(boxes1[:,0], boxes2[:,0])) * np.maximum(0, np.minimum(boxes1[:,3], boxes2[:,3]) - np.maximum(boxes1[:,2], boxes2[:,2]))
union = (boxes1[:,1] - boxes1[:,0]) * (boxes1[:,3] - boxes1[:,2]) + (boxes2[:,1] - boxes2[:,0]) * (boxes2[:,3] - boxes2[:,2]) - intersection
return intersection / union
def convert_coordinates(tensor, start_index, conversion='minmax2centroids'):
'''
Convert coordinates for axis-aligned 2D boxes between two coordinate formats.
Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
two supported coordinate formats that can be converted from and to each other:
1) (xmin, xmax, ymin, ymax) - the 'minmax' format
2) (cx, cy, w, h) - the 'centroids' format
Note that converting from one of the supported formats to another and back is
an identity operation up to possible rounding errors for integer tensors.
Arguments:
tensor (array): A Numpy nD array containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
conversion (str, optional): The conversion direction. Can be 'minmax2centroids'
or 'centroids2minmax'. Defaults to 'minmax2centroids'.
Returns:
A Numpy nD array, a copy of the input tensor with the converted coordinates
in place of the original coordinates and the unaltered elements of the original
tensor elsewhere.
'''
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+1]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+2] + tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind] # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+2] # Set h
elif conversion == 'centroids2minmax':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids' and 'centroids2minmax'.")
return tensor1
def convert_coordinates2(tensor, start_index, conversion='minmax2centroids'):
'''
A pure matrix multiplication implementation of `convert_coordinates()`.
Although elegant, it turns out to be marginally slower on average than
`convert_coordinates()`. Note that the two matrices below are each other's
multiplicative inverse.
For details please refer to the documentation of `convert_coordinates()`.
'''
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
M = np.array([[0.5, 0. , -1., 0.],
[0.5, 0. , 1., 0.],
[0. , 0.5, 0., -1.],
[0. , 0.5, 0., 1.]])
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
elif conversion == 'centroids2minmax':
M = np.array([[ 1. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 1. ],
[-0.5, 0.5, 0. , 0. ],
[ 0. , 0. , -0.5, 0.5]])
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids' and 'centroids2minmax'.")
return tensor1
def greedy_nms(y_pred_decoded, iou_threshold=0.45, coords='minmax'):
'''
Perform greedy non-maximum suppression on the input boxes.
Greedy NMS works by selecting the box with the highest score and
removing all boxes around it that are too close to it measured by IoU-similarity.
Out of the boxes that are left over, once again the one with the highest
score is selected and so on, until no boxes with too much overlap are left.
This is a basic, straight-forward NMS algorithm that is relatively efficient,
but it has a number of downsides. One of those downsides is that the box with
the highest score might not always be the box with the best fit to the object.
There are more sophisticated NMS techniques like [this one](https://lirias.kuleuven.be/bitstream/123456789/506283/1/3924_postprint.pdf)
that use a combination of nearby boxes, but in general there will probably
always be a trade-off between speed and quality for any given NMS technique.
Arguments:
y_pred_decoded (list): A batch of decoded predictions. For a given batch size `n` this
is a list of length `n` where each list element is a 2D Numpy array.
For a batch item with `k` predicted boxes this 2D Numpy array has
shape `(k, 6)`, where each row contains the coordinates of the respective
box in the format `[class_id, score, xmin, xmax, ymin, ymax]`.
Technically, the number of columns doesn't have to be 6, it can be
arbitrary as long as the first four elements of each row are
`xmin`, `xmax`, `ymin`, `ymax` (in this order) and the last element
is the score assigned to the prediction. Note that this function is
agnostic to the scale of the score or what it represents.
iou_threshold (float, optional): All boxes with a Jaccard similarity of
greater than `iou_threshold` with a locally maximal box will be removed
from the set of predictions, where 'maximal' refers to the box score.
Defaults to 0.45 following the paper.
coords (str, optional): The coordinate format of `y_pred_decoded`.
Can be one of the formats supported by `iou()`. Defaults to 'minmax'.
Returns:
The predictions after removing non-maxima. The format is the same as the input format.
'''
y_pred_decoded_nms = []
for batch_item in y_pred_decoded: # For the labels of each batch item...
boxes_left = np.copy(batch_item)
maxima = [] # This is where we store the boxes that make it through the non-maximum suppression
while boxes_left.shape[0] > 0: # While there are still boxes left to compare...
maximum_index = np.argmax(boxes_left[:,1]) # ...get the index of the next box with the highest confidence...
maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...
maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it
boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`
if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. Otherwise...
similarities = iou(boxes_left[:,2:], maximum_box[2:], coords=coords) # ...compare (IoU) the other left over boxes to the maximum box...
boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box
y_pred_decoded_nms.append(np.array(maxima))
return y_pred_decoded_nms
def _greedy_nms(predictions, iou_threshold=0.45, coords='minmax'):
'''
The same greedy non-maximum suppression algorithm as above, but slightly modified for use as an internal
function for per-class NMS in `decode_y()`.
'''
boxes_left = np.copy(predictions)
maxima = [] # This is where we store the boxes that make it through the non-maximum suppression
while boxes_left.shape[0] > 0: # While there are still boxes left to compare...
maximum_index = np.argmax(boxes_left[:,0]) # ...get the index of the next box with the highest confidence...
maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...
maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it
boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`
if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. Otherwise...
similarities = iou(boxes_left[:,1:], maximum_box[1:], coords=coords) # ...compare (IoU) the other left over boxes to the maximum box...
boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box
return np.array(maxima)
def _greedy_nms2(predictions, iou_threshold=0.45, coords='minmax'):
'''
The same greedy non-maximum suppression algorithm as above, but slightly modified for use as an internal
function in `decode_y2()`.
'''
boxes_left = np.copy(predictions)
maxima = [] # This is where we store the boxes that make it through the non-maximum suppression
while boxes_left.shape[0] > 0: # While there are still boxes left to compare...
maximum_index = np.argmax(boxes_left[:,1]) # ...get the index of the next box with the highest confidence...
maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...
maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it
boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`
if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. Otherwise...
similarities = iou(boxes_left[:,2:], maximum_box[2:], coords=coords) # ...compare (IoU) the other left over boxes to the maximum box...
boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box
return np.array(maxima)
def decode_y(y_pred,
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
input_coords='centroids',
normalize_coords=False,
img_height=None,
img_width=None):
'''
Convert model prediction output back to a format that contains only the positive box predictions
(i.e. the same format that `enconde_y()` takes as input).
After the decoding, two stages of prediction filtering are performed for each class individually:
First confidence thresholding, then greedy non-maximum suppression. The filtering results for all
classes are concatenated and the `top_k` overall highest confidence results constitute the final
predictions for a given batch item. This procedure follows the original Caffe implementation.
For a slightly different and more efficient alternative to decode raw model output that performs
non-maximum suppresion globally instead of per class, see `decode_y2()` below.
Arguments:
y_pred (array): The prediction output of the SSD model, expected to be a Numpy array
of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)`, where `#boxes` is the total number of
boxes predicted by the model per image and the last axis contains
`[one-hot vector for the classes, 4 predicted coordinate offsets, 4 anchor box coordinates, 4 variances]`.
confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
positive class in order to be considered for the non-maximum suppression stage for the respective class.
A lower value will result in a larger part of the selection process being done by the non-maximum suppression
stage, while a larger value will result in a larger part of the selection process happening in the confidence
thresholding stage. Defaults to 0.01, following the paper.
iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`
with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
to the box score. Defaults to 0.45 following the paper.
top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
non-maximum suppression stage. Defaults to 200, following the paper.
input_coords (str, optional): The box coordinate format that the model outputs. Can be either 'centroids'
for the format `(cx, cy, w, h)` (box center coordinates, width, and height) or 'minmax'
for the format `(xmin, xmax, ymin, ymax)`. Defaults to 'centroids'.
normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
coordinates. Requires `img_height` and `img_width` if set to `True`. Defaults to `False`.
img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.
img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`.
Returns:
A python list of length `batch_size` where each list element represents the predicted boxes
for one image and contains a Numpy array of shape `(boxes, 6)` where each row is a box prediction for
a non-background class for the respective image in the format `[class_id, confidence, xmin, xmax, ymin, ymax]`.
'''
# print ("===== start", input_coords)
if normalize_coords and ((img_height is None) or (img_width is None)):
raise ValueError("If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`".format(img_height, img_width))
# 1: Convert the box coordinates from the predicted anchor box offsets to predicted absolute coordinates
y_pred_decoded_raw = np.copy(y_pred[:,:,:-8]) # Slice out the classes and the four offsets, throw away the anchor coordinates and variances, resulting in a tensor of shape `[batch, n_boxes, n_classes + 4 coordinates]`
if input_coords == 'centroids':
y_pred_decoded_raw[:,:,[-2,-1]] = np.exp(y_pred_decoded_raw[:,:,[-2,-1]] * y_pred[:,:,[-2,-1]]) # exp(ln(w(pred)/w(anchor)) / w_variance * w_variance) == w(pred) / w(anchor), exp(ln(h(pred)/h(anchor)) / h_variance * h_variance) == h(pred) / h(anchor)
y_pred_decoded_raw[:,:,[-2,-1]] *= y_pred[:,:,[-6,-5]] # (w(pred) / w(anchor)) * w(anchor) == w(pred), (h(pred) / h(anchor)) * h(anchor) == h(pred)
y_pred_decoded_raw[:,:,[-4,-3]] *= y_pred[:,:,[-4,-3]] * y_pred[:,:,[-6,-5]] # (delta_cx(pred) / w(anchor) / cx_variance) * cx_variance * w(anchor) == delta_cx(pred), (delta_cy(pred) / h(anchor) / cy_variance) * cy_variance * h(anchor) == delta_cy(pred)
y_pred_decoded_raw[:,:,[-4,-3]] += y_pred[:,:,[-8,-7]] # delta_cx(pred) + cx(anchor) == cx(pred), delta_cy(pred) + cy(anchor) == cy(pred)
y_pred_decoded_raw = convert_coordinates(y_pred_decoded_raw, start_index=-4, conversion='centroids2minmax')
elif input_coords == 'minmax':
y_pred_decoded_raw[:,:,-4:] *= y_pred[:,:,-4:] # delta(pred) / size(anchor) / variance * variance == delta(pred) / size(anchor) for all four coordinates, where 'size' refers to w or h, respectively
y_pred_decoded_raw[:,:,[-4,-3]] *= np.expand_dims(y_pred[:,:,-7] - y_pred[:,:,-8], axis=-1) # delta_xmin(pred) / w(anchor) * w(anchor) == delta_xmin(pred), delta_xmax(pred) / w(anchor) * w(anchor) == delta_xmax(pred)
y_pred_decoded_raw[:,:,[-2,-1]] *= np.expand_dims(y_pred[:,:,-5] - y_pred[:,:,-6], axis=-1) # delta_ymin(pred) / h(anchor) * h(anchor) == delta_ymin(pred), delta_ymax(pred) / h(anchor) * h(anchor) == delta_ymax(pred)
y_pred_decoded_raw[:,:,-4:] += y_pred[:,:,-8:-4] # delta(pred) + anchor == pred for all four coordinates
else:
raise ValueError("Unexpected value for `input_coords`. Supported input coordinate formats are 'minmax' and 'centroids'.")
# 2: If the model predicts normalized box coordinates and they are supposed to be converted back to absolute coordinates, do that
if normalize_coords:
y_pred_decoded_raw[:,:,-4:-2] *= img_width # Convert xmin, xmax back to absolute coordinates
y_pred_decoded_raw[:,:,-2:] *= img_height # Convert ymin, ymax back to absolute coordinates
# 3: Apply confidence thresholding and non-maximum suppression per class
n_classes = y_pred_decoded_raw.shape[-1] - 4 # The number of classes is the length of the last axis minus the four box coordinates
y_pred_decoded = [] # Store the final predictions in this list
for batch_item in y_pred_decoded_raw: # `batch_item` has shape `[n_boxes, n_classes + 4 coords]`
pred = [] # Store the final predictions for this batch item here
for class_id in range(1, n_classes): # For each class except the background class (which has class ID 0)...
single_class = batch_item[:,[class_id, -4, -3, -2, -1]] # ...keep only the confidences for that class, making this an array of shape `[n_boxes, 5]` and...
threshold_met = single_class[single_class[:,0] > confidence_thresh] # ...keep only those boxes with a confidence above the set threshold.
if threshold_met.shape[0] > 0: # If any boxes made the threshold...
maxima = _greedy_nms(threshold_met, iou_threshold=iou_threshold, coords='minmax') # ...perform NMS on them.
maxima_output = np.zeros((maxima.shape[0], maxima.shape[1] + 1)) # Expand the last dimension by one element to have room for the class ID. This is now an arrray of shape `[n_boxes, 6]`
maxima_output[:,0] = class_id # Write the class ID to the first column...
maxima_output[:,1:] = maxima # ...and write the maxima to the other columns...
pred.append(maxima_output) # ...and append the maxima for this class to the list of maxima for this batch item.
# Once we're through with all classes, keep only the `top_k` maxima with the highest scores
pred = np.concatenate(pred, axis=0)
if pred.shape[0] > top_k: # If we have more than `top_k` results left at this point, otherwise there is nothing to filter,...
top_k_indices = np.argpartition(pred[:,1], kth=pred.shape[0]-top_k, axis=0)[pred.shape[0]-top_k:] # ...get the indices of the `top_k` highest-score maxima...
pred = pred[top_k_indices] # ...and keep only those entries of `pred`...
y_pred_decoded.append(pred) # ...and now that we're done, append the array of final predictions for this batch item to the output list
# print ("===== end", input_coords)
return y_pred_decoded
def decode_y2(y_pred,
confidence_thresh=0.5,
iou_threshold=0.45,
top_k='all',
input_coords='centroids',
normalize_coords=False,
img_height=None,
img_width=None):
'''
Convert model prediction output back to a format that contains only the positive box predictions
(i.e. the same format that `enconde_y()` takes as input).
Optionally performs confidence thresholding and greedy non-maximum suppression afte the decoding stage.
Note that the decoding procedure used here is not the same as the procedure used in the original Caffe implementation.
The procedure used here assigns every box its highest confidence as the class and then removes all boxes fro which
the highest confidence is the background class. This results in less work for the subsequent non-maximum suppression,
because the vast majority of the predictions will be filtered out just by the fact that their highest confidence is
for the background class. It is much more efficient than the procedure of the original implementation, but the
results may also differ.
Arguments:
y_pred (array): The prediction output of the SSD model, expected to be a Numpy array
of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)`, where `#boxes` is the total number of
boxes predicted by the model per image and the last axis contains
`[one-hot vector for the classes, 4 predicted coordinate offsets, 4 anchor box coordinates, 4 variances]`.
confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in any positive
class required for a given box to be considered a positive prediction. A lower value will result
in better recall, while a higher value will result in better precision. Do not use this parameter with the
goal to combat the inevitably many duplicates that an SSD will produce, the subsequent non-maximum suppression
stage will take care of those. Defaults to 0.5.
iou_threshold (float, optional): `None` or a float in [0,1]. If `None`, no non-maximum suppression will be
performed. If not `None`, greedy NMS will be performed after the confidence thresholding stage, meaning
all boxes with a Jaccard similarity of greater than `iou_threshold` with a locally maximal box will be removed
from the set of predictions, where 'maximal' refers to the box score. Defaults to 0.45.
top_k (int, optional): 'all' or an integer with number of highest scoring predictions to be kept for each batch item
after the non-maximum suppression stage. Defaults to 'all', in which case all predictions left after the NMS stage
will be kept.
input_coords (str, optional): The box coordinate format that the model outputs. Can be either 'centroids'
for the format `(cx, cy, w, h)` (box center coordinates, width, and height) or 'minmax'
for the format `(xmin, xmax, ymin, ymax)`. Defaults to 'centroids'.
normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
coordinates. Requires `img_height` and `img_width` if set to `True`. Defaults to `False`.
img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.
img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`.
Returns:
A python list of length `batch_size` where each list element represents the predicted boxes
for one image and contains a Numpy array of shape `(boxes, 6)` where each row is a box prediction for
a non-background class for the respective image in the format `[class_id, confidence, xmin, xmax, ymin, ymax]`.
'''
if normalize_coords and ((img_height is None) or (img_width is None)):
raise ValueError("If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`".format(img_height, img_width))
# 1: Convert the classes from one-hot encoding to their class ID
y_pred_converted =
|
np.copy(y_pred[:,:,-14:-8])
|
numpy.copy
|
import opengen as og
import unittest
import casadi.casadi as cs
import numpy as np
import math
class ConstraintsTestCase(unittest.TestCase):
# -----------------------------------------------------------------------
# Infinity Ball
# -----------------------------------------------------------------------
def test_ball_inf_origin(self):
ball = og.constraints.BallInf(None, 1)
x = np.array([3, 2])
x_sym = cs.SX.sym("x", 2)
d_num = ball.distance_squared(x)
d_sym = float(cs.substitute(ball.distance_squared(x_sym), x_sym, x))
self.assertAlmostEqual(d_sym, d_num, 8, "computation of distance")
correct_squared_distance = 5.0
self.assertAlmostEqual(d_num, correct_squared_distance,
8, "expected squared distance")
# verify that it works with cs.MX
x_sym_mx = cs.MX.sym("xmx", 2)
sqdist_mx = ball.distance_squared(x_sym_mx)
sqdist_mx_fun = cs.Function('sqd', [x_sym_mx], [sqdist_mx])
self.assertAlmostEqual(correct_squared_distance,
sqdist_mx_fun(x)[0], 5)
def test_ball_inf_origin_inside(self):
ball = og.constraints.BallInf(None, 1)
x = np.array([0.1, -0.2])
x_sym = cs.SX.sym("x", 2)
d_num = ball.distance_squared(x)
d_sym = float(cs.substitute(ball.distance_squared(x_sym), x_sym, x))
self.assertAlmostEqual(d_sym, d_num, 10, "computation of distance")
correct_squared_distance = 0
self.assertAlmostEqual(d_num, correct_squared_distance,
10, "expected squared distance")
def test_ball_inf_xc(self):
ball = og.constraints.BallInf([-1, -1], 0.5)
x = np.array([1, -2])
x_sym = cs.SX.sym("x", 2)
d_num = ball.distance_squared(x)
d_sym = float(cs.substitute(ball.distance_squared(x_sym), x_sym, x))
self.assertAlmostEqual(d_sym, d_num, 8, "computation of distance")
correct_squared_distance = 2.5
self.assertAlmostEqual(d_num, correct_squared_distance,
8, "expected squared distance")
def test_ball_inf_origin_convex(self):
ball = og.constraints.BallInf()
self.assertTrue(ball.is_convex())
def test_ball_inf_origin_compact(self):
ball = og.constraints.BallInf()
self.assertTrue(ball.is_compact())
# -----------------------------------------------------------------------
# Euclidean Ball
# -----------------------------------------------------------------------
def test_ball_euclidean_origin(self):
ball = og.constraints.Ball2(None, 1)
x = np.array([2, 2])
x_sym = cs.SX.sym("x", 2)
d_num = ball.distance_squared(x)
d_sym = float(cs.substitute(ball.distance_squared(x_sym), x_sym, x))
self.assertAlmostEqual(d_sym, d_num, 8, "computation of distance")
correct_squared_distance = 8*(1-np.sqrt(2)/4)**2
self.assertAlmostEqual(d_sym, correct_squared_distance,
8, "expected squared distance")
def test_ball_euclidean_origin_3d(self):
ball = og.constraints.Ball2(None, 1)
x = np.array([1, 1, 1])
d_num = ball.distance_squared(x)
correct_squared_distance = 0.535898384862246
self.assertAlmostEqual(correct_squared_distance,
d_num, 12, "computation of distance")
def test_ball_euclidean_origin_inside(self):
ball = og.constraints.Ball2(None, 1)
x = np.array([0.2, 0.8])
x_sym = cs.SX.sym("x", 2)
d_num = ball.distance_squared(x)
d_sym = float(cs.substitute(ball.distance_squared(x_sym), x_sym, x))
self.assertAlmostEqual(d_sym, d_num, 8, "computation of distance")
correct_squared_distance = 0.0
self.assertAlmostEqual(d_sym, correct_squared_distance,
8, "expected squared distance")
def test_ball_euclidean_xc(self):
ball = og.constraints.Ball2([1, 2], 1)
x = [0, 1]
x_sym = cs.SX.sym("x", 2)
d_num = ball.distance_squared(x)
d_sym = cs.substitute(ball.distance_squared(x_sym), x_sym, x)
self.assertAlmostEqual(d_sym, d_num, 8, "computation of distance")
correct_squared_distance = (np.sqrt(2) - 1) ** 2
self.assertAlmostEqual(d_sym, correct_squared_distance,
8, "expected squared distance")
def test_ball_euclidean_xc_inside(self):
ball = og.constraints.Ball2([1, 2], 1)
x = [1.2, 1.55]
x_sym = cs.SX.sym("x", 2)
d_num = ball.distance_squared(x)
d_sym = cs.substitute(ball.distance_squared(x_sym), x_sym, x)
self.assertAlmostEqual(d_sym, d_num, 8, "computation of distance")
correct_squared_distance = 0.0
self.assertAlmostEqual(d_sym, correct_squared_distance,
8, "expected squared distance")
def test_ball_euclidean_origin_convex(self):
ball = og.constraints.Ball2()
self.assertTrue(ball.is_convex())
def test_ball_euclidean_origin_compact(self):
ball = og.constraints.Ball2()
self.assertTrue(ball.is_compact())
# -----------------------------------------------------------------------
# Rectangle
# -----------------------------------------------------------------------
def test_rectangle_simple(self):
rect = og.constraints.Rectangle([-1, -2], [4, -1])
# some basic assertions
self.assertListEqual([0, 1], rect.idx_bound_finite_all())
self.assertTrue(len(rect.idx_infinite_only_xmax()) == 0)
self.assertTrue(len(rect.idx_infinite_only_xmin()) == 0)
self.assertEqual(2, rect.dimension())
# squared distance
self.assertAlmostEqual(1, rect.distance_squared([3, 0]), 8)
self.assertAlmostEqual(4, rect.distance_squared([0, 1]), 8)
self.assertAlmostEqual(1, rect.distance_squared([5, -1.5]), 8)
self.assertAlmostEqual(5, rect.distance_squared([5, 1]), 8)
# symbolic
x_sym = cs.SX.sym("x", 2)
d_sym = float(cs.substitute(
rect.distance_squared(x_sym), x_sym, [5, 1]))
self.assertAlmostEqual(5, d_sym, 8)
def test_rectangle_pos_quant(self):
n = 3
rect = og.constraints.Rectangle([0.0]*n, None)
# some basic assertions
self.assertTrue(0 == len(rect.idx_bound_finite_all()))
self.assertTrue(0 == len(rect.idx_infinite_only_xmin()))
self.assertEqual([*range(n)], rect.idx_infinite_only_xmax())
# some squared distances
self.assertAlmostEqual(0.0, rect.distance_squared([0.0]*n), 8)
self.assertAlmostEqual(0.0, rect.distance_squared([1.0] * n), 8)
self.assertAlmostEqual(
1.0, rect.distance_squared([-1.0] + [1.0] * (n-1)), 8)
self.assertAlmostEqual(
5.0, rect.distance_squared([-1.0, -2.0, 5.0]), 8)
def test_rectangle_semiinf_corridor(self):
rect = og.constraints.Rectangle([-1.0, -2.0], [float('inf'), 3.0])
self.assertEqual([0], rect.idx_infinite_only_xmax())
self.assertAlmostEqual(0.0, rect.distance_squared([1e16, 1.5]), 8)
self.assertAlmostEqual(1.0, rect.distance_squared([1e16, 4.0]), 8)
self.assertAlmostEqual(4.0, rect.distance_squared([1e16, -4.0]), 8)
def test_rectangle_convex_i(self):
rect = og.constraints.Rectangle([-1.0, -2.0], [1.0, 3.0])
self.assertTrue(rect.is_convex())
def test_rectangle_convex_ii(self):
rect = og.constraints.Rectangle([-1.0, -2.0], [float('inf'), 3.0])
self.assertTrue(rect.is_convex())
def test_rectangle_compact(self):
rect = og.constraints.Rectangle([-1.0, -2.0], [0.0, 3.0])
self.assertTrue(rect.is_compact())
def test_rectangle_noncompact(self):
rect = og.constraints.Rectangle([-1.0, float('-inf')], [10.0, 3.0])
self.assertFalse(rect.is_compact())
# -----------------------------------------------------------------------
# Second-Order Cone (SOC)
# -----------------------------------------------------------------------
def test_second_order_cone_origin_inside(self):
soc = og.constraints.SecondOrderCone(2.0)
# dist_C^2(0, 0, 0) = 0 [origin is in the cone]
sq_dist = soc.distance_squared([0.0, 0.0, 0.0])
self.assertAlmostEqual(0, sq_dist, 16)
def test_second_order_cone_close_origin(self):
soc = og.constraints.SecondOrderCone(2.0)
# dist_C^2(0, 0, 0) = 0 [close-origin]
sq_dist = soc.distance_squared([1e-12, 1e-12, 1e-12])
self.assertAlmostEqual(0, sq_dist, 16)
def test_second_order_cone_case_i(self):
# dist_C^2(1, 1, 0.75) = 0 [case II]
soc = og.constraints.SecondOrderCone(2.0)
sq_dist = soc.distance_squared([1.0, 1.0, 0.75])
self.assertAlmostEqual(0, sq_dist, 16)
def test_second_order_cone_case_ii(self):
# dist_C^2(3, 4, -11) = 146.0 [case II]
soc = og.constraints.SecondOrderCone(2.0)
sq_dist = soc.distance_squared([3.0, 4.0, -11.0])
self.assertAlmostEqual(146.0, sq_dist, 16)
sq_dist = soc.distance_squared([4.0, 3.0, -11.0])
self.assertAlmostEqual(146.0, sq_dist, 16)
def test_second_order_cone_case_iii(self):
# dist_C^2(2, 3, 0.5) = 1.357... [case III]
soc = og.constraints.SecondOrderCone(2.0)
sq_dist = soc.distance_squared([2.0, 3.0, 0.5])
self.assertAlmostEqual(1.35777948981440, sq_dist, 12)
def test_second_order_cone_symbolic(self):
soc = og.constraints.SecondOrderCone(2.0)
u = cs.MX.sym('u', 3, 1)
sq_dist = soc.distance_squared(u)
u0 = [4.0, 3.0, -11.0]
sq_dist_sx_fun = cs.Function('sqd1', [u], [sq_dist])
self.assertAlmostEqual(146.0, sq_dist_sx_fun(u0), 16)
sq_dist_m2 = soc.distance_squared(u)
sq_dist_mx_fun = cs.Function('sqd2', [u], [sq_dist_m2])
self.assertAlmostEqual(146.0, sq_dist_mx_fun(u0), 16)
def test_second_order_cone_jacobian(self):
soc = og.constraints.SecondOrderCone()
# Important note: the second-order cone constraint does not work with cs.MX
# An exception will be raised if the user provides an SX
u = cs.MX.sym('u', 3)
sq_dist = soc.distance_squared(u)
sq_dist_jac = cs.jacobian(sq_dist, u)
sq_dist_jac_fun = cs.Function('sq_dist_jac', [u], [sq_dist_jac])
v = sq_dist_jac_fun([0., 0., 0.])
for i in range(3):
self.assertFalse(math.isnan(v[i]), "v[i] is NaN")
self.assertAlmostEqual(0, cs.norm_2(v), 12)
def test_second_order_cone_convex(self):
soc = og.constraints.SecondOrderCone()
self.assertTrue(soc.is_convex())
def test_second_order_cone_convex(self):
soc = og.constraints.SecondOrderCone()
self.assertFalse(soc.is_compact())
# -----------------------------------------------------------------------
# No Constraints
# -----------------------------------------------------------------------
def test_no_constraints(self):
whole_rn = og.constraints.NoConstraints()
u = [1., 2., 3., 4.]
self.assertAlmostEqual(0.0, whole_rn.distance_squared(u), 16)
self.assertListEqual(u, whole_rn.project(u))
def test_no_constraints_convex(self):
whole_rn = og.constraints.NoConstraints()
self.assertTrue(whole_rn.is_convex())
def test_no_constraints_compact(self):
whole_rn = og.constraints.NoConstraints()
self.assertFalse(whole_rn.is_compact())
# -----------------------------------------------------------------------
# Cartesian product of constraints
# -----------------------------------------------------------------------
def test_cartesian(self):
inf = float('inf')
ball_inf = og.constraints.BallInf(None, 1)
ball_eucl = og.constraints.Ball2(None, 1)
rect = og.constraints.Rectangle(xmin=[0.0, 1.0, -inf, 2.0],
xmax=[1.0, inf, 10.0, 10.0])
# Segments:
# [0, 1]
# [2, 3, 4]
# [5, 6, 7, 8]
cartesian = og.constraints.CartesianProduct(
[1, 4, 8], [ball_inf, ball_eucl, rect])
sq_dist = cartesian.distance_squared([5, 10, 1, 1, 1, 0.5, -1, 0, 11])
correct_sq_distance = 102.0 + (math.sqrt(3)-1.0)**2
self.assertAlmostEqual(correct_sq_distance, sq_dist, 12)
def test_cartesian_sx(self):
inf = float('inf')
ball_inf = og.constraints.BallInf(None, 1)
ball_eucl = og.constraints.Ball2(None, 1)
rect = og.constraints.Rectangle(xmin=[0.0, 1.0, -inf, 2.0],
xmax=[1.0, inf, 10.0, 10.0])
cartesian = og.constraints.CartesianProduct(
[1, 4, 8], [ball_inf, ball_eucl, rect])
u_sx = cs.SX.sym("u", 9, 1)
_sqd_sx = cartesian.distance_squared(u_sx)
u_mx = cs.SX.sym("u", 9, 1)
_sqd_mx = cartesian.distance_squared(u_mx)
def test_cartesian_segments_not_increasing(self):
no_constraints = og.constraints.NoConstraints()
sets = [no_constraints, no_constraints, no_constraints]
segments = [0, 2, 2] # should be increasing
with self.assertRaises(ValueError) as __context:
og.constraints.CartesianProduct(segments, sets)
def test_cartesian_segments_negative_elements(self):
no_constraints = og.constraints.NoConstraints()
sets = [no_constraints, no_constraints]
segments = [-1, 2] # -1 is not allowed
with self.assertRaises(ValueError) as __context:
og.constraints.CartesianProduct(segments, sets)
def test_cartesian_segments_different_lengths(self):
no_constraints = og.constraints.NoConstraints()
sets = [no_constraints, no_constraints]
segments = [0, 2, 4] # 3 elements (but sets has two elements)
with self.assertRaises(ValueError) as __context:
og.constraints.CartesianProduct(segments, sets)
def test_cartesian_segments_empty_args(self):
no_constraints = og.constraints.NoConstraints()
sets = [no_constraints, no_constraints]
with self.assertRaises(ValueError) as __context:
og.constraints.CartesianProduct([], sets)
# -----------------------------------------------------------------------
# Finite Set
# -----------------------------------------------------------------------
def test_finite_set_dim_card(self):
c = og.constraints.FiniteSet()
self.assertEqual(0, c.dimension())
self.assertEqual(0, c.cardinality())
c = og.constraints.FiniteSet([])
self.assertEqual(0, c.dimension())
self.assertEqual(0, c.cardinality())
c = og.constraints.FiniteSet([[1, 2, 3], [4, 5, 6]])
self.assertEqual(3, c.dimension())
self.assertEqual(2, c.cardinality())
def test_finite_set_fail(self):
with self.assertRaises(Exception) as __context:
og.constraints.FiniteSet([[1., 2.], [1., 2., 3.]])
def test_finite_set_convex(self):
c = og.constraints.FiniteSet([[1, 2, 3]])
self.assertTrue(c.is_convex())
def test_finite_set_nonconvex(self):
c = og.constraints.FiniteSet([[1, 2, 3], [4, 5, 6]])
self.assertFalse(c.is_convex())
def test_finite_set_compact(self):
c = og.constraints.FiniteSet([[1, 2, 3], [4, 5, 6]])
self.assertTrue(c.is_compact())
# -----------------------------------------------------------------------
# Halfspaces
# -----------------------------------------------------------------------
def test_halfspace_dimension(self):
h = og.constraints.Halfspace([1, 4, 5], 1)
self.assertEqual(3, h.dimension())
def test_halfspace_not_compact(self):
h = og.constraints.Halfspace([1, 4, 5], 1)
self.assertFalse(h.is_compact())
def test_halfspace_compact(self):
h = og.constraints.Halfspace([0, 0, 0], -1)
self.assertTrue(h.is_compact())
def test_halfspace_convex(self):
h = og.constraints.Halfspace([1, 4, 5], 1)
self.assertTrue(h.is_convex())
# -----------------------------------------------------------------------
# Set Y (from C)
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Simplex
# -----------------------------------------------------------------------
def test_simplex_projection(self):
simplex = og.constraints.Simplex(alpha=2)
y = [1, 8, 0, -4]
z = simplex.project(y)
self.assertAlmostEqual(
sum(z), 2, 12, "Simplex projection sum not equal to alpha")
def test_simplex_projection_random_spam(self):
simplex = og.constraints.Simplex(alpha=2)
n = 10
for _ in range(5000):
x = np.random.uniform(low=-100, high=100, size=n)
alpha = np.random.uniform(low=1e-4, high=100)
simplex = og.constraints.Simplex(alpha)
z = simplex.project(x)
self.assertAlmostEqual(
sum(z), alpha, 10, "Simplex projection sum not equal to alpha")
self.assertTrue(min(z) >= -1e-12, "Simplex projection is negative")
def test_simplex_projection_random_optimality(self):
# According to the projection theorem, x_star is the projection of z on a set C
# iff <x - x_star, z - x_star> <= 0, for all x in C. Here we are testing whether
# this holds for all x which are extreme points of C.
for n in range(5, 60, 5):
for _ in range(10*n):
z = np.random.uniform(low=-100, high=100, size=n)
alpha = np.random.uniform(low=1e-4, high=100)
simplex = og.constraints.Simplex(alpha)
x_star = simplex.project(z)
# test optimality conditions:
for j in range(n):
x = np.zeros((n,))
x[j] = alpha
self.assertLessEqual(
np.dot(x-x_star, z-x_star), 1e-10, "Simplex optimality conditions failed")
# -----------------------------------------------------------------------
# Ball1
# -----------------------------------------------------------------------
def test_ball1_project_inside_points(self):
n = 5
ball1 = og.constraints.Ball1(radius=1)
for _ in range(100):
x = np.random.uniform(low=-1, high=1, size=n)
x /= sum(abs(x))
x *= 1 - 1e-10
x_star = ball1.project(x)
self.assertLessEqual(np.linalg.norm(x - x_star, np.inf), 1e-10)
def test_ball1_project_random_points(self):
n = 5
for _ in range(5000):
r = np.random.uniform(low=1e-3, high=10)
ball1 = og.constraints.Ball1(
radius=r)
x = np.random.uniform(low=-50, high=50, size=n)
x_star = ball1.project(x)
# Check whether the projection is inside the set
self.assertLessEqual(np.linalg.norm(x_star, 1), r * (1 + 1e-10))
# Check the optimality conditions
for j in range(n):
e = np.zeros((n,))
e[j] = r
self.assertLessEqual(
np.dot(e-x_star, x-x_star), 1e-10, "Ball1 optimality conditions failed (1)")
e[j] = -r
self.assertLessEqual(
np.dot(e-x_star, x-x_star), 1e-10, "Ball1 optimality conditions failed (2)")
def test_ball1_project_random_points_center(self):
n = 5
for _ in range(5000):
xc = np.random.uniform(low=-20, high=20, size=n)
r = np.random.uniform(low=1e-3, high=10)
ball1 = og.constraints.Ball1(xc, r)
x = np.random.uniform(low=-50, high=50, size=n)
x_star = ball1.project(x)
# Check whether the projection is inside the set
self.assertLessEqual(np.linalg.norm(x_star - xc, 1),
r * (1 + 1e-10))
# Check the optimality conditions
for j in range(n):
e = ball1.center.copy()
e[j] += r
self.assertLessEqual(
|
np.dot(e-x_star, x-x_star)
|
numpy.dot
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Create the echocardiography dataset.
"""
# Imports
import os
import glob
import urllib
import tarfile
import numpy as np
import skimage.io as skio
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
class EchocardiographyDataset(Dataset):
""" Create the echocardiography dataset.
"""
url = ("https://deepimaging2019.sciencesconf.org/data/pages/"
"ge_insa_lyon_datasets_camus_dataset_2.tar")
label_map = {"ES": 0, "ED": 1}
def __init__(self, root, train=True, transform=None, seed=None):
""" Init class.
Parameters
----------
root: str
root directory of dataset where the data will be saved.
train: str, default True
specifies training or test dataset.
transform: callable, default None
optional transform to be applied on a sample.
"""
super(EchocardiographyDataset).__init__()
self.root = root
self.train = train
self.transform = transform
self.height, self.width = (256, 256)
self.data_file = os.path.join(root, "echocardiography.npz")
self.download()
self.data = np.load(self.data_file, mmap_mode="r")
splitdata = train_test_split(
self.data["image"], self.data["segmentation"], self.data["name"],
self.data["label"], test_size=0.25, shuffle=True,
random_state=seed)
if train:
self.X = splitdata[::2]
else:
self.X = splitdata[1::2]
def download(self):
""" Download data.
"""
if not os.path.isfile(self.data_file):
# Download & extract data
print("Downloading {0}.".format(self.url))
tarball = os.path.join(self.root, "echocardiography.tar")
urllib.request.urlretrieve(self.url, tarball)
downloaddir = tarball.replace(".tar", "")
tar = tarfile.open(tarball)
tar.extractall(path=downloaddir)
tar.close()
# Parse data
files = glob.glob(os.path.join(downloaddir, "images", "*.png"))
nb_files = len(files)
data = []
masks = []
metadata = dict((key, []) for key in ("name", "label"))
for path in files:
basename = os.path.basename(path)
im = skio.imread(path)
im = im / np.max(im)
mask_path = os.path.join(downloaddir, "masks", basename)
mask = skio.imread(mask_path)
mask = mask.astype(np.single)
mask = (mask / 255. * 3.).astype(int)
mask = self.to_categorical(y=mask, num_classes=4)
data.append(im)
masks.append(mask)
basename = basename.replace(".png", "")
metadata["name"].append(basename)
metadata["label"].append(basename[-2:])
data = np.expand_dims(data, axis=1)
dataset = {
"image": np.asarray(data).astype("float32"),
"segmentation": np.asarray(masks).astype("float32"),
"name": np.array(metadata["name"]),
"label":
|
np.array(metadata["label"])
|
numpy.array
|
import os
import numpy as np
from sklearn.metrics import roc_auc_score
from pynet.history import History
from pynet.plotting.image import plot_data_reduced
from pynet.utils import get_pickle_obj
import matplotlib.pyplot as plt
import seaborn
seaborn.set_style("darkgrid")
## Multi-modality Integration with Missing Modality at training time
## Plotting of performances during contrastive learning with dMRI + sMRI
# n="10K"
# baseline_n = [get_pickle_obj("/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/"
# "DenseNet/N_%s/Age/old_training/Test_DenseNet_SCZ_VIP_block4_fold%s_epoch149.pkl"%(n,f))
# for f in range(1)]
# Age Pretraining:
# SCZ Results:
# For N=10K, AUC = 0.745 +/- 0.016; Bacc = 0.69 +/- 0.016
# For N=1600: AUC = 0.745 +/- 0.010 ; BAcc = 0.69 +/- 0.015
# For N=500: AUC = 0.74 +/- 0.020 ; BAcc = 0.68 +/- 0.016
# Bipolar Results:
# For N=10K, AUC = 0.63 +/- 0.02; Bacc = 0.60 +/- 0.014
# For N=1600: AUC = 0.64 +/- 0.005; Bacc = 0.59 +/- 0.012
# For N=500: AUC = 0.61 +/- 0.015; BAcc = 0.58 +/- 0.020
net = "DenseNet"
root_ = os.path.join('/neurospin/psy_sbox/bd261576/checkpoints/self_supervision/multimodal_simCLR/{net}/N_%s/'
'missing_modality/diffusion/ratio_0.6'.format(net=net))
augmentations = ['cutout_crop_flip_blur_noise/CMCNTXEnLoss/alpha_1/']
augmentation_names = ['All TF']
hyperparams = ['']
dbs = ['SCZ_VIP', 'BIOBD', 'ADNI']#, 'HCP_IXI_age', 'HCP_IXI_sex']
prediction_tasks = ['SCZ vs CTL', 'Bipolar vs CTL', 'AD vs CTL']
epochs = [299, 299, 299, 299, 299, 299]
all_N = [1600]#[500, 1600, '10K']
nb_folds = 1
tested_epochs = list(range(20, 300, 20)) #list(range(10, 300, 10)) + [299] # list(range(10, 300, 10))+[299]
## T1 Baselines
baseline = {'SCZ_VIP': {'b_acc': 0.72, 'auc': 0.78},
'BIOBD': {'b_acc': 0.63, 'auc': 0.68},
'ADNI': {'b_acc': 0.879, 'auc': 0.965}
}
baseline_age = {
'10K': {'SCZ_VIP': {'b_acc': 0.69, 'auc': 0.745},
'BIOBD': {'b_acc': 0.60, 'auc': 0.63},
'ADNI': {'b_acc': 0.805, 'auc': 0.903},
'HCP_IXI_age': {'mae': 4.02, 'r': 0.93}},
1600: {'SCZ_VIP': {'b_acc': 0.69, 'auc': 0.745},
'BIOBD': {'b_acc': 0.59, 'auc': 0.64},
'ADNI': {'b_acc': 0.799, 'auc': 0.896},
'HCP_IXI_age': {'mae': 5.65, 'r': 0.86}},
500: {'SCZ_VIP': {'b_acc': 0.68, 'auc': 0.74},
'BIOBD': {'b_acc': 0.58, 'auc': 0.61},
'ADNI': {'b_acc': 0.819, 'auc': 0.913},
'HCP_IXI_age': {'mae': 6.02, 'r': 0.83}}
}
baseline_sex = {
'10K': {'SCZ_VIP': {'b_acc': 0.65, 'auc': 0.72},
'BIOBD': {'b_acc': 0.55, 'auc': 0.57},
'ADNI': {'b_acc': 0.903, 'auc': 0.976},
'HCP_IXI_sex': {'b_acc': 0.92, 'auc': 0.98}},
1600: {'SCZ_VIP': {'b_acc': 0.65, 'auc': 0.70},
'BIOBD': {'b_acc': 0.55, 'auc': 0.58},
'ADNI': {'b_acc': 0.812, 'auc': 0.871},
'HCP_IXI_sex': {'b_acc': 0.88, 'auc': 0.95}},
500: {'SCZ_VIP': {'b_acc': 0.68, 'auc': 0.74},
'BIOBD': {'b_acc': 0.59, 'auc': 0.62},
'HCP_IXI_sex': {'b_acc': 0.82, 'auc': 0.91},
'ADNI': {'b_acc': 0.868, 'auc': 0.937}}
}
results = {aug: dict() for aug in augmentations}
results_batch_size = {aug: dict() for aug in augmentations}
res = {N: {aug + hyper: {db: [[get_pickle_obj(os.path.join(root_ % N, aug, "Test_%s_%s_%sblock4_fold%i_epoch%s.pkl" %
(net, db if (N != '10K' or db != 'HCP_IXI')
else 'Big_Healthy',hyper, f, e)))
for f in range(nb_folds)]
for e in tested_epochs]
for db in dbs
} for (aug, hyper) in zip(augmentations, hyperparams)}
for N in all_N}
metric = {'SCZ_VIP': roc_auc_score, 'BIOBD': roc_auc_score, 'ADNI': roc_auc_score,
'HCP_IXI_age': lambda y_true, y: np.mean(np.abs(y.ravel()-y_true.ravel())),
'HCP_IXI_sex': roc_auc_score
}
res_metric = {N: {aug+hyper: {db: [[metric[db](res[N][aug+hyper][db][e][f]['y_true'],
res[N][aug+hyper][db][e][f]['y'] if db=='HCP_IXI_age' else
res[N][aug+hyper][db][e][f]['y'][:, 1]) for f in range(nb_folds)]
for e in range(len(tested_epochs))]
for db in dbs}
for aug,hyper in zip(augmentations, hyperparams)}
for N in all_N}
for N in all_N:
fig, big_axes = plt.subplots(len(augmentations), 1, figsize=(5*len(dbs), 5*len(augmentations)), sharey='col', squeeze=False)
for row, (big_ax, aug_name) in enumerate(zip(big_axes[:,0], augmentation_names), start=1):
big_ax.set_title(aug_name, fontweight='bold', fontsize=16)
big_ax.axis('off')
big_ax._frameon = False
big_ax.title.set_position([.5, 1.08])
for k, (aug, hyper) in enumerate(zip(augmentations, hyperparams)):
for i, (db, task) in enumerate(zip(dbs, prediction_tasks)):
ax = fig.add_subplot(len(augmentations), len(dbs), k * len(dbs) + i + 1)
seaborn.lineplot(x=[e for e in tested_epochs for f in range(nb_folds)],
y=[res_metric[N][aug + hyper][db][e][f] for e in range(len(tested_epochs))
for f in range(nb_folds)],
marker='o', ax=ax)
ax.set_title('%s ($N_{pretrained}=%s$)' % (task, N))
ax.set_xlabel('Contrastive training epochs')
if db == "HCP_IXI_age":
ax.set_ylabel('MAE')
ax.axhline(baseline_age[N][db]['mae'], color='red', linestyle='dotted',
label="Standard Age Pretraining")
elif db == "HCP_IXI_sex":
ax.set_ylabel('AUC')
ax.axhline(baseline_sex[N][db]['auc'], color='orange', linestyle='dotted',
label="Standard Sex Pretraining")
else:
ax.set_ylabel('AUC')
ax.axhline(baseline[db]['auc'], color='gray', linestyle='dotted', label="Supervised on %s"%db)
ax.axhline(baseline_age[N][db]['auc'], color='red', linestyle='dotted',
label="Standard Age Pretraining")
ax.axhline(baseline_sex[N][db]['auc'], color='orange', linestyle='dotted',
label="Standard Sex Pretraining")
ax.legend()
fig.tight_layout(pad=1)
## Performance on downstream tasks when N_fine_tuned varies (nb of training samples for the fine-tuning)
## Pre-training: Age Pretraining or Contrastive Learning w/ Multi-Modality (dMRI + sMRI) or w/o Multi-Modality
seaborn.set_style('darkgrid')
N_pretraining = 1600
root = "/neurospin/psy_sbox/bd261576/checkpoints/"
# 1) Evaluation on sMRI with SCZ_VIP, BIOBD, ADNI
# N_finetuning = [[100, 300, 500], [100, 300, 500], [100, 200, 300]]
# pretraining_paths = [
# # Age-Aware Contrastive
# #"self_supervision/simCLR/DenseNet/N_{n}/exp_3/age_implicit_supervision/cutout_DA/sigma_5",
# # Contrastive learning w/ Multi-Modality (MultiModalLoss)
# "self_supervision/multimodal_simCLR/DenseNet/N_{n}/missing_modality/diffusion/ratio_0.6/cutout_crop_flip_blur_noise/MultiModalNTXEnLoss",
# # Contrastive Learning w/ Multi-Modality (CMCLoss)
# "self_supervision/multimodal_simCLR/DenseNet/N_{n}/missing_modality/diffusion/ratio_0.6/cutout_crop_flip_blur_noise/CMCNTXEnLoss/alpha_1",
# # Classical Contrastive learning (w/o Multi-Modality)
# "self_supervision/simCLR/DenseNet/N_{n}/exp_3/age_implicit_supervision/cutout_crop_flip_blur_noise/sigma_0",
# # Age Pretraining
# "regression_age_sex/Benchmark_IXI_HCP/DenseNet/N_{n}/Age",
# # Supervision from scratch SCZ vs CTL/BIP vs CTL/AD vs CTL
# "regression_age_sex/Benchmark_IXI_HCP/DenseNet/N_{n_finetune}/{pb}"
# ]
# nb_folds = [5, 5, 5, 5, 5]
# hyperparams = ['', '', '', '', '_Dx']
# blocks = ['_block4', '_block4', '_block4', '_block4', '']
# dbs = ['SCZ_VIP', 'BIOBD', 'ADNI']
# pb_names = ["SCZ vs CTL", "BIPOLAR vs CTL", "AD vs CTL"]
# pbs = ['Dx', 'Bipolar', 'Alzheimer']
# nets = ['DenseNet', 'DenseNet', 'DenseNet', 'DenseNet', 'DenseNet']
# exp_names = ["sMRI+dMRI $\\mathcal{L}_{MI}$", "sMRI+dMRI $\\mathcal{L}_{CMC}$",
# "sMRI SimCLR", "Age Supervision", "Fully Supervised"]
# all_epochs = [[399, 399, 399], [280, 280, 280], [49, 49, 49], [299, 299, 299], [299, 299, 99]] # N_exp x N_pb
# 2) Evaluation on dMRI with BIOBD only and CV (no independent test set)
N_finetuning = [[100, 300, 500], [100, 300, 500], [100, 200, 300]]
pretraining_paths = [
# Contrastive learning w/ Multi-Modality (MultiModalLoss)
"self_supervision/multimodal_simCLR/DenseNet/N_{n}/missing_modality/diffusion/ratio_0.6/cutout_crop_flip_blur_noise/MultiModalNTXEnLoss",
# Contrastive Learning w/ Multi-Modality (CMCLoss)
"self_supervision/multimodal_simCLR/DenseNet/N_{n}/missing_modality/diffusion/ratio_0.6/cutout_crop_flip_blur_noise/CMCNTXEnLoss/alpha_1",
# Classical Contrastive learning (w/o Multi-Modality)
"self_supervision/multimodal_simCLR/DenseNet/N_1600/diffusion/cutout_crop_flip_blur_noise",
# Age Pretraining
#"regression_age_sex/Benchmark_IXI_HCP/DenseNet/N_{n}/Age",
# Supervision from scratch SCZ vs CTL/BIP vs CTL/AD vs CTL
"regression_age_sex/Benchmark_IXI_HCP/DenseNet/N_{n_finetune}/{pb}"
]
nb_folds = [5, 5, 5, 5, 5]
hyperparams = ['', '', '', '', '_Dx']
blocks = ['_block4', '_block4', '_block4', '_block4', '']
dbs = ['SCZ_VIP', 'BIOBD', 'ADNI']
pb_names = ["SCZ vs CTL", "BIPOLAR vs CTL", "AD vs CTL"]
pbs = ['Dx', 'Bipolar', 'Alzheimer']
nets = ['DenseNet', 'DenseNet', 'DenseNet', 'DenseNet', 'DenseNet']
exp_names = ["sMRI+dMRI $\\mathcal{L}_{MI}$", "sMRI+dMRI $\\mathcal{L}_{CMC}$",
"sMRI SimCLR", "Age Supervision", "Fully Supervised"]
all_epochs = [[399, 399, 399], [280, 280, 280], [49, 49, 49], [299, 299, 299], [299, 299, 99]] # N_exp x N_pb
## Common part
CV = False
patterns_to_del=['validation_', ' on validation set']
results = {db: {name: {} for name in exp_names} for db in dbs}
fig, axes = plt.subplots(1, len(pbs), figsize=(len(pbs)*5, 5))
for i, (pb, db, pb_name) in enumerate(zip(pbs, dbs, pb_names)):
for (name, net, path, epochs, folds, hyper, b) in zip(exp_names, nets, pretraining_paths, all_epochs, nb_folds, hyperparams, blocks):
e = epochs[i]
for n_finetuning in N_finetuning[i]:
if name != "Fully Supervised":
if CV:
filename = "Test_{net}_{db}{hyper}_N{n_finetuning}_CV{block}_ModelFold0_fold{f}_epoch{e}.pkl"
else:
filename = "Test_{net}_{db}{hyper}_N{n_finetuning}{block}_ModelFold0_fold{f}_epoch{e}.pkl"
results[db][name][n_finetuning] = [get_pickle_obj(
os.path.join(root, path.format(n=N_pretraining, n_finetune=n_finetuning, pb=pb), filename.
format(net=net, db=db, pb=pb, hyper=hyper, n_finetuning=n_finetuning, block=b, f=fold,
e=e)))
for fold in range(folds)]
else:
if CV:
if pb == 'Alzheimer':
filename = "Validation_DenseNet_{pb}_{db}_CV_%i_epoch_{e}.pkl"
else:
filename = "Validation_DenseNet_{pb}_{db}_%i_epoch_{e}.pkl"
results[db][name][n_finetuning] = History.load(
os.path.join(root, path.format(n=N_pretraining,
n_finetune=n_finetuning, pb=pb), filename.
format(db=db, pb=pb, e=e)), folds=list(range(folds))). \
to_dict(patterns_to_del=patterns_to_del)
else:
filename = "Test_DenseNet_{pb}_{db}_fold{f}_epoch{e}.pkl"
results[db][name][n_finetuning] = [get_pickle_obj(
os.path.join(root, path.format(n=N_pretraining, n_finetune=n_finetuning, pb=pb), filename.
format(db=db, pb=pb, f=fold, e=e)))
for fold in range(folds)]
if name != "Fully Supervised":
ax = seaborn.lineplot(x=[n for n in N_finetuning[i] for _ in range(folds)],
y=[roc_auc_score(results[db][name][n][f]['y_true'],
results[db][name][n][f]['y'][:, 1])
for n in N_finetuning[i] for f in range(folds)],
ax=axes[i],
marker='o',
err_style='bars', err_kws={'capsize': 8},
label=name)
else:
X = [[n for n in N_finetuning[i]] for _ in range(folds)]
y = [[results[db][name][n]['roc_auc'][f][-1] if CV
else roc_auc_score(results[db][name][n][f]['y_true'],
results[db][name][n][f]['y_pred'])
for n in N_finetuning[i]] for f in range(folds)]
axes[i].errorbar(np.mean(X, axis=0), np.mean(y, axis=0), yerr=
|
np.std(y, axis=0)
|
numpy.std
|
import logging
import random
import string
import warnings
from collections import Counter, defaultdict
from time import sleep
from typing import (Any, Callable, Dict, Hashable, Iterable, List, Optional,
Tuple, Union)
import numpy as np
import torch
from probings.base import Metrics, ProbingDataset, ProbingModel, ProbingTask, ProbingNgramDataset
from sklearn.linear_model import \
SGDClassifier # LogisticRegressionCV, RidgeClassifierCV,
from sklearn.metrics import accuracy_score, balanced_accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split
# from sklearn.neural_network import MLPClassifier
from probings.mlp_utils import MLPClassifier
from probings.tfidf_utils import TfIdfClassifier
from utils.sample import Sample
log = logging.Logger("token_classification", level=logging.INFO)
class LowerBoundModel:
def __init__(self, bpe2most_common: Dict[str, Any], global_mostcommon: dict):
self.bpe2most_common = bpe2most_common
self.global_mostcommon = global_mostcommon
def __call__(self, bpe: str) -> Any:
if bpe in self.bpe2most_common:
return self.bpe2most_common[bpe]
else:
return self.global_mostcommon # handle OOV bpe token
class LowerBoundNgrammModel:
def __init__(self, bpe2most_common: Dict[str, Any], global_mostcommon: dict, n: int):
self.bpe2most_common = bpe2most_common
self.global_mostcommon = global_mostcommon
self.n = n
def __call__(self, data: ProbingNgramDataset) -> List[Any]:
preds = []
context_str = data.context_str(n=self.n)
for context in context_str:
if context in self.bpe2most_common:
preds.append(self.bpe2most_common[context])
else:
preds.append(self.global_mostcommon) # handle OOV bpe token
return preds
class TokenClassificationLinearModel(ProbingModel):
def __init__(self):
pass
def train_linear_model(self, X_train, y_train):
# model = LogisticRegressionCV(multi_class="multinomial", max_iter=50)
params = {"alpha": [0.0001, 0.001, 0.01, 0.1]}
model = SGDClassifier(loss="log", verbose=0, tol=0.0001)
grid = GridSearchCV(model, param_grid=params, verbose=3)
grid.fit(np.array(X_train), np.array(y_train))
return grid
def predict_linear_model(self, model, X_train):
return model.predict(np.array(X_train))
def train_upper_bound_model(self, X_train, y_train):
# model = MLPClassifier(hidden_layer_sizes=(100, 100, 100), verbose=True)
model = MLPClassifier()
model.fit(np.array(X_train), np.array(y_train))
return model
def predict_upper_bound_model(self, model, X_train):
return model.predict(np.array(X_train))
def eval_prediction(self, y_true: Any, y_pred: Any) -> Metrics:
return {
"1 - balanced_adj_acc": 1
- balanced_accuracy_score(y_true, y_pred, adjusted=True),
"1 - acc": 1 - accuracy_score(y_true, y_pred),
}
def train_lower_bound(
self, samples: List[str], y_train: List[Any]
) -> Callable[[str], Any]:
samples = ["" for _ in range(len(samples))]
assert len(samples) == len(y_train)
bpe2labels: Dict[str, Counter] = defaultdict(Counter)
for bpe, label in zip(samples, y_train):
bpe2labels[bpe].update([label])
global_mostcommon = Counter(y_train).most_common(1)[0][0]
bpe2most_common = {
bpe: counter.most_common(1)[0][0] for bpe, counter in bpe2labels.items()
}
model = LowerBoundModel(bpe2most_common, global_mostcommon)
print(bpe2most_common)
return model
def train_bag_of_words(
self, samples: List[Sample], y_train: List[Any], sample2hashes: Callable[[Sample], List[Hashable]]
) -> Any:
model = TfIdfClassifier()
X_train = [sample2hashes(sample) for sample in samples]
model.fit(X_train, y_train)
return model
def predict_bag_of_words(
self, model: Any, samples: List[Sample], sample2hashes: Callable[[Sample], List[Hashable]]
) -> Any:
X_test = [sample2hashes(sample) for sample in samples]
return model.predict(X_test)
def train_n_gramm_lower_bound(
self, data: ProbingNgramDataset, n=3
) -> Callable[[ProbingNgramDataset], Any]:
bpe2labels: Dict[str, Counter] = defaultdict(Counter)
context_str = data.context_str(n=n)
assert len(context_str) == len(data.y)
for context_str, label in zip(context_str, data.y):
bpe2labels[context_str].update([label])
global_mostcommon = Counter(data.y).most_common(1)[0][0]
bpe2most_common = {
bpe: counter.most_common(1)[0][0] for bpe, counter in bpe2labels.items()
}
model = LowerBoundNgrammModel(bpe2most_common, global_mostcommon, n=n)
return model
class NgramDataset(ProbingNgramDataset):
def __init__(self,
bpes: List[List[str]],
samples: List[Tuple[int, int]],
y: np.ndarray):
assert len(samples) == len(y)
self._bpes = bpes
self._y = y
self._samples = samples
@property
def bpes(self) -> List[List[str]]:
return self._bpes
@property
def positions(self) -> List[Tuple[int, int]]:
return self._samples
@property
def y(self) -> Any:
return self._y
class Dataset(ProbingDataset):
def __init__(
self,
X: Dict[int, Any],
y: np.ndarray,
samples: List[str],
node_types: List[str] = None,
all_bpes: List[List[str]] = None,
all_node_types: List[List[str]] = None,
token2id: List[Tuple[int, int]] = None,
):
self._X = X
self._y = y
self._samples = samples # list of bpe tokens
# additional information
self.node_types = node_types
self.all_bpes = all_bpes
self.all_node_types = all_node_types
self.token2id = token2id
@property
def X_by_layer(self) -> Dict[int, Any]:
return self._X
@property
def y(self) -> Any:
return self._y
@property
def samples(self) -> List[str]:
return self._samples
class TokensClassificationProbingTask(ProbingTask):
def __init__(
self,
name,
get_target: Callable[[Sample], List[Hashable]],
code_aug_type="identity",
description="",
max_classes=15,
skip_label: Optional[Any] = None,
):
self.name = name
self._get_target = get_target
self.skip_label = skip_label
self.description = description.strip()
self.code_aug_type = code_aug_type
self.r2id: Optional[dict] = None
self.max_classes = max_classes
def get_name(self) -> str:
return self.name
def get_description(self) -> str:
return self.description
def get_augmentation(self) -> str:
return self.code_aug_type # can be changed
def get_sample2hashable(self):
# return self._sample2hashable(sample)
return lambda sample: sample.bpe
def make_dict(
self, iterator: Iterable[Sample], max_classes, min_samples_per_class=30
) -> Dict[Hashable, int]:
"""preprocess a dict to enumerate classes
Args:
iterator (Iterable[Sample]): data samples
max_classes ([type]): how many classes to preserve for classification
min_samples_per_class (int, optional): If the class is less than min_samples_per_class, skip this class. Defaults to 30.
Returns:
Dict[Hashable, int]: a dictionary class -> label
"""
# Hashable -> idx
logging.info("make dict")
targets = [elem for sample in iterator for elem in self._get_target(sample)]
if self.skip_label is not None:
to_skip = self.skip_label
if not isinstance(to_skip, list):
to_skip = [self.skip_label]
for skip in to_skip:
warnings.warn(f"skip labels: {skip}")
targets = list(filter(lambda x: x != skip, targets))
r2id_counter = Counter(targets)
logging.info(f"classes: {r2id_counter.most_common(n=max_classes)}")
n_classes = min(
max_classes, len(r2id_counter.values())
) # all other are mapped to OOV
logging.info(f"max classes: {n_classes}")
r2id = {}
total = 0
for i, elem in enumerate(r2id_counter.items()):
total += elem[1]
logging.info(f"total={total}")
skipped = []
class_index = 0
for i, elem in enumerate(r2id_counter.items()):
if elem[1] < min_samples_per_class:
skipped.append(elem)
r2id[elem[0]] = n_classes
continue
else:
logging.info(f"added: {elem}")
r2id[elem[0]] = class_index
class_index += 1
if len(skipped) > 0:
warnings.warn(
f"skipped classes by min_fraction_per_class: {len(skipped)}: {skipped[0]}.."
)
return r2id
def get_target(self, x: List[Sample]) -> List[Hashable]:
return self._get_target(x)
def get_probing_model(self) -> TokenClassificationLinearModel:
return TokenClassificationLinearModel()
def _map_target(self, targets: List[Hashable]) -> List[int]:
"""get target for each token
Args:
targets (List[Hashable]): list of targets
Raises:
ValueError: if dict is not initialized
Returns:
List[int]: target for each token
"""
if self.r2id is None:
raise ValueError("r21d (dict relation 2 idx) is not initialized")
def get(dct, elem):
if not elem in dct:
# warnings.warn(f"elem {elem} not in r2id")
return self.max_classes
else:
return dct[elem]
return [get(self.r2id, elem) for elem in targets]
def _make_ngram_dataset(self, train_data: List[Sample], test_data: List[Sample], layers: List[int]
) -> Tuple[ProbingNgramDataset, ProbingNgramDataset]:
logging.info("make dataset")
self.r2id = self.make_dict(
train_data, self.max_classes
) # preprocess targets to enumerate them
def do(data: List[Sample], layers: List[int]) -> Dataset:
y_list: List[Union[float, int]] = []
samples: List[int] = []
bpes: List[List[str]] = []
for id_, elem in enumerate(data):
targets = self._get_target(elem)
y_list.extend(self._map_target(targets))
bpe = elem.bpe.split(" ")
bpes.append(bpe)
samples.extend([(id_, j) for j in range(len(bpe))])
y_numpy = np.array(y_list)
samples_numpy = np.array(samples)
where = y_numpy < self.max_classes # filter OOV
y_numpy = y_numpy[where]
samples_numpy = samples_numpy[where]
logging.info(f"remained y_numpy: {str(np.unique(y_numpy))}")
sleep(2)
logging.info(f"class distribution: {Counter(y_numpy).most_common()}")
return NgramDataset(
bpes=bpes, samples=samples_numpy, y=y_numpy
)
return do(train_data, layers), do(test_data, layers)
def _make_dataset(
self, train_data: List[Sample], test_data: List[Sample], layers: List[int]
) -> Tuple[ProbingDataset, ProbingDataset]:
logging.info("make dataset")
self.r2id = self.make_dict(
train_data, self.max_classes
) # preprocess targets to enumerate them
def do(data: List[Sample], layers: List[int]) -> Dataset:
X_by_layer = defaultdict(list)
y_list: List[Union[float, int]] = []
samples: List[str] = []
node_types: List[str] = []
token2id = []
all_bpes = []
all_node_types = []
for id_, elem in enumerate(data):
features = elem.features(handle="none")
for layer in layers:
assert len(features) > layer
X_by_layer[layer].extend(features[layer].numpy())
targets = self._get_target(elem)
y_list.extend(self._map_target(targets))
samples.extend(elem.bpe.split(" "))
node_types.extend([node.node.type for node in elem.nodes])
token2id.extend([(id_, j) for j in range(len(elem.nodes))])
all_bpes.append(elem.bpe.split(" "))
all_node_types.append([x.node.type for x in elem.nodes])
y_numpy =
|
np.array(y_list)
|
numpy.array
|
import numpy as np
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .decoder import Decoder
from .decoder_gru import DecoderGRU
from .decoder_lstm import DecoderLSTM
from .encoder import Encoder
from selfModules.embedding import Embedding
from selfModules.perplexity import Perplexity
from utils.functional import kld_coef, parameters_allocation_check, fold
class RVAE_dilated(nn.Module):
def __init__(self, params, prefix=''):
super(RVAE_dilated, self).__init__()
self.params = params
self.embedding = Embedding(self.params, '', prefix)
self.encoder = Encoder(self.params)
self.context_to_mu = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
self.context_to_logvar = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
if self.params.decoder_type == 'gru' or self.params.decoder_type == 'gru_emb':
self.decoder = DecoderGRU(self.params)
elif self.params.decoder_type == 'lstm':
self.decoder = DecoderLSTM(self.params)
elif self.params.decoder_type == 'dilation':
self.decoder = Decoder(self.params)
params_size = 0
params_num = 0
for p in self.parameters():
param_size = 1
for s in p.size():
param_size = param_size * s
if p.requires_grad: params_size = params_size + param_size
if p.requires_grad: params_num = params_num + 1
#if p.requires_grad: print('Grad Param', type(p.data), p.size())
print('RVAE parameters num[%s] size[%s]'%(params_num, params_size))
def forward(self, drop_prob,
encoder_word_input=None, encoder_character_input=None,
decoder_word_input=None,
z=None, initial_state=None):
"""
:param encoder_word_input: An tensor with shape of [batch_size, seq_len] of Long type
:param encoder_character_input: An tensor with shape of [batch_size, seq_len, max_word_len] of Long type
:param decoder_word_input: An tensor with shape of [batch_size, max_seq_len + 1] of Long type
:param drop_prob: probability of an element of decoder input to be zeroed in sense of dropout
:param z: context if sampling is performing
:return: unnormalized logits of sentence words distribution probabilities
with shape of [batch_size, seq_len, word_vocab_size]
kld loss estimation
"""
assert parameters_allocation_check(self), \
'Invalid CUDA options. Parameters should be allocated in the same memory'
use_cuda = self.embedding.word_embed.weight.is_cuda
if not self.params.word_is_char:
assert z is None and fold(lambda acc, parameter: acc and parameter is not None,
[encoder_word_input, encoder_character_input, decoder_word_input],
True) \
or (z is not None and decoder_word_input is not None), \
"Invalid input. If z is None then encoder and decoder inputs should be passed as arguments"
if z is None:
''' Get context from encoder and sample z ~ N(mu, std)
'''
[batch_size, _] = encoder_word_input.size()
encoder_input = self.embedding(encoder_word_input, encoder_character_input)
context = self.encoder(encoder_input)
mu = self.context_to_mu(context)
logvar = self.context_to_logvar(context)
std = t.exp(0.5 * logvar)
z = Variable(t.randn([batch_size, self.params.latent_variable_size]))
if use_cuda:
z = z.cuda()
z = z * std + mu
kld = (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean().squeeze()
else:
kld = None
decoder_input = self.embedding.word_embed(decoder_word_input)
logits_out, final_state = self.decoder(decoder_input, z, drop_prob, initial_state)
return logits_out, kld, z, final_state
def learnable_parameters(self):
# word_embedding is constant parameter thus it must be dropped from list of parameters for optimizer
return [p for p in self.parameters() if p.requires_grad]
def trainer(self, optimizer, batch_loader):
perplexity = Perplexity()
def train(i, batch_size, use_cuda, dropout):
input = batch_loader.next_batch(batch_size, 'train')
input = [(Variable(t.from_numpy(var)) if var is not None else None) for var in input]
input = [(var.long() if var is not None else None) for var in input]
input = [(var.cuda() if var is not None and use_cuda else var) for var in input]
[encoder_word_input, encoder_character_input, decoder_word_input, _, target] = input
logits_out, kld, _, _ = self(dropout,
encoder_word_input, encoder_character_input,
decoder_word_input,
z=None, initial_state=None)
if self.params.decoder_type == 'dilation' or self.params.decoder_type == 'gru' or self.params.decoder_type == 'lstm':
logits = logits_out.view(-1, self.params.word_vocab_size)
target = target.view(-1)
cross_entropy = F.cross_entropy(logits, target)
# since cross enctropy is averaged over seq_len, it is necessary to approximate new kld
loss = 79 * cross_entropy + kld_coef(i) * kld
logits = logits.view(batch_size, -1, self.params.word_vocab_size)
target = target.view(batch_size, -1)
ppl = perplexity(logits, target).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return ppl, kld, None
elif self.params.decoder_type == 'gru_emb':
decoder_target = self.embedding(target, None)
error = t.pow(logits_out - decoder_target, 2).mean()
'''
loss is constructed fromaveraged over whole batches error
formed from squared error between output and target
and KL Divergence between p(z) and q(z|x)
'''
loss = 400 * error + kld_coef(i) * kld
optimizer.zero_grad()
loss.backward()
optimizer.step()
return error, kld, kld_coef(i)
return train
def validater(self, batch_loader):
perplexity = Perplexity()
def validate(batch_size, use_cuda):
input = batch_loader.next_batch(batch_size, 'valid')
input = [Variable(t.from_numpy(var)) if var is not None else None for var in input]
input = [var.long() if var is not None else None for var in input]
input = [var.cuda() if use_cuda and var is not None else var for var in input]
[encoder_word_input, encoder_character_input, decoder_word_input, _, target] = input
logits_out, kld, _, _ = self(0.,
encoder_word_input, encoder_character_input,
decoder_word_input,
z=None, initial_state=None)
if self.params.decoder_type == 'dilation' or self.params.decoder_type == 'gru' or self.params.decoder_type == 'lstm':
ppl = perplexity(logits_out, target).mean()
return ppl, kld
elif self.params.decoder_type == 'gru_emb':
decoder_target = self.embedding(target, None)
error = t.pow(logits_out - decoder_target, 2).mean()
return error, kld
return validate
def style(self, batch_loader, seq, use_cuda, sample_size=30):
decoder_word_input_np, _ = batch_loader.go_input(1)
encoder_wids = []
for i in range(len(seq)):
word = seq[i]
wid = batch_loader.word_to_idx[word]
word = np.array([[wid]])
decoder_word_input_np = np.append(decoder_word_input_np, word, 1)
encoder_wids.append(wid)
encoder_wids = encoder_wids[::-1]
encoder_word_input_np = np.array([encoder_wids])
decoder_word_input = Variable(t.from_numpy(decoder_word_input_np)).long()
encoder_word_input = Variable(t.from_numpy(encoder_word_input_np)).long()
decoder_word_input = t.cat([decoder_word_input]*sample_size, 0)
encoder_word_input = t.cat([encoder_word_input]*sample_size, 0)
if use_cuda:
decoder_word_input = decoder_word_input.cuda()
encoder_word_input = encoder_word_input.cuda()
if self.params.word_is_char: #TODO only for chinese word right now
logits_out, kld, z, final_state = self(0.,
encoder_word_input, None,
decoder_word_input,
z=None, initial_state=None)
return z.data.cpu().numpy()
return None
def sample(self, batch_loader, seq_len, seeds, use_cuda, template=None, beam_size=50):
(z_num, _) = seeds.shape
print("z sample size", z_num, "beam size", beam_size)
beam_sent_wids, _ = batch_loader.go_input(1)
beam_sent_last_wid = beam_sent_wids[:,-1:]
results = []
end_token_id = batch_loader.word_to_idx[batch_loader.end_token]
initial_state = None
sentence = []
for i in range(seq_len):
beam_sent_num = len(beam_sent_wids)
if beam_sent_num == 0:
break
if len(results) >= beam_size:
break
if self.params.decoder_type == 'dilation' or not self.params.decoder_stateful:
beam_z_sent_wids = np.repeat(beam_sent_wids, [z_num], axis=0) if z_num > 1 else beam_sent_wids
elif self.params.decoder_type == 'gru' or self.params.decoder_type == 'lstm' or self.params.decoder_type == 'gru_emb':
beam_z_sent_wids = np.repeat(beam_sent_last_wid, [z_num], axis=0) if z_num > 1 else beam_sent_last_wid
decoder_word_input = Variable(t.from_numpy(beam_z_sent_wids).long())
decoder_word_input = decoder_word_input.cuda() if use_cuda else decoder_word_input
beam_seeds = Variable(t.from_numpy(seeds).float())
beam_seeds = t.cat([beam_seeds]*beam_sent_num, 0) if beam_sent_num > 1 else beam_seeds
beam_seeds = beam_seeds.cuda() if use_cuda else beam_seeds
if not self.params.decoder_stateful:
initial_state = None
elif initial_state is not None and z_num > 1:
initial_state = initial_state.view(-1, 1, self.params.decoder_rnn_size)
initial_state = initial_state.repeat(1, z_num, 1)
initial_state = initial_state.view(self.params.decoder_num_layers, -1, self.params.decoder_rnn_size)
beam_sent_logps = None
if template and len(template) > i and template[i] != '#':
beam_sent_wids = np.column_stack((beam_sent_wids, [batch_loader.word_to_idx[template[i]]]*beam_sent_num))
beam_sent_last_wid = beam_sent_wids[:,-1:]
else:
logits_out, _, _, initial_state = self(0., None, None,
decoder_word_input,
beam_seeds, initial_state)
if self.params.decoder_type == 'dilation' or self.params.decoder_type == 'gru' or self.params.decoder_type == 'lstm':
[b_z_n, sl, _] = logits_out.size()
logits = logits_out.view(-1, self.params.word_vocab_size)
prediction = F.softmax(logits)
prediction = prediction.view(beam_sent_num, z_num, sl, -1)
# take mean of sentence vocab probs for each beam group
beam_sent_vps = np.mean(prediction.data.cpu().numpy(), 1)
# get vocab probs of the sentence last word for each beam group
beam_last_vps = beam_sent_vps[:,-1]
beam_last_word_size = min(batch_loader.words_vocab_size, beam_size)
# choose last word candidate ids for each beam group
beam_choosed_wids = np.array([np.random.choice(range(batch_loader.words_vocab_size), beam_last_word_size, replace=False, p=last_vps.ravel()).tolist() for last_vps in beam_last_vps])
# print("candidate shape =", beam_choosed_wids.shape)
# dumplicate beam sentence word ids for choosed last word size
beam_sent_wids = np.repeat(beam_sent_wids, [beam_last_word_size], axis=0)
beam_sent_wids = np.column_stack((beam_sent_wids, beam_choosed_wids.reshape(-1)))
if not self.params.decoder_stateful:
initial_state = None
elif initial_state is not None:
initial_state = initial_state.view(-1, 1, self.params.decoder_rnn_size)
initial_state = initial_state.repeat(1, beam_last_word_size, 1)
initial_state = initial_state.view(self.params.decoder_num_layers, -1, self.params.decoder_rnn_size)
# get sentence word probs
beam_sent_wps = []
whole_or_last = 1 if self.params.decoder_type == 'dilation' or not self.params.decoder_stateful else (-1 if self.params.decoder_type == 'gru' or self.params.decoder_type == 'lstm' else 0)
for i, sent in enumerate(beam_sent_wids):
beam_sent_wps.append([])
for j, wid in enumerate(sent[whole_or_last:]):
beam_sent_wps[i].append(beam_sent_vps[i//beam_last_word_size][j][wid])
# desc sort sum of the beam sentence log probs
beam_sent_logps = np.sum(np.log(beam_sent_wps), axis=1)
beam_sent_ids = np.argsort(beam_sent_logps)[-(beam_size-len(results)):][::-1]
# get the top beam size sentences
beam_sent_wids = beam_sent_wids[beam_sent_ids]
beam_sent_logps = np.exp(beam_sent_logps[beam_sent_ids])
#print("candidate", "".join([batch_loader.idx_to_word[wid] for wid in beam_sent_wids[:,-1].reshape(-1)]))
if initial_state is not None and len(beam_sent_ids) > 0:
idx = Variable(t.from_numpy(beam_sent_ids.copy())).long()
initial_state = initial_state.index_select(1, idx)
elif self.params.decoder_type == 'gru_emb':
[b_z_n, sl, _] = logits_out.size()
#TODO
out = logits_out.view(-1, self.params.word_embed_size)
similarity = self.embedding.similarity(out)
similarity = similarity.data.cpu().numpy()
similarity = np.mean(similarity, 0)
similarity = similarity.view(beam_sent_num, z_num, sl, -1)
beam_last_word_size = min(batch_loader.words_vocab_size, beam_size)
# choose last word candidate ids for each beam group
beam_choosed_wids = np.array([np.random.choice(range(batch_loader.words_vocab_size), beam_last_word_size, replace=False, p=last_vps.ravel()).tolist() for last_vps in similarity])
idx = np.random.choice(range(batch_loader.words_vocab_size), replace=False, p=similarity.ravel())
if idx == end_token_id:
break
beam_sent_wids =
|
np.array([[idx]])
|
numpy.array
|
""" Policies to select unlabelled candidates among many active learning arms.
All policies on selecting the next unlabelled instance in the pool for labelling.
Module structure:
- Policy
- SingleSuggestion
- MultipleSuggestions
- ActiveBandit
- ThompsonSampling
- OCUCB
- KLUCB
- EXP3PP
- ActiveAggregator
"""
# Author: <NAME>
# License: BSD 3 clause
import numpy as np
from abc import ABC, abstractmethod
from numpy.random import RandomState
from mclearn.schulze import _aggregate_votes_schulze
__all__ = ['SingleSuggestion',
'ThompsonSampling',
'OCUCB',
'KLUCB',
'EXP3PP',
'ActiveAggregator',
'COMB']
class Policy(ABC):
""" Abstract base class for a policy.
This class cannot be used directly but instead serves as the base class for
all policies. Each policy needs to implement the `select` method, which
return the indices of the pool that we should query next for the label.
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
"""
def __init__(self, pool, labels, classifier, random_state=None,
n_candidates=None, n_best_candidates=1):
self.pool = pool
self.labels = labels
self.classifier = classifier
self.n_candidates = n_candidates if n_candidates is not None else len(self.pool)
self.n_best_candidates = n_best_candidates
self.pool_size = len(self.pool)
if type(random_state) is RandomState:
self.seed = random_state
else:
self.seed = RandomState(random_state)
@abstractmethod
def select(self):
""" Needs to return an array of indices of objects from the pool. """
pass
def add(self, index, label):
""" Add a newly obtained label to the labelled pool and retrain.
Parameters
----------
index : int or array of ints
The index or indices of the object(s) that have just been labelled.
label : object or array of objects
The label(s) obtained from the oracle.
"""
self.labels[index] = label
train_idx = ~self.labels.mask
self.classifier.fit(self.pool[train_idx], self.labels[train_idx])
def receive_reward(self, reward):
""" Receive a reward from the environment and update the policy's parameters. """
pass
def history(self):
""" Return a dictionary containing the history of the policy. """
return {}
def _sample(self):
""" Take a random sample of candidates from the unlabelled pool. """
candidate_mask = self.labels.mask
if 0 < self.n_candidates < np.sum(candidate_mask):
unlabelled_index = np.where(candidate_mask)[0]
candidate_index = self.seed.choice(unlabelled_index, self.n_candidates, replace=False)
candidate_mask = np.zeros(self.pool_size, dtype=bool)
candidate_mask[candidate_index] = True
return candidate_mask
class SingleSuggestion(Policy):
""" Select candidates from the pool according to one particular active learning rule.
This class is a wrapper for a particular active learning rule.
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arm : Arm object
A particular active learning rule. The arm needs to implement the ``select``
method that returns an array of indices of objects from the pool for
labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
"""
def __init__(self, pool, labels, classifier, arm, random_state=None,
n_candidates=None, n_best_candidates=1):
super().__init__(pool, labels, classifier, random_state,
n_candidates, n_best_candidates)
self.arm = arm
def select(self):
""" Use the initialised arm to choose the next candidates for labelling.
Returns
-------
best_candidates : array of ints
An array of indices of objects in the pool.
"""
candidate_mask = self._sample()
predictions = self.classifier.predict_proba(self.pool[candidate_mask])
best_candidates = self.arm.select(candidate_mask, predictions, self.n_best_candidates)
return best_candidates
class MultipleSuggestions(Policy):
""" Abstract base class for a policy that takes multiple active learning rules.
This class cannot be used directly but instead serves as the base class for
all policies that takes multiple active learning rules.
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arms : array of Arm objects
Each arm is a particular active learning rule. The arm needs to implement
the ``select`` method that returns an array of indices of objects from the
pool for labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
"""
def __init__(self, pool, labels, classifier, arms, random_state=None,
n_candidates=None, n_best_candidates=1):
super().__init__(pool, labels, classifier, random_state,
n_candidates, n_best_candidates)
self.arms = arms
self.n_arms = len(arms)
class ActiveBandit(MultipleSuggestions):
""" Abstract base class for a bandit policy that takes multiple active learning rules.
This class cannot be used directly but instead serves as the base class for
all bandit policies that take as input multiple active learning rules.
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arms : array of Arm objects
Each arm is a particular active learning rule. The arm needs to implement
the ``select`` method that returns an array of indices of objects from the
pool for labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
"""
def __init__(self, pool, labels, classifier, arms, random_state=None,
n_candidates=None, n_best_candidates=1):
super().__init__(pool, labels, classifier, arms, random_state,
n_candidates, n_best_candidates)
self.time_step = 0
self.T = np.zeros(self.n_arms)
self.mu = np.zeros(self.n_arms)
self.sum_mu = np.zeros(self.n_arms)
self.reward_history = []
self.T_history = [self.T.copy()]
self.mu_history = [self.mu.copy()]
def _select_from_arm(self):
""" Use a particular arm to select candidates from the pool. """
candidate_mask = self._sample()
predictions = self.classifier.predict_proba(self.pool[candidate_mask])
best_candidates = self.arms[self.selected_arm].select(
candidate_mask, predictions, self.n_best_candidates)
return best_candidates
def receive_reward(self, reward):
""" Receive a reward from the environment and updates the policy's prior beliefs.
Parameters
----------
reward : float
The reward from the environment should be a good proxy for the decrease
in the generalisation error of the classifier.
"""
# update empirical estimate of the reward and the times an arm is selected
self.sum_mu[self.selected_arm] += reward
self.T[self.selected_arm] += 1
self.mu[self.selected_arm] = self.sum_mu[self.selected_arm] / self.T[self.selected_arm]
# store results in history
self.mu_history.append(self.mu.copy())
self.T_history.append(self.T.copy())
self.reward_history.append(reward)
def history(self):
""" Return a dictionary containing the history of the policy.
Returns
-------
history : dict
The dictionary contains the following keys: mu, T, and reward.
The corresponding value of each key is an array containing the state
in each time step.
"""
history = {}
history['mu'] = np.array(self.mu_history)
history['T'] = np.array(self.T_history)
history['reward'] = np.array(self.reward_history)
return history
class BaselineCombiner(ActiveBandit):
""" Baseline bandit combiner.
At each step, we randomly select an arm, ignoring the rewards.
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arms : array of Arm objects
Each arm is a particular active learning rule. The arm needs to implement
the ``select`` method that returns an array of indices of objects from the
pool for labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
"""
def select(self):
""" Select an arm at random.
Returns
-------
best_candidates : array of ints
An array of indices of objects in the pool.
"""
self.selected_arm = self.seed.choice(self.n_arms)
return self._select_from_arm()
class ThompsonSampling(ActiveBandit):
""" Thompson Sampling with normally distributed rewards.
This class cannot be used directly but instead serves as the base class for
all bandit policies that takes as input multiple active learning rules.
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arms : array of Arm objects
Each arm is a particular active learning rule. The arm needs to implement
the ``select`` method that returns an array of indices of objects from the
pool for labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
mu : float, optional (default=0.5)
The initial estimate of the mean of the distribution of the mean reward
from all arms.
sigma : float, optional (default=0.02)
The initial estimate of the variance of the distribution of the mean reward
from all arms.
tau : float, optional (default=0.02)
The initial estimate of the variance of the reward received from all arms.
"""
def __init__(self, pool, labels, classifier, arms, random_state=None,
n_candidates=None, n_best_candidates=1, mu=0.5, sigma=0.02, tau=0.02):
super().__init__(pool, labels, classifier, arms, random_state,
n_candidates, n_best_candidates)
self.mu = np.full(self.n_arms, mu, dtype=np.float64)
self.sigma = np.full(self.n_arms, sigma, dtype=np.float64)
self.tau = np.full(self.n_arms, tau, dtype=np.float64)
self.mu_history = [self.mu.copy()]
self.sigma_history = [self.sigma.copy()]
self.tau_history = [self.tau.copy()]
def select(self):
""" Use Thompson sampling to choose the next candidates for labelling.
Returns
-------
best_candidates : array of ints
An array of indices of objects in the pool.
"""
# take a sample of rewards from the current prior of heuristics
sample_rewards = self.seed.normal(self.mu, self.sigma)
self.selected_arm = np.argmax(sample_rewards)
return self._select_from_arm()
def receive_reward(self, reward):
""" Receive a reward from the environment and updates the policy's prior beliefs.
Parameters
----------
reward : float
The reward from the environment should be a good proxy for the decrease
in the generalisation error of the classifier.
"""
mu = self.mu[self.selected_arm]
sigma = self.sigma[self.selected_arm]
tau = self.tau[self.selected_arm]
self.mu[self.selected_arm] = (mu * tau + reward * sigma) / (tau + sigma)
self.sigma[self.selected_arm] = (sigma * tau) / (tau + sigma)
self.T[self.selected_arm] += 1
# store results in history
self.mu_history.append(self.mu.copy())
self.sigma_history.append(self.sigma.copy())
self.tau_history.append(self.tau.copy())
self.T_history.append(self.T.copy())
self.reward_history.append(reward)
def history(self):
""" Return a dictionary containing the history of the policy.
Returns
-------
history : dict
The dictionary contains the following keys: mu, sigma, T, and reward.
The corresponding value of each key is an array containing the state
in each time step.
"""
history = {}
history['mu'] = np.array(self.mu_history)
history['sigma'] = np.array(self.sigma_history)
history['tau'] = np.array(self.tau_history)
history['T'] = np.array(self.T_history)
history['reward'] = np.array(self.reward_history)
return history
class OCUCB(ActiveBandit):
""" Optimally Confident UCB (OC-UCB) Policy, based on Lattimore (2015).
The OC-UCB algorithm is presented in the paper `Optimally Confident UCB
Improved Regret for Finite-Armed Bandits` by <NAME> in 2015.
The algorithm is based on UCB and contains two tunable variables,
``alpha`` and ``psi``.
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arms : array of Arm objects
Each arm is a particular active learning rule. The arm needs to implement
the ``select`` method that returns an array of indices of objects from the
pool for labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
alpha : float, optional (default=3)
Lattimore (2015) found that alpha=3 leads to good results.
psi : float, optional (default=2)
Lattimore (2015) found that psi=2 leads to good results.
horizon : int
The OC-UCB algorithm requires the knowledge of the horizon, i.e.
the maximum number of time steps.
"""
def __init__(self, pool, labels, classifier, arms, random_state=None,
n_candidates=None, n_best_candidates=1, alpha=3, psi=2,
horizon=1000):
super().__init__(pool, labels, classifier, arms, random_state,
n_candidates, n_best_candidates)
self.alpha = 3
self.psi = 2
self.mu = np.zeros(self.n_arms)
self.sum_mu = np.zeros(self.n_arms)
self.mu_history = [self.mu.copy()]
self.horizon = horizon
def select(self):
""" Use the OC-UCB algorithm to choose the next candidates for labelling.
Returns
-------
best_candidates : array of ints
An array of indices of objects in the pool.
"""
self.time_step += 1
if self.time_step <= self.n_arms:
self.selected_arm = self.time_step - 1
return self._select_from_arm()
else:
ucb = self.mu + np.sqrt((self.alpha / self.T) *
np.log(self.psi * self.horizon / self.time_step))
self.selected_arm = np.argmax(ucb)
return self._select_from_arm()
class EXP3PP(ActiveBandit):
""" EXP3++ policy, as described by Seldin (2014).
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arms : array of Arm objects
Each arm is a particular active learning rule. The arm needs to implement
the ``select`` method that returns an array of indices of objects from the
pool for labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
"""
def __init__(self, pool, labels, classifier, arms, random_state=None,
n_candidates=None, n_best_candidates=1):
super().__init__(pool, labels, classifier, arms, random_state,
n_candidates, n_best_candidates)
self.loss = np.zeros(self.n_arms)
self.loss_history = [self.loss]
def select(self):
""" Use the EXP++ algorithm to choose the next candidates for labelling.
Returns
-------
best_candidates : array of ints
An array of indices of objects in the pool.
"""
self.time_step += 1
# eta is the learning rate
# xi is the exploration parameter
beta = 0.5 * np.sqrt(np.log(self.n_arms) / (self.time_step * self.n_arms))
gap = np.minimum(1, (1 / self.time_step) * (self.loss - np.min(self.loss)))
xi = 18 * np.log(self.time_step)**2 / (self.time_step * gap**2)
xi[np.isnan(xi)] = np.inf
epsilon = np.minimum(1/(2 * self.n_arms), beta)
epsilon = np.minimum(epsilon, xi)
eta = beta
rho = np.exp(-eta * self.loss)
rho /= np.sum(rho)
self.rho = (1 - np.sum(epsilon)) * rho + epsilon
self.selected_arm = self.seed.choice(self.n_arms, p=rho)
return self._select_from_arm()
def receive_reward(self, reward):
""" Receive a reward from the environment and updates the policy's prior beliefs.
Parameters
----------
reward : float
The reward from the environment should be a good proxy for the decrease
in the generalisation error of the classifier.
"""
loss = 1 - reward
self.loss[self.selected_arm] += loss / self.rho[self.selected_arm]
self.T[self.selected_arm] += 1
# store results in history
self.T_history.append(self.T.copy())
self.reward_history.append(reward)
self.loss_history.append(self.loss.copy())
def history(self):
""" Return a dictionary containing the history of the policy.
Returns
-------
history : dict
The dictionary contains the following keys: mu, sigma, T, and reward.
The corresponding value of each key is an array containing the state
in each time step.
"""
history = {}
history['T'] = np.array(self.T_history)
history['reward'] = np.array(self.reward_history)
history['loss'] = np.array(self.loss_history)
return history
class KLUCB(ActiveBandit):
""" kl-UCB policy with normally distributed rewards, as described by Cappé (2013).
Parameters
----------
pool : numpy array of shape [n_samples, n_features]
The feature matrix of all the examples (labelled and unlabelled).
labels : numpy masked array of shape [n_samples].
The missing entries of y corresponds to the unlabelled examples.
classifier : Classifier object
The classifier should have the same interface as scikit-learn classifier.
In particular, it needs to have the fit and predict methods.
arms : array of Arm objects
Each arm is a particular active learning rule. The arm needs to implement
the ``select`` method that returns an array of indices of objects from the
pool for labelling.
random_state : int or RandomState object, optional (default=None)
Provide a random seed if the results need to be reproducible.
n_candidates : int, optional (default=None)
The number of candidates in the unlabelled pool to be chosen for evaluation
at each iteration. For very large datasets, it might be useful to limit
the the number of candidates to a small number (like 300) since some
policies can take a long time to run. If not set, the whole unlabelled
pool will be used.
n_best_candidates : int, optional (default=1)
The number of candidates returned at each iteration for labelling. Batch-mode
active learning is where this parameter is greater than 1.
mu : float, optional (default=0)
The initial estimate of the mean of the distribution of the mean reward
from all arms.
sigma : float, optional (default=0.02)
The initial estimate of the variance of the distribution of reward from all arms.
"""
def __init__(self, pool, labels, classifier, arms, random_state=None,
n_candidates=None, n_best_candidates=1, mu=0, sigma=0.02):
super().__init__(pool, labels, classifier, arms, random_state,
n_candidates, n_best_candidates)
self.mu = np.full(self.n_arms, mu, dtype=np.float64)
self.sum_mu = np.full(self.n_arms, mu, dtype=np.float64)
self.sigma = sigma
self.mu_history = [self.mu.copy()]
def select(self):
""" Use the kl-UCB algorithm to choose the next candidates for labelling.
Returns
-------
best_candidates : array of ints
An array of indices of objects in the pool.
"""
self.time_step += 1
if self.time_step <= self.n_arms:
self.selected_arm = self.time_step - 1
return self._select_from_arm()
else:
max_kl = np.log(self.time_step) / self.T
ucb = self.mu + np.sqrt(2 * self.sigma * max_kl)
self.selected_arm =
|
np.argmax(ucb)
|
numpy.argmax
|
""" Generates the patterns (W and H matrices) from JNMF application.
"""
import sys
import os
import time
import numpy as np
import pandas as pd
import ccobra
import matplotlib.pyplot as plt
import seaborn as sns
import batchprocjnmf as nmf
def df_to_matrix(df):
""" Converts a CCOBRA dataset into matrix form.
"""
clean = df[["id", "task", "response"]]
usr = list(clean["id"].unique())
matrix = np.zeros((576, len(usr)))
for _, row in clean.iterrows():
usr_idx = usr.index(row["id"])
syl_item = ccobra.Item(usr_idx, "syllogistic", row["task"], "single-choice", "", 0)
syllog = ccobra.syllogistic.Syllogism(syl_item)
enc_resp = syllog.encode_response(row["response"].split(";"))
syl_idx = ccobra.syllogistic.SYLLOGISMS.index(syllog.encoded_task)
resp_idx = ccobra.syllogistic.RESPONSES.index(enc_resp)
comb_idx = syl_idx * 9 + resp_idx
if matrix[comb_idx, usr_idx] != 0:
print("Tried to write twice to field")
exit()
matrix[comb_idx, usr_idx] = 1
return matrix
def get_response_vector(pattern):
""" Obtain the response vector from a pattern.
"""
result = []
prediction_matrix = pattern.reshape(64, 9)
for i in range(64):
result.append(ccobra.syllogistic.RESPONSES[prediction_matrix[i].argmax()])
return result
def evaluate(data_df, model_pattern, factor=1.0):
""" Evaluate a model pattern on the data.
"""
model_pred = get_response_vector(model_pattern)
result = []
for subj, subj_df in data_df.groupby("id"):
for _, task in subj_df.iterrows():
task_list = [x.split(";") for x in task["task"].split("/")]
resp_list = task["response"].split(";")
task_enc = ccobra.syllogistic.encode_task(task_list)
resp_enc = ccobra.syllogistic.encode_response(resp_list, task_list)
pred = model_pred[ccobra.syllogistic.SYLLOGISMS.index(task_enc)]
hit = factor if (resp_enc == pred) else 0
result.append(hit)
return result
def get_model_performance(data1_df, data2_df, models):
""" Obtain model performances.
"""
result = []
# High group model
result.extend(evaluate(data1_df, models[0]))
result.extend(evaluate(data1_df, models[1], factor=0.25))
result.extend(evaluate(data2_df, models[1], factor=0.25))
result.extend(evaluate(data2_df, models[2]))
return np.mean(result)
def criterion(X1, X2, W_high, H_high, W_low, H_low, data1_df, data2_df, perf_weight=2):
""" Grid search performance optimization criterion.
"""
data1_pattern = W_high[:,1]
data2_pattern = W_low[:,1]
common_pattern = (W_high[:,0] + W_low[:,0]) / 2
models = [data1_pattern, common_pattern, data2_pattern]
# Total Model performance
model_perf = 1 - get_model_performance(data1_df, data2_df, models)
difference_error = np.sum(data1_pattern * data2_pattern)
common_error = 1 -
|
np.sum(W_high[:,0] * W_low[:,0])
|
numpy.sum
|
import unittest
import numpy as np
from bioslds.monitor import AttributeMonitor
from bioslds.cepstral import givens, add_lq_column, OnlineCepstralNorm
class TestGivens(unittest.TestCase):
def test_squared_c_plus_squared_s_is_one(self):
c, s = givens(1.3, -0.4)
self.assertAlmostEqual(c * c + s * s, 1)
def test_rotation_zeros_second_element(self):
abs_small = 0.3
abs_large = 1.7
for signs_str in ["++", "+-", "-+", "--"]:
for abs_order_str in ["ab", "ba"]:
with self.subTest(signs_str=signs_str, abs_order_str=abs_order_str):
if abs_order_str == "ab":
crt_abs_a, crt_abs_b = abs_small, abs_large
else:
crt_abs_a, crt_abs_b = abs_large, abs_small
crt_a = crt_abs_a if signs_str[0] == "+" else -crt_abs_a
crt_b = crt_abs_b if signs_str[1] == "+" else -crt_abs_b
crt_c, crt_s = givens(crt_a, crt_b)
crt_r = np.asarray([[crt_c, crt_s], [-crt_s, crt_c]])
crt_v = np.asarray([crt_a, crt_b])
crt_vp = crt_v @ crt_r
self.assertLess(np.abs(crt_vp[1]), 1e-10)
def test_rotation_sets_first_element_appropriately(self):
abs_small = 0.8
abs_large = 2.1
d = np.sqrt(abs_small ** 2 + abs_large ** 2)
for signs_str in ["++", "+-", "-+", "--"]:
for abs_order_str in ["ab", "ba"]:
with self.subTest(signs_str=signs_str, abs_order_str=abs_order_str):
if abs_order_str == "ab":
crt_abs_a, crt_abs_b = abs_small, abs_large
else:
crt_abs_a, crt_abs_b = abs_large, abs_small
crt_a = crt_abs_a if signs_str[0] == "+" else -crt_abs_a
crt_b = crt_abs_b if signs_str[1] == "+" else -crt_abs_b
crt_c, crt_s = givens(crt_a, crt_b)
crt_r = np.asarray([[crt_c, crt_s], [-crt_s, crt_c]])
crt_v = np.asarray([crt_a, crt_b])
crt_vp = crt_v @ crt_r
self.assertAlmostEqual(crt_vp[0], d)
def test_when_second_element_is_already_zero(self):
abs_a = 1.5
for sign in [-1, +1]:
with self.subTest(sign=sign):
crt_a = sign * abs_a
crt_c, crt_s = givens(crt_a, 0)
self.assertAlmostEqual(crt_c, sign)
self.assertLess(np.abs(crt_s), 1e-10)
def test_when_first_element_is_zero(self):
abs_b = 1.5
for sign in [-1, +1]:
with self.subTest(sign=sign):
crt_b = sign * abs_b
crt_c, crt_s = givens(0, crt_b)
self.assertAlmostEqual(crt_s, -sign)
self.assertLess(np.abs(crt_c), 1e-10)
def _get_lq_l(m: np.ndarray) -> np.ndarray:
""" Calculate L term from LQ decomposition, ensuring the diagonal is non-negative.
Parameters
----------
m
Matrix to process.
Returns the L term in the LQ decomposition, using the convention that all diagonal
elements are non-negative. This is the same as getting the Cholesky decomposition of
`m @ m.T`.
"""
ll = np.linalg.cholesky(m @ m.T)
return ll
class TestAddLQColumn(unittest.TestCase):
def test_add_to_square_matrix(self):
rng = np.random.default_rng(1)
n = 5
m = rng.normal(size=(n, n))
v = rng.normal(size=n)
ll = _get_lq_l(m)
m_alt = np.column_stack((m, v))
ll_alt = _get_lq_l(m_alt)
add_lq_column(ll, v)
np.testing.assert_allclose(ll, ll_alt)
def test_add_to_rectangular_matrix(self):
rng = np.random.default_rng(1)
shape = (4, 7)
m = rng.normal(size=shape)
v = rng.normal(size=shape[0])
ll = _get_lq_l(m)
m_alt = np.column_stack((m, v))
ll_alt = _get_lq_l(m_alt)
add_lq_column(ll, v)
np.testing.assert_allclose(ll, ll_alt)
def test_l_stays_diagonal(self):
rng = np.random.default_rng(2)
shape = (4, 5)
m = rng.normal(size=shape)
v = rng.normal(size=shape[0])
ll = _get_lq_l(m)
add_lq_column(ll, v)
ll_triu =
|
np.triu(ll, k=1)
|
numpy.triu
|
"""
an implementation of quasi-discrete Hankel transform.
Reference: <NAME>, et.al, Quais-discrete Hankel transform, Optics Letters, 23, 409, 1998.
@author: <NAME>
@date: 2021/12/19
"""
import numpy as np
from scipy.special import jn_zeros, j0, j1, jv
class Hankel_qDHT:
"""
an implementation of quasi-discrete Hankel transform.
R1 = R2 == R is assumed.
Reference: Li Yu, et.al, Quais-discrete Hankel transform, Optics Letters, 23, 409, 1998.
"""
def __init__(self, N=1000):
"""
N: total nodes of used Bessel 0-order function.
generate:
.r: 1D-array, the corresponding radius axis data.
._S: _S = j_(N), the (N+1)-th zero point of J_0(x) function.
.js: the N+1 positive zeros of J_0(x) function.
.j1_inv: the function value of 1/(J_1(x)) for x at each .js.
.R: = (._S/2/pi)**0.5, the
.r = [j_1, j_2, j_3, ..., j_N ] /(2pi*R)
.C: 2D matrix, symmetric, C_ij = 2/S*J_0(j_i*j_j/S) / |J_1(j_i)| / |J_1(J_j)|
.F_multiplier" 1D array, F_multiplier_i = R/|J_1(j_i)|
"""
self.N = N
self.js = jn_zeros(0, self.N + 1)
self._S = self.js[-1] # j_(N+1)
self.R = (self._S/2.0/np.pi)**0.5 # r's cutoff position.
self.r = self.js[0:-1] / (2.0*np.pi*self.R) # the r axis for the field.
self.j1_inv = 1.0 / np.abs(j1(self.js))
self.F_multiplier = self.j1_inv[0:-1]*self.R
self.F_multiplier_inv = 1.0 / self.F_multiplier
# 1 / |J_1(n)*J_1(m)|:
#J1_inv_mesh_x, J1_inv_mesh_y = np.meshgrid(self.j1_inv[0:-1]*(2.0/self._S), self.j1_inv[0:-1])
#self.Cmn = J1_inv_mesh_x * J1_inv_mesh_y * j0(np.outer(self.js[0:-1], self.js[0:-1]/self._S) ) #
self.Cmn = np.outer(self.j1_inv[0:-1]*(2.0/self._S), self.j1_inv[0:-1]) * j0(np.outer(self.js[0:-1], self.js[0:-1]/self._S) ) #
def transform(self, f1):
r"""
perform 0-order Hankel transform. f2(r2) = 2pi*\int_{0}^{+\infty} f1(r1)*J_0(2pi*r1*r2)*r1*dr1
return f2
Note: f1 must defined in self.r axis.
len(f1)==len(self.r)==self.N, and each r-point corresponding to J_0(x)'s zeros jn by:
rn = jn/(2*pi*R), wher 2*pi*R^2==j_(N+1)~(N+1)*pi
==> R ~ \sqrt((N+1)/2)
f1: 1D-array, float or complex valued. length == self.N, defined on self.r axis.
"""
if len(f1) != self.N:
print("invalid f1")
return None
f2 = self.F_multiplier_inv * np.matmul(self.Cmn, self.F_multiplier*f1) # transform,
return f2
# test and compare:
if __name__ == "__main__":
import matplotlib.pyplot as plt
import time
def timming(func, total=100, name=""):
start = time.time()
for _ in range(total):
func()
end = time.time()
print(name, "total time [sec]: ", end-start)
#
N = 4096
# qDHT:
ht = Hankel_qDHT(N)
r = ht.r
# sum as integral:
rho =
|
np.linspace(0., ht.R, N)
|
numpy.linspace
|
#!/usr/bin/env python
import numpy as np
from seb.plot import Plot3D, Plot, Container, Animation
def grad_descent(x, y, dfnx, dfny, alpha=0.2, length=50):
trace = [(x, y)]
for _ in range(length):
x = x - alpha * dfnx(x)
y = y - alpha * dfny(y)
trace.append((x, y))
return np.array(trace), (x, y)
if __name__ == '__main__':
point_considered = -36
x_init = -1.9
y_init = -1
x = np.linspace(-7, 7, 50)
# 3D example
fn = lambda x, y: -np.sin(x / 2.0) + y**2
dfnx = lambda x: -0.5 * np.cos(x/2.0)
dfny = lambda y: 2*y
fig3d = Plot3D()
fig3d.surface(x,
|
np.cos(x + 0.5)
|
numpy.cos
|
# -*- coding: utf-8 -*-
"""
biosppy.signals.ppg
-------------------
This module provides methods to process Photoplethysmogram (PPG) signals.
:copyright: (c) 2015-2018 by Instituto de Telecomunicacoes
:license: BSD 3-clause, see LICENSE for more details.
"""
# Imports
# compat
from __future__ import absolute_import, division, print_function
from six.moves import range
# 3rd party
import numpy as np
import scipy.signal as ss
# local
from . import tools as st
from .. import plotting, utils
def ppg(signal=None, sampling_rate=1000., show=True):
"""Process a raw PPG signal and extract relevant signal features using
default parameters.
Parameters
----------
signal : array
Raw PPG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
show : bool, optional
If True, show a summary plot.
Returns
-------
ts : array
Signal time axis reference (seconds).
filtered : array
Filtered PPG signal.
onsets : array
Indices of PPG pulse onsets.
heart_rate_ts : array
Heart rate time axis reference (seconds).
heart_rate : array
Instantaneous heart rate (bpm).
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# ensure numpy
signal = np.array(signal)
sampling_rate = float(sampling_rate)
# filter signal
filtered, _, _ = st.filter_signal(signal=signal,
ftype='butter',
band='bandpass',
order=4,
frequency=[1, 8],
sampling_rate=sampling_rate)
# find onsets
onsets, _ = find_onsets_elgendi2013(signal=filtered, sampling_rate=sampling_rate)
# compute heart rate
hr_idx, hr = st.get_heart_rate(beats=onsets,
sampling_rate=sampling_rate,
smooth=True,
size=3)
# get time vectors
length = len(signal)
T = (length - 1) / sampling_rate
ts =
|
np.linspace(0, T, length, endpoint=False)
|
numpy.linspace
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing interface with SciKit-Learn clustering
Created on Feb 13, 2015
@author: senrs
TODO:
For Clustering:
1) paralleization: n_jobs parameter to some of the algorithms
"""
#for future compatibility with Python 3-----------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3-------------------------------------------
#External Modules---------------------------------------------------------------
import scipy.cluster as hier
import numpy as np
import abc
import ast
import copy
import platform
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from .utils import utils
from .utils import mathUtils
from .BaseClasses import MessageUser
from .EntityFactoryBase import EntityFactory
#Internal Modules End-----------------------------------------------------------
# FIXME: temporarily force to use Agg backend for now, otherwise it will cause segmental fault for test:
# test_dataMiningHierarchical.xml in tests/framework/PostProcessors/DataMiningPostProcessor/Clustering
# For the record, when using dendrogram, we have to force matplotlib.use('Agg')
# In the future, I think all the plots should moved to OutStreamPlots -- wangc
#display = utils.displayAvailable()
#if not display:
# matplotlib.use('Agg')
if utils.displayAvailable() and platform.system() != 'Windows':
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pylab as plt
class unSupervisedLearning(utils.metaclass_insert(abc.ABCMeta), MessageUser):
"""
This is the general interface to any unSuperisedLearning learning method.
Essentially it contains a train, and evaluate methods
"""
returnType = '' ## this describe the type of information generated the
## possibility are 'boolean', 'integer', 'float'
modelType = '' ## the broad class of the interpolator
@staticmethod
def checkArrayConsistency(arrayIn):
"""
This method checks the consistency of the in-array
@ In, arrayIn, a 1D numpy array, the array to validate
@ Out, (consistent, errorMsg), tuple,
consistent is a boolean where false means the input array is not a
1D numpy array.
errorMsg, string, the error message if the input array is inconsistent.
"""
if type(arrayIn) != np.ndarray:
return (False, ' The object is not a numpy array')
## The input data matrix kind is different for different clustering
## algorithms, e.g.:
## [n_samples, n_features] for MeanShift and KMeans
## [n_samples,n_samples] for AffinityPropogation and SpectralCLustering
## In other words, MeanShift and KMeans work with points in a vector space,
## whereas AffinityPropagation and SpectralClustering can work with
## arbitrary objects, as long as a similarity measure exists for such
## objects. The input matrix supplied to unSupervisedLearning models as 1-D
## arrays of size [n_samples], (either n_features of or n_samples of them)
if len(arrayIn.shape) != 1:
return(False, ' The array must be 1-d')
return (True, '')
def __init__(self, **kwargs):
"""
constructor for unSupervisedLearning class.
@ In, kwargs, dict, arguments for the unsupervised learning algorithm
"""
super().__init__()
self.printTag = 'unSupervised'
## booleanFlag that controls the normalization procedure. If true, the
## normalization is performed. Default = True
if kwargs != None:
self.initOptionDict = kwargs
else:
self.initOptionDict = {}
## Labels are passed, if known a priori (optional), they used in quality
## estimate
if 'Labels' in self.initOptionDict.keys():
self.labelFeature = self.initOptionDict['Labels']
self.initOptionDict.pop('Labels')
else:
self.labelFeature = None
if 'Features' in self.initOptionDict.keys():
self.features = self.initOptionDict['Features'].split(',')
self.initOptionDict.pop('Features')
else:
self.features = None
if 'verbosity' in self.initOptionDict:
self.verbosity = self.initOptionDict['verbosity']
self.initOptionDict.pop('verbosity')
else:
self.verbosity = None
# average value and sigma are used for normalization of the feature data
# a dictionary where for each feature a tuple (average value, sigma)
self.muAndSigmaFeatures = {}
#these need to be declared in the child classes!!!!
self.amITrained = False
## The normalized training data
self.normValues = None
def updateFeatures(self, features):
"""
Change the Features that this classifier targets. If this ROM is trained already, raises an error.
@ In, features, list(str), list of new features
@ Out, None
"""
self.raiseAWarning('Features for learning engine type "{}" have been reset, so ROM is untrained!'.format(self.printTag))
self.amITrained = False
self.features = features
def train(self, tdict, metric=None):
"""
Method to perform the training of the unSuperVisedLearning algorithm
NB. The unSuperVisedLearning object is committed to convert the dictionary
that is passed (in), into the local format the interface with the kernels
requires. So far the base class will do the translation into numpy.
@ In, tdict, dict, training dictionary
@ Out, None
"""
self.metric = metric
if not isinstance(tdict, dict):
self.raiseAnError(IOError, ' method "train". The training set needs to be provided through a dictionary. Type of the in-object is ' + str(type(tdict)))
featureCount = len(self.features)
if not isinstance(tdict[utils.first(tdict.keys())],dict):
realizationCount = utils.first(tdict.values()).size
############################################################################
## Error-handling
## Do all of our error handling upfront to make the meat of the code more
## readable:
## Check if the user requested something that is not available
unidentifiedFeatures = set(self.features) - set(tdict.keys())
if len(unidentifiedFeatures) > 0:
## Me write English good!
if len(unidentifiedFeatures) == 1:
msg = 'The requested feature: %s does not exist in the training set.' % list(unidentifiedFeatures)[0]
else:
msg = 'The requested features: %s do not exist in the training set.' % str(list(unidentifiedFeatures))
self.raiseAnError(IOError, msg)
## Check that all of the values have the same length
if not isinstance(utils.first(tdict.values()), dict):
for name, val in tdict.items():
if name in self.features and realizationCount != val.size:
self.raiseAnError(IOError, ' In training set, the number of realizations are inconsistent among the requested features.')
## Check if a label feature is provided by the user and in the training data
if self.labelFeature in tdict:
self.labelValues = tidct[self.labelFeature]
resp = self.checkArrayConsistency(self.labelValues)
if not resp[0]:
self.raiseAnError(IOError, 'In training set for ground truth labels ' + self.labelFeature + ':' + resp[1])
else:
self.raiseAWarning(' The ground truth labels are not known a priori')
self.labelValues = None
## Not sure when this would ever happen, but check that the data you are
## given is a 1D array?
# for name,val in tdict.items():
# if name in self.features:
# resp = self.checkArrayConsistency(val)
# if not resp[0]:
# self.raiseAnError(IOError, ' In training set for feature ' + name + ':' + resp[1])
## End Error-handling
############################################################################
if metric is None:
self.normValues = np.zeros(shape = (realizationCount, featureCount))
for cnt, feat in enumerate(self.features):
featureValues = tdict[feat]
(mu,sigma) = mathUtils.normalizationFactors(featureValues)
## Store the normalized training data, and the normalization factors for
## later use
self.normValues[:, cnt] = (featureValues - mu) / sigma
self.muAndSigmaFeatures[feat] = (mu,sigma)
else:
# metric != None
## The dictionary represents a HistorySet
if isinstance(utils.first(tdict.values()),dict):
## normalize data
## But why this way? This should be one of the options, this looks like
## a form of shape matching, however what if I don't want similar
## shapes, I want similar valued curves in space? sigma and mu should
## not be forced to be computed within a curve.
tdictNorm={}
for key in tdict:
tdictNorm[key]={}
for var in tdict[key]:
(mu,sigma) = mathUtils.normalizationFactors(tdict[key][var])
tdictNorm[key][var] = (tdict[key][var]-mu)/sigma
cardinality = len(tdictNorm.keys())
self.normValues = np.zeros((cardinality,cardinality))
keys = list(tdictNorm.keys())
for i in range(cardinality):
for j in range(i,cardinality):
# process the input data for the metric, numpy.array is required
assert(list(tdictNorm[keys[i]].keys()) == list(tdictNorm[keys[j]].keys()))
numParamsI = len(tdictNorm[keys[i]].keys())
numStepsI = len(utils.first(tdictNorm[keys[i]].values()))
numStepsJ = len(utils.first(tdictNorm[keys[j]].values()))
inputI =
|
np.empty((numParamsI, numStepsI))
|
numpy.empty
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
import os
import pickle
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
np.set_printoptions(precision=4, suppress=True)
# -
# load annotations of this set
data_type = os.path.expanduser("~/dataset/RHD_published_v2/training")
with open(os.path.join(data_type, 'anno_training.pickle'), 'rb') as f:
anno_all = pickle.load(f)
sample_id = 128
anno = anno_all[sample_id]
file_format = "{:05d}.png".format(sample_id)
img_file = os.path.join(data_type, "color", file_format)
mask_file = os.path.join(data_type, "mask", file_format)
depth_file = os.path.join(data_type, "depth", file_format)
# auxiliary function
def depth_two_uint8_to_float(top_bits, bottom_bits):
""" Converts a RGB-coded depth into float valued depth. """
depth_map = (top_bits * 2**8 + bottom_bits).astype('float32')
depth_map /= float(2**16 - 1)
depth_map *= 5.0
return depth_map
import cv2
depth = cv2.imread(depth_file)
# process rgb coded depth into float: top bits are stored in red, bottom in green channel
# depth in meters from the camera
depth = depth_two_uint8_to_float(depth[:, :, 0], depth[:, :, 1])
# get info from annotation dictionary
# u, v coordinates of 42 hand keypoints, pixel
kp_coord_uv = anno['uv_vis'][:, :2]
# visibility of the keypoints, boolean
kp_visible = (anno['uv_vis'][:, 2] == 1)
kp_coord_xyz = anno['xyz'] # x, y, z coordinates of the keypoints, in meters
camera_intrinsic_matrix = anno['K'] # matrix containing intrinsic parameters
# Project world coordinates into the camera frame
kp_coord_uv_proj = np.matmul(
kp_coord_xyz, np.transpose(camera_intrinsic_matrix))
kp_coord_uv_proj = kp_coord_uv_proj[:, :2] / kp_coord_uv_proj[:, 2:]
from chainercv.utils import read_image
from chainercv.visualizations import vis_image
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(111)
vis_image(read_image(img_file), ax=ax1)
ax1.plot(kp_coord_uv[kp_visible, 0], kp_coord_uv[kp_visible, 1], 'ro')
ax1.plot(kp_coord_uv_proj[kp_visible, 0],
kp_coord_uv_proj[kp_visible, 1], 'gx')
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(111)
ax1.imshow(depth)
ax1.plot(kp_coord_uv[kp_visible, 0], kp_coord_uv[kp_visible, 1], 'ro')
ax1.plot(kp_coord_uv_proj[kp_visible, 0],
kp_coord_uv_proj[kp_visible, 1], 'gx')
vis_image(255*read_image(mask_file, dtype=np.uint8))
np.unique(cv2.imread(mask_file))
# +
# %matplotlib notebook
fig = plt.figure()
ax1 = fig.add_subplot(111, projection="3d")
ax1.scatter(kp_coord_xyz[kp_visible, 0],
kp_coord_xyz[kp_visible, 1], kp_coord_xyz[kp_visible, 2])
# aligns the 3d coord with the camera view
ax1.view_init(azim=-90.0, elev=-90.0)
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_zlabel('z')
# +
import itertools
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
"""
Keypoints available:
0: left wrist, 1-4: left thumb [tip to palm], 5-8: left index, ..., 17-20: left pinky,
21: right wrist, 22-25: right thumb, ..., 38-41: right pinky
"""
KEYPOINT_NAMES = [
"wrist",
"thumb_tip",
"thumb_dip",
"thumb_pip",
"thumb_mcp",
"index_tip",
"index_dip",
"index_pip",
"index_mcp",
"middle_tip",
"middle_dip",
"middle_pip",
"middle_mcp",
"ring_tip",
"ring_dip",
"ring_pip",
"ring_mcp",
"little_tip",
"little_dip",
"little_pip",
"little_mcp",
]
ONESIDE_KEYPOINT_NAMES = []
for k in ["wrist", "thumb", "index", "middle", "ring", "little"]:
if k == "wrist":
joint_name = "_".join([k])
ONESIDE_KEYPOINT_NAMES.append(joint_name)
else:
for p in ["tip", "dip", "pip", "mcp"]:
joint_name = "_".join([k, p])
ONESIDE_KEYPOINT_NAMES.append(joint_name)
assert KEYPOINT_NAMES == ONESIDE_KEYPOINT_NAMES
# (R,G,B)
BASE_COLOR = {
"index": (0, 255, 0),
"middle": (0, 0, 255),
"ring": (255, 0, 255),
"little": (255, 255, 0),
"thumb": (255, 0, 0),
"wrist": (50, 50, 50),
}
# convert tuple to numpy array
BASE_COLOR = {k: np.array(v) for k, v in BASE_COLOR.items()}
COLOR_MAP = {"wrist": BASE_COLOR["wrist"]}
EDGES_BY_NAME = []
for f in ["index", "middle", "ring", "little", "thumb"]:
for p, q in pairwise(["wrist", "mcp", "pip", "dip", "tip"]):
color = BASE_COLOR[f]
if p != "wrist":
p = "_".join([f, p])
q = "_".join([f, q])
COLOR_MAP[p, q] = color
COLOR_MAP[q] = color
EDGES_BY_NAME.append([p, q])
EDGES = [[KEYPOINT_NAMES.index(s), KEYPOINT_NAMES.index(t)]
for s, t in EDGES_BY_NAME]
for s, t in EDGES_BY_NAME:
i_s = KEYPOINT_NAMES.index(s)
i_t = KEYPOINT_NAMES.index(t)
COLOR_MAP[i_s, i_t] = COLOR_MAP[s, t]
COLOR_MAP[KEYPOINT_NAMES.index(s)] = COLOR_MAP[s]
COLOR_MAP[KEYPOINT_NAMES.index(t)] = COLOR_MAP[t]
# +
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib notebook
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
kp_xyz = anno_all[sample_id]["xyz"]
left_joints = kp_xyz[:21]
right_joints = kp_xyz[21:]
# aligns the 3d coord with the camera view
ax.view_init(azim=-90.0, elev=-90.0)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
for joints in [left_joints, right_joints]:
xs = joints[:, 0]
ys = joints[:, 1]
zs = joints[:, 2]
color = [COLOR_MAP[k]/255. for k in KEYPOINT_NAMES]
ax.scatter(xs, ys, zs, color=color)
for s, t in EDGES:
sx, sy, sz = xs[s], ys[s], zs[s]
tx, ty, tz = xs[t], ys[t], zs[t]
color = COLOR_MAP[s, t]/255.
ax.plot([sx, tx], [sy, ty], [sz, tz], color=color)
# -
# # Rotate hand to create Canonical Representation
# We will treat left_joints only
kp_xyz = anno_all[sample_id]["xyz"]
hand_side = "left"
ROOT_JOINT = "wrist"
root_idx = ONESIDE_KEYPOINT_NAMES.index(ROOT_JOINT)
REFERENCE_MCP = "middle_mcp"
REFERENCE_PIP = "middle_pip"
ref_mcp_idx = ONESIDE_KEYPOINT_NAMES.index(REFERENCE_MCP)
ref_pip_idx = ONESIDE_KEYPOINT_NAMES.index(REFERENCE_PIP)
PINKY = "little_mcp"
little_mcp_idx = ONESIDE_KEYPOINT_NAMES.index(PINKY)
print(little_mcp_idx)
HAND_SIDE = "left"
if HAND_SIDE == "right":
offset = len(KEYPOINT_NAMES)
root_idx += offset
ref_mcp_idx += offset
ref_pip_idx += offset
little_mcp_idx += offset
def get_oneside_hand(kp_xyz, hand_side):
hand_side = "left"
if hand_side == "left":
joints = kp_xyz[:21]
if hand_side == "right":
joints = kp_xyz[21:]
return joints
# ## normalize
def normalize_joints(joints):
ref_length = np.linalg.norm(joints[ref_mcp_idx]-joints[ref_pip_idx])
joints = (joints-joints[root_idx])/ref_length
return joints
# # calc angle[rad] of rotation around z axis
#
# ```
# Y
# A
# | /
# y|----/
# |θ /|
# |__/ |
# | / |
# |/---x-----> X
# ```
#
def rot_z(theta):
return np.array([
[math.cos(theta), -np.sin(theta), 0],
[np.sin(theta), math.cos(theta), 0],
[0, 0, 1],
])
# # calc angle[rad] of rotation around x axis
#
# ```
# Z
# A
# |z_____/
# | / |
# | / |
# | / |
# | /|θ |
# |/-|---y--> Y
# ```
def rot_x(theta):
return np.array([
[1, 0, 0],
[0, math.cos(theta), -math.sin(theta)],
[0, math.sin(theta), math.cos(theta)],
])
# # calc angle[rad] of rotation around y axis
#
# ```
# Z
# A
# ------|z
# | \ |
# | \ |
# | \ |
# | θ| \|
# <---|x--|-◎-
# ```
def rot_y(theta):
return np.array([
[math.cos(-theta), 0, -math.sin(-theta)],
[0, 1, 0],
[math.sin(-theta), 0, math.cos(-theta)],
])
def canonicalize_joints(joints):
joints = normalize_joints(joints.copy())
rot = np.eye(3)
# rotate around z
mcp_joint_x, mcp_joint_y, _ = joints[ref_mcp_idx]
theta = math.atan2(mcp_joint_x, mcp_joint_y)
joints = joints @ rot_z(theta).transpose()
rot = rot_z(theta) @ rot
# rotate around x
_, mcp_joint_y, mcp_joint_z = joints[ref_mcp_idx]
theta = math.atan2(mcp_joint_z, mcp_joint_y)
# note that specify `-theta`. NOT `theta`
joints = joints @ rot_x(-theta).transpose()
rot = rot_x(-theta) @ rot
# rotate around y
mcp_joint_x, _, mcp_joint_z = joints[little_mcp_idx]
theta = math.atan2(mcp_joint_z, -mcp_joint_x)
# note that specify `-theta`. NOT `theta`
joints = joints @ rot_y(-theta).transpose()
rot = rot_y(-theta) @ rot
mcp_joint_x, _, mcp_joint_z = joints[little_mcp_idx]
return joints, rot
# # Let's Visualize
# +
from collections.abc import Iterable
# %matplotlib inline
def label_xyz(axis_object):
if not isinstance(axis_object, Iterable):
axes = [axis_object]
else:
axes = axis_object
for ax in axis_object:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
def visualize_canonical_representation(joints):
fig = plt.figure(figsize=(8, 4))
ax1 = fig.add_subplot(131, projection="3d")
ax2 = fig.add_subplot(132, projection="3d")
ax3 = fig.add_subplot(133, projection="3d")
canonical_joints, rot = canonicalize_joints(joints.copy())
normal_joints = normalize_joints(joints.copy())
ax1.scatter(*canonical_joints.transpose())
ax2.scatter(*joints.transpose())
ax3.scatter(*normal_joints.transpose())
# pullback canonical joints to normalized_joints
pullback_joints = [email protected](rot.transpose())
ax3.scatter(*pullback_joints.transpose(), alpha=0.5)
for s, t in EDGES:
color = COLOR_MAP[s, t]/255.
ax1.plot(*canonical_joints[[s, t]].transpose(), color=color)
ax2.plot(*joints[[s, t]].transpose(), color=color)
ax3.plot(*normal_joints[[s, t]].transpose(), color=color)
ax3.plot(*pullback_joints[[s, t]].transpose(), alpha=0.5)
ax1.view_init(0, -90)
ax2.view_init(-90, -90)
ax3.view_init(-90, -90)
ax1.set_title("canonical")
ax2.set_title("original")
ax3.set_title("nomalized")
label_xyz([ax1, ax2, ax3])
joints = get_oneside_hand(kp_xyz, hand_side=HAND_SIDE)
visualize_canonical_representation(joints)
# -
import cv2
mat, _ = cv2.Rodrigues(np.array([1., 2., 3.]))
mat.shape
def extract_euler_angles(mat):
"""
This algorith is aken from
Extracting Euler Angles from a Rotation Matrix
<NAME>, Insomniac Games
<EMAIL>
https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2012/07/euler-angles1.pdf
The authors follow the notational conventions of Shoemake’s “Euler Angle Conversion”, Graphics Gems IV, pp.
222-9, with the exception that their vectors are row vectors instead of column vectors. Thus, all their
matrices are transposed relative to Shoemake’s, and a sequence of rotations will be written from left to
right.
"""
[[m00, m01, m02],
[m10, m11, m12],
[m20, m21, m22]] = mat
theta_x = math.atan2(m12, m22)
c2 = math.sqrt(m00**2+m01**2)
theta_y = math.atan2(-m02, c2)
s1 = math.sin(theta_x)
c1 = math.cos(theta_x)
theta_z = math.atan2(s1*m20-c1*m10, c1*m11-s1*m21)
"""
multiply minus one for each theta_*. this is equivalent to RE-consider vector is column instead of rows
i.e. back to common world that says vector is column.
"""
return -theta_x, -theta_y, -theta_z
# +
# Test code
mat = rot_x(math.pi/3)
expected = mat.copy()
radians = extract_euler_angles(mat)
mat, _ = cv2.Rodrigues(np.asarray(radians))
assert np.allclose(mat, expected)
mat = rot_y(math.pi/3)
expected = mat.copy()
radians = extract_euler_angles(mat)
mat, _ = cv2.Rodrigues(np.asarray(radians))
assert np.allclose(mat, expected)
mat = rot_z(math.pi/3)
expected = mat.copy()
radians = extract_euler_angles(mat)
mat, _ = cv2.Rodrigues(np.asarray(radians))
assert np.allclose(mat, expected)
mat = rot_z(-math.pi/6) @ rot_z(2*math.pi/3) @ rot_z(-math.pi/3)
expected = mat.copy()
radians = extract_euler_angles(mat)
mat, _ = cv2.Rodrigues(
|
np.asarray(radians)
|
numpy.asarray
|
import copy
import glob
import os
import cv2
import numpy as np
import scipy.io as sio
from PIL import Image
## This script convert oct layer annotation to pixelwise labels to train pixelwise semantic segmentation
def mkdir(path, *args):
"""
Gives a root path and and subpath, makes directory all the way from root to subpaths if they do not exist
:param path: root path
:param args:
:return:
"""
if(not os.path.exists(path)):
os.mkdir((path))
new_path = path
for dir in args:
new_path = os.path.join(new_path, dir)
# print (new_path)
if (not os.path.exists(new_path)):
os.mkdir(new_path)
return new_path
def annonate2(image, layer1, layer2):
"""
Converts layer points to mask
:param image:
:param layer1:
:param layer2:
:return:
"""
mask_filled = convert2mask(image, [layer1], [layer2])
mask_filled = np.asarray(mask_filled, dtype=np.uint8)
return image, mask_filled
def convert2mask(image, layer1_pts, layer2_pts):
contour = []
contour.extend(layer1_pts)
layer2_pts_cp = copy.deepcopy(layer2_pts)
layer2_pts_cp[0].reverse()
contour.extend(layer2_pts_cp)
mask_new = np.zeros(np.shape(image))
# (N,2)
contour_arr = np.vstack(contour)
# (N,1,2)
contour_arr = np.expand_dims(contour_arr, 1)
cnt_list = []
cnt_list.append(contour_arr)
# print 'cnt_list', np.shape(cnt_list)
contour_arr = contour_arr.astype(np.int32)
# print contour_arr
cv2.drawContours(mask_new, [contour_arr], -1, 255, -1) # 3=-1
return mask_new
def layers2mask(image, layers):
masks_all = []
for i in range(len(layers) - 1):
_, maski = annonate2(image, layers[i], layers[i + 1])
masks_all.append(maski)
_, maskbg_inv = annonate2(image, layers[0], layers[len(layers) - 1])
mask_bg = 255 - maskbg_inv
masks_all.append(mask_bg)
# labelmap= np.argmax(mask_all, axis = 2)
return masks_all
class OCTVolReader:
def __init__(self, path):
self.path = path
self.file_names = []
self.load()
def load(self):
print (self.path + '/*.mat')
self.file_names = glob.glob(self.path + '/*.mat')
def get_data(self):
for f in self.file_names:
data = sio.loadmat(f)
yield [data.get('images'), data.get('layerMaps'), os.path.basename(f)]
def get_pts(lm):
layers = []
x = np.asarray(list(range(lm.shape[0])))
for i in range(lm.shape[1]):
bdr = lm[:, i]
bdr_f = bdr[~
|
np.isnan(bdr)
|
numpy.isnan
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pdb, math
import kplr
import fnmatch
from astropy.convolution import convolve, Gaussian1DKernel, Box1DKernel
from astropy.stats import LombScargle
from scipy.signal import savgol_filter as savgol
from astropy.io import fits
import glob, re
from astropy.io import ascii
from astropy.stats import mad_std
# subroutine to perform rough sigma clipping
# Get Kps for all stars:
whitenoise=np.loadtxt('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/whitenoisevalues.txt',skiprows=1,delimiter=',')
kpfile ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/KIC_Kepmag_Berger2018.csv'
df =pd.read_csv(kpfile,usecols=['KIC','kic_kepmag'])
kp_kics =list(df['KIC'])
kps =list(df['kic_kepmag'])
def sigclip(x,y,subs,sig):
keep = np.zeros_like(x)
start=0
end=subs
nsubs=int((len(x)/subs)+1)
for i in range(0,nsubs):
me=np.mean(y[start:end])
sd=np.std(y[start:end])
good=np.where((y[start:end] > me-sig*sd) & (y[start:end] < me+sig*sd))[0]
keep[start:end][good]=1
start=start+subs
end=end+subs
return keep
##plt.ion()
##plt.clf()
def getclosest(num,collection):
'''Given a number and a list, get closest number in the list to number given.'''
return min(collection,key=lambda x:abs(x-num))
def getkp(file):
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
kp=kps[kp_kics.index(kic)]
if kp in whitenoise[:,0]:
idx=np.where(whitenoise[:,0]==kp)[0]
closestkp=whitenoise[idx,0][0]
wnoise=whitenoise[idx,1][0]
#print(closestkp,wnoise)
else:
closestkp=getclosest(kp,whitenoise[:,0])
idx=np.where(whitenoise[:,0]==closestkp)[0]
wnoise=whitenoise[idx,1][0]
#print(closestkp,wnoise)
return wnoise
# main program starts here
if __name__ == '__main__':
d='/Users/maryumsayeed/Desktop/pande/pande_lcs/'
# d='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample/'
# d='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/cannon_vs_LLR/cannon_no_wnoise/'
# d='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample_cannon/'
# d='/Users/maryumsayeed/Downloads/pande_remaining/'
# d='/Users/maryumsayeed/Downloads/pande_sc/'
files=glob.glob(d+'*.fits')
# files=files[0:10]
print('# of files',len(files))
kics=np.zeros(len(files),dtype='int')
rads=np.zeros(len(files))
teffs=np.zeros(len(files))
lums=np.zeros(len(files))
gaia=ascii.read('DR2PapTable1.txt',delimiter='&')
kepler_catalogue=pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv')#,skiprows=1,delimiter=',',usecols=[0,1])
data=ascii.read('smoothing_relation/width_vs_radius_test1.txt',delimiter= ' ')
fit_radii,fit_width=np.array(data['Radii']),np.array(data['Width'])
# check=ascii.read('/Users/maryumsayeed/LLR_updates/Aug17/stars_with_high_snr.txt',names=['KIC'])
# check=np.array(check).astype(int)
# check=[11767251, 3539778, 10160035, 11507559, 11876278, 3862456, 8960478, 8453152, 7522019]
# check=[9071948, 4648211, 7465477, 1572317, 3954440, 8417232, 8359236, 11234317, 7622294, 5445912, 4473030, 8645063, 8144907, 9418101, 3842398, 12110908, 6960601, 6525349, 10199289, 10000279, 9697262, 7384966, 2436676, 4168909, 4633774, 6612496]
more=np.zeros(len(files))
nstars_below_duty_cycle=0
kics_below_duty_cycle=[]
stars_less_than_89_days=[]
days=[]
fbins=[] #3535
for i in range(0,len(files)):
f=files[i]
kicid=int(files[i].split('/')[-1].split('-')[0].split('kplr')[-1].lstrip('0'))
if kicid >0:
data=fits.open(files[i])
head=data[0].data
dat=data[1].data
time=dat['TIME']
qual=dat['SAP_QUALITY']
flux=dat['PDCSAP_FLUX']
um=np.where(gaia['KIC'] == kicid)[0]
if (len(um) == 0.):
continue
kics[i]=kicid
try:
row=kepler_catalogue.loc[kepler_catalogue['KIC']==kicid]
teff =row['iso_teff'].item()
rad =row['iso_rad'].item()
except:
idx=np.where(gaia['KIC']==kicid)[0]
teff=gaia['teff'][idx][0]
rad=gaia['rad'][idx][0]
if math.isnan(rad) is True:
idx=np.where(gaia['KIC']==kicid)[0]
teff=gaia['teff'][idx][0]
rad=gaia['rad'][idx][0]
rads[i]=rad
teffs[i]=teff
lums[i]=rad**2.*(teff/5777.)**4.
if (teffs[i] == 0):
continue
# if (rads[i] <50.):
# continue
# only keep data with good quality flags
good=np.where(qual == 0)[0]
time=time[good]
flux=flux[good]
# plot the light curve
# #plt.ion()
# #plt.clf()
#plt.figure(figsize=(9,10))
#plt.subplot(3,1,1)
#plt.plot(time,flux)
#plt.xlabel('Time (Days)')
#plt.ylabel('Flux (counts)')
# sigma-clip outliers from the light curve and overplot it
res=sigclip(time,flux,50,3)
good=np.where(res == 1)[0]
time=time[good]
flux=flux[good]
if len(time)==0:
continue
# Check Duty Cycle:
ndays=time[-1]-time[0]
nmins=ndays*24.*60.
expected_points=nmins/30.
observed_points=len(time)
if observed_points < expected_points*0.5:
nstars_below_duty_cycle+=1
kics_below_duty_cycle.append(kicid)
print(kicid,'below')
continue
# UNCOMMENT for long-cadence data!
if time[-1]-time[0] < 89.: # remove photometry below 89 days from the sample
stars_less_than_89_days.append(kicid)
continue
# === IF CHANGING LENGTH OF LIGHT CURVE, LOOK @ NEXT 5 LINES:
# Use first cut of sample:
baseline=65.
baseline_time=time[0]+baseline
cut=np.where(time<=baseline_time)[0] #take all times below baseline time
time=time[cut]
flux=flux[cut]
# === END
#plt.plot(time,flux)
## if rads[i] <= 36.8.: #from width_vs_radius_test2.txt
closestrad=getclosest(rads[i],fit_radii)
idx =np.where(fit_radii==closestrad)[0]
best_fit_width=fit_width[idx][0]
width =best_fit_width
print(i,kicid,width)
boxsize =int(width/(30./60./24.))
box_kernel = Box1DKernel(boxsize)
if boxsize % 2 == 0:
smoothed_flux = savgol(flux,int(boxsize)-1,1,mode='mirror')
else:
smoothed_flux = savgol(flux,int(boxsize),1,mode='mirror')
flux =flux/smoothed_flux
# overplot this smoothed version, and then divide the light curve through it
#plt.plot(time,smoothed_flux)
#plt.axvspan(time[1000],time[1000+int(boxsize)],color='g',zorder=1,alpha=0.2)
#flux=flux+np.random.randn(len(flux))*0.01
# plot the filtered light curve
#plt.subplot(3,1,2)
#plt.plot(time,flux)
#plt.xlabel('Time (Days)')
#plt.ylabel('Relative flux')
# Remove data points > 5*sigma:
std =mad_std(flux,ignore_nan=True)
med =
|
np.median(flux)
|
numpy.median
|
import numpy as np
def Reflection_yRot(array):
R = np.array([[-1, 0], [0, 1]])
r = np.matmul(R,array)
return r
def Reflection_oRot(array):
R = np.array([[-1,0], [0,-1]])
r = np.matmul(R,array)
return r
u =
|
np.array([1,2])
|
numpy.array
|
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from ..database import Tables, SessionStatus, SessionAttr, ExperimentAttr, \
RefAttr, ConfigRef, LogAttr
from itertools import product
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from . import ExperimentDB
class MultiExperimentDB: # pragma: no cover
def __init__(self, dbclient):
self.dbclient = dbclient
@property
def experiments(self):
"""
List all experiements
"""
all_exp = self.dbclient.find_all(Tables.EXPERIMENTS)
return self.dbclient.to_pandas(all_exp)
def sessions_from_experiments(self, experiment_id):
"""
List all sessions (created, training, finished, failed) of
an experiments
"""
sessions = self.dbclient.find_by_col(
Tables.SESSIONS, SessionAttr.EXPERIMENT_ID,
self.dbclient.to_fk(experiment_id))
return self.dbclient.to_pandas(sessions)
def run_new_session(self, experiment_id, epochs, log_base_path='logs',
model_checkpoint_period=2,
prediction_checkpoint_period=2):
"""
Start a new session of an experimenton]
"""
exp = ExperimentDB(
self.dbclient, experiment_id=experiment_id,
log_base_path=log_base_path
).run_experiment(
model_checkpoint_period=model_checkpoint_period,
prediction_checkpoint_period=prediction_checkpoint_period,
save_origin_images=False, verbose=1, epochs=epochs)
return exp
def run_multiple_new_session(self, num, experiment_id, epochs,
log_base_path='logs',
model_checkpoint_period=2,
prediction_checkpoint_period=2):
"""
Run `num` number of new sessions of an experiment
"""
return_exps = []
for _ in range(num):
try:
exp = ExperimentDB(
self.dbclient, experiment_id=experiment_id,
log_base_path=log_base_path
).run_experiment(
model_checkpoint_period=model_checkpoint_period,
prediction_checkpoint_period=prediction_checkpoint_period,
save_origin_images=False, verbose=1, epochs=epochs)
return_exps.append(exp)
except Exception:
pass
return return_exps
def continue_session(self, session_id, epochs, log_base_path='logs',
model_checkpoint_period=2,
prediction_checkpoint_period=2):
"""
Continue a session from a model checkpoint
"""
exp = ExperimentDB(
self.dbclient, session_id=session_id, log_base_path=log_base_path
).run_experiment(
model_checkpoint_period=model_checkpoint_period,
prediction_checkpoint_period=prediction_checkpoint_period,
save_origin_images=False, verbose=1, epochs=epochs)
return exp
def continue_multiple_session(self, session_id_list, epochs,
log_base_path='logs',
model_checkpoint_period=2,
prediction_checkpoint_period=2):
"""
Continue multiple sessions
"""
return_exps = []
for session_id in session_id_list:
try:
exp = ExperimentDB(
self.dbclient, session_id=session_id,
log_base_path=log_base_path
).run_experiment(
model_checkpoint_period=model_checkpoint_period,
prediction_checkpoint_period=prediction_checkpoint_period,
save_origin_images=False, verbose=1, epochs=epochs)
return_exps.append(exp)
except Exception:
pass
return return_exps
def session_performance(self, session_id, metrics=None,
ax=None, shading='std'):
perf = self.dbclient.to_pandas(
self.dbclient.find_by_col(
Tables.LOGS, LogAttr.SESSION_ID,
self.dbclient.to_fk(session_id)))
not_include = ['_id', LogAttr.SESSION_ID, LogAttr.EPOCH]
metric_list = metrics or [
col for col in perf.columns if col not in not_include]
perf = perf.groupby(LogAttr.EPOCH).agg({
metric: ['mean', lambda val: np.mean(
val) - np.std(val), lambda val:
|
np.mean(val)
|
numpy.mean
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_approx_equal,
assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal_nulp, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy import signal
from scipy.fft import fftfreq
from scipy.signal import (periodogram, welch, lombscargle, csd, coherence,
spectrogram, stft, istft, check_COLA, check_NOLA)
from scipy.signal.spectral import _spectral_helper
class TestPeriodogram(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_odd(self):
x = np.zeros(15)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0)
q[0] = 0
assert_allclose(p, q)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, scaling='spectrum')
g, q = periodogram(x, scaling='density')
assert_allclose(f, np.linspace(0, 0.5, 9))
assert_allclose(p, q/16.0)
def test_integer_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_integer_odd(self):
x = np.zeros(15, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0)
q[0] = 0
assert_allclose(p, q)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 5.0/16.0)
q[0] = 0
assert_allclose(p, q)
def test_unk_scaling(self):
assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),
scaling='foo')
def test_nd_axis_m1(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((2,1,10))
x[:,:,0] = 1.0
f, p = periodogram(x)
assert_array_equal(p.shape, (2, 1, 6))
assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)
f0, p0 = periodogram(x[0,0,:])
assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)
def test_nd_axis_0(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((10,2,1))
x[0,:,:] = 1.0
f, p = periodogram(x, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)
f0, p0 = periodogram(x[:,0,0])
assert_array_almost_equal_nulp(p0, p[:,1,0])
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, 10, 'hann')
win = signal.get_window('hann', 16)
fe, pe = periodogram(x, 10, win)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, periodogram, x,
10, win_err) # win longer than signal
def test_padded_fft(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
fp, pp = periodogram(x, nfft=32)
assert_allclose(f, fp[::2])
assert_allclose(p, pp[::2])
assert_array_equal(pp.shape, (17,))
def test_empty_input(self):
f, p = periodogram([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_nfft(self):
x = np.zeros(18)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_nfft_is_xshape(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9, 'f')
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(15, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8, 'f')
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0, 'f')
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 5.0/16.0, 'f')
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
class TestWelch(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, welch, np.zeros(4, np.complex128),
scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = welch(x, nperseg=10, detrend=False)
f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = welch(x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = welch(x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = welch(x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, 10, 'hann', nperseg=8)
win = signal.get_window('hann', 8)
fe, pe = welch(x, 10, win, nperseg=None)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
assert_array_equal(fe.shape, (5,)) # because win length used as nperseg
assert_array_equal(pe.shape, (5,))
assert_raises(ValueError, welch, x,
10, win, nperseg=4) # because nperseg != win.shape[-1]
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, welch, x,
10, win_err, nperseg=None) # win longer than signal
def test_empty_input(self):
f, p = welch([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = welch(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = welch(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
#for string-like window, input signal length < nperseg value gives
#UserWarning, sets nperseg to x.shape[-1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
f, p = welch(x,window='hann') # default nperseg
f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg
f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f2)
assert_allclose(p1, p2)
def test_window_long_or_nd(self):
assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1]))
assert_raises(ValueError, welch, np.zeros(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = welch(x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
0.17072113], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
nfft = 24
f = fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
def test_window_correction(self):
A = 20
fs = 1e4
nperseg = int(fs//10)
fsig = 300
ii = int(fsig*nperseg//fs) # Freq index of fsig
tt = np.arange(fs)/fs
x = A*np.sin(2*np.pi*fsig*tt)
for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']:
_, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='spectrum')
freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='density')
# Check peak height at signal frequency for 'spectrum'
assert_allclose(p_spec[ii], A**2/2.0)
# Check integrated spectrum RMS for 'density'
assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2,
rtol=1e-3)
def test_axis_rolling(self):
np.random.seed(1234)
x_flat = np.random.randn(1024)
_, p_flat = welch(x_flat)
for a in range(3):
newshape = [1,]*3
newshape[a] = -1
x = x_flat.reshape(newshape)
_, p_plus = welch(x, axis=a) # Positive axis index
_, p_minus = welch(x, axis=a-x.ndim) # Negative axis index
assert_equal(p_flat, p_plus.squeeze(), err_msg=a)
assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim)
def test_average(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, average='median')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([.1, .05, 0., 1.54074396e-33, 0.])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_raises(ValueError, welch, x, nperseg=8,
average='unrecognised-average')
class TestCSD:
def test_pad_shorter_x(self):
x = np.zeros(8)
y = np.zeros(12)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_pad_shorter_y(self):
x = np.zeros(12)
y = np.zeros(8)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, csd, np.zeros(4, np.complex128),
np.ones(4, np.complex128), scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = csd(x, x, nperseg=10, detrend=False)
f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = csd(x, x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = csd(x, x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = csd(x, x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, 10, 'hann', 8)
win = signal.get_window('hann', 8)
fe, pe = csd(x, x, 10, win, nperseg=None)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
assert_array_equal(fe.shape, (5,)) # because win length used as nperseg
assert_array_equal(pe.shape, (5,))
assert_raises(ValueError, csd, x, x,
10, win, nperseg=256) # because nperseg != win.shape[-1]
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, csd, x, x,
10, win_err, nperseg=None) # because win longer than signal
def test_empty_input(self):
f, p = csd([],np.zeros(10))
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
f, p = csd(np.zeros(10),[])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.ones(10), np.empty((5,0)))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
f, p = csd(np.empty((5,0)), np.ones(10))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
#for string-like window, input signal length < nperseg value gives
#UserWarning, sets nperseg to x.shape[-1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
f, p = csd(x, x, window='hann') # default nperseg
f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg
f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f2)
assert_allclose(p1, p2)
def test_window_long_or_nd(self):
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.array([1,1,1,1,1]))
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = csd(x, x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann',
2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3,
nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
0.17072113], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
y = np.ones(12)
nfft = 24
f = fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
class TestCoherence(object):
def test_identical_input(self):
x = np.random.randn(20)
y = np.copy(x) # So `y is x` -> False
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
def test_phase_shifted_input(self):
x = np.random.randn(20)
y = -x
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
class TestSpectrogram(object):
def test_average_all_segments(self):
x = np.random.randn(1024)
fs = 1.0
window = ('tukey', 0.25)
nperseg = 16
noverlap = 2
f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
fw, Pw = welch(x, fs, window, nperseg, noverlap)
assert_allclose(f, fw)
assert_allclose(np.mean(P, axis=-1), Pw)
def test_window_external(self):
x = np.random.randn(1024)
fs = 1.0
window = ('tukey', 0.25)
nperseg = 16
noverlap = 2
f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
win = signal.get_window(('tukey', 0.25), 16)
fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2)
assert_array_equal(fe.shape, (9,)) # because win length used as nperseg
assert_array_equal(Pe.shape, (9,73))
assert_raises(ValueError, spectrogram, x,
fs, win, nperseg=8) # because nperseg != win.shape[-1]
win_err = signal.get_window(('tukey', 0.25), 2048)
assert_raises(ValueError, spectrogram, x,
fs, win_err, nperseg=None) # win longer than signal
def test_short_data(self):
x = np.random.randn(1024)
fs = 1.0
#for string-like window, input signal length < nperseg value gives
#UserWarning, sets nperseg to x.shape[-1]
f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg
with suppress_warnings() as sup:
sup.filter(UserWarning,
"nperseg = 1025 is greater than input length = 1024, using nperseg = 1024")
f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25),
nperseg=1025) # user-specified nperseg
f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default
f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f3)
assert_allclose(p1, p3)
class TestLombscargle(object):
def test_frequency(self):
"""Test if frequency location of peak corresponds to frequency of
generated input signal.
"""
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
P = lombscargle(t, x, f)
# Check if difference between found frequency maximum and input
# frequency is less than accuracy
delta = f[1] - f[0]
assert_(w - f[np.argmax(P)] < (delta/2.))
def test_amplitude(self):
# Test if height of peak in normalized Lomb-Scargle periodogram
# corresponds to amplitude of the generated input signal.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
# Normalize
pgram = np.sqrt(4 * pgram / t.shape[0])
# Check if difference between found frequency maximum and input
# frequency is less than accuracy
assert_approx_equal(np.max(pgram), ampl, significant=2)
def test_precenter(self):
# Test if precenter gives the same result as manually precentering.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
offset = 0.15 # Offset to be subtracted in pre-centering
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi) + offset
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f, precenter=True)
pgram2 = lombscargle(t, x - x.mean(), f, precenter=False)
# check if centering worked
assert_allclose(pgram, pgram2)
def test_normalize(self):
# Test normalize option of Lomb-Scarge.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
pgram2 = lombscargle(t, x, f, normalize=True)
# check if normalization works as expected
assert_allclose(pgram * 2 / np.dot(x, x), pgram2)
assert_approx_equal(np.max(pgram2), 1.0, significant=2)
def test_wrong_shape(self):
t = np.linspace(0, 1, 1)
x = np.linspace(0, 1, 2)
f = np.linspace(0, 1, 3)
assert_raises(ValueError, lombscargle, t, x, f)
def test_zero_division(self):
t = np.zeros(1)
x = np.zeros(1)
f = np.zeros(1)
assert_raises(ZeroDivisionError, lombscargle, t, x, f)
def test_lombscargle_atan_vs_atan2(self):
# https://github.com/scipy/scipy/issues/3787
# This raised a ZeroDivisionError.
t = np.linspace(0, 10, 1000, endpoint=False)
x = np.sin(4*t)
f = np.linspace(0, 50, 500, endpoint=False) + 0.1
lombscargle(t, x, f*2*np.pi)
class TestSTFT(object):
def test_input_validation(self):
assert_raises(ValueError, check_COLA, 'hann', -10, 0)
assert_raises(ValueError, check_COLA, 'hann', 10, 20)
assert_raises(ValueError, check_COLA, np.ones((2,2)), 10, 0)
assert_raises(ValueError, check_COLA, np.ones(20), 10, 0)
assert_raises(ValueError, check_NOLA, 'hann', -10, 0)
assert_raises(ValueError, check_NOLA, 'hann', 10, 20)
assert_raises(ValueError, check_NOLA, np.ones((2,2)), 10, 0)
assert_raises(ValueError, check_NOLA, np.ones(20), 10, 0)
assert_raises(ValueError, check_NOLA, 'hann', 64, -32)
x = np.zeros(1024)
z = np.array(stft(x), dtype=object)
assert_raises(ValueError, stft, x, window=np.ones((2,2)))
assert_raises(ValueError, stft, x, window=np.ones(10), nperseg=256)
assert_raises(ValueError, stft, x, nperseg=-256)
assert_raises(ValueError, stft, x, nperseg=256, noverlap=1024)
assert_raises(ValueError, stft, x, nperseg=256, nfft=8)
assert_raises(ValueError, istft, x) # Not 2d
assert_raises(ValueError, istft, z, window=np.ones((2,2)))
assert_raises(ValueError, istft, z, window=np.ones(10), nperseg=256)
assert_raises(ValueError, istft, z, nperseg=-256)
assert_raises(ValueError, istft, z, nperseg=256, noverlap=1024)
assert_raises(ValueError, istft, z, nperseg=256, nfft=8)
assert_raises(ValueError, istft, z, nperseg=256, noverlap=0,
window='hann') # Doesn't meet COLA
assert_raises(ValueError, istft, z, time_axis=0, freq_axis=0)
assert_raises(ValueError, _spectral_helper, x, x, mode='foo')
assert_raises(ValueError, _spectral_helper, x[:512], x[512:],
mode='stft')
assert_raises(ValueError, _spectral_helper, x, x, boundary='foo')
def test_check_COLA(self):
settings = [
('boxcar', 10, 0),
('boxcar', 10, 9),
('bartlett', 51, 26),
('hann', 256, 128),
('hann', 256, 192),
('blackman', 300, 200),
(('tukey', 0.5), 256, 64),
('hann', 256, 255),
]
for setting in settings:
msg = '{0}, {1}, {2}'.format(*setting)
assert_equal(True, check_COLA(*setting), err_msg=msg)
def test_check_NOLA(self):
settings_pass = [
('boxcar', 10, 0),
('boxcar', 10, 9),
('boxcar', 10, 7),
('bartlett', 51, 26),
('bartlett', 51, 10),
('hann', 256, 128),
('hann', 256, 192),
('hann', 256, 37),
('blackman', 300, 200),
('blackman', 300, 123),
(('tukey', 0.5), 256, 64),
(('tukey', 0.5), 256, 38),
('hann', 256, 255),
('hann', 256, 39),
]
for setting in settings_pass:
msg = '{0}, {1}, {2}'.format(*setting)
assert_equal(True, check_NOLA(*setting), err_msg=msg)
w_fail = np.ones(16)
w_fail[::2] = 0
settings_fail = [
(w_fail, len(w_fail), len(w_fail) // 2),
('hann', 64, 0),
]
for setting in settings_fail:
msg = '{0}, {1}, {2}'.format(*setting)
assert_equal(False, check_NOLA(*setting), err_msg=msg)
def test_average_all_segments(self):
np.random.seed(1234)
x = np.random.randn(1024)
fs = 1.0
window = 'hann'
nperseg = 16
noverlap = 8
# Compare twosided, because onesided welch doubles non-DC terms to
# account for power at negative frequencies. stft doesn't do this,
# because it breaks invertibility.
f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False,
return_onesided=False, boundary=None)
fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False,
scaling='spectrum', detrend=False)
assert_allclose(f, fw)
assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw)
def test_permute_axes(self):
np.random.seed(1234)
x = np.random.randn(1024)
fs = 1.0
window = 'hann'
nperseg = 16
noverlap = 8
f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap)
f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap,
axis=0)
t3, x1 = istft(Z1, fs, window, nperseg, noverlap)
t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0,
freq_axis=-1)
assert_allclose(f1, f2)
assert_allclose(t1, t2)
assert_allclose(t3, t4)
assert_allclose(Z1, Z2[:, 0, 0, :])
assert_allclose(x1, x2[:, 0, 0])
def test_roundtrip_real(self):
np.random.seed(1234)
settings = [
('boxcar', 100, 10, 0), # Test no overlap
('boxcar', 100, 10, 9), # Test high overlap
('bartlett', 101, 51, 26), # Test odd nperseg
('hann', 1024, 256, 128), # Test defaults
(('tukey', 0.5), 1152, 256, 64), # Test Tukey
('hann', 1024, 256, 255), # Test overlapped hann
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window)
msg = '{0}, {1}'.format(window, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
def test_roundtrip_not_nola(self):
np.random.seed(1234)
w_fail =
|
np.ones(16)
|
numpy.ones
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 20 15:20:49 2017
@author: robertcarson
"""
import numpy as np
import FePX_Data_and_Mesh as fepxDM
import FiniteElement as fe
#from latorifem import mainlatprogram as latfem
import Rotations as rot
import Misori as mis
#%%
#Getting the location of all of our simulation data and then the mesh file name
#fileLoc = '/Users/robertcarson/Research_Local_Code/Output/LOFEM_STUDY/n456_cent/low/'
#fileLoc = '/media/robert/My Passport for Mac/Simulations/LOFEM_Study/n456_cent_m15/mid_txt/'
fileLoc = '/home/rac428/Outputs/LOFEM_Study/n456_cent_uori_m15/low_txt/'
#fileLoc = '/media/robert/DataDrives/LOFEM_Study/n456_NF/mono/low_txt/'
#fileLoc = '/Users/robertcarson/Research_Local_Code/fepx_robert/Examples/ControlMode/LOFEM_REFACTOR2/data/'
fileName = 'n456-cent-rcl05'
#fileName = 'n456_nf_raster_L2_r1_v2_rcl075'
#fileName = 'n6'
#What we want the basename of the file where we save our kinematic metrics saved along with a few other variables.
fBname = 'grainData'
#fileLoc = '/media/robert/DataDrives/n1k_pois_iso_reg_pt2/'
#fileName = 'n1k-id6k-rcl05'
#The number of processors and steps within the simulation.
nproc = 64
#nsteps = 16
nsteps = 46
#nsteps = 19
#nsteps = 43
#nsteps = 44
#nsteps = 52
#nsteps = 64
#nsteps = 86
frames = np.arange(0,nsteps)
#Reading in our mesh data
mesh = fepxDM.readMesh(fileLoc, fileName, LOFEM = True)
#How many grains that our polycrystal had
#ngrains = 6
ngrains = 456
#ngrains = 1000
grains = np.r_[1:(ngrains+1)]
#Misorientation difference variable that shows the relative angle of rotation between the discrete and smooth lattice methods
#from element to element
misoriD = np.zeros((mesh['grains'].shape[0], nsteps))
#%%
print('About to start processing data')
#Tells us what our angle file data is whether its a rod vec or kocks angles
kor = 'rod'
#Reading in our LOFEM data
ldata = fepxDM.readLOFEMData(fileLoc, nproc, lofemData=['strain', 'ang'])
print('Finished Reading LOFEM data')
print('Starting to read DISC data')
data = fepxDM.readData(fileLoc, nproc, fepxData=['ang', 'adx', 'strain'], restart=False)
print('Finished Reading DISC data')
#%%
#Global connectivity array reordered such that it goes grain by grain
gconn = np.asarray([], dtype='float64')
gconn = np.atleast_2d(gconn)
#The unique pts and elements that correspond to the above
gupts = np.asarray([], dtype=np.int32)
guelem = np.asarray([], dtype=np.int32)
#Finding the nodal points and elements upper and lowere bounds for all of the grain data
se_bnds = np.zeros((ngrains*2), dtype='int32')
se_el_bnds = np.zeros((ngrains*2), dtype='int32')
st_bnd = 0
en_bnd = 0
st_bnd2 = 0
en_bnd2 = 0
for i in grains:
lcon, lcrd, lupts, luelem = fe.localConnectCrd(mesh, i)
st_bnd = en_bnd
en_bnd = st_bnd + lupts.shape[0]
j = (i - 1) * 2
se_bnds[j] = st_bnd
se_bnds[j+1] = en_bnd
st_bnd2 = en_bnd2
en_bnd2 = st_bnd2 + luelem.shape[0]
j = (i - 1) * 2
se_el_bnds[j] = st_bnd2
se_el_bnds[j+1] = en_bnd2
gconn, gupts, guelem = fe.concatConnArray(gconn, lcon, gupts, lupts, guelem, luelem)
npts = gupts.shape[0]
nelem = guelem.shape[0]
#%%
#The below is the same as the above but here we just use the LOFEM connectivity array
gconn2 = np.asarray([], dtype='float64')
gconn2 = np.atleast_2d(gconn2)
gupts2 = np.asarray([], dtype=np.int32)
guelem2 = np.asarray([], dtype=np.int32)
se_bnds2 = np.zeros((ngrains*2), dtype='int32')
se_el_bnds2 = np.zeros((ngrains*2), dtype='int32')
st_bnd = 0
en_bnd = 0
st_bnd2 = 0
en_bnd2 = 0
for i in grains:
lcon, lupts, luelem = fe.localGrainConnectCrd(mesh, i)
st_bnd = en_bnd
en_bnd = st_bnd + lupts.shape[0]
j = (i - 1) * 2
se_bnds2[j] = st_bnd
se_bnds2[j+1] = en_bnd
st_bnd2 = en_bnd2
en_bnd2 = st_bnd2 + luelem.shape[0]
j = (i - 1) * 2
se_el_bnds2[j] = st_bnd2
se_el_bnds2[j+1] = en_bnd2
gconn2, gupts2, guelem2 = fe.concatConnArray(gconn2, lcon, gupts2, lupts, guelem2, luelem)
npts2 = gupts2.shape[0]
nelem2 = guelem2.shape[0]
#%%
#
#These are variables telling us the relative rotation away from the current grain average orientation for either
#nodal or elemental data
gr_angs = np.zeros((1, npts, nsteps), dtype='float64')
lofem_angs = np.zeros((1, nelem, nsteps), dtype='float64')
disc_angs = np.zeros((1, nelem, nsteps), dtype='float64')
#Telling us the origin in 3D space
origin = np.zeros((3,1), dtype='float64')
#%%
#
for i in grains:
print('###### Starting Grain Number '+str(i)+' ######')
#Reading in our local connectivity arrays in terms of our regular connectivity array and the one generated for the LOFEM simulations
lcon, lcrd, ucon, uelem = fe.localConnectCrd(mesh, i)
lcon2, ucon2, uelem2 = fe.localGrainConnectCrd(mesh, i)
# # of elements and nodes in a grain
nel = lcon.shape[1]
npts = ucon.shape[0]
#Tells us globally what points correspond to the grain we're examing
indlog = mesh['grains'] == i
indlog2 = mesh['crd_grains'] == i
#Here we're getting the misorientation angle and quaternion for our angles when taken with respect the original orientation
#for the discrete method
misAngs, misQuats = mis.misorientationGrain(mesh['kocks'][:,i-1], data['angs'][:,indlog,:], frames, 'kocks')
#Legacy code but just setting our deformation gradient to the identity array
defgrad = np.tile(np.atleast_3d(np.identity(3)), (1,1,nel))
#A list holding our deformation stats for the discrete and lofem methods
deflist = []
ldeflist = []
#el_angs is a temporary variable that will hold the grain values that go into misoriD
el_angs = np.zeros((3,nel,nsteps))
#Our difference quats, lofem quaternion at nodes, lofem quaternion at the centroid of the element, and discrete method quats
diff_misQuats = np.zeros((4,nel,nsteps))
lQuats = np.zeros((4, npts, nsteps))
leQuats = np.zeros((4, nel, nsteps))
dQuats = np.zeros((4, nel, nsteps))
#Just converting from our inputted orientation data to quaternions
for j in range(nsteps):
el_angs[:,:,j] = fe.elem_fe_cen_val(ldata['angs'][:,indlog2,j], lcon2)
lQuats[:,:,j] = rot.QuatOfRod(np.squeeze(ldata['angs'][:,indlog2,j]))
leQuats[:,:,j] = rot.QuatOfRod(np.squeeze(el_angs[:,:,j]))
dQuats[:,:,j] = rot.OrientConvert(np.squeeze(data['angs'][:,indlog,j]), 'kocks', 'quat', 'degrees', 'radians')
#Here we're getting the misorientation angle and quaternion for our angles when taken with respect the original orientation
#for the lofem method
lemisAngs, lemisQuats = mis.misorientationGrain(mesh['kocks'][:,i-1], el_angs, frames, kor)
for j in range(nsteps):
#Getting misorientation between the lofem and disc elements
temp2, tempQ = mis.misorientationGrain(data['angs'][:,indlog, j], el_angs[:,:,j], [0], kor)
diff_misQuats[:,:,j] = np.squeeze(tempQ)
misoriD[indlog, j] = np.squeeze(temp2)
crd = np.squeeze(data['coord'][:,ucon, j])
#Getting strain data
epsVec = np.squeeze(ldata['strain'][:, indlog, j])
#Taking the strain data and putting it into the tensorial view
#FEpX saves strain data off as 11, 21, 31, 22, 32, 33 so we also have to do some other
#fanagling of the data
strain = fepxDM.fixStrain(epsVec)
#Calculating the volume and wts of the element assumming no curvature to the element
#The wts are used in all of the calculations and these are relative wts where each element wts is based on
#vol_elem/vol_grain
vol, wts = fe.calcVol(crd, lcon)
#Getting our deformation data but this method is old so we can actually update it a bit
ldefdata = fe.deformationStats(defgrad, wts, crd, lcon, lemisQuats[:, :, j], el_angs[:,:,j], strain, kor)
ldeflist.append(ldefdata)
#Doing the same as the above but now for the discrete data case
epsVec = np.squeeze(data['strain'][:, indlog, j])
strain = fepxDM.fixStrain(epsVec)
defdata = fe.deformationStats(defgrad, wts, crd, lcon, misQuats[:, :, j], data['angs'][:, indlog, j], strain, 'kocks')
deflist.append(defdata)
print('Grain #'+str(i)+'% done: {:.3f}'.format(((j+1)/nsteps)))
#Saving off all of the data now
with open(fileLoc+fBname+'LOFEM'+'.vespread','ab') as f_handle:
f_handle.write(bytes('%Grain number'+str(i)+'\n','UTF-8'))
for j in range(nsteps):
np.savetxt(f_handle,ldeflist[j]['veSpread'])
with open(fileLoc+fBname+'DISC'+'.vespread','ab') as f_handle:
f_handle.write(bytes('%Grain number'+str(i)+'\n','UTF-8'))
for j in range(nsteps):
np.savetxt(f_handle,deflist[j]['veSpread'])
with open(fileLoc+fBname+'LOFEM'+'.fespread','ab') as f_handle:
f_handle.write(bytes('%Grain number'+str(i)+'\n','UTF-8'))
for j in range(nsteps):
np.savetxt(f_handle,ldeflist[j]['feSpread'])
with open(fileLoc+fBname+'DISC'+'.fespread','ab') as f_handle:
f_handle.write(bytes('%Grain number'+str(i)+'\n','UTF-8'))
for j in range(nsteps):
np.savetxt(f_handle,deflist[j]['feSpread'])
#Calculating all of our misorientation data now
stats = mis.misorientationTensor(lQuats, lcrd, lcon, data['coord'][:, ucon, :], i, True)
lmisAngs, lmisQuats = mis.misorientationGrain(origin, stats['angaxis'], frames, 'axis', True)
with open(fileLoc+fBname+'LOFEM'+'.misori','ab') as f_handle:
f_handle.write(bytes('%Grain number '+str(i)+'\n','UTF-8'))
|
np.savetxt(f_handle,stats['gSpread'])
|
numpy.savetxt
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Created on Mon Dec 24 21:00:00 2018
@author: gsutanto
"""
import re
import warnings as wa
import numpy as np
import numpy.linalg as npla
import numpy.matlib as npma
import os
import sys
import copy
import pyplot_util
from utilities import *
division_epsilon = 1.0e-100
def normalizeQuaternion(Q_input, warning_threshold=0.98):
assert (
(len(Q_input.shape) >= 1) and
(len(Q_input.shape) <= 2)), "Q_input has invalid number of dimensions!"
if (len(Q_input.shape) == 1):
Q_input = Q_input.reshape(1, 4)
assert (
Q_input.shape[1] == 4), "Each row of Q_input has to be 4-dimensional!!!"
tensor_length = Q_input.shape[0]
Q_input_norm = npla.norm(Q_input, ord=2, axis=1).reshape(tensor_length, 1)
if ((Q_input_norm < warning_threshold).any()):
wa.warn("(Q_input_norm < %f).any() == True ; Q_input_norm=\n" %
warning_threshold + str(Q_input_norm))
# Normalize (make sure that norm(Quaternion) == 1)
Q_output = Q_input / npma.repmat(Q_input_norm, 1, 4)
if (tensor_length == 1):
Q_output = Q_output[0, :]
return Q_output
def standardizeNormalizeQuaternion(Q_input):
assert (
(len(Q_input.shape) >= 1) and
(len(Q_input.shape) <= 2)), "Q_input has invalid number of dimensions!"
if (len(Q_input.shape) == 1):
Q_input = Q_input.reshape(1, 4)
assert (
Q_input.shape[1] == 4), "Each row of Q_input has to be 4-dimensional!!!"
Q_output = copy.deepcopy(Q_input)
# Standardize (make sure that unique Quaternion represents
# unique orientation)
Q_idx_tobe_std = np.where(Q_output[:, 0] < 0.0)[0]
if (len(Q_idx_tobe_std) > 0):
# print('Standardizing some Quaternions for uniqueness ...');
Q_output[Q_idx_tobe_std, :] = -Q_output[Q_idx_tobe_std, :]
Q_output = normalizeQuaternion(Q_output)
return Q_output
def computeQuaternionLogMap(Q_input, div_epsilon=division_epsilon):
assert (
(len(Q_input.shape) >= 1) and
(len(Q_input.shape) <= 2)), "Q_input has invalid number of dimensions!"
if (len(Q_input.shape) == 1):
Q_input = Q_input.reshape(1, 4)
assert (
Q_input.shape[1] == 4), "Each row of Q_input has to be 4-dimensional!!!"
assert (np.iscomplex(Q_input).any() == False)
tensor_length = Q_input.shape[0]
# normalize the input Quaternion first:
Q_prep = normalizeQuaternion(np.real(Q_input)).reshape(tensor_length, 4)
u = Q_prep[:, 0].reshape(tensor_length, 1)
q = Q_prep[:, 1:4]
arccos_u = np.arccos(u)
sin_arccos_u = np.sin(arccos_u)
# arccos_u_div_sin_arccos_u = (arccos_u + div_epsilon)/(sin_arccos_u + div_epsilon)
#
# log_Q_output = npma.repmat(arccos_u_div_sin_arccos_u, 1, 3) * q
multiplier_sign = np.ones((tensor_length, 1))
log_multiplier = np.log(np.zeros((tensor_length, 1)) + division_epsilon)
Q_idx_w_positive_sin_arccos_u = np.where(sin_arccos_u[:, 0] > 0)[0]
Q_idx_w_negative_sin_arccos_u = np.where(sin_arccos_u[:, 0] < 0)[0]
Q_idx_w_nonzero_sin_arccos_u = np.union1d(Q_idx_w_positive_sin_arccos_u,
Q_idx_w_negative_sin_arccos_u)
log_Q_output = copy.deepcopy(q)
if (Q_idx_w_nonzero_sin_arccos_u.size > 0):
log_Q_output[Q_idx_w_nonzero_sin_arccos_u, :] = np.zeros(
(len(Q_idx_w_nonzero_sin_arccos_u), 3))
log_multiplier[Q_idx_w_nonzero_sin_arccos_u,
0] = np.log(arccos_u[Q_idx_w_nonzero_sin_arccos_u, 0])
if (Q_idx_w_positive_sin_arccos_u.size > 0):
log_multiplier[
Q_idx_w_positive_sin_arccos_u,
0] = log_multiplier[Q_idx_w_positive_sin_arccos_u, 0] - np.log(
sin_arccos_u[Q_idx_w_positive_sin_arccos_u, 0])
if (Q_idx_w_negative_sin_arccos_u.size > 0):
multiplier_sign[Q_idx_w_negative_sin_arccos_u,
0] = -multiplier_sign[Q_idx_w_negative_sin_arccos_u, 0]
log_multiplier[Q_idx_w_negative_sin_arccos_u, 0] = log_multiplier[
Q_idx_w_negative_sin_arccos_u,
0] - np.log(-sin_arccos_u[Q_idx_w_negative_sin_arccos_u, 0])
for i in range(3):
q_ith_col_idx_gt_zero = np.where(q[:, i] > 0)[0]
q_ith_col_idx_gt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u = np.intersect1d(
q_ith_col_idx_gt_zero, Q_idx_w_nonzero_sin_arccos_u)
if (q_ith_col_idx_gt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u.size > 0):
log_Q_output[
q_ith_col_idx_gt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u, i] = (
multiplier_sign[
q_ith_col_idx_gt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u,
0] *
np.exp(log_multiplier[
q_ith_col_idx_gt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u,
0] + np.log(q[
q_ith_col_idx_gt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u,
i])))
q_ith_col_idx_lt_zero = np.where(q[:, i] < 0)[0]
q_ith_col_idx_lt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u = np.intersect1d(
q_ith_col_idx_lt_zero, Q_idx_w_nonzero_sin_arccos_u)
if (q_ith_col_idx_lt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u.size > 0):
log_Q_output[
q_ith_col_idx_lt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u,
i] = (-multiplier_sign[
q_ith_col_idx_lt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u,
0] * np.exp(log_multiplier[
q_ith_col_idx_lt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u,
0] + np.log(-q[
q_ith_col_idx_lt_zero_intersect_Q_idx_w_nonzero_sin_arccos_u,
i])))
assert (np.isnan(log_Q_output).any() == False), "log_Q_output contains NaN!"
if (tensor_length == 1):
log_Q_output = log_Q_output[0, :]
assert (np.iscomplex(log_Q_output).any() == False)
return (2.0 * np.real(log_Q_output))
def computeQuaternionExpMap(log_Q_input, div_epsilon=division_epsilon):
assert ((len(log_Q_input.shape) >= 1) and (len(log_Q_input.shape) <= 2)
), "log_Q_input has invalid number of dimensions!"
if (len(log_Q_input.shape) == 1):
log_Q_input = log_Q_input.reshape(1, 3)
assert (log_Q_input.shape[1] == 3
), "Each row of log_Q_input has to be 3-dimensional!!!"
assert (np.iscomplex(log_Q_input).any() == False)
tensor_length = log_Q_input.shape[0]
r = np.real(log_Q_input) / 2.0
norm_r = npla.norm(r, ord=2, axis=1).reshape(tensor_length, 1)
cos_norm_r = np.cos(norm_r)
sin_norm_r = np.sin(norm_r)
# sin_norm_r_div_norm_r = (sin_norm_r + div_epsilon)/(norm_r + div_epsilon)
#
# Q_output = np.hstack([cos_norm_r, (npma.repmat(sin_norm_r_div_norm_r, 1, 3) * r)])
Q_output = np.zeros((tensor_length, 4))
Q_output[:, 0] = np.ones(tensor_length)
log_Q_input_idx_nonzero_norm_r = np.where(norm_r[:, 0] != 0)[0]
log_Q_input_idx_sin_norm_r_gt_zero = np.where(sin_norm_r[:, 0] > 0)[0]
log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r = np.intersect1d(
log_Q_input_idx_sin_norm_r_gt_zero, log_Q_input_idx_nonzero_norm_r)
if (log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r
.size > 0):
Q_output[
log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0] = cos_norm_r[
log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0]
Q_output[
log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
1:4] = (
npma.repmat(
np.exp(
np.log(sin_norm_r[
log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0]) -
np.log(norm_r[
log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0]))
.reshape(
log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r
.shape[0], 1), 1, 3) *
r[log_Q_input_idx_sin_norm_r_gt_zero_intersect_log_Q_input_idx_nonzero_norm_r, :]
)
log_Q_input_idx_sin_norm_r_lt_zero = np.where(sin_norm_r[:, 0] < 0)[0]
log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r = np.intersect1d(
log_Q_input_idx_sin_norm_r_lt_zero, log_Q_input_idx_nonzero_norm_r)
if (log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r
.size > 0):
Q_output[
log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0] = cos_norm_r[
log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0]
Q_output[
log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
1:4] = (
npma.repmat(
np.exp(
np.log(-sin_norm_r[
log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0]) -
np.log(norm_r[
log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r,
0]))
.reshape(
log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r
.shape[0], 1), 1, 3) *
(-r[log_Q_input_idx_sin_norm_r_lt_zero_intersect_log_Q_input_idx_nonzero_norm_r, :]
))
assert (
|
np.isnan(Q_output)
|
numpy.isnan
|
#
#
# Copyright (C) University of Melbourne 2013
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
"""Module subclassing TxMultiGeneratorBase that provides an implementation for
multi-site generators.
"""
from tools import mureilexception, mureilbuilder
import copy
import numpy
from generator import txmultigeneratorbase
import logging
logger = logging.getLogger(__name__)
class TxMultiGeneratorMultiSite(txmultigeneratorbase.TxMultiGeneratorBase):
"""Module subclassing TxMultiGeneratorBase that provides an implementation of
state_handle and related handling functions for multi-site generators.
The 'capacity' term in state_handle is implemented as a dict with one item per site.
Each site item is a list of tuples containing (site_index,build_period,decommissioning_period),
describing the set of installed capacity.
"""
def __init__(self):
"""Initialise as for the base class, and also initialise the params_to_site map.
"""
txmultigeneratorbase.TxMultiGeneratorBase.__init__(self)
# params_to_site maps the index in the params list to the site indices.
self.params_to_site = []
def get_config_spec(self):
"""Return a list of tuples of format (name, conversion function, default),
e.g. ('capex', float, 2.0). Put None if no conversion required, or if no
default value, e.g. ('name', None, None)
Configuration:
time_period_yrs: float - the length of the time period in years
time_scale_up_mult: float - the value to multiply non-discounted items,
such as carbon emissions, by to account for a shorter dataset than the
calculation period length.
variable_cost_mult: as for time_scale_up_mult, but may include a factor for
cost discounting.
size: float, optional - relates param to new capacity
carbon_price_m: float - carbon price in $M/tonne
startup_data_name: string, optional - the name of the data array that contains
data on startup capacities.
startup_data_string: string, optional - a python format data array suitable for
input into set_startup_state, all on a single line.
params_to_site_data_name: string, optional - the name of the data array that
contains a list of how the input params list maps to site indices.
params_to_site_data_string: list of integers, optional - the site indices,
listed separated by spaces, defining the site index corresponding to
each optimisation param, in order.
vom: float, default 0 - variable operating and maintenance cost, in $/MWh, same for all sites
capital_cost: float, default 0 - cost in $M per MW for new capacity.
install_cost: float, default 0 - cost in $M per site, when site has an
installation from this generator for the first time.
decommissioning_cost: float, optional (default 0) - cost in $M per MW for
decommissioning.
lifetime_yrs: float, default 20 - the time in years that new capacity lasts
"""
return txmultigeneratorbase.TxMultiGeneratorBase.get_config_spec(self) + [
('variable_cost_mult', float, 1.0),
('time_scale_up_mult', float, 1.0),
('carbon_price_m', float, 0.0),
('startup_data_name', None, ''),
('startup_data_string', mureilbuilder.python_eval, 'None'),
('params_to_site_data_name', None, ''),
('params_to_site_data_string', mureilbuilder.make_int_list, ''),
('decommissioning_cost', float, 0),
('vom', float, 0),
('capital_cost', float, 0),
('install_cost', float, 0),
('time_period_yrs', float, None),
('lifetime_yrs', float, 20),
('size', float, 1.0),
('start_min_param', int, 1e20),
('start_max_param', int, 1e20),
('timestep_hrs', float, None)
]
def complete_configuration_pre_expand(self):
"""Complete the configuration prior to expanding the
period configs.
This implementation checks that the lifetime_yrs is a multiple
of time_period_yrs, and sets the startup state and params_to_site from the
configuration strings.
"""
time_period_yrs = self.config['time_period_yrs']
lifetime_yrs = self.config['lifetime_yrs']
error = None
if isinstance(lifetime_yrs, dict):
for value in lifetime_yrs.itervalues():
div = value / time_period_yrs
if not (float(int(div)) == div):
error = value
else:
div = lifetime_yrs / time_period_yrs
if not (float(int(div)) == div):
error = lifetime_yrs
if error is not None:
msg = ('In section ' + self.config['section'] + ', lifetime_yrs = ' +
str(error) + ' which is required to be a multiple of time_period_yrs of ' +
str(time_period_yrs))
raise mureilexception.ConfigException(msg, {})
# Set the startup state and the params to site from the configuration strings.
if self.config['startup_data_string'] is not None:
self.set_startup_state(self.config['startup_data_string'])
if len(self.config['params_to_site_data_string']) > 0:
self.params_to_site = self.config['params_to_site_data_string']
def get_data_types(self):
"""Return a list of keys for each type of
data required, for example ts_wind, ts_demand.
Outputs:
data_type: list of strings - each a key name
describing the data required for this generator.
"""
data_types = []
if len(self.config['startup_data_name']) > 0:
data_types.append(self.config['startup_data_name'])
if len(self.config['params_to_site_data_name']) > 0:
data_types.append(self.config['params_to_site_data_name'])
return data_types
def set_data(self, data):
"""Set the data dict with the data series required
for the generator.
This implementation looks for the data types:
self.config['startup_data_name']: Interpets this into
the startup state, using the set_startup_state function.
self.config['params_to_site_data_name']: Sets self.params_to_site
to this.
Inputs:
data: dict - with keys matching those requested by
get_data_types.
"""
startup_data_name = self.config['startup_data_name']
if (len(startup_data_name) > 0) and (startup_data_name in data):
self.set_startup_state(data[startup_data_name])
params_to_site_name = self.config['params_to_site_data_name']
if (len(params_to_site_name) > 0) and (params_to_site_name in data):
self.params_to_site = data[params_to_site_name]
def set_startup_state(self, startup_data):
"""Set the startup state from the data provided. Sets
self.startup_state from this.
Inputs:
startup_data: An array of generators * 4:
[[site_index, capacity, build_date, decommissioning_period],
...]
"""
# Check if the startup data is empty. If so, just return.
if len(startup_data) == 0:
return
# Find out which build periods are covered.
startup_data =
|
numpy.array(startup_data)
|
numpy.array
|
#Instructions
#In this challenge, you are tasked with helping a small,
#rural town modernize its vote-counting process.
#You will be given two sets of poll data (election_data_1.csv and
#election_data_2.csv). Each dataset is composed of three columns:
#Voter ID, County, and Candidate. Your task is to create a Python
#script that analyzes the votes and calculates each of the following:
#The total number of votes cast
#A complete list of candidates who received votes
#The percentage of votes each candidate won
#The total number of votes each candidate won
#The winner of the election based on popular vote.
#As an example, your analysis should look similar to the one below:
#Election Results
#-------------------------
#Total Votes: 620100
#-------------------------
#Rogers: 36.0% (223236)
#Gomez: 54.0% (334854)
#Brentwood: 4.0% (24804)
#Higgins: 6.0% (37206)
#-------------------------
#Winner: Gomez
#-------------------------
#Your final script should both print the analysis to the terminal
#and export a text file with the results.
#-----------------------------------------------------------------------------
import numpy as np
import pandas as pd
import os
import csv
file = "C:/Users/nab226/Desktop/NUCHI201801DATA4-Class-Repository-DATA/MWS/Homework/03-Python/Instructions/PyPoll/raw_data/election_data_1.csv"
df = pd.read_csv(file)
#df.head()
#The total number of votes cast
n_votes = df["Voter ID"].count()
n_votes
print("#Election Results - Dataset #1")
print("------------------------------------")
print("Total Votes: "+str(n_votes))
print("------------------------------------")
cand = df.Candidate.unique()
cand1 = df.loc[(df["Candidate"]== str(cand[0]))]
cand2 = df.loc[(df["Candidate"]== str(cand[1]))]
cand3 = df.loc[(df["Candidate"]== str(cand[2]))]
cand4 = df.loc[(df["Candidate"]== str(cand[3]))]
nm_cand1 = str(cand1.Candidate.values[0])
nm_cand2 = str(cand2.Candidate.values[0])
nm_cand3 = str(cand3.Candidate.values[0])
nm_cand4 = str(cand4.Candidate.values[0])
nm = [nm_cand1,nm_cand2,nm_cand3,nm_cand4]
n_cand1 = np.array(cand1["Voter ID"])
n_cand1 = n_cand1.size
n_cand2 = np.array(cand2["Voter ID"])
n_cand2 = n_cand2.size
n_cand3 = np.array(cand3["Voter ID"])
n_cand3 = n_cand3.size
n_cand4 = np.array(cand4["Voter ID"])
n_cand4 = n_cand4.size
n = [n_cand1, n_cand2, n_cand3, n_cand4]
cand1_pct = np.round((n_cand1/n_votes)*100,2)
cand1_pct
cand2_pct = np.round((n_cand2/n_votes)*100,2)
cand2_pct
cand3_pct = np.round((n_cand3/n_votes)*100,2)
cand3_pct
cand4_pct = np.round((n_cand4/n_votes)*100,2)
cand4_pct
pct = [cand1_pct, cand2_pct, cand3_pct, cand4_pct]
dict_df = dict(zip(nm, zip(pct, n)))
print("Name"+" "+"( %"+" , "+"# Votes)")
for i in dict_df:
print(i, dict_df[i])
print("------------------------------------")
max_votes = max(dict_df, key=dict_df.get)
winner = print("Winner: "+str(max_votes))
with open("pypoll_output1.txt", "w") as text_file:
print("#Election Results - Dataset #1", file=text_file)
print("------------------------------------", file=text_file)
print("Total Votes: "+str(n_votes), file=text_file)
print("------------------------------------", file=text_file)
print("Name"+" "+"( %"+" , "+"# Votes)", file=text_file)
for i in dict_df:
print(i, dict_df[i], file=text_file)
print("------------------------------------", file=text_file)
print("Winner: "+str(max_votes), file=text_file)
print(" ")
print(" ")
#end of script for dataset #1
#re-run script for dataset #2
#-----------------------------------------------------------------------------
file = "C:/Users/nab226/Desktop/NUCHI201801DATA4-Class-Repository-DATA/MWS/Homework/03-Python/Instructions/PyPoll/raw_data/election_data_2.csv"
df = pd.read_csv(file)
#df.head()
#The total number of votes cast
n_votes = df["Voter ID"].count()
n_votes
print("#Election Results - Dataset #2")
print("------------------------------------")
print("Total Votes: "+str(n_votes))
print("------------------------------------")
cand = df.Candidate.unique()
cand1 = df.loc[(df["Candidate"]== str(cand[0]))]
cand2 = df.loc[(df["Candidate"]== str(cand[1]))]
cand3 = df.loc[(df["Candidate"]== str(cand[2]))]
cand4 = df.loc[(df["Candidate"]== str(cand[3]))]
nm_cand1 = str(cand1.Candidate.values[0])
nm_cand2 = str(cand2.Candidate.values[0])
nm_cand3 = str(cand3.Candidate.values[0])
nm_cand4 = str(cand4.Candidate.values[0])
nm = [nm_cand1,nm_cand2,nm_cand3,nm_cand4]
n_cand1 = np.array(cand1["Voter ID"])
n_cand1 = n_cand1.size
n_cand2 =
|
np.array(cand2["Voter ID"])
|
numpy.array
|
import numpy as np
import scipy.stats as ss
import scipy.special as sp
from .family import Family
from .flat import Flat
from .normal import Normal
from .gas_recursions import gas_recursion_skewt_orderone, gas_recursion_skewt_ordertwo
from .gas_recursions import gasx_recursion_skewt_orderone, gasx_recursion_skewt_ordertwo
from .gas_recursions import gas_llev_recursion_skewt_orderone, gas_llev_recursion_skewt_ordertwo
from .gas_recursions import gas_llt_recursion_skewt_orderone, gas_llt_recursion_skewt_ordertwo
from .gas_recursions import gas_reg_recursion_skewt_orderone, gas_reg_recursion_skewt_ordertwo
class Skewt(Family):
"""
Student Skew t Distribution
----
This class contains methods relating to the Student Skew t distribution for time series.
"""
def __init__(self, loc=0.0, scale=1.0, df=8.0, gamma=1.0, transform=None, **kwargs):
"""
Parameters
----------
loc : float
Location parameter for the Skew t distribution
scale : float
Scale parameter for the Skew t distribution
df : float
Degrees of freedom parameter for the Skew t distribution
gamma : float
Skewness parameter (1.0 is skewed; under 1.0, -ve skewed; over 1.0, +ve skewed)
transform : str
Whether to apply a transformation to the location variable - e.g. 'exp' or 'logit'
"""
super(Skewt, self).__init__(transform)
self.loc0 = loc
self.scale0 = scale
self.df0 = df
self.gamma0 = gamma
self.covariance_prior = False
self.gradient_only = kwargs.get('gradient_only', False) # used for GAS t models
if self.gradient_only is True:
self.score_function = self.first_order_score
else:
self.score_function = self.second_order_score
def approximating_model(self, beta, T, Z, R, Q, h_approx, data):
""" Creates approximating Gaussian state space model for Skewt measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
""" Creates approximating Gaussian state space model for Skewt measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
X: np.array
The regressors
state_no : int
Number of states
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
@staticmethod
def build_latent_variables():
""" Builds additional latent variables for this family
Returns
----------
- A list of lists (each sub-list contains latent variable information)
"""
lvs_to_build = []
lvs_to_build.append(['Skewness', Flat(transform='exp'), Normal(0, 3), 0.0])
lvs_to_build.append(['Skewt Scale', Flat(transform='exp'), Normal(0, 3), 0.01])
lvs_to_build.append(['v', Flat(transform='exp'), Normal(0, 3), 2.5])
return lvs_to_build
@staticmethod
def draw_variable(loc, scale, shape, skewness, nsims):
""" Draws random variables from Skew t distribution
Parameters
----------
loc : float
location parameter for the distribution
scale : float
scale parameter for the distribution
shape : float
tail thickness parameter for the distribution
skewness : float
skewness parameter for the distribution
nsims : int or list
number of draws to take from the distribution
Returns
----------
- Random draws from the distribution
"""
return loc + scale*Skewt.rvs(shape, skewness, nsims)
@staticmethod
def first_order_score(y, mean, scale, shape, skewness):
""" GAS Skew t Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Skew t distribution
scale : float
scale parameter for the Skew t distribution
shape : float
tail thickness parameter for the Skew t distribution
skewness : float
skewness parameter for the Skew t distribution
Returns
----------
- Score of the Skew t family
"""
m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
mean = mean + (skewness - (1.0/skewness))*scale*m1
if (y-mean)>=0:
return ((shape+1)/shape)*(y-mean)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
else:
return ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
@staticmethod
def rvs(df, gamma, n):
""" Generates random variables from a Skew t distribution
Parameters
----------
df : float
degrees of freedom parameter
gamma : float
skewness parameter
n : int or list
Number of simulations to perform; if list input, produces array
"""
if type(n) == list:
u = np.random.uniform(size=n[0]*n[1])
result = Skewt.ppf(q=u, df=df, gamma=gamma)
result = np.split(result,n[0])
return np.array(result)
else:
u = np.random.uniform(size=n)
if isinstance(df, np.ndarray) or isinstance(gamma, np.ndarray):
return np.array([Skewt.ppf(q=np.array([u[i]]), df=df[i], gamma=gamma[i])[0] for i in range(n)])
else:
return Skewt.ppf(q=u, df=df, gamma=gamma)
@staticmethod
def logpdf_internal(x, df, loc=0.0, scale=1.0, gamma = 1.0):
result = np.zeros(x.shape[0])
result[x-loc<0] = np.log(2.0) - np.log(gamma + 1.0/gamma) + ss.t.logpdf(x=gamma*x[(x-loc) < 0], loc=loc[(x-loc) < 0]*gamma,df=df, scale=scale)
result[x-loc>=0] = np.log(2.0) - np.log(gamma + 1.0/gamma) + ss.t.logpdf(x=x[(x-loc) >= 0]/gamma, loc=loc[(x-loc) >= 0]/gamma,df=df, scale=scale)
return result
@staticmethod
def logpdf_internal_prior(x, df, loc=0.0, scale=1.0, gamma = 1.0):
if x-loc < 0.0:
return np.log(2.0) - np.log(gamma + 1.0/gamma) + ss.t.logpdf(x=gamma*x, loc=loc*gamma,df=df, scale=scale)
else:
return np.log(2.0) - np.log(gamma + 1.0/gamma) + ss.t.logpdf(x=x/gamma, loc=loc/gamma,df=df, scale=scale)
def logpdf(self, mu):
"""
Log PDF for Skew t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return self.logpdf_internal_prior(mu, df=self.df0, loc=self.loc0, scale=self.scale0, gamma=self.gamma0)
@staticmethod
def markov_blanket(y, mean, scale, shape, skewness):
""" Markov blanket for each likelihood term
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Skew t distribution
scale : float
scale parameter for the Skew t distribution
shape : float
tail thickness parameter for the Skew t distribution
skewness : float
skewness parameter for the Skew t distribution
Returns
----------
- Markov blanket of the Skew t family
"""
m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
mean = mean + (skewness - (1.0/skewness))*scale*m1
return Skewt.logpdf_internal(x=y, df=shape, loc=mean, gamma=skewness, scale=scale)
@staticmethod
def setup():
""" Returns the attributes of this family
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized
"""
name = "Skewt"
link = np.array
scale = True
shape = True
skewness = True
mean_transform = np.array
cythonized = True
return name, link, scale, shape, skewness, mean_transform, cythonized
@staticmethod
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Skew t distribution
scale : float
scale parameter for the Skew t distribution
shape : float
tail thickness parameter for the Skew t distribution
skewness : float
skewness parameter for the Skew t distribution
Returns
----------
- Negative loglikelihood of the Skew t family
"""
m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
mean = mean + (skewness - (1.0/skewness))*scale*m1
return -np.sum(Skewt.logpdf_internal(x=y, df=shape, loc=mean, gamma=skewness, scale=scale))
@staticmethod
def pdf_internal(x, df, loc=0.0, scale=1.0, gamma = 1.0):
"""
Raw PDF function for the Skew t distribution
"""
result = np.zeros(x.shape[0])
result[x<0] = 2.0/(gamma + 1.0/gamma)*stats.t.pdf(x=gamma*x[(x-loc) < 0], loc=loc[(x-loc) < 0]*gamma,df=df, scale=scale)
result[x>=0] = 2.0/(gamma + 1.0/gamma)*stats.t.pdf(x=x[(x-loc) >= 0]/gamma, loc=loc[(x-loc) >= 0]/gamma,df=df, scale=scale)
return result
def pdf(self, mu):
"""
PDF for Skew t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
"""
if self.transform is not None:
mu = self.transform(mu)
return self.pdf_internal(mu, df=self.df0, loc=self.loc0, scale=self.scale0, gamma=self.gamma0)
@staticmethod
def reg_score_function(X, y, mean, scale, shape, skewness):
""" GAS Skew t Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the Skew t distribution
scale : float
scale parameter for the Skew t distribution
shape : float
tail thickness parameter for the Skew t distribution
skewness : float
skewness parameter for the Skew t distribution
Returns
----------
- Score of the Skew t family
"""
m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
mean = mean + (skewness - (1.0/skewness))*scale*m1
if (y-mean)>=0:
return ((shape+1)/shape)*((y-mean)*X)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
else:
return ((shape+1)/shape)*((y-mean)*X)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
@staticmethod
def second_order_score(y, mean, scale, shape, skewness):
""" GAS Skew t Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Skew t distribution
scale : float
scale parameter for the Skew t distribution
shape : float
tail thickness parameter for the Skew t distribution
skewness : float
skewness parameter for the Skew t distribution
Returns
----------
- Adjusted score of the Skew t family
"""
m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
mean = mean + (skewness - (1.0/skewness))*scale*m1
if (y-mean)>=0:
return ((shape+1)/shape)*(y-mean)/(np.power(skewness*scale,2) + (
|
np.power(y-mean,2)
|
numpy.power
|
import sys
from StringIO import StringIO
from collections import OrderedDict
from itertools import chain
import numpy
import networkx as nx
from zope.interface import implements
# pylint: disable-msg=E0611,F0401
from openmdao.main.mpiwrap import MPI, MPI_info, mpiprint, PETSc
from openmdao.main.exceptions import RunStopped
from openmdao.main.finite_difference import FiniteDifference, DirectionalFD
from openmdao.main.linearsolver import ScipyGMRES, PETSc_KSP, LinearGS
from openmdao.main.mp_support import has_interface
from openmdao.main.interfaces import IDriver, IAssembly, IImplicitComponent, \
ISolver, IPseudoComp, IComponent, ISystem
from openmdao.main.vecwrapper import VecWrapper, InputVecWrapper, DataTransfer, \
idx_merge, petsc_linspace, _filter, _filter_subs, \
_filter_flat, _filter_ignored
from openmdao.main.depgraph import break_cycles, get_node_boundary, gsort, \
collapse_nodes, simple_node_iter
from openmdao.main.derivatives import applyJ, applyJT
from openmdao.util.graph import base_var
class System(object):
implements(ISystem)
def __init__(self, scope, graph, nodes, name):
self.name = str(name)
self.node = name
self.scope = scope
self._nodes = nodes
self.variables = OrderedDict() # dict of all vars owned by this System (flat and non-flat)
self.flat_vars = OrderedDict() # all vars used in vectors, whether they add to vector size or not
self.noflat_vars = OrderedDict() # all vars that are not flattenable to float arrays (so are not part of vectors)
self.vector_vars = OrderedDict() # all vars that contribute to the size of vectors
self._inputs = None
self._outputs = None
self._states = None
self._residuals = None
self._reduced_graph = graph.full_subgraph(nodes)
self._mapped_resids = {}
self._out_nodes = []
# find our output nodes (outputs from our System and any child Systems)
for node in nodes:
if node in graph:
for succ in graph.successors(node):
if succ not in self._out_nodes:
self._out_nodes.append(succ)
if hasattr(self, '_comp') and \
IImplicitComponent.providedBy(self._comp):
states = set(['.'.join((self.name,s))
for s in self._comp.list_states()])
else:
states = ()
pure_outs = [out for out in self._out_nodes if out not in states]
all_outs = set(nodes)
all_outs.update(pure_outs)
# get our input nodes from the depgraph
ins, _ = get_node_boundary(graph, all_outs)
self._in_nodes = []
for i in ins:
if 'comp' not in graph.node[i]:
self._in_nodes.append(i)
elif i in self.scope.name2collapsed and self.scope.name2collapsed[i] in graph:
n = self.scope.name2collapsed[i]
if i != self.scope.name2collapsed[i] and n not in self._in_nodes:
self._in_nodes.append(n)
self._combined_graph = graph.subgraph(list(all_outs)+list(self._in_nodes))
self._in_nodes = sorted(self._in_nodes)
self._out_nodes = sorted(self._out_nodes)
self.mpi = MPI_info()
self.mpi.requested_cpus = None
self.vec = {}
self.app_ordering = None
self.scatter_full = None
self.scatter_partial = None
# Derivatives stuff
self.mode = None
self.sol_vec = None
self.rhs_vec = None
self.ln_solver = None
self.fd_solver = None
self.dfd_solver = None
self.sol_buf = None
self.rhs_buf = None
self._parent_system = None
self.complex_step = False
def __getitem__(self, key):
"""A convenience method to allow easy access to descendant
Systems, either by name or by index.
"""
for i, sub in enumerate(self.subsystems()):
if key == i or key == sub.name:
return sub
if isinstance(key, basestring):
for sub in self.subsystems():
s = sub[key]
if s:
return s
return None
def is_differentiable(self):
"""Return True if analytical derivatives can be
computed for this System.
"""
return True
def pre_run(self):
""" Runs at assembly execution"""
pass
def subsystems(self, local=False):
if local:
return self.local_subsystems()
return self.all_subsystems()
def local_subsystems(self):
return ()
def all_subsystems(self):
return ()
def list_subsystems(self, local=False):
"""Returns the names of our subsystems."""
return [s.name for s in self.subsystems(local)]
def create_app_ordering(self):
"""Creates a PETSc application ordering."""
rank = self.mpi.rank
start = numpy.sum(self.local_var_sizes[:rank])
end = numpy.sum(self.local_var_sizes[:rank+1])
petsc_idxs = petsc_linspace(start, end)
app_idxs = []
for ivar in xrange(len(self.vector_vars)):
start = numpy.sum(self.local_var_sizes[:, :ivar]) + \
numpy.sum(self.local_var_sizes[:rank, ivar])
end = start + self.local_var_sizes[rank, ivar]
app_idxs.append(petsc_linspace(start, end))
if app_idxs:
app_idxs = numpy.concatenate(app_idxs)
app_ind_set = PETSc.IS().createGeneral(app_idxs, comm=self.mpi.comm)
petsc_ind_set = PETSc.IS().createGeneral(petsc_idxs, comm=self.mpi.comm)
return PETSc.AO().createBasic(app_ind_set, petsc_ind_set,
comm=self.mpi.comm)
def get_combined_J(self, J):
"""
Take a J dict that's distributed, i.e., has different values
across different MPI processes, and return a dict that
contains all of the values from all of the processes. If
values are duplicated, use the value from the lowest rank
process. Note that J has a nested dict structure.
"""
comm = self.mpi.comm
myrank = comm.rank
tups = []
# gather a list of tuples for J
for param, dct in J.items():
for output, value in dct.items():
tups.append((param, output))
dist_tups = comm.gather(tups, root=0)
tupdict = {}
if myrank == 0:
for rank, tups in enumerate(dist_tups):
for tup in tups:
if not tup in tupdict:
tupdict[tup] = rank
#get rid of tups from the root proc before bcast
for tup, rank in tupdict.items():
if rank == 0:
del tupdict[tup]
tupdict = comm.bcast(tupdict, root=0)
if myrank == 0:
for (param, output), rank in tupdict.items():
J[param][output] = comm.recv(source=rank, tag=0)
else:
for (param, output), rank in tupdict.items():
if rank == myrank:
comm.send(J[param][output], dest=0, tag=0)
# FIXME: rework some of this using knowledge of local_var_sizes in order
# to avoid any unnecessary data passing
# return the combined dict
return comm.bcast(J, root=0)
def _get_owned_args(self):
args = set()
for sub in self.simple_subsystems():
for arg in sub._in_nodes:
if arg in self.variables and \
(arg not in sub.variables or sub is self):
args.add(arg)
# ensure that args are in same order that they appear in
# variables
return [a for a in self.variables.keys() if a in args]
def get(self, name):
return self.vec['u'][name]
def clear_dp(self):
""" Recusively sets the dp vector to zero."""
self.vec['dp'].array[:] = 0.0
for system in self.local_subsystems():
system.clear_dp()
def _all_comp_nodes(self, local=False):
"""Return a set of comps for this system and all subsystems."""
comps = set()
for s in self.subsystems(local=local):
comps.update(s._all_comp_nodes(local=local))
return comps
def list_inputs(self):
"""Returns names of input variables from this System and all of its
children.
"""
if self._inputs is None:
inputs = set()
is_opaque = isinstance(self, OpaqueSystem)
for system in self.simple_subsystems():
comps = self._all_comp_nodes()
for tup in system._in_nodes:
# need this to prevent paramgroup inputs on same comp to be
# counted more than once
seen = set()
for dest in tup[1]:
comp = dest.split('.', 1)[0]
if comp in comps and comp not in seen:
inputs.add(dest)
# Since Opaque systems do finite difference on the
# full param groups, we should only include one input
# from each group.
if is_opaque:
seen.add(comp)
self._inputs = _filter(self.scope, inputs)
return self._inputs
def list_states(self):
"""Returns names of states (not collapsed edges) from this System and
all of its children.
"""
if self._states is None:
states = set()
for system in self.simple_subsystems():
try:
if system._comp.eval_only is False:
states.update(['.'.join((system.name,s))
for s in system._comp.list_states()])
except AttributeError:
pass
top = self.scope
states = [i for i in states if top.name2collapsed[i] in top._system.vector_vars
and not top._system.vector_vars[top.name2collapsed[i]].get('deriv_ignore')]
self._states = states
return self._states
def list_outputs(self):
"""Returns names of output variables (not collapsed edges)
from this System and all of its children. This only lists
outputs that are relevant to derivatives calculations.
"""
if self._outputs is None:
outputs = []
for system in self.simple_subsystems():
states = set()
try:
states.update(['.'.join((system.name,s))
for s in system._comp.list_states()])
except AttributeError:
pass
out_nodes = [node for node in system._out_nodes \
if node not in self._mapped_resids]
comps = self._all_comp_nodes()
for src, _ in out_nodes:
cname, _, vname = src.partition('.')
if cname in comps and src not in states:
outputs.append(src)
self._outputs = _filter(self.scope, outputs)
return self._outputs
def list_residuals(self):
"""Returns names of all residuals.
"""
if self._residuals is None:
outputs = []
for system in self.simple_subsystems():
try:
outputs.extend(['.'.join((system.name, s))
for s in system._comp.list_residuals()
if system._comp.eval_only is False])
except AttributeError:
pass
outputs.extend([n for n, m in self._mapped_resids.keys()])
self._residuals = outputs
return self._residuals
def get_size(self, names):
"""Return the combined size of the variables
corresponding to the given names. If a given
variable does not exist locally, the size will
be taken from the lowest rank process that does
contain that variable.
"""
if isinstance(names, basestring):
names = [names]
uvec = self.scope._system.vec['u']
varmeta = self.scope._var_meta
var_sizes = self.scope._system.local_var_sizes
varkeys = self.scope._system.vector_vars.keys()
collnames = self.scope.name2collapsed
size = 0
for name in names:
if isinstance(name, tuple):
name = name[0]
if name in uvec:
size += uvec[name].size
elif collnames[name] in varkeys:
idx = varkeys.index(collnames[name])
for proc in range(self.mpi.size):
if var_sizes[proc, idx] > 0:
size += var_sizes[proc, idx]
break
else:
size += varmeta[name]['size']
return size
def set_ordering(self, ordering, opaque_map):
pass
def is_active(self):
return MPI is None or self.mpi.comm != MPI.COMM_NULL
def get_req_cpus(self):
return self.mpi.requested_cpus
def setup_variables(self, resid_state_map=None):
self.variables = OrderedDict()
if resid_state_map is None:
resid_state_map = {}
variables = {}
for sub in self.local_subsystems():
if not isinstance(sub, ParamSystem):
sub.setup_variables(resid_state_map)
variables.update(sub.variables)
for sub in self.local_subsystems():
if isinstance(sub, ParamSystem):
sub.setup_variables(variables, resid_state_map)
# now loop through a final time to keep order of all subsystems the same
# as local_subsystems()
for sub in self.local_subsystems():
self.variables.update(sub.variables)
self._create_var_dicts(resid_state_map)
def _create_var_dicts(self, resid_state_map):
# now figure out all of the inputs we 'own'
self._owned_args = self._get_owned_args()
# split up vars into 3 categories:
# 1) flattenable vars that add to the size of the vectors
# 2) flattenable vars that don't add to the size of the vectors because they
# are slices of other vars already in the vectors
# 3) non-flattenable vars
# first, get all flattenable variables
for name in _filter_flat(self.scope, self.variables.keys()):
self.flat_vars[name] = self.variables[name]
# now get all flattenable vars that add to vector size
self.vector_vars = self._get_vector_vars(self.flat_vars)
for name, info in self.variables.items():
if name not in self.flat_vars:
self.noflat_vars[name] = info
def setup_sizes(self):
"""Given a dict of variables, set the sizes for
those that are local.
"""
varmeta = self.scope._var_meta
comm = self.mpi.comm
if not self.is_active():
self.local_var_sizes = numpy.zeros((0,0), int)
self.input_sizes = numpy.zeros(0, int)
return
size = self.mpi.size
rank = self.mpi.rank
# pass the call down to any subdrivers/subsystems
# and subassemblies.
for sub in self.local_subsystems():
sub.setup_sizes()
# create an (nproc x numvars) var size vector containing
# local sizes across all processes in our comm
self.local_var_sizes = numpy.zeros((size, len(self.vector_vars)), int)
for i, (name, var) in enumerate(self.vector_vars.items()):
self.local_var_sizes[rank, i] = var['size']
# collect local var sizes from all of the processes in our comm
# these sizes will be the same in all processes except in cases
# where a variable belongs to a multiprocessor component. In that
# case, the part of the component that runs in a given process will
# only have a slice of each of the component's variables.
if MPI:
comm.Allgather(self.local_var_sizes[rank,:],
self.local_var_sizes)
# create a (1 x nproc) vector for the sizes of all of our
# local inputs
self.input_sizes = numpy.zeros(size, int)
for arg in _filter_flat(self.scope, self._owned_args):
self.input_sizes[rank] += varmeta[arg]['size']
if MPI:
comm.Allgather(self.input_sizes[rank], self.input_sizes)
# create an arg_idx dict to keep track of indices of
# inputs
# TODO: determine how we want the user to specify indices
# for distributed inputs...
self.arg_idx = OrderedDict()
for name in _filter_flat(self.scope, self._owned_args):
# FIXME: this needs to use the actual indices for this
# process' version of the arg once we have distributed
# components...
#flat_idx = varmeta[name].get('flat_idx')
#if flat_idx and varmeta[name]['basevar'] in varmeta: # var is an array index into a basevar
# self.arg_idx[name] = to_indices(flat_idx, self.scope.get(varmeta[name]['basevar']))
#else:
self.arg_idx[name] = numpy.array(range(varmeta[name]['size']), 'i')
def setup_vectors(self, arrays=None, state_resid_map=None):
"""Creates vector wrapper objects to manage local and
distributed vectors need to solve the distributed system.
"""
if not self.is_active():
return
rank = self.mpi.rank
if arrays is None: # we're the top level System in our Assembly
arrays = {}
# create top level vectors
size = numpy.sum(self.local_var_sizes[rank, :])
for name in ['u', 'f', 'du', 'df']:
arrays[name] = numpy.zeros(size)
for name in ['u', 'f', 'du', 'df']:
self.vec[name] = VecWrapper(self, arrays[name],
name='.'.join((self.name, name)))
insize = self.input_sizes[rank]
for name in ['p', 'dp']:
self.vec[name] = InputVecWrapper(self, numpy.zeros(insize),
name='.'.join((self.name, name)))
start, end = 0, 0
for sub in self.local_subsystems():
sz = numpy.sum(sub.local_var_sizes[sub.mpi.rank, :])
end += sz
if end-start > arrays['u'][start:end].size:
msg = "size mismatch: passing [%d,%d] view of size %d array from %s to %s" % \
(start,end,arrays['u'][start:end].size,self.name,sub.name)
dups = {}
for s in self.local_subsystems():
for k in s.vector_vars.keys():
dups.setdefault(k, set()).add(s.name)
multis = [(k,list(v)) for k,v in dups.items() if len(v) > 1]
if multis:
msg += " The following var nodes are duplicated in subsystems: "
for i, (v,s) in enumerate(multis):
msg += "%s duplicated in %s" % (v,s)
if i:
msg += ", "
raise RuntimeError(msg)
subarrays = {}
for n in ('u', 'f', 'du', 'df'):
subarrays[n] = arrays[n][start:end]
sub.setup_vectors(subarrays)
start += sz
return self.vec
def scatter(self, srcvecname, destvecname, subsystem=None):
""" Perform data transfer (partial or full scatter or
send/receive for data that isn't flattenable to a
float array.
"""
if subsystem is None:
scatter = self.scatter_full
else:
scatter = subsystem.scatter_partial
if scatter is not None:
srcvec = self.vec[srcvecname]
destvec = self.vec[destvecname]
scatter(self, srcvec, destvec)
if destvecname == 'p':
if self.complex_step is True:
scatter(self, self.vec['du'], self.vec['dp'],
complex_step = True)
if scatter is self.scatter_full:
destvec.set_to_scope(self.scope)
if self.complex_step is True:
self.vec['dp'].set_to_scope_complex(self.scope)
else:
if subsystem._in_nodes:
destvec.set_to_scope(self.scope, subsystem._in_nodes)
if self.complex_step is True:
self.vec['dp'].set_to_scope_complex(self.scope,
subsystem._in_nodes)
def dump(self, nest=0, stream=sys.stdout, verbose=False):
"""Prints out a textual representation of the collapsed
execution graph (with groups of component nodes collapsed
into Systems). It shows which
components run on the current processor.
"""
if stream is None:
getval = True
stream = StringIO()
else:
getval = False
if not self.is_active():
return stream.getvalue() if getval else None
if MPI is None:
world_rank = 0
else:
world_rank = MPI.COMM_WORLD.rank
name_map = { 'SerialSystem': 'ser', 'ParallelSystem': 'par',
'SimpleSystem': 'simp', 'FiniteDiffDriverSystem': 'drv',
'TransparentDriverSystem': 'tdrv', 'OpaqueSystem': 'opaq',
'InVarSystem': 'invar', 'VarSystem': 'outvar',
'SolverSystem': 'slv', 'ParamSystem': 'param',
'AssemblySystem': 'asm', }
stream.write(" "*nest)
stream.write(str(self.name).replace(' ','').replace("'",""))
klass = self.__class__.__name__
stream.write(" [%s](req=%d)(rank=%d)(vsize=%d)(isize=%d)\n" %
(name_map.get(klass, klass.lower()[:3]),
self.get_req_cpus(),
world_rank,
self.vec['u'].array.size,
self.input_sizes[self.mpi.rank]))
for v, (arr, start) in self.vec['u']._info.items():
if verbose or v not in self.vec['u']._subviews:
stream.write(" "*(nest+2))
if v in self.vec['p']:
stream.write("u (%s) p (%s): %s\n" %
(list(self.vec['u'].bounds([v])),
list(self.vec['p'].bounds([v])), v))
else:
stream.write("u (%s): %s\n" % (list(self.vec['u'].bounds([v])), v))
for v, (arr, start) in self.vec['p']._info.items():
if v not in self.vec['u'] and (verbose or v not in self.vec['p']._subviews):
stream.write(" "*(nest+2))
stream.write(" p (%s): %s\n" %
(list(self.vec['p'].bounds([v])), v))
if self.scatter_partial:
noflats = self.scatter_partial.noflat_vars
elif self.scatter_full:
noflats = self.scatter_full.noflat_vars
else:
noflats = ()
if noflats:
stream.write(' '*(nest+2) + "= noflats =\n")
for src, dest in noflats:
stream.write(" "*(nest+2))
stream.write("%s --> %s\n" % (src, dest))
stream.write(" "*(nest+2))
stream.write("_in_nodes: %s\n" % self._in_nodes)
stream.write(" "*(nest+2))
stream.write("_out_nodes: %s\n" % self._out_nodes)
stream.write(" "*(nest+2))
stream.write("list_inputs(): %s\n" % self.list_inputs())
stream.write(" "*(nest+2))
stream.write("list_outputs(): %s\n" % self.list_outputs())
stream.write(" "*(nest+2))
stream.write("list_states(): %s\n" % self.list_states())
stream.write(" "*(nest+2))
stream.write("list_residuals(): %s\n" % self.list_residuals())
nest += 4
if isinstance(self, OpaqueSystem):
self._inner_system.dump(nest, stream)
elif isinstance(self, AssemblySystem):
self._comp._system.dump(nest, stream)
else:
for sub in self.local_subsystems():
sub.dump(nest, stream)
return stream.getvalue() if getval else None
def _get_vector_vars(self, vardict):
"""Return vector_vars, which are vars that actually add to the
size of the vectors (as opposed to subvars of vars that are in
the vector, which don't add anything to the vector but just
use a subview of the view corresponding to their base var)
"""
keep_srcs = set(_filter_subs([n[0] for n in vardict]))
return OrderedDict([(k,v) for k,v in vardict.items() if k[0] in keep_srcs])
def set_options(self, mode, options):
""" Sets all user-configurable options for this system and all
subsystems. """
self.mode = mode
self.options = options
if mode in ('forward', 'fd'):
self.sol_vec = self.vec['du']
self.rhs_vec = self.vec['df']
elif mode == 'adjoint':
self.sol_vec = self.vec['df']
self.rhs_vec = self.vec['du']
else:
raise RuntimeError("invalid mode. must be 'forward' or 'adjoint' but value is '%s'" % mode)
for subsystem in self.local_subsystems():
subsystem.set_options(mode, options)
# ------- derivative stuff -----------
def initialize_gradient_solver(self):
""" Initialize the solver that will be used to calculate the
gradient. """
if self.ln_solver is None:
solver_choice = self.options.lin_solver
# scipy_gmres not supported in MPI, so swap with
# petsc KSP.
if MPI and solver_choice=='scipy_gmres':
solver_choice = 'petsc_ksp'
msg = "scipy_gmres optimizer not supported in MPI. " + \
"Using petsc_ksp instead."
self.options.parent._logger.warning(msg)
if solver_choice == 'scipy_gmres':
self.ln_solver = ScipyGMRES(self)
elif solver_choice == 'petsc_ksp':
self.ln_solver = PETSc_KSP(self)
elif solver_choice == 'linear_gs':
self.ln_solver = LinearGS(self)
def linearize(self):
""" Linearize local subsystems. """
for subsystem in self.local_subsystems():
subsystem.linearize()
def set_complex_step(self, complex_step=False):
""" Toggles complex_step plumbing for this system and all
local subsystems.
"""
self.complex_step = complex_step
for subsystem in self.local_subsystems():
subsystem.set_complex_step(complex_step)
def calc_gradient(self, inputs, outputs, mode='auto', options=None,
iterbase='', return_format='array'):
""" Return the gradient for this system. """
if options.force_fd or mode == 'fd':
self.set_options('fd', options)
self.vec['df'].array[:] = 0.0
self.vec['du'].array[:] = 0.0
self.clear_dp()
return self.solve_fd(inputs, outputs, iterbase, return_format)
# Mode Precedence
# -- 1. Direct call argument
# -- 2. Gradient Options
# -- 3. Auto determination (when implemented)
if mode == 'auto':
# Automatic determination of mode
if options.derivative_direction == 'auto':
num_input = self.get_size(inputs)
num_output = self.get_size(outputs)
if num_input > num_output:
mode = 'adjoint'
else:
mode = 'forward'
else:
mode = options.derivative_direction
self.set_options(mode, options)
self.initialize_gradient_solver()
self.linearize()
# Clean out all arrays.
self.vec['df'].array[:] = 0.0
self.vec['du'].array[:] = 0.0
self.clear_dp()
J = self.ln_solver.calc_gradient(inputs, outputs, return_format)
self.sol_vec.array[:] = 0.0
return J
def solve_fd(self, inputs, outputs, iterbase='', return_format='array'):
"""Finite difference solve."""
if self.fd_solver is None:
self.fd_solver = FiniteDifference(self, inputs, outputs,
return_format)
return self.fd_solver.solve(iterbase=iterbase)
def calc_newton_direction(self, options=None, iterbase=''):
""" Solves for the new state in Newton's method and leaves it in the
df vector.
"""
self.set_options('forward', options)
self.vec['du'].array[:] = 0.0
self.vec['df'].array[:] = 0.0
self.vec['dp'].array[:] = 0.0
self.initialize_gradient_solver()
self.linearize()
#print 'Newton Direction', self.vec['f'].array[:]
self.vec['df'].array[:] = -self.ln_solver.solve(self.vec['f'].array)
#print 'Newton Solution', self.vec['df'].array[:]
def solve_linear(self, options=None):
""" Single linear solve solution applied to whatever input is sitting
in the RHS vector.
"""
if numpy.linalg.norm(self.rhs_vec.array) < 1e-15:
self.sol_vec.array[:] = 0.0
return self.sol_vec.array
if options is not None:
self.set_options(self.mode, options)
self.initialize_gradient_solver()
""" Solve Jacobian, df |-> du [fwd] or du |-> df [rev] """
self.rhs_buf[:] = self.rhs_vec.array[:]
self.sol_buf[:] = self.sol_vec.array[:]
self.sol_buf = self.ln_solver.solve(self.rhs_buf)
self.sol_vec.array[:] = self.sol_buf[:]
def _compute_derivatives(self, vname, ind):
""" Solves derivatives of system (direct/adjoint).
ind must be a global petsc index.
"""
self.rhs_vec.array[:] = 0.0
self.sol_vec.array[:] = 0.0
self.vec['dp'].array[:] = 0.0
varkeys = self.vector_vars.keys()
ivar = varkeys.index(vname)
if self.local_var_sizes[self.mpi.rank, ivar] > 0:
ind += numpy.sum(self.local_var_sizes[:, :ivar])
ind +=
|
numpy.sum(self.local_var_sizes[:self.mpi.rank, ivar])
|
numpy.sum
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fig 7: Phase spiral in z-vz DF slice.
Created: September 2021
Author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
from os.path import exists
from matplotlib.colors import SymLogNorm, LinearSegmentedColormap
sys.path.append("../src")
from constants import kpc
from ml import load_flow_ensemble, calc_DF_ensemble
def create_cmap(clist):
"""Create a pyplot colormap from given list of colours."""
cmap = LinearSegmentedColormap.from_list('mycmap', clist)
return cmap
# grid extents
zlim = 2.5 * kpc
vlim = 80000
# check if datafile exists, otherwise create and save
datafile = "fig5_data.npz"
if not exists(datafile):
# set up coordinate arrays
N_px = 256
ones =
|
np.ones((N_px, N_px))
|
numpy.ones
|
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import StandardScaler
from multiprocessing import Pool
import matplotlib.pyplot as plt
import pickle
def sample_and_split(raw_data, train_percentage=10, dev_percentage=10):
np.random.shuffle(raw_data)
n_trn_data = np.int((train_percentage / 100) * raw_data.shape[0])
n_dev_data = np.int((dev_percentage / 100) * raw_data.shape[0])
x_trn_raw = raw_data[:n_trn_data, :-1]
x_dev_raw = raw_data[n_trn_data:n_trn_data + n_dev_data, :-1]
y_trn = raw_data[:n_trn_data, -1]
y_dev = raw_data[n_trn_data:n_trn_data + n_dev_data, -1]
print(f"Training with {x_trn_raw.shape[0]}({100*x_trn_raw.shape[0]/raw_data.shape[0]:.1f}%) points")
print(f"Validating with {x_dev_raw.shape[0]}({100*x_dev_raw.shape[0]/raw_data.shape[0]:.1f}%) points")
scaler = StandardScaler()
scaler.fit(x_trn_raw)
x_trn_scaled = scaler.transform(x_trn_raw)
x_dev_scaled = scaler.transform(x_dev_raw)
return x_trn_scaled, x_dev_scaled, y_trn, y_dev
class Trainer(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __call__(self, learner):
# print(f"Fitting learner {learner}")
learner.fit(self.x, self.y)
return learner
def sample_train(reg, raw_data, trn_p, dev_p):
print(f"Resamping...")
learners = [SGDClassifier(loss="log", penalty="l2", max_iter=500, alpha=r) for r in reg]
x_trn_scaled, x_dev_scaled, y_trn, y_dev = sample_and_split(raw_data, train_percentage=trn_p,
dev_percentage=dev_p)
with Pool(4) as p:
learners = p.map(Trainer(x_trn_scaled, y_trn), learners)
training_auc = [roc_auc_score(y_trn, l.predict_proba(x_trn_scaled)[:, 1]) for l in learners]
validating_auc = [roc_auc_score(y_dev, l.predict_proba(x_dev_scaled)[:, 1]) for l in learners]
return training_auc, validating_auc
def plot_linear(train_pickle, dev_pickle, name="2a"):
with open(train_pickle, "rb") as f:
training_auc = pickle.load(f)
with open(dev_pickle, "rb") as f:
validating_auc = pickle.load(f)
reg = np.logspace(-4, 0, 20)
reg = reg.round(6)
plt.figure()
plt.semilogx(reg, np.mean(training_auc, axis=0), marker="s", label="Training AUC", color="blue")
plt.semilogx(reg, np.mean(validating_auc, axis=0), marker="x", label="Validating AUC", color="red")
plt.fill_between(reg,
|
np.min(training_auc, axis=0)
|
numpy.min
|
# standard libraries
import functools
import math
import sys
# third party libraries
import numpy
import numpy.typing
import scipy
import scipy.interpolate
import typing
# local libraries
# None
ShapeType = typing.Sequence[int]
Shape2DType = typing.Tuple[int, int]
_ImageDataType = numpy.typing.NDArray[typing.Any]
_RGBAImageDataType = numpy.typing.NDArray[typing.Any]
_RGBImageDataType = numpy.typing.NDArray[typing.Any]
_RGBA8ImageDataType = numpy.typing.NDArray[typing.Any]
_U8ImageDataType = numpy.typing.NDArray[typing.Any]
def scale_multidimensional(image: _ImageDataType, scaled_size: ShapeType) -> _ImageDataType:
"""
Return image scaled to scaled_size. scaled_size should be a sequence
with the same length as image.
"""
# we make a list of slice objects like [0:image_x-1:scaled_size_x*1j]
# this will give us scaled_size_x equal points between 0 and image_x-1
slices = [slice(0, x-1, y*1j) for x, y in zip(image.shape, scaled_size)]
# we pass slices into ogrid, to gives us vectors for each dimension
# ogrid returns a list of floating numbers if we use complex so we have
# to convert to int. np.rint rounds to nearest for us, but doesn't cast to int!
coords = [numpy.rint(x).astype(int) for x in numpy.ogrid[slices]]
# coords is now, for an array image of dimension n, a list of n 1d arrays we the
# coords we want to take from image:
return typing.cast(_ImageDataType, image[coords])
# size is c-indexed (height, width)
def scaled(image: _ImageDataType, size: ShapeType, method: str = 'linear') -> _ImageDataType:
size = tuple(size)
if method=='nearest':
return scale_multidimensional(image, size)
assert numpy.ndim(image) in (2,3)
if numpy.ndim(image) == 2:
if method == 'cubic':
iy = numpy.linspace(0, image.shape[0]-1, size[0])
ix = numpy.linspace(0, image.shape[1]-1, size[1])
f = scipy.interpolate.RectBivariateSpline(numpy.arange(image.shape[0]), numpy.arange(image.shape[1]), image, ky=3, kx=3)
return typing.cast(_ImageDataType, f(iy, ix))
elif method == 'linear':
iy = numpy.linspace(0, image.shape[0]-1, size[0])
ix = numpy.linspace(0, image.shape[1]-1, size[1])
f = scipy.interpolate.RectBivariateSpline(numpy.arange(image.shape[0]), numpy.arange(image.shape[1]), image, ky=1, kx=1)
return typing.cast(_ImageDataType, f(iy, ix))
else: # nearest
dst: numpy.typing.NDArray[typing.Any] = numpy.empty(size, image.dtype)
indices = numpy.indices(size)
indices[0] = ((image.shape[0]-1) * indices[0].astype(float) / size[0]).round()
indices[1] = ((image.shape[1]-1) * indices[1].astype(float) / size[1]).round()
dst[:, :] = image[(indices[0], indices[1])]
return dst
elif numpy.ndim(image) == 3:
assert image.shape[2] in (3,4) # rgb, rgba
dst_image: numpy.typing.NDArray[numpy.uint8] = numpy.empty(size + (image.shape[2],), numpy.uint8)
dst_image[:, :, 0] = scaled(image[:, :, 0], size, method=method)
dst_image[:, :, 1] = scaled(image[:, :, 1], size, method=method)
dst_image[:, :, 2] = scaled(image[:, :, 2], size, method=method)
if image.shape[2] == 4:
dst_image[:, :, 3] = scaled(image[:, :, 3], size, method=method)
return dst_image
raise Exception("Unable to scale image")
def rebin_1d(src: _ImageDataType, len: int, retained: typing.Optional[typing.Dict[str, typing.Any]] = None) -> _ImageDataType:
src_len = src.shape[0]
if len < src_len:
if retained is not None and (retained.get("src_len") != src_len or retained.get("len") != len):
retained.clear()
if retained is not None and "w" in retained:
w = retained["w"]
else:
ix, iy = numpy.meshgrid(numpy.linspace(0, src_len-1, src_len), numpy.linspace(0, len-1, len)) # type: ignore
# # create linear bins
# ss = numpy.linspace(0, float(src_len), len+1)
# # create some useful row and column values using meshgrid
# ix = ix.astype(numpy.int32)
# iy = iy.astype(numpy.int32)
# # basic idea here is to multiply low window by high window to get the window for each bin; then sum the transpose to do the actual binning
# # result is scaled to keep amplitude the same.
# w = numpy.maximum(numpy.minimum(ss[iy+1] - ix, 1.0), 0.0) * numpy.minimum(numpy.maximum(ix+1 - ss[iy], 0), 1.0)
# below is a faster version (which releases the GIL).
s1 = (iy+1) * float(src_len) / len - ix
s2 = s1[::-1, ::-1]
w = numpy.clip(s1, 0.0, 1.0) * numpy.clip(s2, 0.0, 1.0)
if retained is not None:
retained["src_len"] = src_len
retained["len"] = len
retained["w"] = w
weighted_src = w * src
# This ensures that nans are handled properly: Only propagate nans that fall within a bin (i.e. where weight != 0)
weighted_src[w==0] = 0
return typing.cast(_ImageDataType, numpy.sum(weighted_src, axis=1) * len / src_len)
else:
# linear
result: numpy.typing.NDArray[numpy.double] = numpy.empty((len, ), dtype=numpy.double)
index = (numpy.arange(len) * src_len / len).astype(numpy.int32)
result[:] = src[index]
return result
def get_dtype_view(array: numpy.typing.ArrayLike, dtype: numpy.typing.DTypeLike) -> _ImageDataType:
# this is useful for handling both numpy and h5py arrays
return numpy.array(array, copy=False).view(dtype)
def get_byte_view(rgba_image: _RGBAImageDataType) -> _RGBA8ImageDataType:
return get_dtype_view(rgba_image, numpy.uint8).reshape(rgba_image.shape + (-1, ))
def get_rgb_view(rgba_image: _RGBAImageDataType, byteorder: typing.Optional[str] = None) -> _RGBImageDataType:
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[2] == 4
if byteorder == 'little':
return typing.cast(_RGBImageDataType, bytes[..., :3]) # strip A off BGRA
else:
return typing.cast(_RGBImageDataType, bytes[..., 1:]) # strip A off ARGB
def get_red_view(rgba_image: _RGBAImageDataType, byteorder: typing.Optional[str] = None) -> _U8ImageDataType:
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[2] == 4
if byteorder == 'little':
return typing.cast(_U8ImageDataType, bytes[..., 2])
else:
return typing.cast(_U8ImageDataType, bytes[..., 1])
def get_green_view(rgba_image: _RGBAImageDataType, byteorder: typing.Optional[str] = None) -> _U8ImageDataType:
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[2] == 4
if byteorder == 'little':
return typing.cast(_U8ImageDataType, bytes[..., 1])
else:
return typing.cast(_U8ImageDataType, bytes[..., 2])
def get_blue_view(rgba_image: _RGBAImageDataType, byteorder: typing.Optional[str] = None) -> _U8ImageDataType:
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[2] == 4
if byteorder == 'little':
return typing.cast(_U8ImageDataType, bytes[..., 0])
else:
return typing.cast(_U8ImageDataType, bytes[..., 3])
def get_alpha_view(rgba_image: _RGBAImageDataType, byteorder: typing.Optional[str] = None) -> _U8ImageDataType:
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[2] == 4
if byteorder == 'little':
return typing.cast(_U8ImageDataType, bytes[..., 3])
else:
return typing.cast(_U8ImageDataType, bytes[..., 0])
def get_rgba_view_from_rgba_data(rgba_data: _RGBAImageDataType) -> _RGBA8ImageDataType:
return get_dtype_view(rgba_data, numpy.uint8).reshape(rgba_data.shape + (4,))
def get_rgba_data_from_rgba(rgba_image: _RGBA8ImageDataType) -> _RGBImageDataType:
return get_dtype_view(rgba_image, numpy.uint32).reshape(rgba_image.shape[:-1])
def dimensional_shape_from_shape_and_dtype(shape: ShapeType, dtype: numpy.typing.DTypeLike) -> typing.Optional[ShapeType]:
if shape is None or dtype is None:
return None
return shape[:-1] if dtype == numpy.uint8 and shape[-1] in (3,4) and len(shape) > 1 else shape
def dimensional_shape_from_data(data: _ImageDataType) -> typing.Optional[ShapeType]:
return dimensional_shape_from_shape_and_dtype(data.shape, data.dtype)
def is_shape_and_dtype_rgb(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
return dtype == numpy.uint8 and shape[-1] == 3 and len(shape) > 1
def is_data_rgb(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_rgb(data.shape, data.dtype)
def is_shape_and_dtype_rgba(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
return dtype == numpy.uint8 and shape[-1] == 4 and len(shape) > 1
def is_data_rgba(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_rgba(data.shape, data.dtype)
def is_shape_and_dtype_rgb_type(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
return is_shape_and_dtype_rgb(shape, dtype) or is_shape_and_dtype_rgba(shape, dtype)
def is_data_rgb_type(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_rgb_type(data.shape, data.dtype)
def is_shape_and_dtype_complex64(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
return dtype == numpy.complex64
def is_data_complex64(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_complex64(data.shape, data.dtype)
def is_shape_and_dtype_complex128(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
return dtype == numpy.complex128
def is_data_complex128(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_complex128(data.shape, data.dtype)
def is_shape_and_dtype_complex_type(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
return dtype == numpy.complex64 or dtype == numpy.complex128
def is_data_complex_type(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_complex_type(data.shape, data.dtype)
def is_shape_and_dtype_scalar_type(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
return not is_shape_and_dtype_rgb_type(shape, dtype) and not is_shape_and_dtype_complex_type(shape, dtype)
def is_data_scalar_type(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_scalar_type(data.shape, data.dtype)
def is_shape_and_dtype_bool(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
return dtype == bool and len(shape) > 1
def is_data_bool(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_bool(data.shape, data.dtype)
def is_shape_and_dtype_valid(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None:
return False
if is_shape_and_dtype_rgb_type(shape, dtype):
return len(shape) > 1 and functools.reduce(lambda x, y: x * y, shape[:-1]) > 0 # one extra dimension for rgb(a) values
return len(shape) > 0 and functools.reduce(lambda x, y: x * y, shape) > 0
def is_data_valid(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_valid(data.shape, data.dtype)
def is_shape_and_dtype_1d(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None or not is_shape_and_dtype_valid(shape, dtype):
return False
if is_shape_and_dtype_rgb(shape, dtype) or is_shape_and_dtype_rgba(shape, dtype):
return len(shape) == 2 # one extra dimension for rgb(a) values
return len(shape) == 1
def is_data_1d(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_1d(data.shape, data.dtype)
def is_shape_and_dtype_2d(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None or not is_shape_and_dtype_valid(shape, dtype):
return False
if is_shape_and_dtype_rgb(shape, dtype) or is_shape_and_dtype_rgba(shape, dtype):
return len(shape) == 3 # one extra dimension for rgb(a) values
return len(shape) == 2
def is_data_2d(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_2d(data.shape, data.dtype)
def is_shape_and_dtype_3d(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None or not is_shape_and_dtype_valid(shape, dtype):
return False
if is_shape_and_dtype_rgb(shape, dtype) or is_shape_and_dtype_rgba(shape, dtype):
return len(shape) == 4 # one extra dimension for rgb(a) values
return len(shape) == 3
def is_data_3d(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_3d(data.shape, data.dtype)
def is_shape_and_dtype_4d(shape: typing.Optional[ShapeType], dtype: numpy.typing.DTypeLike) -> bool:
if shape is None or dtype is None or not is_shape_and_dtype_valid(shape, dtype):
return False
if is_shape_and_dtype_rgb(shape, dtype) or is_shape_and_dtype_rgba(shape, dtype):
return len(shape) == 5 # one extra dimension for rgb(a) values
return len(shape) == 4
def is_data_4d(data: typing.Optional[_ImageDataType]) -> bool:
return data is not None and is_shape_and_dtype_4d(data.shape, data.dtype)
def scalar_from_array(array: _ImageDataType, normalize: bool = True) -> _ImageDataType:
if numpy.iscomplexobj(array): # type: ignore
# numpy.nextafter returns the next possible represented number after 0 in the direction of 1
# this prevents log from generating -inf from 0.0
# quick way to drop out bottom percent:
# samples = 2000, fraction=0.10
# numpy.log(numpy.sort(numpy.abs(numpy.random.choice(data.reshape(numpy.product(data.shape)), samples)))[samples*fraction])
# unfortunately, this needs to be integrated into the display calculation, not the conversion here.
# the annoying conversion to float64 is to prevent float32 + float64 returning a 0.0. argh.
# TODO: consider optimizing log(abs) to 0.5*log(re**2 + im**2)
return typing.cast(_ImageDataType, numpy.log(numpy.abs(array).astype(numpy.float64) + numpy.nextafter(0,1)))
return array
# data_range and display_limits are in data value units. both are option parameters.
# if display limits is specified, values out of range are mapped to the min/max colors.
# if display limits are not specified, data range can be passed to avoid calculating min/max again.
# if underlimit/overlimit are specified and display limits are specified, values out of the under/over
# limit percentage values are mapped to blue and red.
# may return a new array or a view on the existing array
def create_rgba_image_from_array(array: _ImageDataType, normalize: bool = True,
data_range: typing.Optional[typing.Tuple[float, float]] = None,
display_limits: typing.Optional[typing.Tuple[float, float]] = None,
underlimit: typing.Optional[float] = None, overlimit: typing.Optional[float] = None,
lookup: typing.Optional[_RGBAImageDataType] = None) -> _RGBAImageDataType:
assert numpy.ndim(array) in (1, 2, 3)
assert numpy.can_cast(array.dtype, numpy.double) # type: ignore
if
|
numpy.ndim(array)
|
numpy.ndim
|
#!/usr/bin/env python
# encoding: utf-8
"""
towerstruc.py
Created by <NAME> on 2012-01-20.
Copyright (c) NREL. All rights reserved.
HISTORY: 2012 created
-7/2014: R.D. Bugs found in the call to shellBucklingEurocode from towerwithFrame3DD. Fixed.
Also set_as_top added.
-10/2014: R.D. Merged back with some changes Andrew did on his end.
-12/2014: A.N. fixed some errors from the merge (redundant drag calc). pep8 compliance. removed several unneccesary variables and imports (including set_as_top)
- 6/2015: A.N. major rewrite. removed pBEAM. can add spring stiffness anywhere. can add mass anywhere.
can use different material props throughout.
- 7/2015 : R.D. modified to use commonse modules.
- 1/2018 : G.B. modified for easier use with other modules, reducing user input burden, and shifting more to commonse
"""
from __future__ import print_function
import numpy as np
from openmdao.api import ExplicitComponent, Group, Problem, IndepVarComp
from wisdem.commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag
from wisdem.commonse.environment import WindBase, WaveBase, LinearWaves, TowerSoil, PowerWind, LogWind
from wisdem.commonse.tube import CylindricalShellProperties
from wisdem.commonse.utilities import assembleI, unassembleI, nodal2sectional, interp_with_deriv
from wisdem.commonse import gravity, eps, NFREQ
from wisdem.commonse.vertical_cylinder import CylinderDiscretization, CylinderMass, CylinderFrame3DD
import wisdem.commonse.UtilizationSupplement as Util
def find_nearest(array,value):
return (np.abs(array-value)).argmin()
# -----------------
# Components
# -----------------
class MonopileFoundation(ExplicitComponent):
def initialize(self):
self.options.declare('nPoints')
self.options.declare('monopile')
def setup(self):
nPoints = self.options['nPoints']
self.add_input('tower_section_height', np.zeros(nPoints-1), units='m', desc='parameterized section heights along cylinder')
self.add_input('tower_outer_diameter', np.zeros(nPoints), units='m', desc='cylinder diameter at corresponding locations')
self.add_input('tower_wall_thickness', np.zeros(nPoints-1), units='m', desc='shell thickness at corresponding locations')
self.add_input('suctionpile_depth', 0.0, units='m', desc='depth of foundation in the soil')
self.add_input('suctionpile_depth_diam_ratio', 0.0, desc='ratio of sunction pile depth to mudline monopile diameter')
self.add_input('foundation_height', 0.0, units='m', desc='height of foundation (0.0 for land, -water_depth for fixed bottom)')
nadd = 1 if self.options['monopile'] else 0
self.add_output('section_height_out', np.zeros(nPoints-1+nadd), units='m', desc='parameterized section heights along cylinder')
self.add_output('outer_diameter_out', np.zeros(nPoints+nadd), units='m', desc='cylinder diameter at corresponding locations')
self.add_output('wall_thickness_out', np.zeros(nPoints-1+nadd), units='m', desc='shell thickness at corresponding locations')
self.add_output('foundation_height_out', 0.0, units='m', desc='height of suction pile bottom (0.0 for land, -water_depth-pile for fixed bottom)')
def compute(self, inputs, outputs):
if self.options['monopile']:
# Determine suction pile depth
pile = inputs['suctionpile_depth']
if pile == 0.0:
pile = inputs['suctionpile_depth_diam_ratio'] * inputs['tower_outer_diameter'][0]
# Gravity foundations will have 0 suction depth, but we will still add an extra point for consistency
pile = np.maximum(0.1, pile)
outputs['section_height_out'] = np.r_[pile, inputs['tower_section_height']]
outputs['outer_diameter_out'] = np.r_[inputs['tower_outer_diameter'][0], inputs['tower_outer_diameter']]
outputs['wall_thickness_out'] = np.r_[inputs['tower_wall_thickness'][0], inputs['tower_wall_thickness']]
outputs['foundation_height_out'] = inputs['foundation_height'] - pile
else:
outputs['section_height_out'] = inputs['tower_section_height']
outputs['outer_diameter_out'] = inputs['tower_outer_diameter']
outputs['wall_thickness_out'] = inputs['tower_wall_thickness']
outputs['foundation_height_out'] = inputs['foundation_height']
class TowerDiscretization(ExplicitComponent):
def initialize(self):
self.options.declare('nPoints')
def setup(self):
nPoints = self.options['nPoints']
self.add_input('hub_height', val=0.0, units='m', desc='diameter at tower base')
self.add_input('z_param', np.zeros(nPoints), units='m', desc='parameterized locations along tower, linear lofting between')
self.add_output('height_constraint', val=0.0, units='m', desc='mismatch between tower height and desired hub_height')
self.declare_partials('height_constraint', ['hub_height','z_param'])
def compute(self, inputs, outputs):
outputs['height_constraint'] = inputs['hub_height'] - inputs['z_param'][-1]
def compute_partials(self, inputs, J):
nPoints = self.options['nPoints']
J['height_constraint','hub_height'] = 1.
J['height_constraint','z_param'] = np.zeros(nPoints)
J['height_constraint','z_param'][-1] = -1.
class TowerMass(ExplicitComponent):
def initialize(self):
self.options.declare('nFull')
def setup(self):
nFull = self.options['nFull']
self.add_input('cylinder_mass', val=np.zeros(nFull-1), units='kg', desc='Total cylinder mass')
self.add_input('cylinder_cost', val=0.0, units='USD', desc='Total cylinder cost')
self.add_input('cylinder_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of cylinder')
self.add_input('cylinder_section_center_of_mass', val=np.zeros(nFull-1), units='m', desc='z position of center of mass of each can in the cylinder')
self.add_input('cylinder_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of cylinder about base [xx yy zz xy xz yz]')
self.add_input('transition_piece_height', 0.0, units='m', desc='point mass height of transition piece above water line')
self.add_input('transition_piece_mass', 0.0, units='kg', desc='point mass of transition piece')
self.add_input('gravity_foundation_mass', 0.0, units='kg', desc='extra mass of gravity foundation')
self.add_input('foundation_height', 0.0, units='m', desc='height of foundation (0.0 for land, -water_depth for fixed bottom)')
self.add_input('z_full', np.zeros(nFull), units='m', desc='parameterized locations along tower, linear lofting between')
self.add_output('tower_raw_cost', val=0.0, units='USD', desc='Total tower cost')
self.add_output('tower_mass', val=0.0, units='kg', desc='Total tower mass')
self.add_output('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
self.add_output('tower_section_center_of_mass', val=np.zeros(nFull-1), units='m', desc='z position of center of mass of each can in the tower')
self.add_output('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
self.add_output('monopile_mass', val=0.0, units='kg', desc='Mass of monopile from bottom of suction pile through transition piece')
self.add_output('monopile_cost', val=0.0, units='USD', desc='Total monopile cost')
self.add_output('monopile_length', val=0.0, units='m', desc='Length of monopile from bottom of suction pile through transition piece')
self.declare_partials('tower_raw_cost', 'cylinder_cost')
self.declare_partials('tower_mass', ['cylinder_mass','transition_piece_mass'])
self.declare_partials('tower_center_of_mass', 'cylinder_center_of_mass')
self.declare_partials('tower_section_center_of_mass', 'cylinder_section_center_of_mass')
self.declare_partials('tower_I_base', 'cylinder_I_base')
self.declare_partials('monopile_mass', ['cylinder_mass','z_full','transition_piece_height'])
self.declare_partials('monopile_cost', ['cylinder_mass','z_full','transition_piece_height','cylinder_cost'])
self.declare_partials('monopile_length', ['transition_piece_height','z_full'])
self.J = {}
def compute(self, inputs, outputs):
outputs['tower_raw_cost'] = inputs['cylinder_cost']
outputs['tower_mass'] = inputs['cylinder_mass'].sum()
outputs['tower_center_of_mass'] = ( (inputs['cylinder_center_of_mass']*outputs['tower_mass'] +
inputs['transition_piece_mass']*inputs['transition_piece_height'] +
inputs['gravity_foundation_mass']*inputs['foundation_height']) /
(outputs['tower_mass']+inputs['transition_piece_mass']+inputs['gravity_foundation_mass']) )
outputs['tower_section_center_of_mass'] = inputs['cylinder_section_center_of_mass']
outputs['tower_I_base'] = inputs['cylinder_I_base']
outputs['monopile_mass'],dydx,dydxp,dydyp = interp_with_deriv(inputs['transition_piece_height'],
inputs['z_full'],
np.r_[0.0, np.cumsum(inputs['cylinder_mass'])])
outputs['tower_mass'] -= outputs['monopile_mass']
outputs['monopile_cost'] = inputs['cylinder_cost']*outputs['monopile_mass']/inputs['cylinder_mass'].sum()
outputs['monopile_mass'] += inputs['transition_piece_mass'] + inputs['gravity_foundation_mass']
outputs['monopile_length'] = inputs['transition_piece_height'] - inputs['z_full'][0]
self.J = {}
self.J['monopile_mass', 'z_full'] = dydxp[0,:]
self.J['monopile_mass', 'cylinder_mass'] = dydyp[0,1:]
self.J['monopile_mass', 'transition_piece_height'] = dydx[0,0]
self.J['monopile_cost', 'z_full'] = inputs['cylinder_cost'] * self.J['monopile_mass', 'z_full'] / inputs['cylinder_mass'].sum()
self.J['monopile_cost', 'cylinder_cost'] = outputs['monopile_mass']/ inputs['cylinder_mass']
self.J['monopile_cost', 'cylinder_mass'] = inputs['cylinder_cost']*self.J['monopile_mass', 'cylinder_mass']/inputs['cylinder_mass'] - outputs['monopile_cost']/inputs['cylinder_mass']
self.J['monopile_cost', 'transition_piece_height'] = inputs['cylinder_cost'] * self.J['monopile_mass', 'transition_piece_height'] / inputs['cylinder_mass']
def compute_partials(self, inputs, J):
J['tower_mass','cylinder_mass'] = np.ones(len(inputs['cylinder_mass'])) - self.J['monopile_mass', 'cylinder_mass']
J['tower_mass','transition_piece_mass'] = 1.0
J['tower_mass', 'z_full'] = -self.J['monopile_mass', 'z_full']
J['tower_mass', 'transition_piece_height'] = -self.J['monopile_mass', 'transition_piece_height']
J['tower_raw_cost','cylinder_cost'] = 1.0
J['tower_center_of_mass','cylinder_center_of_mass'] = 1.0
J['tower_section_center_of_mass','cylinder_section_center_of_mass'] = np.eye(len(inputs['cylinder_section_center_of_mass']))
J['tower_I_base','cylinder_I_base'] = np.eye(len(inputs['cylinder_I_base']))
J['monopile_mass', 'z_full'] = self.J['monopile_mass', 'z_full']
J['monopile_mass', 'cylinder_mass'] = self.J['monopile_mass', 'cylinder_mass']
J['monopile_mass', 'transition_piece_height'] = self.J['monopile_mass', 'transition_piece_height']
J['monopile_mass', 'transition_piece_mass'] = 1.0
J['monopile_mass', 'gravity_foundation_mass'] = 1.0
J['monopile_cost', 'z_full'] = self.J['monopile_cost', 'z_full']
J['monopile_cost', 'cylinder_cost'] = self.J['monopile_cost', 'cylinder_cost']
J['monopile_cost', 'cylinder_mass'] = self.J['monopile_cost', 'cylinder_mass']
J['monopile_cost', 'transition_piece_height'] = self.J['monopile_cost', 'transition_piece_height']
J['monopile_length','transition_piece_height'] = 1.
J['monopile_length','z_full'] = np.zeros(inputs['z_full'].size)
J['monopile_length','z_full'][0] = -1.
class TurbineMass(ExplicitComponent):
def setup(self):
self.add_input('hub_height', val=0.0, units='m', desc='Hub-height')
self.add_input('rna_mass', val=0.0, units='kg', desc='Total tower mass')
self.add_input('rna_I', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of rna about tower top [xx yy zz xy xz yz]')
self.add_input('rna_cg', np.zeros((3,)), units='m', desc='xyz-location of rna cg relative to tower top')
self.add_input('tower_mass', val=0.0, units='kg', desc='Total tower mass (not including monopile)')
self.add_input('monopile_mass', val=0.0, units='kg', desc='Monopile mass')
self.add_input('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
self.add_input('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
self.add_output('turbine_mass', val=0.0, units='kg', desc='Total mass of tower+rna')
self.add_output('turbine_center_of_mass', val=np.zeros((3,)), units='m', desc='xyz-position of tower+rna center of mass')
self.add_output('turbine_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
# Derivatives
# self.declare_partials('*', '*', method='fd', form='central', step=1e-6)
def compute(self, inputs, outputs):
outputs['turbine_mass'] = inputs['rna_mass'] + inputs['tower_mass'] + inputs['monopile_mass']
cg_rna = inputs['rna_cg'] + np.array([0.0, 0.0, inputs['hub_height']])
cg_tower = np.array([0.0, 0.0, inputs['tower_center_of_mass']])
outputs['turbine_center_of_mass'] = (inputs['rna_mass']*cg_rna + inputs['tower_mass']*cg_tower) / outputs['turbine_mass']
R = cg_rna
I_tower = assembleI(inputs['tower_I_base'])
I_rna = assembleI(inputs['rna_I']) + inputs['rna_mass']*(np.dot(R, R)*np.eye(3) - np.outer(R, R))
outputs['turbine_I_base'] = unassembleI(I_tower + I_rna)
class TowerPreFrame(ExplicitComponent):
def initialize(self):
self.options.declare('nFull')
self.options.declare('nPoints')
self.options.declare('monopile', default=False)
def setup(self):
nPoints = self.options['nPoints']
nFull = self.options['nFull']
nRefine = int( (nFull-1)/(nPoints-1) )
self.add_input('z', np.zeros(nFull), units='m', desc='location along tower. start at bottom and go to top')
self.add_input('d', np.zeros(nFull), units='m', desc='diameter along tower')
# extra mass
self.add_input('mass', 0.0, units='kg', desc='added mass')
self.add_input('mI', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia about some point p [xx yy zz xy xz yz]')
self.add_input('mrho', np.zeros((3,)), units='m', desc='xyz-location of p relative to node')
self.add_input('transition_piece_mass', 0.0, units='kg', desc='point mass of transition piece')
self.add_input('gravity_foundation_mass', 0.0, units='kg', desc='point mass of transition piece')
self.add_input('transition_piece_height', 0.0, units='m', desc='height of transition piece above water line')
self.add_input('foundation_height', 0.0, units='m', desc='height of foundation (0.0 for land, -water_depth for fixed bottom)')
# point loads
self.add_input('rna_F', np.zeros((3,)), units='N', desc='rna force')
self.add_input('rna_M', np.zeros((3,)), units='N*m', desc='rna moment')
# Monopile handling
self.add_input('k_monopile', np.zeros(6), units='N/m', desc='Stiffness BCs for ocean soil. Only used if monoflag inputis True')
# spring reaction data. Use float('inf') for rigid constraints.
nK = nRefine+1 if self.options['monopile'] else 1
self.add_output('kidx', np.zeros(nK, dtype=np.int_), desc='indices of z where external stiffness reactions should be applied.')
self.add_output('kx', np.zeros(nK), units='N/m', desc='spring stiffness in x-direction')
self.add_output('ky', np.zeros(nK), units='N/m', desc='spring stiffness in y-direction')
self.add_output('kz', np.zeros(nK), units='N/m', desc='spring stiffness in z-direction')
self.add_output('ktx', np.zeros(nK), units='N/m', desc='spring stiffness in theta_x-rotation')
self.add_output('kty', np.zeros(nK), units='N/m', desc='spring stiffness in theta_y-rotation')
self.add_output('ktz', np.zeros(nK), units='N/m', desc='spring stiffness in theta_z-rotation')
# extra mass
nMass = 3
self.add_output('midx', np.zeros(nMass, dtype=np.int_), desc='indices where added mass should be applied.')
self.add_output('m', np.zeros(nMass), units='kg', desc='added mass')
self.add_output('mIxx', np.zeros(nMass), units='kg*m**2', desc='x mass moment of inertia about some point p')
self.add_output('mIyy', np.zeros(nMass), units='kg*m**2', desc='y mass moment of inertia about some point p')
self.add_output('mIzz', np.zeros(nMass), units='kg*m**2', desc='z mass moment of inertia about some point p')
self.add_output('mIxy', np.zeros(nMass), units='kg*m**2', desc='xy mass moment of inertia about some point p')
self.add_output('mIxz', np.zeros(nMass), units='kg*m**2', desc='xz mass moment of inertia about some point p')
self.add_output('mIyz', np.zeros(nMass), units='kg*m**2', desc='yz mass moment of inertia about some point p')
self.add_output('mrhox',
|
np.zeros(nMass)
|
numpy.zeros
|
#------------------------------------------------------------------------
# <NAME>, 10/09/09
# Calculation of the figure of merit to compare different
# designs initially for the JET ELM coils project.
# Here we use the definition given by <NAME> at the
# JET ELM coils meeting on 27/08/09.
#------------------------------------------------------------------------
from numpy import ndarray as _ndarray
import numpy as _np
def merit_evaluate(chirikov:_ndarray, s_chir:_ndarray, S:_ndarray, br_nm:_ndarray):
# Chirikov Mean between [s1, s2]
s1, s2 = 0.9, 0.95
i1, i2 = _np.argmin(_np.abs(s_chir - s1)), _np.argmin(_np.abs(s_chir - s2))
if s_chir[i1] < s1: i1 += 1
if s_chir[i2] > s2: i2 -= 1
# TODO: Improve the integral by interpolation
chir_s1 = ((s_chir[i1]-s1)*chirikov[i1-1]+(s1-s_chir[i1-1])*chirikov[i1])/(s_chir[i1]-s_chir[i1-1])
chir_mean = (chir_s1+chirikov[i1])*(s_chir[i1]-s1)/2
for i in range(i1, i2):
chir_mean += (chirikov[i]+chirikov[i+1])*(s_chir[i+1]-s_chir[i])/2
chir_s2 = ((s2-s_chir[i2])*chirikov[i2+1]+(s_chir[i2+1]-s2)*chirikov[i2])/(s_chir[i2+1]-s_chir[i2])
chir_mean = chir_mean + (chir_s2+chirikov[i2])*(s2-s_chir[i2])/2
chir_mean /= (s2-s1)
# NTV Mean between [s3, s4]
s3, s4 = 0.1, 0.8
i3, i4 = _np.argmin(_np.abs(S - s3)), _np.argmin(_np.abs(S - s4))
if S[i3] < s3: i3 += 1
if S[i4] > s4: i4 -= 1
NTV_mean = 0
for i in range(i3, i4+1):
NTV_mean += _np.sum(_np.abs(br_nm[i,:])**2)
NTV_mean /= (i4-i3+1)
NTV_mean *= 2 # Factor 2 to include positive and negative n! (NTV_mean should really be calculated as a sum over all n's, but I leave that for later.
# Figure of merit
FoM = chir_mean**4 / NTV_mean
return FoM
# My figure of merit
# find_br_res98
# FoM2 = br_res98**2 / NTV_mean
# i_Q: This parameter is useful when the q profile is not monotonic.
def quantities_prof(S:_ndarray, Q:_ndarray, n_har:int, b1_sm_bis_nfix:_ndarray, G11:_ndarray, i_Q:int=0):
assert(b1_sm_bis_nfix.ndim == 2)
ns, ntheta = b1_sm_bis_nfix.shape
nhar_abs = _np.abs(n_har)
Q_abs = _np.abs(Q)
Q_min, Q_max =
|
_np.min(Q_abs[i_Q:])
|
numpy.min
|
import numpy as np
import pytest
import collections
import sqlite3 as lite
import os
import sys
path = os.path.realpath(__file__)
sys.path.append(os.path.dirname(os.path.dirname(path)))
import analysis as an
dir = os.path.dirname(__file__)
test_sqlite_path = os.path.join(dir, 'test.sqlite')
def get_sqlite_cursor():
con = lite.connect(test_sqlite_path)
con.row_factory = lite.Row
with con:
cur = con.cursor()
return cur
def test_agent_ids():
"""Test if get_agentids returns the right agentids"""
cur = get_sqlite_cursor()
ids = an.agent_ids(cur, 'reactor')
answer = ['39', '40', '41', '42', '43', '44']
assert ids == answer
def test_prototype_id():
"""Test if get_prototype_id returns the right agentids"""
cur = get_sqlite_cursor()
ids = an.prototype_id(cur, 'lwr')
answer = ['39', '40', '42']
assert ids == answer
def test_simulation_timesteps():
"""Tests if simulation_timesteps function outputs the right information"""
cur = get_sqlite_cursor()
init_year, init_month, duration, timestep = an.simulation_timesteps(cur)
assert init_year == 2000
assert init_month == 1
assert duration == 10
assert timestep.all() == np.linspace(0, 9, num=10).all()
def test_facility_commodity_flux():
"""Tests if facility_commodity_flux works properly"""
cur = get_sqlite_cursor()
agentids = ['39', '40', '42']
commod_list_send = ['uox_waste']
commod_list_rec = ['uox']
x = an.facility_commodity_flux(cur, agentids, commod_list_send, True)
y = an.facility_commodity_flux(cur, agentids, commod_list_rec, False)
answer_x = collections.OrderedDict()
answer_y = collections.OrderedDict()
answer_x['uox_waste'] = [0.0, 0.0, 0.3,
0.3, 0.7, 0.7, 1.0, 1.0, 1.0, 1.0]
answer_y['uox'] = [0.0, 0.3, 0.6, 0.9, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
assert len(x['uox_waste']) == len(answer_x['uox_waste'])
assert len(y['uox']) == len(answer_y['uox'])
for expected, actual in zip(x['uox_waste'], answer_x['uox_waste']):
assert expected == pytest.approx(actual, 1e-7)
for expected, actual in zip(y['uox'], answer_y['uox']):
assert expected == pytest.approx(actual, 1e-5)
@pytest.mark.skip(reason='not working, will fix')
def test_facility_commodity_flux_isotopics():
"""Tests if facility_commodity_flux_isotopics works properly"""
cur = get_sqlite_cursor()
agentids = ['27']
commod_list_send = ['reprocess_waste', 'uox_Pu']
commod_list_rec = ['uox_waste']
x = an.facility_commodity_flux_isotopics(
cur, agentids, commod_list_send, True)
y = an.facility_commodity_flux_isotopics(
cur, agentids, commod_list_rec, False)
answer_x = collections.OrderedDict()
answer_y = collections.OrderedDict()
answer_x['U235'] = [0, 0, 0, 2.639e-05, 2.639e-05, 6.279e-05,
6.279e-05, 8.919e-05, 8.919e-05, 8.919e-05]
answer_y['U235'] = [0, 0, 9.599e-3, 9.599e-3, 2.959e-2, 2.959e-2,
3.919e-2, 3.919e-2, 3.919e-2, 3.919e-2]
assert len(x['U235']) == len(answer_x['U235'])
assert len(y['U235']) == len(answer_y['U235'])
for expected, actual in zip(x['U235'], answer_x['U235']):
assert expected == pytest.approx(actual, abs=1e-5)
for expected, actual in zip(y['U235'], answer_y['U235']):
assert expected == pytest.approx(actual, abs=1e-5)
def test_stockpiles():
"""Tests if get_stockpiles function works properly """
cur = get_sqlite_cursor()
facility = 'separations'
pile_dict = an.stockpiles(cur, facility)
answer = collections.OrderedDict()
answer[facility] = [0, 0, 0, 0.2794, 0.2794,
0.6487, 0.6487, 0.9281, .9281, 0.9281]
assert len(pile_dict[facility]) == len(answer[facility])
for expected, actual in zip(pile_dict[facility], answer[facility]):
assert expected == pytest.approx(actual, abs=1e-4)
def test_swu_timeseries():
"""Tests if get_swu function works properly """
cur = get_sqlite_cursor()
swu = an.swu_timeseries(cur)
answer = collections.OrderedDict()
answer['Enrichment_30'] = [0, 1144.307, 2288.615,
3432.922, 3814.358, 3814.358,
3814.358, 3814.358, 3814.358, 3814.358]
assert len(swu['Enrichment_30']) == len(answer['Enrichment_30'])
for expected, actual in zip(swu['Enrichment_30'],
answer['Enrichment_30']):
assert expected == pytest.approx(actual, 1e-3)
def test_power_capacity():
"""Tests if get_power_dict function works properly """
cur = get_sqlite_cursor()
power_dict = an.power_capacity(cur)
lwr_inst = np.array([0, 1, 1, 2, 1, 1, 0, 0, 0, 0])
fr_inst = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert power_dict['lwr_inst'].all() == lwr_inst.all()
assert power_dict['fr_inst'].all() == fr_inst.all()
def test_u_util_calc():
""" Tests if u_util_calc function works properly """
cur = get_sqlite_cursor()
x = an.u_util_calc(cur)
answer = np.array([0, 0.142, 0.142, 0.142, 0.142,
0.142, 0.142, 0.142, 0.142, 0.142])
assert x.all() == answer.all()
def test_exec_string_receiverid():
"""Test if exec_string function prints the right thing
When the query wants to find receiverid """
string = an.exec_string([12, 35], 'receiverid', 'time, quantity')
answer = ('SELECT time, quantity '
'FROM resources INNER JOIN transactions '
'ON transactions.resourceid = resources.resourceid '
'WHERE (receiverid = 12 OR receiverid = 35)')
assert string == answer
def test_exec_string_commodity():
"""Test if exec_string function prints the right thing
When the query wants to find commodity """
string = an.exec_string(['uox', 'mox'], 'commodity', 'time, quantity')
answer = ('SELECT time, quantity '
'FROM resources INNER JOIN transactions '
'ON transactions.resourceid = resources.resourceid '
'WHERE (commodity = "uox" OR commodity = "mox")')
assert string == answer
def test_timeseries():
"""Test if get_timeseries returns the right timeseries list
Given an in_list"""
in_list = [[1, 245], [5, 375], [10, 411]]
duration = 13
x = an.timeseries(in_list, duration, False)
answer = [0, 245, 0, 0, 0, 375, 0,
0, 0, 0, 411, 0, 0]
assert x == answer
def test_kg_to_tons_no_cum():
"""Test if kg_to_tons boolean actually returns in tons
for non-cumulative timeseries search"""
in_list = [[1, 245], [5, 375], [10, 411]]
duration = 13
x = an.timeseries(in_list, duration, True)
answer = [0, 245, 0, 0, 0, 375, 0,
0, 0, 0, 411, 0, 0]
answer = [y * 0.001 for y in answer]
assert x == answer
def test_timeseries_cum():
"""Test if get_timeseries_cum returns the right timeseries list
Given an in_list"""
in_list = [[1, 245], [5, 375], [10, 411]]
duration = 13
x = an.timeseries_cum(in_list, duration, False)
answer = [0, 245, 245, 245, 245, 245 + 375, 245 + 375,
245 + 375, 245 + 375, 245 + 375, 245 + 375 + 411,
245 + 375 + 411, 245 + 375 + 411]
assert x == answer
def test_kg_to_tons_cum():
"""Test if kg_to_tons boolean actually
returns in tons for cumulative"""
in_list = [[1, 245], [5, 375], [10, 411]]
duration = 13
x = an.timeseries_cum(in_list, duration, True)
answer = [0, 245, 245, 245, 245, 245 + 375, 245 + 375,
245 + 375, 245 + 375, 245 + 375, 245 + 375 + 411,
245 + 375 + 411, 245 + 375 + 411]
answer = [y * 0.001 for y in answer]
assert x == answer
def test_isotope_transactions():
"""Test if get_isotope_transactions function
If it returns the right dictionary"""
cur = get_sqlite_cursor()
resources = cur.execute('SELECT sum(quantity), time, '
'qualid FROM transactions '
'INNER JOIN resources '
'ON resources.resourceid = '
'transactions.resourceid '
'WHERE commodity = "reprocess_waste" '
'GROUP BY time').fetchall()
compositions = cur.execute('SELECT * FROM compositions').fetchall()
x = an.isotope_transactions(resources, compositions)
answer = collections.defaultdict(list)
answer[922350000].append((3, 0.02639))
answer[922350000].append((5, 0.03639))
answer[922350000].append((7, 0.02639))
answer[922380000].append((3, 0.5336))
answer[922380000].append((5, 0.7036))
answer[922380000].append((7, 0.53360))
answer[942380000].append((3, 0.04))
answer[942380000].append((5, 0.06))
answer[942380000].append((7, 0.04))
for key in x:
assert len(x[key]) == len(answer[key])
for expected, actual in zip(x[key], answer[key]):
for i in range(0, 1):
assert expected[i] == pytest.approx(actual[i], 1e-3)
@pytest.mark.skip(reason='missing function')
def test_capacity_calc():
"""Test capacity_calc function"""
cur = get_sqlite_cursor()
init_year, init_month, duration, timestep = an.get_timesteps(cur)
governments = an.get_inst(cur)
entry_exit = cur.execute('SELECT max(value), timeseriespower.agentid, '
'parentid, entertime, entertime + lifetime '
'FROM agententry '
'INNER JOIN timeseriespower '
'ON agententry.agentid = '
'timeseriespower.agentid '
'GROUP BY timeseriespower.agentid').fetchall()
entry = cur.execute('SELECT max(value), timeseriespower.agentid, '
'parentid, entertime FROM agententry '
'INNER JOIN timeseriespower '
'ON agententry.agentid = timeseriespower.agentid '
'GROUP BY timeseriespower.agentid').fetchall()
exit_step = cur.execute('SELECT max(value), timeseriespower.agentid, '
'parentid, exittime FROM agentexit '
'INNER JOIN timeseriespower '
'ON agentexit.agentid = '
'timeseriespower.agentid'
' INNER JOIN agententry '
'ON agentexit.agentid = agententry.agentid '
'GROUP BY timeseriespower.agentid').fetchall()
power_dict = an.capacity_calc(governments, timestep, entry_exit)
answer_power = collections.OrderedDict()
answer_power['lwr_inst'] = np.asarray([0, 1, 2, 2, 2, 1, 1, 0, 0, 0])
answer_power['fr_inst'] =
|
np.asarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
|
numpy.asarray
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.table import Table
from scipy.ndimage.filters import convolve
from astropop.logger import logger
import warnings
def symmetry_roundness(chunk, nhalf):
"""Compute the folding roundness."""
# Quads
# 3 3 4 4 4
# 3 3 4 4 4
# 3 3 x 1 1
# 2 2 2 1 1
# 2 2 2 1 1
chunk = np.array(chunk)
chunk[nhalf, nhalf] = 0 # copy and setcentral pixel to 0
q1 = chunk[nhalf:, nhalf+1:].sum()
q2 = chunk[nhalf+1:, :nhalf+1].sum()
q3 = chunk[:nhalf+1, :nhalf].sum()
q4 = chunk[:nhalf, nhalf:].sum()
sum2 = -q1+q2-q3+q4
sum4 = q1+q2+q3+q4
# ignore divide-by-zero RuntimeWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
sround = 2.0 * sum2 / sum4
return sround
def stetson_find_peaks(h, hmin, mask, pixels, middle, n_x, n_y):
"""Stetson way to find peaks."""
mask = np.array(mask) # work with a copy
mask[middle, middle] = 0 # From now on we exclude the central pixel
pixels = np.sum(mask)
good = np.where(mask) # "good" identifies position of valid pixels
xx = good[1] - middle # x and y coordinate of valid pixels
yy = good[0] - middle # relative to the center
index = np.where(h >= hmin) # Valid image pixels are greater than hmin
nfound = len(index)
logger.debug('%i pixels above threshold', nfound)
if nfound == 0: # Any maxima found?
logger.warning('No maxima exceed input threshold of %f', hmin)
return
for i in range(pixels):
hy = index[0]+yy[i]
hx = index[1]+xx[i]
hgood = np.where((hy < n_y) & (hx < n_x) & (hy >= 0) & (hx >= 0))[0]
stars = np.where(np.greater_equal(h[index[0][hgood], index[1][hgood]],
h[hy[hgood], hx[hgood]]))
nfound = len(stars)
if nfound == 0: # Do valid local maxima exist?
logger.warning('No maxima exceed input threshold of %f', hmin)
return
index = np.array([index[0][hgood][stars], index[1][hgood][stars]])
ix = index[1] # X index of local maxima
iy = index[0] # Y index of local maxima
ngood = len(index[0])
logger.debug('%i local maxima located above threshold', ngood)
return ix, iy, ngood
def stetson_image_params(fwhm, snr, noise, data, background):
"""General Stetson kernel and image params."""
hmin = np.median(snr*noise)
image = data.astype(np.float64) - background
maxbox = 13 # Maximum size of convolution box in pixels
# Get information about the input image
type = np.shape(image)
if len(type) != 2:
raise ValueError('data array must be 2 dimensional')
n_x = type[1]
n_y = type[0]
logger.debug('Input Image Size is %ix%i', n_x, n_y)
if fwhm < 0.5:
raise ValueError('Supplied FWHM must be at least 0.5 pixels')
radius = np.max([0.637*fwhm, 2.001])
nhalf = np.min([int(radius), int((maxbox-1)/2.)])
nbox = 2*nhalf + 1 # number of pixels in side of convolution box
middle = nhalf # Index of central pixel
sigsq = (fwhm*gaussian_fwhm_to_sigma)**2
return image, hmin, n_x, n_y, radius, nhalf, nbox, middle, sigsq
def stetson_source_chunk(image, ix, iy, nhalf):
"""Extract a source image chunk."""
return image[iy-nhalf:iy+nhalf+1,
ix-nhalf:ix+nhalf+1]
def stetson_sharpness(temp, middle, mask, d):
"""Stetson compute of sharpness."""
mask = np.array(mask) # work with a copy
mask[middle, middle] = 0
sharp = temp[middle, middle] - (np.sum(mask*temp))/np.sum(mask)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
sharp /= d
return sharp
def stetson_roundness(temp, c1):
"""Stetson compute of roundness."""
dx = np.sum(np.sum(temp, axis=0)*c1)
dy = np.sum(np.sum(temp, axis=1)*c1)
if (dx <= 0) or (dy <= 0):
return np.nan
return 2*(dx-dy) / (dx + dy)
def stetson_kernels(radius, nhalf, nbox, sigsq):
"""Stetson kernel generation."""
# Mask identifies valid pixels in convolution box
mask = np.zeros([nbox, nbox], dtype='int8')
g = np.zeros([nbox, nbox]) # Gaussian convolution kernel
row2 = (np.arange(nbox)-nhalf)**2
for i in range(nhalf+1):
temp = row2 + i**2
g[nhalf-i, :] = temp
g[nhalf+i, :] = temp
g_row = np.where(g <= radius**2)
# MASK is complementary to SKIP in Stetson's Fortran
mask[g_row[0], g_row[1]] = 1
good = np.where(mask) # Value of c are now equal to distance to center
pixels = len(good[0])
g = np.exp(-0.5*g/sigsq)
c = g*mask # Convolution kernel now in c
sumc = np.sum(c)
sumcsq = np.sum(c**2) - sumc**2/pixels
sumc = sumc/pixels
c[good[0], good[1]] = (c[good[0], good[1]] - sumc)/sumcsq
c1 = np.exp(-.5*row2/sigsq)
sumc1 = np.sum(c1)/nbox
sumc1sq = np.sum(c1**2) - sumc1
c1 = (c1-sumc1)/sumc1sq
return mask, g, pixels, c, c1
def daofind_stetson(data, snr, background, noise, fwhm):
"""Find sources using DAOfind algorithm.
For testing purpouse only.
Translated from IDL Astro package by <NAME>. Original function available
at PythonPhot package. https://github.com/djones1040/PythonPhot
The function recieved some improvements to work better.
The function was also modified to keep computing statistics, even with bad
roundness and sharpness.
"""
# Compute hmin based on snr, background and noise
params = stetson_image_params(fwhm, snr, noise, data, background)
image, hmin, n_x, n_y, radius, nhalf, nbox, middle, sigsq = params
mask, g, pixels, c, c1 = stetson_kernels(radius, nhalf, nbox, sigsq)
# Compute quantities for centroid computations that can be used for all
# stars
xwt = np.zeros([nbox, nbox])
wt = nhalf - np.abs(np.arange(nbox)-nhalf) + 1
for i in range(nbox):
xwt[i, :] = wt
ywt = np.transpose(xwt)
sgx = np.sum(g*xwt, 1)
p = np.sum(wt)
sgy = np.sum(g*ywt, 0)
sumgx = np.sum(wt*sgy)
sumgy = np.sum(wt*sgx)
sumgsqy = np.sum(wt*sgy*sgy)
sumgsqx = np.sum(wt*sgx*sgx)
vec = nhalf - np.arange(nbox)
dgdx = sgy*vec
dgdy = sgx*vec
sdgdxs = np.sum(wt*dgdx**2)
sdgdx = np.sum(wt*dgdx)
sdgdys = np.sum(wt*dgdy**2)
sdgdy = np.sum(wt*dgdy)
sgdgdx = np.sum(wt*sgy*dgdx)
sgdgdy = np.sum(wt*sgx*dgdy)
h = convolve(image, c) # Convolve image with kernel "c"
minh = np.min(h)
h[:, 0:nhalf] = minh
h[:, n_x-nhalf:n_x] = minh
h[0:nhalf, :] = minh
h[n_y - nhalf: n_y - 1, :] = minh
ix, iy, ngood = stetson_find_peaks(h, hmin, mask, pixels, middle, n_x, n_y)
x = np.full(ngood, fill_value=np.nan)
y = np.full(ngood, fill_value=np.nan)
flux = np.full(ngood, fill_value=np.nan)
sharp =
|
np.full(ngood, fill_value=np.nan)
|
numpy.full
|
import random
import codecs
import copy
import math
import os,sys
import torch
import torch.nn.functional as F
import numpy as np
from utils import index2dense
from scipy.sparse import csr_matrix
from torch_geometric.datasets import Planetoid
from torch_geometric.datasets import Amazon
#access a quantity-balanced training set: each class has the same training size train_each
def get_split(opt,all_idx,all_label,nclass = 10):
train_each = opt.train_each
valid_each = opt.valid_each
train_list = [0 for _ in range(nclass)]
train_node = [[] for _ in range(nclass)]
train_idx = []
for iter1 in all_idx:
iter_label = all_label[iter1]
if train_list[iter_label] < train_each:
train_list[iter_label]+=1
train_node[iter_label].append(iter1)
train_idx.append(iter1)
if sum(train_list)==train_each*nclass:break
assert sum(train_list)==train_each*nclass
after_train_idx = list(set(all_idx)-set(train_idx))
valid_list = [0 for _ in range(nclass)]
valid_idx = []
for iter2 in after_train_idx:
iter_label = all_label[iter2]
if valid_list[iter_label] < valid_each:
valid_list[iter_label]+=1
valid_idx.append(iter2)
if sum(valid_list)==valid_each*nclass:break
assert sum(valid_list)==valid_each*nclass
test_idx = list(set(after_train_idx)-set(valid_idx))
return train_idx,valid_idx,test_idx,train_node
#access a quantity-imbalanced training set; the training set follows the step distribution.
def get_step_split(opt,all_idx,all_label,nclass=7):
base_valid_each = opt.valid_each
imb_ratio = opt.imb_ratio
head_list = opt.head_list if len(opt.head_list)>0 else [i for i in range(nclass//2)]
all_class_list = [i for i in range(nclass)]
tail_list = list(set(all_class_list) - set(head_list))
h_num = len(head_list)
t_num = len(tail_list)
base_train_each = int( len(all_idx) * opt.labeling_ratio / (t_num + h_num * imb_ratio) )
idx2train,idx2valid = {},{}
total_train_size = 0
total_valid_size = 0
for i_h in head_list:
idx2train[i_h] = int(base_train_each * imb_ratio)
idx2valid[i_h] = int(base_valid_each * 1)
total_train_size += idx2train[i_h]
total_valid_size += idx2valid[i_h]
for i_t in tail_list:
idx2train[i_t] = int(base_train_each * 1)
idx2valid[i_t] = int(base_valid_each * 1)
total_train_size += idx2train[i_t]
total_valid_size += idx2valid[i_t]
train_list = [0 for _ in range(nclass)]
train_node = [[] for _ in range(nclass)]
train_idx = []
for iter1 in all_idx:
iter_label = all_label[iter1]
if train_list[iter_label] < idx2train[iter_label]:
train_list[iter_label]+=1
train_node[iter_label].append(iter1)
train_idx.append(iter1)
if sum(train_list)==total_train_size:break
assert sum(train_list)==total_train_size
after_train_idx = list(set(all_idx)-set(train_idx))
valid_list = [0 for _ in range(nclass)]
valid_idx = []
for iter2 in after_train_idx:
iter_label = all_label[iter2]
if valid_list[iter_label] < idx2valid[iter_label]:
valid_list[iter_label]+=1
valid_idx.append(iter2)
if sum(valid_list)==total_valid_size:break
assert sum(valid_list)==total_valid_size
test_idx = list(set(after_train_idx)-set(valid_idx))
return train_idx,valid_idx,test_idx,train_node
#return the ReNode Weight
def get_renode_weight(opt,data):
ppr_matrix = data.Pi #personlized pagerank
gpr_matrix = torch.tensor(data.gpr).float() #class-accumulated personlized pagerank
base_w = opt.rn_base_weight
scale_w = opt.rn_scale_weight
nnode = ppr_matrix.size(0)
unlabel_mask = data.train_mask.int().ne(1)#unlabled node
#computing the Totoro values for labeled nodes
gpr_sum = torch.sum(gpr_matrix,dim=1)
gpr_rn = gpr_sum.unsqueeze(1) - gpr_matrix
rn_matrix = torch.mm(ppr_matrix,gpr_rn)
label_matrix = F.one_hot(data.y,gpr_matrix.size(1)).float()
label_matrix[unlabel_mask] = 0
rn_matrix = torch.sum(rn_matrix * label_matrix,dim=1)
rn_matrix[unlabel_mask] = rn_matrix.max() + 99 #exclude the influence of unlabeled node
#computing the ReNode Weight
train_size = torch.sum(data.train_mask.int()).item()
totoro_list = rn_matrix.tolist()
id2totoro = {i:totoro_list[i] for i in range(len(totoro_list))}
sorted_totoro = sorted(id2totoro.items(),key=lambda x:x[1],reverse=False)
id2rank = {sorted_totoro[i][0]:i for i in range(nnode)}
totoro_rank = [id2rank[i] for i in range(nnode)]
rn_weight = [(base_w + 0.5 * scale_w * (1 + math.cos(x*1.0*math.pi/(train_size-1)))) for x in totoro_rank]
rn_weight = torch.from_numpy(
|
np.array(rn_weight)
|
numpy.array
|
# Imports
import string
import pandas as pd
import numpy as np
from fuzzywuzzy import fuzz, StringMatcher
class WordEmbeddings:
"""
Module to load and handle the GloVe Word Embeddings
"""
def __init__(self):
self.vocabulary = set()
self.word_to_vec = {}
self.word_to_index = {}
self.index_to_word = {}
def load_glove(self, path):
"""
Loads a pretrained GloVe model
Expects a path to a GloVe pretrained word embeddings file
"""
with open(path, 'r') as file:
for line in file:
line = line.strip().split()
self.vocabulary.add(line[0])
self.word_to_vec[line[0]] = np.array(line[1:], dtype='float64')
for x,y in enumerate(sorted(self.vocabulary)):
self.word_to_index[y] = x+1
self.index_to_word[x+1] = y
self.EMBEDDING_DIM = len(self.word_to_vec['the'])
def get_matrix(self):
embedding_matrix = np.zeros((len(self.word_to_index) + 1, self.EMBEDDING_DIM))
for word, i in self.word_to_index.items():
embedding_vector = self.word_to_vec[word]
if(embedding_vector is not None):
embedding_matrix[i] = embedding_vector
return embedding_matrix
def in_vocab(self, word):
"""
Checks if a word is present in the vocabulary
"""
return (word in self.vocabulary)
def autocorrect(self, wrong_word):
"""
Attempts to map a wrongly spelt word to the closest one present in the vocabulary.
THIS IS NOT COSINE SIMILARITY. THIS IS AUTOCORRECT.
"""
if self.in_vocab(wrong_word):
return wrong_word
closest_ratio = 0.0
closest_word = None
for word in self.vocabulary:
if fuzz.ratio(word,wrong_word) > closest_ratio :
closest_word = word
closest_ratio = fuzz.ratio(word,wrong_word)
return closest_word
def similarity(self, word_1, word_2):
"""
Returns the cosine similarity of word_1 and word_2
"""
assert (self.in_vocab(word_1) and self.in_vocab(word_2))
u = self.word_to_vec[word_1]
v = self.word_to_vec[word_2]
dot =
|
np.sum(u * v)
|
numpy.sum
|
import numpy as np
import os
import argparse
import os.path as osp
def check_size(submission_file):
max_size = 60*1024*1024
if osp.getsize(submission_file) > max_size:
raise IOError #File size exceeds the specified maximum size, which is 60M for the server.
def judge_overlap(pbox,ignore_box):
overlap=[]
delete=[]
for p in pbox:
pl=min(p[0],p[2])
pr=max(p[0],p[2])
pb=min(p[1],p[3])
pt=max(p[1],p[3])
s_p=(pr-pl)*(pt-pb)
s_lap=-0.01
for c in ignore_box:
cl=min(c[0],c[2])
cr=max(c[0],c[2])
cb=min(c[1],c[3])
ct=max(c[1],c[3])
if not (cr<pl or cl>pr or ct<pb or cb>pt):
s_lap+=(min(cr,pr)-max(cl,pl))*(min(ct,pt)-max(cb,pb))
if s_lap>0:
overlap.append([p,s_lap/s_p])
for o in overlap:
if o[1]>0.5:
delete.append(o[0])
remain_id = [p for p in pbox if p not in delete]
return remain_id
def parse_ignore_file(ignore_file):
with open(ignore_file,'r') as f:
lines = f.readlines()
ig = [x.strip().split() for x in lines]
ignore = {}
for item in ig:
key = item[0]
ignore_num = (len(item)-1)/4
bbox = []
for i in range(int(ignore_num)):
b = []
b.append(int(item[1+4*i]))
b.append(int(item[2+4*i]))
b.append(int(item[1+4*i])+int(item[3+4*i]))
b.append(int(item[2+4*i])+int(item[4+4*i]))
bbox.append(b)
ignore[key] = bbox
return ignore
def parse_submission(submission_file,ignore_file):
ignore_zone = parse_ignore_file(ignore_file)
ignore_keys = ignore_zone.keys()
with open(submission_file, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split() for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = []
for x in splitlines:
bb = []
bb.append(float(x[2]))
bb.append(float(x[3]))
bb.append(float(x[2])+float(x[4]))
bb.append(float(x[3])+float(x[5]))
BB.append(bb)
sub_key = []
for x in image_ids:
if x not in sub_key:
sub_key.append(x)
final_confidence = []
final_ids = []
final_BB = []
for key in sub_key:
find = [i for i,v in enumerate(image_ids) if v == key]
BB_sub = [BB[i] for i in find]
confid_sub = [confidence[i] for i in find]
if key in ignore_keys:
ignore_bbox = ignore_zone[key]
bbox_remain = judge_overlap(BB_sub,ignore_bbox)
find_remain = []
for i,v in enumerate(BB_sub):
if v in bbox_remain:
find_remain.append(i)
confid_remain = [confid_sub[i] for i in find_remain]
BB_sub = bbox_remain
confid_sub = confid_remain
ids_sub = [key]*len(BB_sub)
final_ids.extend(ids_sub)
final_confidence.extend(confid_sub)
final_BB.extend(BB_sub)
final_BB = np.array(final_BB)
final_confidence = np.array(final_confidence)
sorted_ind = np.argsort(-final_confidence)
final_BB = final_BB[sorted_ind, :]
final_ids = [final_ids[x] for x in sorted_ind]
return final_ids, final_BB
def parse_gt_annotation(gt_file,ignore_file):
ignore_zone = parse_ignore_file(ignore_file)
ignore_keys = ignore_zone.keys()
with open(gt_file, 'r') as f:
lines = f.readlines()
info = [x.strip().split() for x in lines]
gt = {}
for item in info:
bbox = []
bbox_num = (len(item)-1)/5
for i in range(int(bbox_num)):
b = []
b.append(int(item[2+5*i]))
b.append(int(item[3+5*i]))
b.append(int(item[2+5*i])+int(item[4+5*i]))
b.append(int(item[3+5*i])+int(item[5+5*i]))
bbox.append(b)
if item[0] in ignore_keys:
ignore_bbox = ignore_zone[item[0]]
bbox_remain = judge_overlap(bbox,ignore_bbox)
else:
bbox_remain = bbox
gt[item[0]] =
|
np.array(bbox_remain)
|
numpy.array
|
"""
Created: May 2018
@author: JerryX
Find more : https://www.zhihu.com/people/xu-jerry-82
"""
import numpy as np
import operator as op
import numba
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
sys.path.append(curPath)
from xDLUtils import Tools
from activators import ReLU
# Rnn类
class RnnLayer(object):
# N,H,L和优化器在初始化时定义
# T作为X的一个维度传进来
# tanh和sigmoid的前反向传播在类内部定义。
def __init__(self, LName, miniBatchesSize, nodesNum, layersNum,
optimizerCls, optmParams, dropoutRRate, dataType, init_rng):
# 初始化超参数
self.name = LName
self.miniBatchesSize = miniBatchesSize
self.nodesNum = nodesNum
self.layersNum = layersNum
self.dataType = dataType
self.init_rng = init_rng
self.isInited = False # 初始化标志
# dropout 的保留率
self.dropoutRRate = dropoutRRate
self.dropoutMask = []
self.out = []
self.optimizerObjs = [optimizerCls(optmParams, dataType) for i in range(layersNum)]
# 初始化w,u,b 和对应偏置,维度,层次和节点个数传参进去。但是没有T,所以不能创建参数
# 返回的是一个组合结构,按层次(数组)划分的U、W,字典
# 改为放在首batch X传入时lazy init
self.rnnParams = []
# 保存各层中间产出的 st和f(st),用于前向和反向传播
# 不需要,已经在前反向传播中保留
self.deltaPrev = [] # 上一层激活后的误差输出
def _initNnWeight(self, D, H, layersNum, dataType):
# 层次
rnnParams = []
for layer in range(layersNum):
Wh = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, H)).astype(dataType)
if (0 == layer):
Wx = np.random.uniform(-1 * self.init_rng, self.init_rng, (D, H)).astype(dataType)
else:
Wx = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, H)).astype(dataType)
b = np.zeros(H, dataType)
rnnParams.append({'Wx': Wx, 'Wh': Wh, 'b': b})
self.rnnParams = rnnParams
def _initNnWeightOrthogonal(self, D, H, layersNum, dataType):
# 层次
rnnParams = []
for layer in range(layersNum):
# Wh = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, H)).astype(dataType)
Wh = Tools.initOrthogonal( (H, H),self.init_rng, dataType)
DH = D if 0 == layer else H
Wx = Tools.initOrthogonal( (DH, H),self.init_rng, dataType)
b = np.zeros(H, dataType)
rnnParams.append({'Wx': Wx, 'Wh': Wh, 'b': b})
self.rnnParams = rnnParams
# 训练时前向传播
def fp(self, input):
out_tmp = self.inference(input)
self.out, self.dropoutMask = Tools.dropout4rnn(out_tmp, self.dropoutRRate)
return self.out
# 预测时前向传播,激活后再输出
# input: batch x seqNum, 32*10
def inference(self, x):
N, T, D = x.shape
H = self.nodesNum
L = self.layersNum
# lazy init
if (False == self.isInited):
#self._initNnWeight(D, H, L, self.dataType)
self._initNnWeightOrthogonal(D, H, L, self.dataType)
self.isInited = True
# 缓存已经存入rnnParams里了,此处只需要返回输出结果(N,T,H)
h = self.rnn_forward(x)
# N进 v 1出 模型,只保留时序最后的一项输出
# self.out = h[:,-1,:]
# 全部输出,未用到的部分梯度为0
self.out = h
return self.out
# 反向传播方法(误差和权参)
# TODO 实现反向传播逻辑,先按照时间,再按照层次,再更新Wx/Wf/b/V/bv 及偏置的反向传播梯度
def bp(self, input, delta_ori, lrt):
if self.dropoutRRate == 1:
delta = delta_ori
else:
delta = delta_ori * self.dropoutMask
# dw是一个数组,对应结构的多层,每层的dw,dh,db,dh0表示需要参数梯度
N, T, D = input.shape
H = delta.shape[1]
# 只有最后一个T填delta,其余的dh梯度设置为0
dh = np.zeros((N, T, H), self.dataType)
# dh[:,-1,:] = delta
dh = delta
dx, dweight = self.rnn_backward(dh)
# 根据梯度更新参数
self.bpWeights(dweight, lrt)
return dx
# 计算反向传播权重梯度w,b
def bpWeights(self, dw, lrt):
L = self.layersNum
# for l in range(L - 1, -1, -1):
for l in range(L):
w = (self.rnnParams[l]['Wx'], self.rnnParams[l]['Wh'], self.rnnParams[l]['b'])
# 此处不赋值也可以,因为是按引用传参
# self.rnnParams[l]['Wx'], self.rnnParams[l]['Wh'], self.rnnParams[l]['b'] = self.optimizerObjs[l].getUpdWeights(w,dw[L-1-l],lrt)
self.optimizerObjs[l].getUpdWeights(w, dw[L - 1 - l], lrt)
def rnn_forward(self, x):
"""
Run a vanilla RNN forward on an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The RNN uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the RNN forward, we return the hidden states for all timesteps.
Inputs:
- x: Input data for the entire timeseries, of shape (N, T, D).
- h0: Initial hidden state, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- h: Hidden states for the entire timeseries, of shape (N, T, H).
- cache: Values needed in the backward pass
"""
h, cache = None, None
##############################################################################
# TODO: Implement forward pass for a vanilla RNN running on a sequence of #
# input data. You should use the rnn_step_forward function that you defined #
# above. You can use a for loop to help compute the forward pass. #
##############################################################################
N, T, D = x.shape
L = self.layersNum
H = self.rnnParams[0]['b'].shape[0]
xh = x
for layer in range(L):
h = np.zeros((N, T, H))
h0 = np.zeros((N, H))
cache = []
for t in range(T):
h[:, t, :], tmp_cache = self.rnn_step_forward(xh[:, t, :],
h[:, t - 1, :] if t > 0 else h0,
self.rnnParams[layer]['Wx'], self.rnnParams[layer]['Wh'],
self.rnnParams[layer]['b'])
cache.append(tmp_cache)
xh = h # 之后以h作为xh作为跨层输入
##############################################################################
# END OF YOUR CODE #
##############################################################################
self.rnnParams[layer]['h'] = h
self.rnnParams[layer]['cache'] = cache
return h # 返回最后一层作为输出
def rnn_backward(self, dh):
"""
Compute the backward pass for a vanilla RNN over an entire sequence of data.
Inputs:
- dh: Upstream gradients of all hidden states, of shape (N, T, H).
NOTE: 'dh' contains the upstream gradients produced by the
individual loss functions at each timestep, *not* the gradients
being passed between timesteps (which you'll have to compute yourself
by calling rnn_step_backward in a loop).
Returns a tuple of:
- dx: Gradient of inputs, of shape (N, T, D)
- dh0: Gradient of initial hidden state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, H)
- db: Gradient of biases, of shape (H,)
"""
dx, dh0, dWx, dWh, db = None, None, None, None, None
##############################################################################
# TODO: Implement the backward pass for a vanilla RNN running an entire #
# sequence of data. You should use the rnn_step_backward function that you #
# defined above. You can use a for loop to help compute the backward pass. #
##############################################################################
N, T, H = dh.shape
x, _, _, _, _ = self.rnnParams[0]['cache'][0]
D = x.shape[1]
# 初始化最上一层误差
dh_prevl = dh
# 保存各层dwh,dwx,和db
dweights = []
# 逐层倒推
for layer in range(self.layersNum - 1, -1, -1):
# 得到前向传播保存的cache数组
cache = self.rnnParams[layer]['cache']
DH = D if layer == 0 else H
dx = np.zeros((N, T, DH))
dWx = np.zeros((DH, H))
dWh = np.zeros((H, H))
db = np.zeros(H)
dprev_h_t = np.zeros((N, H))
# 倒序遍历
for t in range(T - 1, -1, -1):
dx[:, t, :], dprev_h_t, dWx_t, dWh_t, db_t = self.rnn_step_backward(dh_prevl[:, t, :] + dprev_h_t,
cache[t])
dWx += dWx_t
dWh += dWh_t
db += db_t
# 本层得出的dx,作为下一层的prev_l
dh_prevl = dx
dweight = (dWx, dWh, db)
dweights.append(dweight)
##############################################################################
# END OF YOUR CODE #
##############################################################################
# 返回x误差和各层参数误差
return dx, dweights
def rnn_step_forward(self, x, prev_h, Wx, Wh, b):
"""
Run the forward pass for a single timestep of a vanilla RNN that uses a tanh
activation function.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Inputs:
- x: Input data for this timestep, of shape (N, D).
- prev_h: Hidden state from previous timestep, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
- cache: Tuple of values needed for the backward pass.
"""
next_h, cache = None, None
##############################################################################
# TODO: Implement a single forward step for the vanilla RNN. Store the next #
# hidden state and any values you need for the backward pass in the next_h #
# and cache variables respectively. #
##############################################################################
z = Tools.matmul(x, Wx) + Tools.matmul(prev_h, Wh) + b
next_h = np.tanh(z)
dtanh = 1. - next_h * next_h
cache = (x, prev_h, Wx, Wh, dtanh)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return next_h, cache
def rnn_step_backward(self, dnext_h, cache):
"""
Backward pass for a single timestep of a vanilla RNN.
Inputs:
- dnext_h: Gradient of loss with respect to next hidden state, of shape (N, H)
- cache: Cache object from the forward pass
Returns a tuple of:
- dx: Gradients of input data, of shape (N, D)
- dprev_h: Gradients of previous hidden state, of shape (N, H)
- dWx: Gradients of input-to-hidden weights, of shape (D, H)
- dWh: Gradients of hidden-to-hidden weights, of shape (H, H)
- db: Gradients of bias vector, of shape (H,)
"""
dx, dprev_h, dWx, dWh, db = None, None, None, None, None
##############################################################################
# TODO: Implement the backward pass for a single step of a vanilla RNN. #
# #
# HINT: For the tanh function, you can compute the local derivative in terms #
# of the output value from tanh. #
##############################################################################
x, prev_h, Wx, Wh, dtanh = cache
dz = dnext_h * dtanh
dx = Tools.matmul(dz, Wx.T)
dprev_h = Tools.matmul(dz, Wh.T)
dWx = Tools.matmul(x.T, dz)
dWh = Tools.matmul(prev_h.T, dz)
db = np.sum(dz, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dWx, dWh, db
# LSTM 类
class LSTMLayer(object):
# N,H,L和优化器在初始化时定义
# T作为X的一个维度传进来
# tanh和sigmoid的前反向传播在类内部定义。
def __init__(self, LName, miniBatchesSize, nodesNum, layersNum,
optimizerCls, optmParams, dropoutRRate, dataType, init_rng):
# 初始化超参数
self.name = LName
self.miniBatchesSize = miniBatchesSize
self.nodesNum = nodesNum
self.layersNum = layersNum
self.dataType = dataType
self.init_rng = init_rng
self.isInited = False # 初始化标志
# dropout 的保留率
self.dropoutRRate = dropoutRRate
self.dropoutMask = []
self.out = []
self.optimizerObjs = [optimizerCls(optmParams, dataType) for i in range(layersNum)]
# 初始化w,u,b 和对应偏置,维度,层次和节点个数传参进去。但是没有T,所以不能创建参数
# 返回的是一个组合结构,按层次(数组)划分的U、W,字典
# 改为放在首batch X传入时lazy init
self.lstmParams = []
# 保存各层中间产出的 st和f(st),用于前向和反向传播
# 不需要,已经在前反向传播中保留
self.deltaPrev = [] # 上一层激活后的误差输出
def _initNnWeight(self, D, H, layersNum, dataType):
# 层次
lstmParams = []
for layer in range(layersNum):
Wh = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 4 * H)).astype(dataType)
if (0 == layer):
Wx = np.random.uniform(-1 * self.init_rng, self.init_rng, (D, 4 * H)).astype(dataType)
else:
Wx = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 4 * H)).astype(dataType)
b = np.zeros(4 * H, dataType)
lstmParams.append({'Wx': Wx, 'Wh': Wh, 'b': b})
self.lstmParams = lstmParams
def _initNnWeightOrthogonal(self, D , H, layersNum, dataType):
# 层次
lstmParams = []
for layer in range(layersNum):
# Wh = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 4 * H)).astype(dataType)
Wh = Tools.initOrthogonal( (H, 4*H),self.init_rng, dataType)
DH = D if 0 == layer else H
# Wx = np.random.uniform(-1 * self.init_rng, self.init_rng, (DH, 4 * H)).astype(dataType)
Wx = Tools.initOrthogonal( (DH, 4*H),self.init_rng, dataType)
b = np.zeros(4 * H, dataType)
lstmParams.append({'Wx': Wx, 'Wh': Wh, 'b': b})
self.lstmParams = lstmParams
# 预测时前向传播
def fp(self, input):
out_tmp = self.inference(input)
self.out, self.dropoutMask = Tools.dropout4rnn(out_tmp, self.dropoutRRate)
return self.out
def inference(self, x):
N, T, D = x.shape
H = self.nodesNum
L = self.layersNum
# lazy init
if (False == self.isInited):
# self._initNnWeight(D, H, L, self.dataType)
self._initNnWeightOrthogonal(D, H, L, self.dataType)
self.isInited = True
# 缓存已经存入rnnParams里了,此处只需要返回输出结果(N,T,H)
h = self.lstm_forward(x)
# N进 v 1出 模型,只保留时序最后的一项输出
# self.out = h[:,-1,:]
self.out = h
return self.out
# 反向传播方法(误差和权参)
# TODO 实现反向传播逻辑,先按照时间,再按照层次,再更新Wx/Wf/b/V/bv 及偏置的反向传播梯度
def bp(self, input, delta_ori, lrt):
if self.dropoutRRate == 1:
delta = delta_ori
else:
delta = delta_ori * self.dropoutMask
# dw是一个数组,对应结构的多层,每层的dw,dh,db,dh0表示需要参数梯度
N, T, D = input.shape
H = delta.shape[1]
# 只有最后一个T填delta,其余的dh梯度设置为0
dh = np.zeros((N, T, H), self.dataType)
# dh[:,-1,:] = delta
dh = delta
dx, dweight = self.lstm_backward(dh)
# 根据梯度更新参数
self.bpWeights(dweight, lrt)
return dx
# 计算反向传播权重梯度w,b
def bpWeights(self, dw, lrt):
L = self.layersNum
for l in range(L):
w = (self.lstmParams[l]['Wx'], self.lstmParams[l]['Wh'], self.lstmParams[l]['b'])
# self.lstmParams[l]['Wx'], self.lstmParams[l]['Wh'], self.lstmParams[l]['b'] = self.optimizerObjs[l].getUpdWeights(w, dw[L-1-l], lrt)
self.optimizerObjs[l].getUpdWeights(w, dw[L - 1 - l], lrt)
# self.optimizerObjs[l].getUpdWeights(w, dw[l], lrt)
def lstm_forward(self, x):
"""
Forward pass for an LSTM over an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The LSTM uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the LSTM forward, we return the hidden states for all timesteps.
Note that the initial cell state is passed as input, but the initial cell
state is set to zero. Also note that the cell state is not returned; it is
an internal variable to the LSTM and is not accessed from outside.
Inputs:
- x: Input data of shape (N, T, D)
- h0: Initial hidden state of shape (N, H)
- Wx: Weights for input-to-hidden connections, of shape (D, 4H)
- Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
- b: Biases of shape (4H,)
Returns a tuple of:
- h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
- cache: Values needed for the backward pass.
"""
h, cache = None, None
#############################################################################
# TODO: Implement the forward pass for an LSTM over an entire timeseries. #
# You should use the lstm_step_forward function that you just defined. #
# 首层,x(N,T,D), 向上变成xh(N,T,H)
# 首层 Wx(D,H), 向上变成Wxh(H,H)
#############################################################################
N, T, D = x.shape
L = self.layersNum
H = int(self.lstmParams[0]['b'].shape[0] / 4) # 取整
xh = x # 首层输入是x
for layer in range(L):
h = np.zeros((N, T, H))
h0 = np.zeros((N, H))
c = np.zeros((N, T, H))
c0 = np.zeros((N, H))
cache = []
for t in range(T):
h[:, t, :], c[:, t, :], tmp_cache = self.lstm_step_forward(xh[:, t, :], h[:, t - 1, :] if t > 0 else h0,
c[:, t - 1, :] if t > 0 else c0,
self.lstmParams[layer]['Wx'],
self.lstmParams[layer]['Wh'],
self.lstmParams[layer]['b'])
cache.append(tmp_cache)
xh = h # 之后以h作为xh作为跨层输入
##############################################################################
# END OF YOUR CODE #
##############################################################################
self.lstmParams[layer]['h'] = h
self.lstmParams[layer]['c'] = c
self.lstmParams[layer]['cache'] = cache
return h
def lstm_backward(self, dh):
"""
Backward pass for an LSTM over an entire sequence of data.]
Inputs:
- dh: Upstream gradients of hidden states, of shape (N, T, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data of shape (N, T, D)
- dh0: Gradient of initial hidden state of shape (N, H)
- dWx: Gradient of input-to-hidden weight matrix of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weight matrix of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dh0, dWx, dWh, db = None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for an LSTM over an entire timeseries. #
# You should use the lstm_step_backward function that you just defined. #
#############################################################################
N, T, H = dh.shape
x, _, _, _, _, _, _, _, _, _ = self.lstmParams[0]['cache'][0]
D = x.shape[1]
dh_prevl = dh
# 保存各层dwh,dwx,和db
dweights = []
for layer in range(self.layersNum - 1, -1, -1):
# 得到前向传播保存的cache数组
cache = self.lstmParams[layer]['cache']
DH = D if layer == 0 else H
dx = np.zeros((N, T, DH))
dWx = np.zeros((DH, 4 * H))
dWh = np.zeros((H, 4 * H))
db = np.zeros((4 * H))
dprev_h = np.zeros((N, H))
dprev_c = np.zeros((N, H))
for t in range(T - 1, -1, -1):
dx[:, t, :], dprev_h, dprev_c, dWx_t, dWh_t, db_t = self.lstm_step_backward(dh_prevl[:, t, :] + dprev_h,
dprev_c,
cache[t]) # 注意此处的叠加
dWx += dWx_t
dWh += dWh_t
db += db_t
# 本层得出的dx,作为下一层的prev_l
dh_prevl = dx
dweight = (dWx, dWh, db)
dweights.append(dweight)
##############################################################################
# END OF YOUR CODE #
##############################################################################
# 返回x误差和各层参数误差
return dx, dweights
def lstm_step_forward(self, x, prev_h, prev_c, Wx, Wh, b):
"""
Forward pass for a single timestep of an LSTM.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Note that a sigmoid() function has already been provided for you in this file.
Inputs:
- x: Input data, of shape (N, D)
- prev_h: Previous hidden state, of shape (N, H)
- prev_c: previous cell state, of shape (N, H)
- Wx: Input-to-hidden weights, of shape (D, 4H)
- Wh: Hidden-to-hidden weights, of shape (H, 4H)
- b: Biases, of shape (4H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
- next_c: Next cell state, of shape (N, H)
- cache: Tuple of values needed for backward pass.
"""
next_h, next_c, cache = None, None, None
#############################################################################
# TODO: Implement the forward pass for a single timestep of an LSTM. #
# You may want to use the numerically stable sigmoid implementation above.
# 首层,x(N,T,D), 向上变成xh(N,T,H)
# 首层 Wx(D,H), 向上变成Wxh(H,H)
#############################################################################
H = prev_h.shape[1]
# z , of shape(N,4H)
z = Tools.matmul(x, Wx) + Tools.matmul(prev_h, Wh) + b
# of shape(N,H)
i = Tools.sigmoid(z[:, :H])
f = Tools.sigmoid(z[:, H:2 * H])
o = Tools.sigmoid(z[:, 2 * H:3 * H])
g = np.tanh(z[:, 3 * H:])
next_c = f * prev_c + i * g
next_h = o * np.tanh(next_c)
cache = (x, prev_h, prev_c, Wx, Wh, i, f, o, g, next_c)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return next_h, next_c, cache
def lstm_step_backward(self, dnext_h, dnext_c, cache):
"""
Backward pass for a single timestep of an LSTM.
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dprev_h, dprev_c, dWx, dWh, db = None, None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for a single timestep of an LSTM. #
# #
# HINT: For sigmoid and tanh you can compute local derivatives in terms of #
# the output value from the nonlinearity. #
#############################################################################
x, prev_h, prev_c, Wx, Wh, i, f, o, g, next_c = cache
dnext_c = dnext_c + o * (1 - np.tanh(next_c) ** 2) * dnext_h # next_h = o*np.tanh(next_c)
di = dnext_c * g # next_c = f*prev_c + i*g
df = dnext_c * prev_c # next_c = f*prev_c + i*g
do = dnext_h * np.tanh(next_c) # next_h = o*np.tanh(next_c)
dg = dnext_c * i # next_h = o*np.tanh(next_c)
dprev_c = f * dnext_c # next_c = f*prev_c + i*g
dz = np.hstack((i * (1 - i) * di, f * (1 - f) * df, o * (1 - o) * do, (1 - g ** 2) * dg)) # 共四部分
dx = Tools.matmul(dz, Wx.T)
dprev_h = Tools.matmul(dz, Wh.T)
dWx = Tools.matmul(x.T, dz)
dWh = Tools.matmul(prev_h.T, dz)
db = np.sum(dz, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dprev_c, dWx, dWh, db
#最后一层concate,输出N*T*2H
class BiLSTMLayer(object):
# N,H,L和优化器在初始化时定义
# T作为X的一个维度传进来
# tanh和sigmoid的前反向传播在类内部定义。
# 直接输出分类维度
def __init__(self, LName, miniBatchesSize, nodesNum, layersNum,
optimizerCls, optmParams, dropoutRRate, dataType, init_rng):
# 初始化超参数
self.name = LName
self.miniBatchesSize = miniBatchesSize
self.nodesNum = nodesNum
self.layersNum = layersNum
self.dataType = dataType
self.init_rng = init_rng
self.isInited = False # 初始化标志
# dropout 的保留率
self.dropoutRRate = dropoutRRate
self.dropoutMask = []
self.out = []
self.optimizerObjs = [optimizerCls(optmParams, dataType) for i in range(layersNum)]
# 初始化w,u,b 和对应偏置,维度,层次和节点个数传参进去。但是没有T,所以不能创建参数
# 返回的是一个组合结构,按层次(数组)划分的U、W,字典
# 改为放在首batch X传入时lazy init
self.lstmParams = []
# 保存各层中间产出的 st和f(st),用于前向和反向传播
# 不需要,已经在前反向传播中保留
self.deltaPrev = [] # 上一层激活后的误差输出
def _initNnWeight(self, D, H, layersNum, dataType):
# 层次
lstmParams = []
for layer in range(layersNum):
Wh = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 4 * H)).astype(dataType)
iWh = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 4 * H)).astype(dataType)
if (0 == layer):
Wx = np.random.uniform(-1 * self.init_rng, self.init_rng, (D, 4 * H)).astype(dataType)
iWx = np.random.uniform(-1 * self.init_rng, self.init_rng, (D, 4 * H)).astype(dataType)
else:
Wx = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 4 * H)).astype(dataType)
iWx = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 4 * H)).astype(dataType)
b = np.zeros(4 * H, dataType)
ib = np.zeros(4 * H, dataType)
lstmParams.append({'Wx': Wx, 'Wh': Wh, 'b': b,
'iWx': iWx, 'iWh': iWh, 'ib': ib
# , 'U': U, 'V': V, 'bc': bc
})
self.lstmParams = lstmParams
# 预测时前向传播
def fp(self, input):
out_tmp = self.inference(input)
self.out, self.dropoutMask = Tools.dropout4rnn(out_tmp, self.dropoutRRate)
return self.out
def inference(self, x):
N, T, D = x.shape
H = self.nodesNum
L = self.layersNum
# lazy init
if (False == self.isInited):
self._initNnWeight(D, H, L, self.dataType)
self.isInited = True
# 缓存已经存入rnnParams里了,此处只需要返回输出结果(N,T,H)
h = self.lstm_forward(x)
# N进 v 1出 模型,只保留时序最后的一项输出
# self.out = h[:,-1,:]
self.out = h
return self.out
# 反向传播方法(误差和权参)
def bp(self, input, delta_ori, lrt):
if self.dropoutRRate == 1:
delta = delta_ori
else:
delta = delta_ori * self.dropoutMask
# dw是一个数组,对应结构的多层,每层的dw,dh,db,dh0表示需要参数梯度
# N, T, D = input.shape
# H = delta.shape[1]
# 只有最后一个T填delta,其余的dh梯度设置为0
# dh = np.zeros((N, T, H), self.dataType)
# dh[:,-1,:] = delta
dh = delta
dx, dweight = self.lstm_backward(dh)
# 根据梯度更新参数
self.bpWeights(dweight, lrt)
return dx
# 计算反向传播权重梯度w,b
def bpWeights(self, dw, lrt):
L = self.layersNum
for l in range(L):
w = (self.lstmParams[l]['Wx'], self.lstmParams[l]['Wh'], self.lstmParams[l]['b'],
self.lstmParams[l]['iWx'], self.lstmParams[l]['iWh'], self.lstmParams[l]['ib']
)
self.optimizerObjs[l].getUpdWeights(w, dw[L - 1 - l], lrt)
def lstm_forward(self, x):
"""
Forward pass for an LSTM over an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The LSTM uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the LSTM forward, we return the hidden states for all timesteps.
Note that the initial cell state is passed as input, but the initial cell
state is set to zero. Also note that the cell state is not returned; it is
an internal variable to the LSTM and is not accessed from outside.
Inputs:
- x: Input data of shape (N, T, D)
- h0: Initial hidden state of shape (N, H)
- Wx: Weights for input-to-hidden connections, of shape (D, 4H)
- Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
- b: Biases of shape (4H,)
Returns a tuple of:
- h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
- cache: Values needed for the backward pass.
"""
#############################################################################
# TODO: Implement the forward pass for an BiLSTM over an entire timeseries. #
# You should use the lstm_step_forward function that you just defined. #
# 首层,x(N,T,D), 向上变成xh(N,T,H)
# 首层 Wx(D,H), 向上变成Wxh(H,H)
#############################################################################
N, T, D = x.shape
L = self.layersNum
H = int(self.lstmParams[0]['b'].shape[0] / 4) # 取整
xh = x # 首层输入是x
ixh = x # 反向
for layer in range(L):
h0 = np.zeros((N, H))
c0 = np.zeros((N, H))
# 右向
h = np.zeros((N, T, H))
c = np.zeros((N, T, H))
cache = []
# 左向
ih = np.zeros((N, T, H))
ic = np.zeros((N, T, H))
icache = []
for t in range(T):
# 右向
h[:, t, :], c[:, t, :], tmp_cache = self.lstm_step_forward(xh[:, t, :],
h[:, t - 1, :] if t > 0 else h0,
c[:, t - 1, :] if t > 0 else c0,
self.lstmParams[layer]['Wx'],
self.lstmParams[layer]['Wh'],
self.lstmParams[layer]['b'])
cache.append(tmp_cache)
# 左向,
# 若此处ih和x的下标保持一致,均由大到小排列,后续无需倒排,提高效率
ih[:, T - 1 - t, :], ic[:, T - 1 - t, :], tmp_icache = self.lstm_step_forward(ixh[:, T - 1 - t, :],
ih[:, T - t, :] if t > 0 else h0,
ic[:, T - t, :] if t > 0 else c0,
self.lstmParams[layer]['iWx'],
self.lstmParams[layer]['iWh'],
self.lstmParams[layer]['ib'])
# icache下标和ih下标是反向对应的 self.lstmParams[layer]['ib'])
icache.append(tmp_icache)
# 右向
self.lstmParams[layer]['h'] = h
self.lstmParams[layer]['c'] = c
self.lstmParams[layer]['cache'] = cache
# 左向
self.lstmParams[layer]['ih'] = ih
self.lstmParams[layer]['ic'] = ic
self.lstmParams[layer]['icache'] = icache
# Batch * TimeStep * H
xh = h
ixh = ih
self.lstmParams[layer]['xh'] = xh
self.lstmParams[layer]['ixh'] = ixh
xh_final = np.concatenate((xh,ixh),axis=2) # 在H维度上做拼接
self.lstmParams[layer]['xh_final'] = xh_final
return xh_final
def lstm_backward(self, dh_all):
"""
Backward pass for an BiLSTM over an entire sequence of data.]
Inputs:
- dh_all: Upstream gradients of hidden states, of shape (N, T, 2*H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data of shape (N, T, D)
- dh0: Gradient of initial hidden state of shape (N, H)
- dWx: Gradient of input-to-hidden weight matrix of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weight matrix of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
#############################################################################
# TODO: Implement the backward pass for an BiLSTM over an entire timeseries. #
# You should use the lstm_step_backward function that you just defined. #
#############################################################################
N, T, H_time_2 = dh_all.shape #得到的误差是batch *T* 2H
H = int(H_time_2 / 2)
x, _, _, _, _, _, _, _, _, _ = self.lstmParams[0]['cache'][0]
D = x.shape[1] # 单个时间步上维度
dh = dh_all[:,:,0:H]
dih = dh_all[:,:,H:2*H]
dweights = []
for layer in range(self.layersNum - 1, -1, -1):
dh_prevl = dh
dih_prevl = dih
DH = D if layer == 0 else H
# 右向
dx = np.zeros((N, T, DH))
cache = self.lstmParams[layer]['cache']
dWx = np.zeros((DH, 4 * H))
dWh = np.zeros((H, 4 * H))
db = np.zeros((4 * H))
dprev_h = np.zeros((N, H))
dprev_c = np.zeros((N, H))
# 左向
dix = np.zeros((N, T, DH))
icache = self.lstmParams[layer]['icache']
diWx = np.zeros((DH, 4 * H))
diWh = np.zeros((H, 4 * H))
dib = np.zeros((4 * H))
dprev_ih = np.zeros((N, H))
dprev_ic = np.zeros((N, H))
for t in range(T - 1, -1, -1):
# 右向
dx[:, t, :], dprev_h, dprev_c, dWx_t, dWh_t, db_t = self.lstm_step_backward(dh_prevl[:, t, :] + dprev_h,
dprev_c,
cache[t]) # 注意此处的叠加
dWx += dWx_t
dWh += dWh_t
db += db_t
# fwd选择ih和输入x的下标一致对应,且之后合并前馈时,ih按时间步一致再前馈
# bp时,按照时间步倒序回传,dih从小到大回传
dix[:, T - 1 - t, :], dprev_ih, dprev_ic, diWx_t, diWh_t, db_it = self.lstm_step_backward(dih_prevl[:, T - 1 - t, :] + dprev_ih,
dprev_ic,
# icache[T - 1 - t]) # 注意此处的叠加
icache[t]) # 注意此处的叠加
diWx += diWx_t
diWh += diWh_t
dib += db_it
dweight = (dWx, dWh, db, diWx, diWh, dib)
dweights.append(dweight)
# 本层得出的dx,作为下一层的误差输入
dh = dx
dih = dix
# 第一层,正反两个方向的误差相加,得到总的dx返回上一层
# 如果rnn是第一层,则误差不需要继续向上传递
# 返回x误差和各层参数误差
dh_t_all = dh + dih # 合并得到dx
return dh_t_all, dweights
def lstm_step_forward(self, x, prev_h, prev_c, Wx, Wh, b):
"""
Forward pass for a single timestep of an LSTM.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Note that a sigmoid() function has already been provided for you in this file.
Inputs:
- x: Input data, of shape (N, D)
- prev_h: Previous hidden state, of shape (N, H)
- prev_c: previous cell state, of shape (N, H)
- Wx: Input-to-hidden weights, of shape (D, 4H)
- Wh: Hidden-to-hidden weights, of shape (H, 4H)
- b: Biases, of shape (4H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
- next_c: Next cell state, of shape (N, H)
- cache: Tuple of values needed for backward pass.
"""
next_h, next_c, cache = None, None, None
#############################################################################
# TODO: Implement the forward pass for a single timestep of an LSTM. #
# You may want to use the numerically stable sigmoid implementation above.
# 首层,x(N,T,D), 向上变成xh(N,T,H)
# 首层 Wx(D,H), 向上变成Wxh(H,H)
#############################################################################
H = prev_h.shape[1]
# z , of shape(N,4H)
z = Tools.matmul(x, Wx) + Tools.matmul(prev_h, Wh) + b
# of shape(N,H)
i = Tools.sigmoid(z[:, :H])
f = Tools.sigmoid(z[:, H:2 * H])
o = Tools.sigmoid(z[:, 2 * H:3 * H])
g = np.tanh(z[:, 3 * H:])
next_c = f * prev_c + i * g
next_h = o * np.tanh(next_c)
cache = (x, prev_h, prev_c, Wx, Wh, i, f, o, g, next_c)
return next_h, next_c, cache
def lstm_step_backward(self, dnext_h, dnext_c, cache):
"""
Backward pass for a single timestep of an LSTM.
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dprev_h, dprev_c, dWx, dWh, db = None, None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for a single timestep of an LSTM. #
# #
# HINT: For sigmoid and tanh you can compute local derivatives in terms of #
# the output value from the nonlinearity. #
#############################################################################
x, prev_h, prev_c, Wx, Wh, i, f, o, g, next_c = cache
dnext_c = dnext_c + o * (1 - np.tanh(next_c) ** 2) * dnext_h # next_h = o*np.tanh(next_c)
di = dnext_c * g # next_c = f*prev_c + i*g
df = dnext_c * prev_c # next_c = f*prev_c + i*g
do = dnext_h * np.tanh(next_c) # next_h = o*np.tanh(next_c)
dg = dnext_c * i # next_h = o*np.tanh(next_c)
dprev_c = f * dnext_c # next_c = f*prev_c + i*g
dz = np.hstack((i * (1 - i) * di, f * (1 - f) * df, o * (1 - o) * do, (1 - g ** 2) * dg)) # 共四部分
dx = Tools.matmul(dz, Wx.T)
dprev_h = Tools.matmul(dz, Wh.T)
dWx = Tools.matmul(x.T, dz)
dWh = Tools.matmul(prev_h.T, dz)
db = np.sum(dz, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dprev_c, dWx, dWh, db
# GRU 类
class GRULayer(object):
def __init__(self, LName, miniBatchesSize, nodesNum, layersNum,
optimizerCls, optmParams, dropoutRRate, dataType, init_rng):
# 初始化超参数
self.name = LName
self.miniBatchesSize = miniBatchesSize
self.nodesNum = nodesNum
self.layersNum = layersNum
# self.optimizer = optimizer
self.dataType = dataType
self.init_rng = init_rng
self.isInited = False # 初始化标志
# dropout 的保留率
self.dropoutRRate = dropoutRRate
self.dropoutMask = []
self.out = []
self.optimizerObjs = [optimizerCls(optmParams, dataType) for i in range(layersNum)]
# 初始化w,u,b 和对应偏置,维度,层次和节点个数传参进去。但是没有T,所以不能创建参数
# 返回的是一个组合结构,按层次(数组)划分的U、W,字典
# 改为放在首batch X传入时lazy init
self.gruParams = []
# 保存各层中间产出的 st和f(st),用于前向和反向传播
# 不需要,已经在前反向传播中保留
self.deltaPrev = [] # 上一层激活后的误差输出
# N,H,L和优化器在初始化时定义
# T作为X的一个维度传进来
# tanh和sigmoid的前反向传播在类内部定义。
def _initNnWeight(self, D, H, layersNum, dataType):
# 层次
gruParams = []
for layer in range(layersNum):
Wzh = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 2 * H)).astype(dataType)
War = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, H)).astype(dataType)
if (0 == layer):
Wzx = np.random.uniform(-1 * self.init_rng, self.init_rng, (D, 2 * H)).astype(dataType)
Wax = np.random.uniform(-1 * self.init_rng, self.init_rng, (D, H)).astype(dataType)
else:
Wzx = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, 2 * H)).astype(dataType)
Wax = np.random.uniform(-1 * self.init_rng, self.init_rng, (H, H)).astype(dataType)
bz = np.zeros(2 * H, dataType)
ba = np.zeros(H, dataType)
gruParams.append({'Wzx': Wzx, 'Wzh': Wzh, 'bz': bz, 'Wax': Wax, 'War': War, 'ba': ba})
self.gruParams = gruParams
def fp(self, input):
out_tmp = self.inference(input)
self.out, self.dropoutMask = Tools.dropout4rnn(out_tmp, self.dropoutRRate)
return self.out
# 预测时前向传播
def inference(self, x):
N, T, D = x.shape
H = self.nodesNum
L = self.layersNum
# lazy init
if (False == self.isInited):
self._initNnWeight(D, H, L, self.dataType)
self.isInited = True
# 缓存已经存入rnnParams里了,此处只需要返回输出结果(N,T,H)
h = self.gru_forward(x)
# N进 v 1出 模型,只保留时序最后的一项输出
# self.out = h[:,-1,:]
self.out = h
return self.out
# 反向传播方法(误差和权参)
# TODO 实现反向传播逻辑,先按照时间,再按照层次,再更新Wx/Wf/b/V/bv 及偏置的反向传播梯度
def bp(self, input, delta_ori, lrt):
if self.dropoutRRate == 1:
delta = delta_ori
else:
delta = delta_ori * self.dropoutMask
# dw是一个数组,对应结构的多层,每层的dw,dh,db,dh0表示需要参数梯度
N, T, D = input.shape
H = delta.shape[1]
# 只有最后一个T填delta,其余的dh梯度设置为0
dh = np.zeros((N, T, H), self.dataType)
# dh[:,-1,:] = delta
dh = delta
dx, dweight = self.gru_backward(dh)
# 根据梯度更新参数
self.bpWeights(dweight, lrt)
return dx
# 计算反向传播权重梯度w,b
def bpWeights(self, dw, lrt):
L = self.layersNum
for l in range(L):
w = (self.gruParams[l]['Wzx'], self.gruParams[l]['Wzh'], self.gruParams[l]['bz'], self.gruParams[l]['Wax'],
self.gruParams[l]['War'], self.gruParams[l]['ba'])
# self.gruParams[l]['Wzx'], self.gruParams[l]['Wzh'], self.gruParams[l]['bz'],self.gruParams[l]['Wax'], self.gruParams[l]['War'], self.gruParams[l]['ba'] = self.optimizerObjs[l].getUpdWeights(w, dw[L-1-l], lrt)
self.optimizerObjs[l].getUpdWeights(w, dw[L - 1 - l], lrt)
def gru_forward(self, x):
"""
Forward pass for an LSTM over an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The LSTM uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the LSTM forward, we return the hidden states for all timesteps.
Note that the initial cell state is passed as input, but the initial cell
state is set to zero. Also note that the cell state is not returned; it is
an internal variable to the LSTM and is not accessed from outside.
Inputs:
- x: Input data of shape (N, T, D)
- h0: Initial hidden state of shape (N, H)
- Wx: Weights for input-to-hidden connections, of shape (D, 4H)
- Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
- b: Biases of shape (4H,)
Returns a tuple of:
- h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
- cache: Values needed for the backward pass.
"""
h, cache = None, None
#############################################################################
# TODO: Implement the forward pass for an LSTM over an entire timeseries. #
# You should use the lstm_step_forward function that you just defined. #
# 首层,x(N,T,D), 向上变成xh(N,T,H)
# 首层 Wx(D,H), 向上变成Wxh(H,H)
#############################################################################
N, T, D = x.shape
L = self.layersNum
H = self.gruParams[0]['ba'].shape[0] # 取整
xh = x # 首层输入是x
for layer in range(L):
h = np.zeros((N, T, H))
h0 = np.zeros((N, H))
cache = []
for t in range(T):
h[:, t, :], tmp_cache = self.gru_step_forward(xh[:, t, :], h[:, t - 1, :] if t > 0 else h0,
self.gruParams[layer]['Wzx'],
self.gruParams[layer]['Wzh'],
self.gruParams[layer]['bz'],
self.gruParams[layer]['Wax'],
self.gruParams[layer]['War'],
self.gruParams[layer]['ba'],
)
cache.append(tmp_cache)
xh = h # 之后以h作为xh作为跨层输入
##############################################################################
# END OF YOUR CODE #
##############################################################################
self.gruParams[layer]['h'] = h
self.gruParams[layer]['cache'] = cache
return h
def gru_backward(self, dh):
"""
Backward pass for an LSTM over an entire sequence of data.]
Inputs:
- dh: Upstream gradients of hidden states, of shape (N, T, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data of shape (N, T, D)
- dh0: Gradient of initial hidden state of shape (N, H)
- dWx: Gradient of input-to-hidden weight matrix of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weight matrix of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dh0, dWzx, dWzh, dbz, dWax, dWar, dba = None, None, None, None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for an LSTM over an entire timeseries. #
# You should use the lstm_step_backward function that you just defined. #
#############################################################################
N, T, H = dh.shape
x, _, _, _, _, _, _, _, _, _ = self.gruParams[0]['cache'][0]
D = x.shape[1]
dh_prevl = dh
# 保存各层dwh,dwx,和db
dweights = []
for layer in range(self.layersNum - 1, -1, -1):
# 得到前向传播保存的cache数组
cache = self.gruParams[layer]['cache']
DH = D if layer == 0 else H
dx = np.zeros((N, T, DH))
dWzx = np.zeros((DH, 2 * H))
dWzh = np.zeros((H, 2 * H))
dbz = np.zeros((2 * H))
dWax = np.zeros((DH, H))
dWar = np.zeros((H, H))
dba = np.zeros((H))
dprev_h = np.zeros((N, H))
for t in range(T - 1, -1, -1):
dx[:, t, :], dprev_h, dWzx_t, dWzh_t, dbz_t, dWax_t, dWar_t, dba_t = self.gru_step_backward(
dh_prevl[:, t, :] + dprev_h,
cache[t]) # 注意此处的叠加
dWzx += dWzx_t
dWzh += dWzh_t
dbz += dbz_t
dWax += dWax_t
dWar += dWar_t
dba += dba_t
# 本层得出的dx,作为下一层的prev_l
dh_prevl = dx
dweight = (dWzx, dWzh, dbz, dWax, dWar, dba)
dweights.append(dweight)
##############################################################################
# END OF YOUR CODE #
##############################################################################
# 返回x误差和各层参数误差
return dx, dweights
def gru_step_forward(self, x, prev_h, Wzx, Wzh, bz, Wax, War, ba):
"""
Forward pass for a single timestep of an LSTM.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Note that a sigmoid() function has already been provided for you in this file.
Inputs:
- x: Input data, of shape (N, D)
- prev_h: Previous hidden state, of shape (N, H)
- prev_c: previous cell state, of shape (N, H)
- Wzx: Input-to-hidden weights, of shape (D, 4H)
- Wh: Hidden-to-hidden weights, of shape (H, 4H)
- b: Biases, of shape (4H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
- next_c: Next cell state, of shape (N, H)
- cache: Tuple of values needed for backward pass.
"""
next_h, cache = None, None
#############################################################################
# TODO: Implement the forward pass for a single timestep of an LSTM. #
# You may want to use the numerically stable sigmoid implementation above.
# 首层,x(N,T,D), 向上变成xh(N,T,H)
# 首层 Wx(D,H), 向上变成Wxh(H,H)
#############################################################################
H = prev_h.shape[1]
# z_hat, of shape(N,4H)
z_hat = Tools.matmul(x, Wzx) + Tools.matmul(prev_h, Wzh) + bz
# of shape(N,H)
r = Tools.sigmoid(z_hat[:, :H])
z = Tools.sigmoid(z_hat[:, H:2 * H])
a = Tools.matmul(x, Wax) + Tools.matmul(r * prev_h, War) + ba
next_h = prev_h * (1. - z) + z * np.tanh(a)
cache = (x, prev_h, Wzx, Wzh, Wax, War, z_hat, r, z, a)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return next_h, cache
def gru_step_backward(self, dnext_h, cache):
"""
Backward pass for a single timestep of an LSTM.
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba = None, None, None, None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for a single timestep of an LSTM. #
# #
# HINT: For sigmoid and tanh you can compute local derivatives in terms of #
# the output value from the nonlinearity. #
#############################################################################
x, prev_h, Wzx, Wzh, Wax, War, z_hat, r, z, a = cache
N, D = x.shape
H = dnext_h.shape[1]
z_hat_H1 = z_hat[:, :H]
z_hat_H2 = z_hat[:, H:2 * H]
# delta
tanha = np.tanh(a)
dh = dnext_h
da = dh * z * (1. - tanha ** 2)
dh_prev_1 = dh * (1. - z)
# dz = dh*(tanha-prev_h)
dz_hat_2 = dh * (tanha - prev_h) * (z * (1. - z))
dhat_a = Tools.matmul(da, War.T)
dr = dhat_a * prev_h
dx_1 = Tools.matmul(da, Wax.T)
dh_prev_2 = dhat_a * r # da* Tools.matmul(r,War.T)
dz_hat_1 = dr * (r * (1. - r))
dz_hat = np.hstack((dz_hat_1, dz_hat_2))
dx_2 = Tools.matmul(dz_hat_1, Wzx[:, :H].T)
dh_prev_3 = Tools.matmul(dz_hat_1, Wzh[:, :H].T)
dx_3 = Tools.matmul(dz_hat_2, Wzx[:, H:2 * H].T)
dh_prev_4 = Tools.matmul(dz_hat_2, Wzh[:, H:2 * H].T)
dprev_h = dh_prev_1 + dh_prev_2 + dh_prev_3 + dh_prev_4
dx = dx_1 + dx_2 + dx_3
dWax = Tools.matmul(x.T, da)
dWar = Tools.matmul((r * prev_h).T, da)
dba = np.sum(da, axis=0)
dWzx = Tools.matmul(x.T, dz_hat)
dWzh = Tools.matmul(prev_h.T, dz_hat)
dbz = np.sum(dz_hat, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba
def gru_step_backward_succ(self, dnext_h, cache):
"""
Backward pass for a single timestep of an LSTM.
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba = None, None, None, None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for a single timestep of an LSTM. #
# #
# HINT: For sigmoid and tanh you can compute local derivatives in terms of #
# the output value from the nonlinearity. #
#############################################################################
x, prev_h, Wzx, Wzh, Wax, War, z_hat, r, z, a = cache
N, D = x.shape
H = dnext_h.shape[1]
z_hat_H1 = z_hat[:, :H]
z_hat_H2 = z_hat[:, H:2 * H]
# delta
tanha = np.tanh(a)
dh = dnext_h
da = dh * z * (1. - tanha ** 2)
dh_prev_1 = dh * (1. - z)
# dz = dh * (z+tanha)
# dz = dh*tanha+1.-dh*(1.-z)*prev_h
# dz = dh*tanha+1.-dh*prev_h
dz = dh * (tanha - prev_h)
# dz_hat_2 = dz*(z*(1.-z))
dz_hat_2 = dh * (tanha - prev_h) * (z * (1. - z))
# dz_hat_2 = dz*(z_hat_H2*(1.-z_hat_H2))
dhat_a = Tools.matmul(da, War.T)
# dz_hat_2 = dhat_r * r
dr = dhat_a * prev_h
dx_1 = Tools.matmul(da, Wax.T)
dh_prev_2 = dhat_a * r # da* Tools.matmul(r,War.T)
# dz_hat_1 = dh_prev_2 * (r * (1. - r))
dz_hat_1 = dr * (r * (1. - r))
# dz_hat_1 = prev_h * Tools.matmul(dh*z*(1-tanha**2), War.T)*(r*(1.-r))
dz_hat = np.hstack((dz_hat_1, dz_hat_2))
# dh_prev_3 = Tools.matmul(dz_hat_2,Wzh.T)
# dx_2 = Tools.matmul(dz_hat_2,Wzx.T)
dx_2 = Tools.matmul(dz_hat_1, Wzx[:, :H].T)
# dh_prev_3 = Tools.matmul(dz_hat,Wzh.T)
# dh_prev_3 = Tools.matmul(dz_hat_2,Wzh.T)
dh_prev_3 = Tools.matmul(dz_hat_1, Wzh[:, :H].T)
dx_23 = Tools.matmul(dz_hat, Wzx.T)
# dx_3 = Tools.matmul(dz_hat_1,Wzx.T)
dx_3 = Tools.matmul(dz_hat_2, Wzx[:, H:2 * H].T)
# dh_prev_4 =Tools.matmul(dz_hat_1, Wzh.T)
dh_prev_4 = Tools.matmul(dz_hat_2, Wzh[:, H:2 * H].T)
# dx_3 = Tools.matmul(dz_hat,Wzx.T)
# dh_prev_4 =Tools.matmul(dz_hat, Wzh.T)
# dh_prev_34 = np.hstack((dh_prev_3, dh_prev_4))
# dh_prev_34 = Tools.matmul(dh_prev_34,Wzh.T)
dh_prev_34 = Tools.matmul(dz_hat, Wzh.T)
# dprev_h = dh_prev_1+dh_prev_2+dh_prev_34 * 2. #dh_prev_3 + dh_prev_4
# dx = dx_1 + dx_2*2. # +dx_3
# dprev_h = dh_prev_1+dh_prev_2+dh_prev_34 #dh_prev_3 + dh_prev_4
dprev_h = dh_prev_1 + dh_prev_2 + dh_prev_3 + dh_prev_4
# dx = dx_1 + dx_23 # +dx_3
dx = dx_1 + dx_2 + dx_3
dWax = Tools.matmul(x.T, da)
dWar = Tools.matmul((r * prev_h).T, da)
dba = np.sum(da, axis=0)
dWzx = Tools.matmul(x.T, dz_hat)
dWzh = Tools.matmul(prev_h.T, dz_hat)
dbz = np.sum(dz_hat, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba
def gru_step_backward_v2(self, dnext_h, cache):
"""
Backward pass for a single timestep of an LSTM.
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba = None, None, None, None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for a single timestep of an LSTM. #
# #
# HINT: For sigmoid and tanh you can compute local derivatives in terms of #
# the output value from the nonlinearity. #
#############################################################################
x, prev_h, Wzx, Wzh, Wax, War, z_hat, r, z, a = cache
N, D = x.shape
H = dnext_h.shape[1]
z_hat_H1 = z_hat[:, :H]
z_hat_H2 = z_hat[:, H:2 * H]
# delta
tanha = np.tanh(a)
dh = dnext_h
da = dh * z * (1. - tanha ** 2)
dh_prev_1 = dh * (1. - z)
# dz = dh * (z+tanha)
# dz = dh*tanha+1.-dh*(1.-z)*prev_h
# dz = dh*tanha+1.-dh*prev_h
dz = dh * (tanha - prev_h)
# dz_hat_2 = dz*(z*(1.-z))
dz_hat_2 = dh * (tanha - prev_h) * (z * (1. - z))
# dz_hat_2 = dz*(z_hat_H2*(1.-z_hat_H2))
dhat_a = Tools.matmul(da, War.T)
# dz_hat_2 = dhat_r * r
dr = dhat_a * prev_h
dx_1 = Tools.matmul(da, Wax.T)
dh_prev_2 = dhat_a * r # da* Tools.matmul(r,War.T)
# dz_hat_1 = dh_prev_2 * (r * (1. - r))
dz_hat_1 = dr * (r * (1. - r))
# dz_hat_1 = prev_h * Tools.matmul(dh*z*(1-tanha**2), War.T)*(r*(1.-r))
dz_hat = np.hstack((dz_hat_1, dz_hat_2))
# dh_prev_3 = Tools.matmul(dz_hat_2,Wzh.T)
# dx_2 = Tools.matmul(dz_hat_2,Wzx.T)
# dh_prev_3 = Tools.matmul(dz_hat,Wzh.T)
# dh_prev_3 = Tools.matmul(dz_hat_2,Wzh.T)
dx_23 = Tools.matmul(dz_hat, Wzx.T)
# dx_3 = Tools.matmul(dz_hat_1,Wzx.T)
# dh_prev_4 =Tools.matmul(dz_hat_1, Wzh.T)
# dx_3 = Tools.matmul(dz_hat,Wzx.T)
# dh_prev_4 =Tools.matmul(dz_hat, Wzh.T)
# dh_prev_34 = np.hstack((dh_prev_3, dh_prev_4))
# dh_prev_34 = Tools.matmul(dh_prev_34,Wzh.T)
dh_prev_34 = Tools.matmul(dz_hat, Wzh.T)
# dprev_h = dh_prev_1+dh_prev_2+dh_prev_34 * 2. #dh_prev_3 + dh_prev_4
# dx = dx_1 + dx_2*2. # +dx_3
dprev_h = dh_prev_1 + dh_prev_2 + dh_prev_34 # dh_prev_3 + dh_prev_4
dx = dx_1 + dx_23 # +dx_3
dWax = Tools.matmul(x.T, da)
dWar = Tools.matmul((r * prev_h).T, da)
dba = np.sum(da, axis=0)
dWzx = Tools.matmul(x.T, dz_hat)
dWzh = Tools.matmul(prev_h.T, dz_hat)
dbz = np.sum(dz_hat, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba
def gru_step_backward_v1(self, dnext_h, cache):
"""
Backward pass for a single timestep of an LSTM.
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba = None, None, None, None, None, None, None, None
#############################################################################
# TODO: Implement the backward pass for a single timestep of an LSTM. #
# #
# HINT: For sigmoid and tanh you can compute local derivatives in terms of #
# the output value from the nonlinearity. #
#############################################################################
x, prev_h, Wzx, Wzh, Wax, War, z_hat, r, z, a = cache
N, D = x.shape
H = dnext_h.shape[1]
z_hat_H1 = z_hat[:, :H]
z_hat_H2 = z_hat[:, H:2 * H]
# delta
tanha = np.tanh(a)
dh = dnext_h
da = dh * z * (1. - tanha * tanha)
dh_prev_1 = dh * (1. - z)
# dz = dh * (z+tanha)
# dz = dh*tanha+1.-dh*(1.-z)*prev_h
# dz = dh*tanha+1.-dh*prev_h
dz = dh * (tanha - prev_h)
dz_hat_2 = dz * (z * (1. - z))
# dz_hat_2 = dz*(z_hat_H2*(1.-z_hat_H2))
dhat_a = Tools.matmul(da, War.T)
# dz_hat_2 = dhat_r * r
dr = dhat_a * prev_h
dx_1 = Tools.matmul(da, Wax.T)
dh_prev_2 = dhat_a * r # da* Tools.matmul(r,War.T)
# dz_hat_1 = dh_prev_2 * (r * (1. - r))
dz_hat_1 = dr * (r * (1. - r))
dz_hat = np.hstack((dz_hat_1, dz_hat_2))
# dh_prev_3 = Tools.matmul(dz_hat_2,Wzh.T)
# dx_2 = Tools.matmul(dz_hat_2,Wzx.T)
# dh_prev_3 = Tools.matmul(dz_hat,Wzh.T)
# dh_prev_3 = Tools.matmul(dz_hat_2,Wzh.T)
dx_2 = Tools.matmul(dz_hat, Wzx.T)
# dx_3 = Tools.matmul(dz_hat_1,Wzx.T)
# dh_prev_4 =Tools.matmul(dz_hat_1, Wzh.T)
# dx_3 = Tools.matmul(dz_hat,Wzx.T)
# dh_prev_4 =Tools.matmul(dz_hat, Wzh.T)
# dh_prev_34 = np.hstack((dh_prev_3, dh_prev_4))
# dh_prev_34 = Tools.matmul(dh_prev_34,Wzh.T)
dh_prev_34 = Tools.matmul(dz_hat, Wzh.T)
# dprev_h = dh_prev_1+dh_prev_2+dh_prev_34 * 2. #dh_prev_3 + dh_prev_4
# dx = dx_1 + dx_2*2. # +dx_3
dprev_h = dh_prev_1 + dh_prev_2 + dh_prev_34 # dh_prev_3 + dh_prev_4
dx = dx_1 + dx_2 # +dx_3
dWax = Tools.matmul(x.T, da)
dWar = Tools.matmul((r * prev_h).T, da)
dba = np.sum(da, axis=0)
dWzx = Tools.matmul(x.T, dz_hat)
dWzh = Tools.matmul(prev_h.T, dz_hat)
dbz = np.sum(dz_hat, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba
def gru_step_backward_v0(self, dnext_h, cache):
"""
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dprev_h, dWzx, dWzh, dbz, dWax, dWar, dba = None, None, None, None, None, None, None, None
x, prev_h, Wzx, Wzh, Wax, War, z_hat, r, z, a = cache
N, D = x.shape
H = dnext_h.shape[1]
z_hat_H1 = z_hat[:, :H]
z_hat_H2 = z_hat[:, H:2 * H]
# delta
tanha = np.tanh(a)
dh = dnext_h
da = dh * z * (1. - tanha * tanha)
dh_prev_1 = dh * (1. - z)
dz = dh * (z + tanha)
dz_hat_2 = dz * (z * (1. - z))
d13 = np.matmul(da, War.T)
dr = d13 * prev_h
dx_1 = np.matmul(da, Wax.T)
dh_prev_2 = d13 * r
dz_hat_1 = dh_prev_2 * (r * (1. - r))
dz_hat =
|
np.hstack((dz_hat_1, dz_hat_2))
|
numpy.hstack
|
import numpy as np
from scipy.linalg import expm
import modern_robotics as mr
"""
Use 'expm' for matrix exponential.
Angles are in radian, distance are in meters.
"""
# y z x
# base = 3.61 0.8 -3.78 (right, up, back)
def Get_MS():
# =================== Your code starts here ====================#
# Fill in the correct values for S1~6, as well as the M matrix
base = np.array([3.61, 0.8, -3.78])
w_1 = np.array([0, 0, 1]).T
w_2 = np.array([0, 1, 0]).T
w_3 = np.array([0, 1, 0]).T
w_4 = np.array([0, 1, 0]).T
w_5 = np.array([0, 0, 1]).T
w_6 = np.array([0, 1, 0]).T
q_1 = np.array([0, 0, 0.152] ).T
q_2 = np.array([0, 0.120, 0.152]).T
q_3 = np.array([0.244, 0.120, 0.152]).T
q_4 = np.array([0.457, 0.027, 0.152]).T
q_5 = np.array([0.457, 0.11, 0.152]).T
q_6 = np.array([0.457, 0.11, 0.069]).T
v_1 = np.cross(w_1, -1 * q_1)
v_2 = np.cross(w_2, -1 * q_2)
v_3 = np.cross(w_3, -1 * q_3)
v_4 = np.cross(w_4, -1 * q_4)
v_5 = np.cross(w_5, -1 * q_5)
v_6 = np.cross(w_6, -1 * q_6)
s1 = np.concatenate((w_1, v_1))
s2 = np.concatenate((w_2, v_2))
s3 = np.concatenate((w_3, v_3))
s4 = np.concatenate((w_4, v_4))
s5 = np.concatenate((w_5, v_5))
s6 = np.concatenate((w_6, v_6))
S = np.stack( (s1, s2, s3,s4,s5,s6))
M = np.array([ [-1, 0, 0, 0.457],
[0, 1, 0, 0.20],
[0, 0, -1, 0.069],
[0, 0, 0, 1] ] )
return M, S
"""
Function that calculates an elbow up Inverse Kinematic solution for the UR3
"""
def inverse(X, Y, Z):
L1 = 0.152
L3 = 0.244
L5 = 0.213
L6 = 0.083
L7 = 0.083
Xcen = X
Ycen = Y - 0.228
Zcen = Z + 0.083
# find theta 1
cen_len = np.sqrt( (Xcen ** 2 + Ycen ** 2) )
small_theta = np.degrees( np.arcsin( (0.027+0.083) / cen_len ) )
big_theta = np.degrees(np.arctan2(Ycen, Xcen))
theta1 = (big_theta - small_theta)
# find 3end
ext_3end = np.sqrt( Xcen ** 2 + Ycen ** 2 - (0.027+0.083)**2 )
X3end = np.cos( np.radians(theta1) ) * ext_3end
Y3end = np.sin( np.radians(theta1) ) * ext_3end
Z3end = Zcen
# find theta 3
c = np.sqrt((np.sqrt(X3end ** 2 + Y3end ** 2))**2 + (Z3end - L1)**2)
theta3 = 180 - np.degrees(np.arccos( (L3 ** 2 + L5 ** 2 - c ** 2)/(2* L3 * L5) ) )
# find theta 2
theta2_small =np.degrees( np.arctan2((Z3end-L1), (np.sqrt(X3end ** 2 + Y3end ** 2) ) ))
theta2_big = np.degrees( np.arccos( (L3 ** 2 + c ** 2 - L5 ** 2)/(2* L3 * c) ) )
theta2 = -(theta2_small+theta2_big)
# find theta 4
theta4 = -theta2 - theta3
# define theta 5 6 = 0
theta5 = 0
theta6 = 0
M, S = Get_MS()
S = S.T
return ( np.radians(theta1), np.radians(theta2),
|
np.radians(theta3)
|
numpy.radians
|
import numpy as np
import os
import pandas as pd
from network.utils import bbox_iou
import pickle
from tqdm import tqdm
import shutil
import multiprocessing
from configs.default import __C, cfg_from_file
from dataset.Parsers.structures import *
import argparse
class GTSingleParser:
def __init__(self, folder,
min_visibility,
forward_frames,
frame_stride,
tube_thre):
# 1. get the gt path and image folder
gt_file_path = os.path.join(folder, 'gt/gt.txt')
self.folder = folder
self.forward_frames = forward_frames
self.tube_thre = tube_thre
self.min_visibility = min_visibility
self.frame_stride = frame_stride
# 2. read the gt data
gt_file = pd.read_csv(gt_file_path, header=None)
gt_file = gt_file[gt_file[6] == 1] # human class
gt_file = gt_file[gt_file[8] > min_visibility]
gt_group = gt_file.groupby(0)
gt_group_keys = gt_group.indices.keys()
self.max_frame_index = max(gt_group_keys)
# 3. update tracks
self.tracks = Tracks()
self.recorder = {}
for key in gt_group_keys:
det = gt_group.get_group(key).values
ids = np.array(det[:, 1]).astype(int)
det = np.array(det[:, 2:6])
det[:, 2:4] += det[:, :2]
self.recorder[key - 1] = list()
# 3.1 update tracks
for id, d in zip(ids, det):
node = Node(d, key - 1)
track_index, node_index = self.tracks.add_node(node, id)
self.recorder[key - 1].append((track_index, node_index))
def bbox2tube(self, track, mid_id, direction, pos_in_video, thre):
def get_true_z(mid_node, end_node):
return end_node.frame_id - mid_node.frame_id
def get_inter_box(start_box, end_box, inter_id, end_id):
return start_box * (end_id - inter_id) / end_id + end_box * inter_id / end_id
mid_node = track.get_node_by_index(mid_id)
mid_box = mid_node.box
inter_boxes = []
z = 1 if direction == 'front' else -1
if mid_id + z >= len(track.nodes) or mid_id + z < 0:
return
|
np.array([0, 0, 0, 0, 0])
|
numpy.array
|
# ====================================================================================== #
# Module for Median Voter Model.
#
# Note that "O" or "o" refer to "ordinary" voters which is what I used to call Random
# voters.
#
# Author: <NAME>, <EMAIL>
# ====================================================================================== #
import numpy as np
from coniii.utils import *
from coniii.enumerate import fast_logsumexp
from scipy.special import binom, factorial, comb
from scipy.optimize import minimize, root
def create_mvm_p(n, q):
"""Use a probability transition matrix formulation to generate the MVM probability
distribution. This can be quite expensive when generating p.
Parameters
----------
n : int
q : float
Returns
-------
ndarray
Of length 2^n
"""
allStates = bin_states(n, True)
T = np.zeros((2**n,2**n))
for i,s in enumerate(allStates):
if s[1:].sum()!=0 and s[0]!=np.sign(s[1:].sum()):
T[i,i] = 1-q
snew = s.copy()
snew[0] *= -1
ix = np.where((allStates==snew).all(1))[0]
T[ix,i] = q
else:
T[i,i] = 1
p = np.ones(2**n)/2**n
pmvm = T.dot(p)
if q==1:
# check that states with zero probability are now ones where the Median was in the minority
ix = allStates[pmvm>0][:,0]==np.sign(allStates[pmvm>0][:,1:].sum(1))
assert (np.sign(allStates[pmvm>0][ix==0][:,1:].sum(1))==0).all()
return pmvm
def corr(n):
"""Median Voter Model (q=1) pairwise correlations.
Parameters
----------
int : n
Returns
-------
float
<s_median * s_ordinary>
float
<s_o * s_o''>
"""
assert (n%2)==1
Z = 2**(n-2)
# <s_Median s_Ordinary>
smo = (sum([binom(n-1,k) * (k/(n-1) - (n-1-k)/(n-1))
for k in range((n-1)//2,n)])) / Z
soo = 0.
return smo, soo
def couplings(n,
data_corr=None,
full_output=False,
tol=1e-12,
return_as_full_vec=False):
"""Find couplings corresponding to mvm pairwise correlations numerically. First, a
solution is found using scipy.minimize. Then an iterative, fixed point algorithm is
used to refine the solution.
Parameters
----------
n : int
data_corr : ndarray, None
Correlations to fit instead of taking MVM correlations.
(smo, smop, soo, soop)
full_output : bool, False
If True, return output from scipy.minimize.
tol : float, 1e-12
Norm error allowed in fit to pairwise correlations.
return_as_full_vec : bool, False
If True, return the couplings J as part of a full (h, J) vector that can be passed
directly in to the coniii module.
Returns
-------
ndarray
[Jmo, Joo] or (h, J) vector that can be passed to ConIII.
dict (optional)
From scipy.minimize.
"""
if data_corr is None:
smo, soo = corr(n)
smo_fun, _, soo_fun, _, _ = setup_maxent(n)
def cost(params, as_vec=False):
Jmo, Joo = params
if as_vec:
return np.array([smo-smo_fun(Jmo, Jmo, Joo, Joo),
soo-soo_fun(Jmo, Jmo, Joo, Joo)])
return np.sqrt((smo-smo_fun(Jmo, Jmo, Joo, Joo))**2 +
(soo-soo_fun(Jmo, Jmo, Joo, Joo))**2)
soln = minimize(cost, [0,0], tol=1e-10)
soln = root(lambda x:cost(x, as_vec=True), soln['x'])
else:
smo, smop, soo, soop = data_corr
smo_fun, smop_fun, soo_fun, soop_fun, _ = setup_maxent(n)
def cost(params, as_vec=False):
if as_vec:
return np.array([smo - smo_fun(*params),
smop - smop_fun(*params),
soo - soo_fun(*params),
soop - soop_fun(*params)])
return np.sqrt((smo - smo_fun(*params))**2 +
(smop - smop_fun(*params))**2 +
(soo - soo_fun(*params))**2 +
(soop - soop_fun(*params))**2)
soln = minimize(cost, [.1,.1,0,0], tol=1e-10)
soln = root(lambda x:cost(x, as_vec=True), soln['x'])
if return_as_full_vec:
params = np.zeros(n+n*(n-1)//2)
params[n:2*n-1] = soln['x'][0]
params[2*n-1:] = soln['x'][1]
else:
params = soln['x']
if full_output:
return params, soln
return params
def setup_fast_mvm(n):
"""Straightforward MVM with only special correlations between the Median
and all uniform Ordinary voters.
Check formulation in SCOTUS II pg. 116.
Parameters
----------
int : n
Returns
-------
function
smo(Jmo, Joo)
function
soo(Jmo, Joo)
"""
_E_with_maj = lambda Jmo,Joo,k,n=n: -Jmo*(2*k-n-1) - Joo*(binom(k-1,2) + binom(n-k,2) - (k-1)*(n-k))
_E_not_with_maj = lambda Jmo,Joo,k,n=n: -Jmo*(n-2*k-1) - Joo*(binom(n-k-1,2) + binom(k,2) - (n-k-1)*k)
Z = lambda Jmo,Joo,n=n: sum([k/n * binom(n,k) * np.exp(-_E_with_maj(Jmo,Joo,k)) +
(n-k)/n * binom(n,k) * np.exp(-_E_not_with_maj(Jmo,Joo,k))
for k in range(n//2+1,n)]) + np.exp(-_E_with_maj(Jmo,Joo,n))
# <s_m s_o>
def smo(Jmo, Joo, n=n):
return (sum([binom(n,k) * (k/n * (2*k-n-1)/(n-1) * np.exp(-_E_with_maj(Jmo,Joo,k))
+(n-k)/n * (n-2*k-1)/(n-1) * np.exp(-_E_not_with_maj(Jmo,Joo,k)))
for k in range(n//2+1,n)]) + np.exp(-_E_with_maj(Jmo,Joo,n)))/Z(Jmo,Joo)
# <s_o s_o'>
def soo(Jmo, Joo, n=n):
weightmaj = lambda k:k/n * (binom(k-1,2)+binom(n-k,2)-(k-1)*(n-k))/binom(n-1,2)
weightnotmaj = lambda k:(n-k)/n * (binom(k,2)+binom(n-k-1,2)-k*(n-k-1))/binom(n-1,2)
return (sum([binom(n,k) * (weightmaj(k) * np.exp(-_E_with_maj(Jmo,Joo,k)) +
weightnotmaj(k) * np.exp(-_E_not_with_maj(Jmo,Joo,k)))
for k in range(n//2+1,n)]) + np.exp(-_E_with_maj(Jmo,Joo,n)))/Z(Jmo,Joo)
return smo, soo, Z
def setup_maxent(n):
"""Correlation functions of the Median Voter Model with special Ordinary voter O' that
has special couplings with the Median and the remaining O voters. Using more stable
formulation of logsumexp.
Check formulation in SCOTUS II pg. 116.
Parameters
----------
int : n
Returns
-------
function
smo(Jm, Jmp, Jo, Jop)
function
smo_prime(Jm, Jmp, Jo, Jop)
function
soo(Jm, Jmp, Jo, Jop)
function
soo_prime(Jm, Jmp, Jo, Jop)
function
pk(Jm, Jmp, Jo, Jop)
Distribution of k votes in the majority.
"""
E_with_maj_with_median = lambda Jm,Jmp,Jo,Jop,k,n=n: -(Jm*(2*k-n-2) +
Jmp +
Jo*(binom(k-2,2)+binom(n-k,2)-(k-2)*(n-k)) +
Jop*(2*k-n-2))
E_with_maj_against_median = lambda Jm,Jmp,Jo,Jop,k,n=n: -(Jm*(2*k-n) -
Jmp +
Jo*(binom(k-1,2)+binom(n-k-1,2)-(k-1)*(n-k-1)) +
Jop*(n-2*k))
E_not_with_maj_with_median = lambda Jm,Jmp,Jo,Jop,k,n=n: -(Jm*(n-2*k-2) +
Jmp +
Jo*(binom(n-k-2,2)+binom(k,2)-(n-k-2)*k) +
Jop*(n-2*k-2))
E_not_with_maj_against_median = lambda Jm,Jmp,Jo,Jop,k,n=n: -(Jm*(n-2*k) -
Jmp +
Jo*(binom(n-k-1,2)+binom(k-1,2)-(n-k-1)*(k-1)) +
Jop*(2*k-n))
def logZ(*J, n=n):
coeffs = []
exp = []
for k in range(n//2+1, n-1):
coeffs += [k * (k-1) / (n * (n-1)),
k * (n-k) / (n * (n-1)),
(n-k) * (n-k-1) / (n * (n-1)),
(n-k) * k / (n * (n-1))]
exp += [-E_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_with_maj_against_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_against_median(*J, k) + np.log(binom(n,k))]
coeffs += [(n-2) / n, 1/n, 1/n, 1]
exp += [-E_with_maj_with_median(*J, n-1) + np.log(n),
-E_with_maj_against_median(*J, n-1) + np.log(n),
-E_not_with_maj_against_median(*J, n-1) + np.log(n),
-E_with_maj_with_median(*J, n)]
return fast_logsumexp(exp, coeffs)[0]
#Z = lambda Jm,Jmp,Jo,Jop,n=n:( sum([binom(n,k) *
# (k/n * ((k-1)/(n-1) * np.exp(-E_with_maj_with_median(Jm,Jmp,Jo,Jop,k)) +
# (n-k)/(n-1) * np.exp(-E_with_maj_against_median(Jm,Jmp,Jo,Jop,k))) +
# (n-k)/n * ((n-k-1)/(n-1) * np.exp(-E_not_with_maj_with_median(Jm,Jmp,Jo,Jop,k)) +
# k/(n-1) * np.exp(-E_not_with_maj_against_median(Jm,Jmp,Jo,Jop,k))))
# for k in range(n//2+1,n-1)]) +
# n*((n-1)/n * ((n-2)/(n-1) * np.exp(-E_with_maj_with_median(Jm,Jmp,Jo,Jop,n-1)) +
# 1/(n-1) * np.exp(-E_with_maj_against_median(Jm,Jmp,Jo,Jop,n-1))) +
# 1/n * np.exp(-E_not_with_maj_against_median(Jm,Jmp,Jo,Jop,n-1))) +
# np.exp(-E_with_maj_with_median(Jm,Jmp,Jo,Jop,n)))
# <s_Median s_Ordinary>
def smo(*J, n=n):
coeffs = []
exp = []
for k in range(n//2+1, n-1):
coeffs += [k * (k-1) * (2*k-n-2) / (n * (n-1) * (n-2)),
k * (n-k) * (2*k-n) / (n * (n-1) * (n-2)),
(n-k) * (n-k-1) * (n-2*k-2) / (n * (n-1) * (n-2)),
(n-k) * k * (n-2*k) / (n * (n-1) * (n-2))]
exp += [-E_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_with_maj_against_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_against_median(*J, k) + np.log(binom(n,k))]
coeffs += [(n-4)/n, 1/n, -1/n, 1]
exp += [-E_with_maj_with_median(*J, n-1) + np.log(n),
-E_with_maj_against_median(*J, n-1) + np.log(n),
-E_not_with_maj_against_median(*J, n-1) + np.log(n),
-E_with_maj_with_median(*J, n)]
num, sign = fast_logsumexp(exp, coeffs)
return sign * np.exp( num - logZ(*J) )
# <s_M s_O'>
def smop(*J, n=n):
coeffs = []
exp = []
for k in range(n//2+1, n-1):
coeffs += [k * (k-1) / (n * (n-1)),
k * (k-n) / (n * (n-1)),
(n-k) * (n-k-1) / (n * (n-1)),
(n-k) * -k / (n * (n-1))]
exp += [-E_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_with_maj_against_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_against_median(*J, k) + np.log(binom(n,k))]
coeffs += [(n-2)/n, -1/n, -1/n, 1]
exp += [-E_with_maj_with_median(*J, n-1) + np.log(n),
-E_with_maj_against_median(*J, n-1) + np.log(n),
-E_not_with_maj_against_median(*J, n-1) + np.log(n),
-E_with_maj_with_median(*J, n)]
num, sign = fast_logsumexp(exp, coeffs)
return sign * np.exp( num - logZ(*J) )
# <s_O s_O''>
def soo(*J, n=n):
coeffs = []
exp = []
for k in range(n//2+1, n-1):
coeffs += [k * (k-1) * (binom(k-2,2)+binom(n-k,2)-(k-2)*(n-k)) / (n * (n-1) * binom(n-2,2)),
k * (n-k) * (binom(k-1,2)+binom(n-k-1,2)-(k-1)*(n-k-1)) / (n * (n-1) * binom(n-2,2)),
(n-k) * (n-k-1) * (binom(n-k-2,2)+binom(k,2)-(n-k-2)*k) / (n * (n-1) * binom(n-2,2)),
(n-k) * k * (binom(n-k-1,2)+binom(k-1,2)-(n-k-1)*(k-1)) / (n * (n-1) * binom(n-2,2))]
exp += [-E_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_with_maj_against_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_against_median(*J, k) + np.log(binom(n,k))]
coeffs += [(n-2) * (binom(n-3,2)-(n-3)) / (n * binom(n-2,2)), 1/n, 1/n, 1]
exp += [-E_with_maj_with_median(*J, n-1) + np.log(n),
-E_with_maj_against_median(*J, n-1) + np.log(n),
-E_not_with_maj_against_median(*J, n-1) + np.log(n),
-E_with_maj_with_median(*J, n)]
num, sign = fast_logsumexp(exp, coeffs)
return sign * np.exp( num - logZ(*J) )
# <s_O s_O'>
def sop(*J, n=n):
coeffs = []
exp = []
for k in range(n//2+1, n-1):
coeffs += [k * (k-1) * (2*k-n-2) / (n * (n-1) * (n-2)),
k * (n-k) * (n-2*k) / (n * (n-1) * (n-2)),
(n-k) * (n-k-1) * (n-2*k-2) / (n * (n-1) * (n-2)),
(n-k) * k * (2*k-n) / (n * (n-1) * (n-2))]
exp += [-E_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_with_maj_against_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_with_median(*J, k) + np.log(binom(n,k)),
-E_not_with_maj_against_median(*J, k) + np.log(binom(n,k))]
coeffs += [(n-4)/n, -1/n, 1/n, 1]
exp += [-E_with_maj_with_median(*J, n-1) + np.log(n),
-E_with_maj_against_median(*J, n-1) +
|
np.log(n)
|
numpy.log
|
import os
from tqdm import tqdm
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
videos = os.listdir("videos")
annots = json.load(open("annotations.json", "r"))
if not os.path.isdir("train"):
os.mkdir("train")
if not os.path.isdir("train/crash"):
os.mkdir("train/crash")
if not os.path.isdir("train/nocrash"):
os.mkdir("train/nocrash")
for file in tqdm(videos):
path = os.path.join("videos", file)
try:
annot=annots[file]
except:
continue
if (len(annot)==0): continue
keyframes = [annot[i]['keyframes'] for i in range(len(annot))]
frame_bounds = []
tqdm.write(file)
for frame in keyframes:
frame_bounds.append((int(frame[0]['frame']), int(frame[1]['frame'])))
try:
cap = cv2.VideoCapture(path)
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frames = np.empty((frameCount, 224, 224, 3), np.dtype('uint8'))
fc = 0
ret = True
while (fc < frameCount and ret):
ret, frame = cap.read()
frames[fc] = cv2.resize(frame, (224, 224))
fc += 1
cap.release()
except:
print(f"ERROR: {file}")
continue
crashframes=[]
for i in frame_bounds:
crashframes.extend(frames[i[0]:i[1]])
nocrashframes = np.array(frames[0:frame_bounds[0][0]])
if nocrashframes.shape[0]==0:
nocrashframes=np.array([])
crashframes =
|
np.array(crashframes)
|
numpy.array
|
import re
from scipy import misc
import numpy as np
# np.set_printoptions(threshold=np.nan)
import sys
import pandas as pd
import os
from config import Config as cfg
from libs.pyntcloud.pyntcloud import PyntCloud
import glob
from sklearn.model_selection import train_test_split
from itertools import compress
from config import DATA_TYPES_3D
from sklearn.decomposition import PCA
from colorama import Fore, Back, Style
######################################################################################################
######################################################################################################
def read(file):
if file.endswith('.float3'): return readFloat(file)
elif file.endswith('.flo'): return readFlow(file)
elif file.endswith('.ppm'): return readImage(file)
elif file.endswith('.pgm'): return readImage(file)
elif file.endswith('.png'): return readImage(file)
elif file.endswith('.jpg'): return readImage(file)
elif file.endswith('.pfm'): return readPFM(file)[0]
else: raise Exception('don\'t know how to read %s' % file)
def write(file, data):
if file.endswith('.float3'): return writeFloat(file, data)
elif file.endswith('.flo'): return writeFlow(file, data)
elif file.endswith('.ppm'): return writeImage(file, data)
elif file.endswith('.pgm'): return writeImage(file, data)
elif file.endswith('.png'): return writeImage(file, data)
elif file.endswith('.jpg'): return writeImage(file, data)
elif file.endswith('.pfm'): return writePFM(file, data)
else: raise Exception('don\'t know how to write %s' % file)
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == 'PF':
color = True
elif header.decode("ascii") == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
def writePFM(file, image, scale=1):
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write('PF\n' if color else 'Pf\n'.encode())
file.write('%d %d\n'.encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
file.write('%f\n'.encode() % scale)
image.tofile(file)
def readFlow(name):
if name.endswith('.pfm') or name.endswith('.PFM'):
return readPFM(name)[0][:,:,0:2]
f = open(name, 'rb')
header = f.read(4)
if header.decode("utf-8") != 'PIEH':
raise Exception('Flow file header does not contain PIEH')
width = np.fromfile(f, np.int32, 1).squeeze()
height = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))
return flow.astype(np.float32)
def readImage(name):
if name.endswith('.pfm') or name.endswith('.PFM'):
data = readPFM(name)[0]
if len(data.shape)==3:
return data[:,:,0:3]
else:
return data
return misc.imread(name)
def writeImage(name, data):
if name.endswith('.pfm') or name.endswith('.PFM'):
return writePFM(name, data, 1)
return misc.imsave(name, data)
def writeFlow(name, flow):
f = open(name, 'wb')
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
def readFloat(name):
f = open(name, 'rb')
if(f.readline().decode("utf-8")) != 'float\n':
raise Exception('float file %s did not contain <float> keyword' % name)
dim = int(f.readline())
dims = []
count = 1
for i in range(0, dim):
d = int(f.readline())
dims.append(d)
count *= d
dims = list(reversed(dims))
data = np.fromfile(f, np.float32, count).reshape(dims)
if dim > 2:
data = np.transpose(data, (2, 1, 0))
data = np.transpose(data, (1, 0, 2))
return data
def writeFloat(name, data):
f = open(name, 'wb')
dim=len(data.shape)
if dim>3:
raise Exception('bad float file dimension: %d' % dim)
f.write(('float\n').encode('ascii'))
f.write(('%d\n' % dim).encode('ascii'))
if dim == 1:
f.write(('%d\n' % data.shape[0]).encode('ascii'))
else:
f.write(('%d\n' % data.shape[1]).encode('ascii'))
f.write(('%d\n' % data.shape[0]).encode('ascii'))
for i in range(2, dim):
f.write(('%d\n' % data.shape[i]).encode('ascii'))
data = data.astype(np.float32)
if dim==2:
data.tofile(f)
else:
np.transpose(data, (2, 0, 1)).tofile(f)
######################################################################################################
######################################################################################################
######################################################################################################
######################################################################################################
def getBlackListDirs():
black_dirs_txt = "black_list_dirs.txt"
with open(black_dirs_txt, 'r') as f:
black_dirs = f.read().splitlines()
return black_dirs
def load_sequence(datatype3d_base_dir, sceneflow_base_dir, sample_base_dir, data_type):
samples = []
sceneflow_dir = os.path.join(sceneflow_base_dir, sample_base_dir)
for path in sorted(glob.glob(sceneflow_dir + "/*")):
sceneflow_path = path.replace('\\', '/')
sample_number_0 = os.path.basename(path).split('.')[0]
if data_type == DATA_TYPES_3D['POINTCLOUD']:
sample_path_0 = os.path.join(sample_base_dir, sample_number_0 + ".npy")
elif data_type == DATA_TYPES_3D['BOTH']:
sample_path_0 = os.path.join(sample_base_dir, sample_number_0 + ".npz")
sample_name = sample_base_dir.replace('/', '-') + "-" + sample_number_0
sample_number_1 = str(int(os.path.basename(path).split('.')[0]) + 1).zfill(4)
if data_type == DATA_TYPES_3D['POINTCLOUD']:
sample_path_1 = os.path.join(sample_base_dir, sample_number_1 + ".npy")
elif data_type == DATA_TYPES_3D['BOTH']:
sample_path_1 = os.path.join(sample_base_dir, sample_number_1 + ".npz")
datatype3d_path_0 = os.path.join(datatype3d_base_dir, sample_path_0)
datatype3d_path_1 = os.path.join(datatype3d_base_dir, sample_path_1)
sample = [datatype3d_path_0, datatype3d_path_1, sceneflow_path, sample_name]
samples.append(sample)
return samples
def sequence_exists(sceneflow_base_dir, sample_base_dir):
"""
Returns whether or not the path to a sequence exists
:param sceneflow_base_dir:
:param sample_base_dir:
:return:
"""
sequence_path = os.path.join(sceneflow_base_dir, sample_base_dir)
if os.path.isdir(sequence_path):
return True
else:
return False
def check_sequence_number(number):
"""
Checks if the sequence number ''number'' is a valid one
:param number:
:return:
"""
if number >= 750:
raise Exception("Sequences range from 0000 to 0749")
def load_files(input_base_dir, sceneflow_base_dir, data_split, data_type, sequences_to_use):
"""
Load numpy files containing the voxelgrids and the sceneflow groundtruth
:param dataset_path:
:return: list of path files for the voxelgrids and the sceneflow groungtruth
"""
black_list_dirs = getBlackListDirs()
all_samples = []
if sequences_to_use == "ALL":
## Use the whole dataset
for letter in os.listdir(os.path.join(sceneflow_base_dir, data_split)):
for number in os.listdir(os.path.join(sceneflow_base_dir, data_split, letter)):
sequence = os.path.join(letter, number)
sample_base_dir = os.path.join(data_split, sequence).replace('\\', '/')
if sample_base_dir in black_list_dirs:
continue
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
else:
for sequence_to_use in sequences_to_use:
if sequence_to_use == "A" or sequence_to_use == "B" or sequence_to_use == "C":
"""Get a complete letter"""
letter = sequence_to_use
for number in os.listdir(os.path.join(sceneflow_base_dir, data_split, letter)):
sequence = os.path.join(letter, number)
sample_base_dir = os.path.join(data_split, sequence).replace('\\', '/')
if sample_base_dir in black_list_dirs:
continue
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
elif "-" in sequence_to_use:
letter, numbers_range = sequence_to_use.split('/')
_from, _to = numbers_range.split('-')
_from, _to = int(_from), int(_to)
check_sequence_number(_from)
check_sequence_number(_to)
for number in range(_from, _to + 1):
number = str(number).zfill(4)
sequence = os.path.join(letter, number)
sample_base_dir = os.path.join(data_split, sequence).replace('\\', '/')
if sample_base_dir in black_list_dirs or not sequence_exists(sceneflow_base_dir, sample_base_dir):
continue
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
else:
number = int(sequence_to_use.split('/')[1])
check_sequence_number(number)
sample_base_dir = os.path.join(data_split, sequence_to_use).replace('\\', '/')
if sample_base_dir in black_list_dirs:
raise Exception("Sequence to eval is in Black List!")
sequence_samples = load_sequence(input_base_dir, sceneflow_base_dir, sample_base_dir, data_type)
all_samples.append(sequence_samples)
final_samples = []
for sequence_samples in all_samples:
for sample in sequence_samples:
final_samples.append(sample)
return final_samples
def get_train_val_loader(dataset_dir, data_split, data_type, use_local, use_normal,
sequences_to_train=None, batch_size_train=1, batch_size_val=1,
validation_percentage=0.05):
"""
Compute dataset loader
:param dataset_dir:
:param batch_size:
:return:
"""
import torch.utils.data
from torch.utils.data.dataloader import default_collate
if cfg.model_name == "SiameseModel3D":
detection_collate = detection_collate_baseline_train
elif cfg.model_name == "SiamesePointNet":
detection_collate = detection_collate_pointnet_train
if data_type == DATA_TYPES_3D['POINTCLOUD']:
from loader import PointcloudDataset as Dataset
elif data_type == DATA_TYPES_3D['BOTH']:
if cfg.model_name == "SiameseModel3D":
from loader import SiameseBaselineDatasetTrain as Dataset
elif cfg.model_name == "SiamesePointNet":
from loader import SiamesePointNetDatasetTrain as Dataset
## Load files lists
if cfg.model_name == "SiameseModel3D":
vg_or_pcl_dir = os.path.join(dataset_dir, "pointcloud_voxelgrid")
else:
if use_local:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features_normals")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features")
else:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_normals_features")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_features")
sceneflow_dir = os.path.join(dataset_dir, "sceneflow")
samples = load_files(vg_or_pcl_dir, sceneflow_dir, data_split, data_type, sequences_to_train)
samples_train, samples_val = train_test_split(samples, test_size=validation_percentage,
random_state=20)
#####################################################################
## HELP: DO NOT REMOVE - USE TO GET THE SAMPLES IN VALIDATION SET ###
#####################################################################
# validation_samples = []
# for sample_val in samples_val:
# validation_samples.append(sample_val[-1])
# validation_samples.sort()
# with open("validation_samples.txt", "w") as f:
# for sample in validation_samples:
# f.write(sample + "\n")
#####################################################################
#####################################################################
## Create TRAIN loader
train_dataset = Dataset(samples_train)
print("Train Dataset's length:", len(train_dataset))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size_train, shuffle=True,
num_workers=8, collate_fn=detection_collate,
drop_last=True, pin_memory=False)
## Create VAL loader
val_dataset = Dataset(samples_val)
print("Val Dataset's length:", len(val_dataset))
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size_val, shuffle=True,
num_workers=8, collate_fn=detection_collate,
drop_last=True, pin_memory=False)
print("Number of training batches: ", len(train_loader),
"(Samples: ", str(len(train_loader) * batch_size_train), ")")
print("Number of val batches: ", len(val_loader),
"(Samples: ", str(len(val_loader) * batch_size_val), ")")
return train_loader, val_loader
def get_eval_loader(dataset_dir, data_split, data_type, use_local, use_normal,
sequences_to_eval=None, batch_size=1):
"""
Compute dataset loader
:param dataset_dir:
:param batch_size:
:return:
"""
import torch.utils.data
from torch.utils.data.dataloader import default_collate
if cfg.model_name == "SiameseModel3D":
detection_collate = detection_collate_baseline_test
elif cfg.model_name == "SiamesePointNet":
detection_collate = detection_collate_pointnet_test
if data_type == DATA_TYPES_3D['POINTCLOUD']:
from loader import PointcloudDataset as Dataset
elif data_type == DATA_TYPES_3D['BOTH']:
if cfg.model_name == "SiameseModel3D":
from loader import SiameseBaselineDatasetTest as Dataset
elif cfg.model_name == "SiamesePointNet":
from loader import SiamesePointNetDatasetTest as Dataset
## Load files lists
if cfg.model_name == "SiameseModel3D":
vg_or_pcl_dir = os.path.join(dataset_dir, "pointcloud_voxelgrid")
else:
if use_local:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features_normals")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_features")
else:
if use_normal:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_normals_features")
else:
vg_or_pcl_dir = os.path.join(dataset_dir, "voxels_xyz_features")
sceneflow_dir = os.path.join(dataset_dir, "sceneflow")
samples = load_files(vg_or_pcl_dir, sceneflow_dir, data_split, data_type, sequences_to_eval)
## Create TRAIN loader
eval_dataset = Dataset(samples)
print("eval Dataset's length:", len(eval_dataset))
eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=batch_size, shuffle=True,
num_workers=8, collate_fn=detection_collate,
drop_last=True, pin_memory=False)
print("Number of eval batches: ", len(eval_loader),
"(Samples: ", str(len(eval_loader) * batch_size), ")")
return eval_loader
#################################################################################################
#################################################################################################
#################################################################################################
#################################################################################################
def compute_voxelgrid_and_sceneflow(color_frame, of_frame, disp_frame, dispChange_frame,
data_type_3D):
# import time
# import matplotlib.pyplot as plt
# import cv2
height, width, _ = color_frame.shape
## Store our input data with high precision
# colors_np_A = color_frame.reshape(-1, 3)
of =
|
np.asarray(of_frame, dtype=np.float64)
|
numpy.asarray
|
"""
This file contains stochastic generation code that I am releasing to the group. I will try to keep it updated,
however, if you would like to most up-to-date research code that I am using, you should email me at
<EMAIL>. (or text me)
This file contains several different useful generation classes.
(1) StatisticsGenerator: This is the base generation class. It samples the gaussian random field, does filtering (assuming
filtering is called), and returns in without any post-processing
(2) EigenGenerator_... - this are series of child classes that implement generation of eigenmicrostructures. I would suggest using
EigenGenerator_SquareLocalNMaximumConstrained (this one is the generator that is described in my paper).
(3) PhaseFieldGenerator - This is a generator where I just slapped a soft-max function on the output. I have not tested it at all.
Use at your own risk.
By: <NAME>
"""
import numpy as np
from HelperFunctions_StochasticGeneration import disect, rescale, local_square_mean
try:
import torch
except:
print("Couldn't find pytorch. Don't use AutoEigen")
ctol = 1e-8
class StatisticsGenerator():
def __init__(self, statistics, statistics_type='complete'):
"""
Initializing the class to be able to generate new structures given a set of statistics.
The second term indicates to the code whether the statistics passed in are a complete row (N statistics), or
a reduced row (N-1).
Statistics are assumed to be provided as a numpy array, where the first array, [..., 0], is the autocorrelation
and the remaining arrays are cross-correlations
The statistics are also assumed to be provided [0,0,0] being the t=0 point (so fftshift has not been applied)
:param statistics:
:param statistics_type: an indicator which explains to the code whether the statistics are complete. Can be
"incomplete" or "complete".
:return:
"""
# Some useful parameters
self.N = np.array(statistics.shape)[:-1].prod()
self.shape = statistics.shape[:-1]
self.twoD = True if (self.shape.__len__()<3) else False
# First we will transform all the statistics into the frequency domain
self.two_point_fft = np.fft.fftn(statistics, axes=tuple(range(0, self.shape.__len__())))
# Compute the zero mean:
self.means = self.two_point_fft[tuple([0] * (self.shape.__len__()) + [slice(None)])].real.copy()
self.means[0] = (self.means[0]/self.N)**(0.5)
# Computing the Final Phase, if we are interested in a conserved N-Phase Structure
if statistics_type.lower() == 'incomplete':
final_phase = np.zeros(self.shape, dtype=np.complex128)
final_phase[tuple([0] * self.shape.__len__())] = self.N * self.means[0]
final_phase -= self.two_point_fft.sum(axis=-1)
self.two_point_fft =
|
np.concatenate((self.two_point_fft, final_phase[..., np.newaxis]), axis=-1)
|
numpy.concatenate
|
import unittest
import numpy as np
from nptest import nptest
class MathematicalFunctionsTests(unittest.TestCase):
#region Trigonometric Functions
def test_sin_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.sin(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.sin(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.sin(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.sin(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.sin(a, where= x, out = out )
print(b)
def test_sin_3(self):
a = np.arange(0, 5, dtype = np.float64)
b = np.sin(a)
c = np.sin(a[::-1])
print(b)
print(c)
def test_cos_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.cos(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.cos(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.cos(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.cos(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.cos(a, where= x, out = out )
print(b)
def test_tan_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.tan(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b = np.tan(a)
print(b)
a = np.arange(0, 10, dtype = np.int16)
a = a[::2]
b = np.tan(a)
print(b)
print("********")
a = np.arange(0, 10, dtype = np.float64).reshape((1,2,5))
a = a[::2]
b = np.tan(a)
print(b)
print("********")
a = np.array([[0,1,2,3,4],[5,6,7,8,9]])
a = a[::2]
x = a>2
out = np.zeros_like(a, dtype=np.float64)
b = np.tan(a, where= x, out = out )
print(b)
def test_arcsin_1(self):
a = np.linspace(-1.0, 1.0, 12)
print(a)
b = np.arcsin(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arcsin(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arcsin(a, where= x, out = out )
print(b)
def test_arccos_1(self):
a = np.linspace(-1.0, 1.0, 12)
print(a)
b = np.arccos(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arccos(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arccos(a, where= x, out = out )
print(b)
def test_arctan_1(self):
a = np.linspace(-1.0, 1.0, 12)
print(a)
b = np.arctan(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12).reshape((2,2,3))
a = a[::2]
b = np.arctan(a)
print(b)
print("********")
a = np.linspace(-1.0, 1.0, 12)
a = a[::2]
x = a > -0.5
print(x)
out = np.zeros_like(a, dtype=np.float64)
b = np.arctan(a, where= x, out = out )
print(b)
def test_hypot_1(self):
a = np.hypot(np.ones((3, 3)) * 3, np.ones((3, 3)) * 4)
print(a)
b = np.hypot(np.ones((3, 3)) * 3, [4])
print(b)
def test_arctan2_1(self):
x = np.array([-1, +1, +1, -1])
y = np.array([-1, -1, +1, +1])
z = np.arctan2(y, x) * 180 / np.pi
print(z)
a = np.arctan2([1., -1.], [0., 0.])
print(a)
b = np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
print(b)
def test_degrees_1(self):
rad = np.arange(12.)*np.pi/6
a = np.degrees(rad)
print(a)
out = np.zeros((rad.shape))
r = np.degrees(rad, out)
print(np.all(r == out))
def test_radians_1(self):
deg = np.arange(12.0, dtype=np.float64) * 30.0;
a = np.radians(deg)
print(a)
out = np.zeros((deg.shape))
r = np.radians(deg, out)
print(np.all(r == out))
def test_rad2deg_1(self):
rad = np.arange(12.)*np.pi/6
a = np.rad2deg(rad)
print(a)
out = np.zeros((rad.shape))
r = np.rad2deg(rad, out)
print(np.all(r == out))
def test_deg2rad_1(self):
deg = np.arange(12.0, dtype=np.float64) * 30.0;
a = np.deg2rad(deg)
print(a)
out = np.zeros((deg.shape))
r = np.deg2rad(deg, out)
print(np.all(r == out))
#endregion
#region Hyperbolic functions
def test_sinh_1(self):
a = np.arange(0, 10, dtype = np.float64)
a = a[::2]
b = np.sinh(a)
print(b)
a = np.arange(0, 10, dtype = np.float32)
a = a[::2]
b =
|
np.sinh(a)
|
numpy.sinh
|
import numpy as np
from matplotlib import pyplot as plt
def Filter(I, f):
'''
Filter an image I with matrix filter f.
Inputs:
f -- numpy array of shape (m,n), where m and n are odd
I -- numpy array of shape (k,l), the image
Outpus:
O -- numpy array of shape (k,l), the filtered image
'''
m,n = f.shape
k,l = I.shape
#initialize the output array
O = np.empty((k,l))
#create a padded version of I by padding with zeros
N = np.zeros((k+m-1, l+n-1))
N[m/2:m/2+k, n/2:n/2+l] = I
#set the ouput pixels
for i in xrange(k):
for j in xrange(l):
O[i,j] = (f*N[i:i+m,j:j+n]).sum()
return O
def plotEdges(I):
'''
Find and plot the edges of an image I using the Sobel filter.
Inputs:
I -- numpy array of shape (m,n), the image
Returns:
This function returns nothing, but the last line is a call
to the function plt.show() from matplotlib.pyplot
'''
#create the Sobel filter (for the vertical gradient)
S = np.array([[-1,-2,-1],
[0,0,0],
[1,2,1]])/8.
#filter the image horizontally and vertically to get gradient values
Oy = Filter(K, S)
Ox = Filter(K, S.T)
#combine to obtain gradient magnitude at each pixel
O =
|
np.sqrt(Oy**2+Ox**2)
|
numpy.sqrt
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 16:33:26 2020
@author: user
"""
import numpy as np
from scipy.stats import entropy
def BS_Corrcoef(imagecube, num):
x, y, z = imagecube.shape
hyperspectral = imagecube.reshape((x * y, z))
scores = np.zeros((z, z))
for i in range(z):
for j in range(z):
scores[i, j] = np.min(np.min(np.corrcoef(hyperspectral[:, i], hyperspectral[:, j])))
hyperspectral_corrcoef = np.zeros((2, z))
hyperspectral_corrcoef[0, :] = range(z)
for i in range(z):
hyperspectral_corrcoef[1, :] =
|
np.sum(scores[i, :])
|
numpy.sum
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
import time
import matplotlib.pyplot as plt
def LossShapeVAE(recon_x, x, mu, logvar):
x =
|
np.reshape(x,(-1,1,50,50))
|
numpy.reshape
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Spirograph Activity
Author: tennessee
Created on: 2017-03-21
Copyright 2017, <NAME>.
"""
import numpy as np
from activity import Activity, NumericalActivityParam
class SpirographActivity(Activity):
def __init__(self, parent, drawbot, *args, **kwargs):
super(SpirographActivity, self).__init__(parent, drawbot, *args, **kwargs)
self._name = "Spirograph"
self._param_ctrls = {}
self._params = {
"turns": NumericalActivityParam(name="turns", desc="Num Turns", value=6.0, fmt="%.1f", min_val=1.0, max_val=10.0),
"l": NumericalActivityParam(name="l", desc="l", value=0.5, fmt="%.3f", min_val=0.01, max_val=0.99),
"k": NumericalActivityParam(name="k", desc="k", value=0.35, fmt="%.3f", min_val=0.01, max_val=0.99),
"R": NumericalActivityParam(name="R", desc="R", value=30.0, fmt="%.1f", min_val=10.0, max_val=30.0),
}
self._x = []
self._y = []
self.update_geometry()
def handle_event(self, event_dict):
# If parent handled event, return
if super(SpirographActivity, self).handle_event(event_dict):
return
event_type = event_dict["event"]
if event_type == "param_changed":
self._parent.handle_event({"event": "activity_updated"})
def update_geometry(self):
n_turns = self._params["turns"].value
R = self._params["R"].value
l = self._params["l"].value
k = self._params["k"].value
quality = 4
scale = 2 * np.pi / (60.0 * quality)
max_points = n_turns * 2.0 * np.pi * n_turns / scale
theta = np.arange(0, max_points) * scale
tx, ty = tuple(self._drawbot.kine.get_work_area_centroid())
self._x = tx + R * ((1 - k) * np.cos(theta) + (l * k) * np.cos(((1 - k) / k) * theta))
self._y = ty + R * ((1 - k) * np.sin(theta) - (l * k) * np.sin(((1 - k) / k) * theta))
def draw_preview(self, ax):
xmin = min(self._x)
xmax = max(self._x)
ymin = min(self._y)
ymax = max(self._y)
drawing_path = np.column_stack((self._x, self._y))
extents = [xmin, xmax, ymin, ymax]
ax.plot(drawing_path[:, 0], drawing_path[:, 1], 'b-')
#ax.axis(extents)
def start_drawing(self):
self._drawbot.pen_up()
self._drawbot.goto((self._x[0], self._y[0]))
self._drawbot.pen_down()
drawing_path =
|
np.column_stack((self._x, self._y))
|
numpy.column_stack
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import values
from tensorflow.python.eager import test
from tensorflow.python.estimator import keras as keras_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = keras.layers.Input(shape=_INPUT_SIZE)
b = keras.layers.Dense(16, activation='relu')(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
# Read m
interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = keras.utils.to_categorical(y_train)
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = keras.utils.to_categorical(y_test)
dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
# TPUs currently require fully defined input shapes, drop_remainder ensures
# the input will have fully defined shapes.
if isinstance(distribution, tpu_strategy.TPUStrategy):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
strategies_minus_tpu = [
combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus]
tpu_strategies = [
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'])
def tpu_strategy_combinations():
return combinations.combine(
distribution=tpu_strategies,
mode=['graph'])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def all_strategy_combinations_minus_default():
strategy_minus_default_combinations = combinations.combine(
distribution=[
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager'])
return strategy_minus_default_combinations + tpu_strategy_combinations()
# TODO(priyag): Add v2 optimizers here.
def strategy_and_optimizer_combinations():
return combinations.times(
all_strategy_combinations(),
combinations.combine(
optimizer=[combinations.adagrad_optimizer_v1_fn,
combinations.adam_optimizer_v1_fn,
combinations.gradient_descent_optimizer_v1_fn,
combinations.rmsprop_optimizer_v1_fn]))
def strategy_for_numpy_input_combinations():
return combinations.combine(
distribution=strategies_minus_tpu + tpu_strategies,
mode=['graph'])
class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
parameterized.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(),
'keras_mirrored_strategy_test')
gfile.MakeDirs(self._base_dir)
self._config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)
def tearDown(self):
writer_cache.FileWriterCache.clear()
if os.path.isdir(self._base_dir):
gfile.DeleteRecursively(self._base_dir)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph']))
def test_train_functional_with_distribution_strategy(self, distribution):
keras_model = simple_functional_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[keras.metrics.CategoricalAccuracy()],
optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=distribution,
eval_distribute=distribution)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph']))
def test_train_sequential_with_distribution_strategy(self, distribution):
keras_model = simple_sequential_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[keras.metrics.CategoricalAccuracy()],
optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=distribution)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph']))
def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self, distribution):
train_data, test_data = get_multi_inputs_multi_outputs_data()
def train_input_fn():
input_dict = {
'input_a': train_data['input_a'],
'input_b': train_data['input_b'],
'input_m': train_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': train_data['output_c'],
'dense_3': train_data['output_d']
}
return dataset_ops.Dataset.from_tensor_slices((input_dict,
output_dict)).batch(16)
def eval_input_fn():
input_dict = {
'input_a': test_data['input_a'],
'input_b': test_data['input_b'],
'input_m': test_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': test_data['output_c'],
'dense_3': test_data['output_d']
}
return dataset_ops.Dataset.from_tensor_slices((input_dict,
output_dict)).batch(16)
self.do_test_multi_inputs_multi_outputs_with_input_fn(
distribution, train_input_fn, eval_input_fn)
def do_test_multi_inputs_multi_outputs_with_input_fn(
self, distribution, train_input_fn, eval_input_fn):
config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=distribution)
with self.cached_session():
model = multi_inputs_multi_outputs_model()
est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)
baseline_eval_results = est_keras.evaluate(
input_fn=eval_input_fn, steps=1)
est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph']))
def test_keras_optimizer_with_distribution_strategy(self, distribution):
keras_model = simple_sequential_model()
keras_model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.rmsprop(lr=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=distribution)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
config=config)
with self.assertRaisesRegexp(ValueError,
'Only TensorFlow native optimizers are '
'supported with DistributionStrategy.'):
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calculating_input_params_no_steps_no_batch_size(self, distribution):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(distribution):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
# Input samples of different sizes
input_20_samples = np.zeros((20, 3), dtype=np.float32)
input_63_samples = np.zeros((63, 3), dtype=np.float32)
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# Default global batch size 32 for input with 64 samples run in 2 steps
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=None)
self.assertEqual(batch_size, 32 // replica_scale_factor)
self.assertEqual(steps, 2)
# Computed global batch size 20 is lower than 32 if we pass less samples.
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_20_samples, steps=None, batch_size=None)
self.assertEqual(batch_size, 20 // replica_scale_factor)
self.assertEqual(steps, 1)
# Default global batch size 32 cannot be used with 63 samples.
with self.assertRaisesRegexp(ValueError, 'not divisible by batch size'):
distributed_training_utils.get_input_params(
distribution, input_63_samples, steps=None, batch_size=None)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calculating_input_params_with_steps_no_batch_size(self,
distribution):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(distribution):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
# Input samples of different sizes
input_63_samples = np.zeros((63, 3), dtype=np.float32)
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# Computed global batch size is correct for number of specified 1 step
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=1, batch_size=None)
self.assertEqual(batch_size, 64 // replica_scale_factor)
self.assertEqual(steps, 1)
# Computed global batch size is correct for number of specified 2 steps
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=2, batch_size=None)
self.assertEqual(batch_size, 32 // replica_scale_factor)
self.assertEqual(steps, 2)
# All samples can not be consumed in specified number of steps
with self.assertRaisesRegexp(ValueError, 'not divisible by steps'):
distributed_training_utils.get_input_params(
distribution, input_63_samples, steps=2, batch_size=None)
# This cases is different for different strategies due to the
# difference in supported batch size being global or per-replica.
if replica_scale_factor == 1:
# Computed global batch size is correct even if not sharadable
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_63_samples, steps=3, batch_size=None)
self.assertEqual(batch_size, 21)
self.assertEqual(steps, 3)
else:
# Computed global batch size can not be sharded across replicas
with self.assertRaisesRegexp(ValueError, 'could not be sharded evenly '
'across the sync replicas'):
distributed_training_utils.get_input_params(
distribution, input_63_samples, steps=1, batch_size=None)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calculating_input_params_no_steps_with_batch_size(self,
distribution):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(distribution):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# Computed steps is correct for specified batch size
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=16)
self.assertEqual(batch_size, 16)
self.assertEqual(steps, 4 // replica_scale_factor)
# Computed steps is correct for specified batch size
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=32)
self.assertEqual(batch_size, 32)
self.assertEqual(steps, 2 // replica_scale_factor)
# Number of samples is not divisible by the global batch size
with self.assertRaisesRegexp(ValueError, 'not divisible by batch size'):
distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=20)
# Number of samples is not divisible by the global batch size
with self.assertRaisesRegexp(ValueError, 'not divisible by batch size'):
distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=3)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calculating_input_params_with_steps_with_batch_size(self,
distribution):
with self.cached_session():
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# No change to steps and batch size if both specified and feasible
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=5, batch_size=3)
self.assertEqual(batch_size, 3)
self.assertEqual(steps, 5)
# Number of samples is less than global batch size * steps
with self.assertRaisesRegexp(ValueError, 'less than samples required'):
distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=10, batch_size=13)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_numpy_arrays(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
validation_data=(inputs, targets))
# TODO(anjalisridhar): We need tests for when the batch size and steps
# are smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_nested_numpy_arrays(self, distribution):
with self.cached_session():
with distribution.scope():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(combinations.combine(
distribution=strategies_minus_tpu, mode=['graph']))
def test_numpy_with_sample_weights(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
inputs = np.zeros((20, 3), np.float32)
targets = np.zeros((20, 4), np.float32)
sample_weights = np.ones((20), np.float32)
model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_flatten_predict_outputs(self, distribution):
with self.cached_session():
with distribution.scope():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs, steps=1)
# `predict` a list that is equal in length to the number of model outputs.
# In this test our model has two outputs and each element of `outs`
# corresponds to all the samples of one of the model outputs.
self.assertLen(outs, 2)
# Each of the output samples have a dimension of 7. We should process all
# the available input samples(6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(all_strategy_combinations())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
with distribution.scope():
user_controlled_model = get_model()
user_controlled_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()])
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()])
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
# TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work
# as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict.
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
with distribution.scope():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(all_strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
with distribution.scope():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_wrong_input_shape(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_no_batch_input_validation(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
# User forgets to batch the dataset
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.tpu_strategy_one_step],
mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
dataset = get_dataset(distribution)
# Input shapes are not fully known. Batch dimension is unknown as we are
# not using the drop_remainder argument.
dataset = dataset.repeat(100).batch(10)
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
with self.cached_session():
with distribution.scope():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer = gradient_descent.GradientDescentOptimizer(0.005)
loss = 'mse'
metrics = ['acc']
model.compile(optimizer, loss, metrics=metrics)
batch_size = 8
if isinstance(distribution, mirrored_strategy.CoreMirroredStrategy):
# CoreMirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(batch_size)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
with distribution.scope():
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(batch_size)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps
ref_output = np.ones((160, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
@combinations.generate(all_strategy_combinations())
def testOptimizerWithCallbacks(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent_keras.SGD(0.01)
loss = 'mse'
model.compile(optimizer, loss)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
self.assertAllClose(0.001, keras.backend.get_value(model.optimizer.lr))
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_validating_dataset_input_tensors_with_shape_mismatch(self,
distribution):
with self.cached_session():
a = constant_op.constant([1, 2], shape=(1, 2))
b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
device_map = values.ReplicaDeviceMap(('/device:CPU:0', '/device:GPU:0'))
x = values.DistributedValues(device_map, (a, b))
y = values.DistributedValues(device_map, (a, a))
# Removed device and input tensor shape details from the error message
# since the order of the device and the corresponding input tensor shape
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor shapes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
with distribution.scope():
distributed_training_utils.validate_distributed_dataset_inputs(
distribution, x, y)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_validating_dataset_input_tensors_with_dtype_mismatch(self,
distribution):
with self.cached_session():
a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
device_map = values.ReplicaDeviceMap(('/device:CPU:0', '/device:GPU:0'))
x = values.DistributedValues(device_map, (a, b))
y = values.DistributedValues(device_map, (a, a))
# Removed device and input tensor dtype details from the error message
# since the order of the device and the corresponding input tensor dtype
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor dtypes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
with distribution.scope():
distributed_training_utils.validate_distributed_dataset_inputs(
distribution, x, y)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_unsupported_features(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
dataset = get_dataset(distribution)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported when input '
'`x` is a dataset or a dataset iterator.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument.
with self.assertRaisesRegexp(
ValueError, 'the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
model.predict(dataset, verbose=0)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.ReduceLROnPlateau()])
class TestDistributionStrategyWithLossMasking(test.TestCase,
parameterized.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_masking(self, distribution):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
with distribution.scope():
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
y =
|
np.array([[[1], [1]], [[1], [1]]])
|
numpy.array
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = [
"setup_plotting",
"test_emcee_functions",
"test_dynesty_functions",
"test_pymc3_model",
"Angle",
]
import sys
import traceback
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as tt
import pymc3 as pm
import pymc3.distributions.transforms as tr
from pymc3.distributions import generate_samples
import emcee
if not emcee.__version__.startswith("3"):
raise ImportError(
"For emcee, version 3.0 or greater is needed. "
"You can install that using: "
"'pip install emcee==3.0rc2'"
)
def setup_plotting():
plt.style.use("default")
plt.rcParams["savefig.dpi"] = 100
plt.rcParams["figure.dpi"] = 100
plt.rcParams["font.size"] = 16
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = ["Liberation Sans"]
plt.rcParams["mathtext.fontset"] = "custom"
def emcee_loglike_ref(params, x, y):
bperp, theta, logs = params
m =
|
np.tan(theta)
|
numpy.tan
|
##############################################
############時系列データの特徴抽出############
##############################################
import numpy as np
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.autograd import Variable
from torchvision import transforms
import torchvision.utils as vutils
import matplotlib.pyplot as plt
import librosa
class MyIterator(object):
def __init__(self, data):
self._data = data
self._i = 0
def __iter__(self):
return self
def __next__(self):
if self._i==self._data.shape[0]:
raise StopIteration()
value = self._data[self._i]
self._i += 1
return value
class FFT(object):
def __init__(self):
pass
def transform(self, X, t=None):#Xはデータ数×データ点数(2のn乗 : 2**n)
N = X.shape[1]
dataset = MyIterator(X)
for idx, inputs in enumerate(dataset):
F = np.fft.fft(inputs)
F_abs = np.abs(F)[:int(N/2)]
F_abs_amp = F_abs/N*2
F_abs_amp[0] = F_abs_amp[0]
F_abs_amp = F_abs_amp.reshape(1,-1)
if idx==0:
transformed_X = F_abs_amp
else:
transformed_X = np.r_[transformed_X, F_abs_amp]
if t is None:
return transformed_X
else:
dt = np.fabs(t[1]-t[0])
freq = np.linspace(0, 1.0/dt, N)
return transformed_X, freq[:int(N/2)]
class BPF(object):#バンドパスフィルタ
def __init__(self, fL = None, fH = None):
self.fL = fL
self.fH = fH
def transform(self, X, t):
N = X.shape[1]
#周波数の計算
dt = np.fabs(t[1]-t[0])
freq = np.linspace(0, 1.0/dt, N)
if self.fL is None:
self.fL = 0
if self.fH is None:
self.fH = freq[int(N/2)]
dataset = MyIterator(X)
for idx, inputs in enumerate(dataset):
F = np.fft.fft(inputs)
F_abs = np.abs(F)[:int(N/2)]
F_abs_amp = F_abs/N*2
F_abs_amp[0] = F_abs_amp[0]
F_abs_amp = F_abs_amp.reshape(1,-1)
F2 = np.copy(F)
F2[freq<self.fL] = 0
F2[freq>self.fH] = 0
F2_ifft =
|
np.fft.ifft(F2)
|
numpy.fft.ifft
|
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by <NAME> (2018)
###############################################################################
"H_class/Kane/builders" submodule
This sub-package builds 8-band k.p Hamiltonians for infinite nanowires.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
import scipy.constants as cons
from MajoranaNanowires.Functions import diagonal, concatenate
#%%
def Kane_2D_builder(N,dis,mu,B=0,
params={},crystal='zincblende',
mesh=0,
sparse='yes'):
"""
2D 8-band k.p Hamiltonian builder. It obtaines the Hamiltoninan for a 3D
wire which is infinite in one direction, decribed using 8-band k.p theory.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential.
B: float
Magnetic field along the wire's direction.
params: dic or str
Kane/Luttinger parameters of the k.p Hamiltonian. 'InAs', 'InSb',
'GaAs' and 'GaSb' selects the defult parameters for these materials.
crystal: {'zincblende','wurtzite','minimal'}
Crystal symmetry along the nanowire growth. 'minimal' is a minimal
model in which the intra-valence band coupling are ignored.
mesh: mesh
If the discretization is homogeneous, mesh=0. Otherwise, mesh
provides a mesh with the position of the sites in the mesh.
sparse: {"yes","no"}
Sparsety of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
if (params=={} or params=='InAs') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InSb') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 20.4, 8.3, 9.1
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 34.8, 15.5, 16.5
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 6.98, 2.06, 2.93
P, m_eff = 1097.45, 1.0
EF, Ecv, Evv, Ep = 0, -1519, -341, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
Ep=3/(0.063)/(3/np.abs(Ecv)+1/np.abs(Ecv+Evv))
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 13.4, 4.7, 6.0
P, m_eff = 971.3, 1.0
EF, Ecv, Evv, Ep = 0, -812, -760, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InAs') and (crystal=='wurtzite'):
m_eff = 1.0
D1,D2,D3,D4=100.3,102.3,104.1,38.8
A1,A2,A3,A4,A5,A6,A7=-1.5726,-1.6521,-2.6301,0.5126,0.1172,1.3103,-49.04
B1,B2,B3=-2.3925,2.3155,-1.7231
e1,e2=-3.2005,0.6363
P1,P2=838.6,689.87
alpha1,alpha2,alpha3=-1.89,-28.92,-51.17
beta1,beta2=-6.95,-21.71
gamma1,Ec, Ev=53.06,0,-664.9
elif crystal=='minimal' or crystal=='zincblende':
gamma0, gamma1, gamma2, gamma3 = params['gamma0'], params['gamma1'], params['gamma2'], params['gamma3']
P, m_eff = params['P'], params['m_eff']
EF, Ecv, Evv = params['EF'], params['Ecv'], params['Evv']
if crystal=='zincblende':
Ep=(cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
## Make sure that the onsite parameters are arrays:
Nx, Ny = N[0], N[1]
if np.ndim(dis)==0:
dis_x, dis_y = dis, dis
else:
dis_x, dis_y = dis[0], dis[1]
if np.isscalar(mesh):
xi_x, xi_y = np.ones(N), np.ones(N)
elif len(mesh)==2:
xi_x, xi_y = dis_x/mesh[0]*np.ones(N), dis_y/mesh[1]*np.ones(N)
else:
xi_x, xi_y = dis_x/mesh[0], dis_y/mesh[1]
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny))
#Number of bands and sites
m_b = 8 * Nx * Ny
m_s = Nx * Ny
#Obtain the eigenenergies:
tx=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)**2)/cons.e*1e3*(xi_x[1::,:]+xi_x[:-1,:])/2
ty=cons.hbar**2/(2*m_eff*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3*(xi_y[:,1::]+xi_y[:,:-1])/2
txy=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)*(dis_y*1e-9))/cons.e*1e3*np.append(np.zeros((1,Ny)),xi_x[1::,:]+xi_x[:-1,:],axis=0)/2*np.append(np.zeros((Nx,1)),xi_y[:,1::]+xi_y[:,:-1],axis=1)/2
txy=txy[1::,1::]
ax=(xi_x[1::,:]+xi_x[:-1,:])/2/(2*dis_x)
ay=(xi_y[:,1::]+xi_y[:,:-1])/2/(2*dis_y)
e = np.append(2*tx[0,:].reshape(1,Ny),np.append(tx[1::,:]+tx[:-1,:],2*tx[-1,:].reshape(1,Ny),axis=0),axis=0)
em = e - np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
e += np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
ty=np.insert(ty,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
ay=np.insert(ay,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
txy=np.insert(txy,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
e, em, mu, tx, ty = e.flatten(), em.flatten(), mu.flatten(), tx.flatten(), ty.flatten()
ax,ay=ax.flatten(),ay.flatten()
if not(B==0):
x, y = np.zeros(N), np.zeros(N)
if np.isscalar(mesh) and mesh==0:
mesh=np.ones((2,Nx,Ny))*dis[0]
for i in range(Nx):
for j in range(Ny):
x[i,j]=np.sum(mesh[0,0:i+1,j])-(Nx-1)*dis_x/2
y[i,j]=np.sum(mesh[1,i,0:j+1])-(Ny-1)*dis_y/2
for i in range(int((Nx-1)/2)):
x[Nx-i-1,:]=-x[i,:]
x[int((Nx-1)/2),:]=0
x=x/np.abs(x[0,0])*(Nx-1)*dis_x/2
for j in range(int((Ny-1)/2)):
y[:,Ny-j-1]=-y[:,j]
y[:,int((Ny-1)/2)]=0
y=y/np.abs(y[0,0])*(Ny-1)*dis_y/2
fact_B=cons.e/cons.hbar*1e-18
Mx, My = -fact_B*y/2*B, fact_B*x/2*B
Mx_kx, My_ky = (xi_x[1::,:]*Mx[1::,:]+xi_x[:-1,:]*Mx[:-1,:])/2/(2*dis_x), (xi_y[:,1::]*My[:,1::]+xi_y[:,:-1]*My[:,:-1])/2/(2*dis_y)
My_ky=np.insert(My_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mm_kx, Mm_ky = (xi_x[1::,:]*(Mx[1::,:]-1j*My[1::,:])+xi_x[:-1,:]*(Mx[:-1,:]-1j*My[:-1,:]))/2/(2*dis_x), -(xi_y[:,1::]*(Mx[:,1::]+1j*My[:,1::])+xi_y[:,:-1]*(Mx[:,:-1]+1j*My[:,:-1]))/2/(2*dis_y)
Mm_ky=np.insert(Mm_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mx, My = Mx.flatten(), My.flatten()
Mx_kx, My_ky = Mx_kx.flatten(), My_ky.flatten()
Mm_kx, Mm_ky = Mm_kx.flatten(), Mm_ky.flatten()
## Built the Hamiltonian:
if crystal=='zincblende':
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
O1=(concatenate(((-1/np.sqrt(3)*(gamma2+2*gamma3))*em,-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(ty*(-1/np.sqrt(3)*(gamma2+2*gamma3))),ty*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3))),1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)))),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
B_s_m=(((Mx**2-My**2-2*1j*Mx*My)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k_m=(concatenate((2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
## row 2:
# (2,4)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+2*m_s),np.append(index[1],O1[1][1]+4*m_s))
# (2,7)
args=np.append(args,-np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+7*m_s))
## row 3:
# (3,5)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+3*m_s),np.append(index[1],O1[1][1]+5*m_s))
# (3,6)
args=np.append(args,-np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+3*m_s),np.append(index[1],O1[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+4*m_s),np.append(index[1],O1[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+6*m_s))
# # If there is magentic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
## row 2:
# (2,7)
args=np.append(args,-np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+7*m_s))
# (2,4)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+2*m_s),np.append(index[1],B_s_m[1][1]+4*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+2*m_s),np.append(index[1],B_k_m[1][1]+4*m_s))
## row 3:
# (3,5)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+3*m_s),np.append(index[1],B_s_m[1][1]+5*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+3*m_s),np.append(index[1],B_k_m[1][1]+5*m_s))
# (3,6)
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+3*m_s),np.append(index[1],B_s_m[1][0]+6*m_s))
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+3*m_s),np.append(index[1],B_k_m[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+4*m_s),np.append(index[1],B_s_m[1][0]+7*m_s))
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+4*m_s),np.append(index[1],B_k_m[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
### Built matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
elif crystal=='wurtzite':
Kc=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
Kp=(concatenate((ay,-ay,-1j*ax,1j*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
Kpc=(concatenate((em,-tx,-tx,ty,ty,-1j*txy[0:-1]/2,1j*txy/2,1j*txy/2,-1j*txy[0:-1]/2)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
### Upper diagonal:
## row 0:
# (0,1)
args=-A5*np.conj(Kpc[0])
index=(Kpc[1][1]+0,Kpc[1][0]+m_s)
# (0,2)
args=np.append(args,1j*(A7-alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+2*m_s))
# (0,4)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+4*m_s))
# (0,6)
args=np.append(args,-(P2-beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+6*m_s))
## row 1:
# (1,2)
args=np.append(args,-1j*(A7+alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+2*m_s))
# (1,3)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+m_s),np.append(index[1],Kp[1][0]+3*m_s))
# (1,5)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+5*m_s))
# (1,6)
args=np.append(args,(P2+beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (1,7)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+7*m_s))
## row 2:
# (2,4)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+2*m_s),np.append(index[1],diagonal(m_s)[1]+4*m_s))
# (2,5)
args=np.append(args,-1j*alpha3*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (2,6)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (2,7)
args=np.append(args, beta2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 3:
# (3,4)
args=np.append(args,-A5*Kpc[0])
index=(np.append(index[0],Kpc[1][0]+3*m_s),np.append(index[1],Kpc[1][1]+4*m_s))
# (3,5)
args=np.append(args,-1j*(A7-alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+5*m_s))
# (3,7)
args=np.append(args,(P2-beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+7*m_s))
## row 4:
# (4,5)
args=np.append(args,1j*(A7+alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (4,6)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+4*m_s),np.append(index[1],diagonal(m_s)[1]+6*m_s))
# (4,7)
args=np.append(args,-(P2+beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,-beta2*Kp[0])
index=(np.append(index[0],Kp[1][0]+5*m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (5,7)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+7*m_s))
## row 6:
# (6,7)
args=np.append(args,-1j*gamma1*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+6*m_s),np.append(index[1],Kp[1][0]+7*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+0),np.append(index[1],Kc[1][1]+0))
# (1,1)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+m_s),np.append(index[1],Kc[1][1]+m_s))
# (2,2)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+2*m_s))
# (3,3)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+3*m_s),np.append(index[1],Kc[1][1]+3*m_s))
# (4,4)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+4*m_s),np.append(index[1],Kc[1][1]+4*m_s))
# (5,5)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+5*m_s))
# (6,6)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+6*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (7,7)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+7*m_s),np.append(index[1],Kc[1][1]+7*m_s))
### Built matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate(((D1+D2+Ev)*
|
np.ones(m_s)
|
numpy.ones
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
def GenerateNumpyRandomRGB(shape):
# Only generate floating points that are fractions like n / 256, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
return np.random.randint(0, 256, shape) / 256.
class RGBToHSVTest(XLATestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in self.float_types:
inp = GenerateNumpyRandomRGB(shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.test_session() as sess:
batch0 = array_ops.placeholder(nptype, shape=shape)
with self.test_scope():
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
with self.test_scope():
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2],
{batch0: inp})
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllCloseAccordingToType(
batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in self.float_types:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.test_session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv = image_ops.rgb_to_hsv(placeholder)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(rgb_tf, rgb_np, bfloat16_atol=0.03)
def testRGBToHSVNumpy(self):
"""Tests the RGB to HSV conversion matches a reference implementation."""
for nptype in self.float_types:
rgb_flat = GenerateNumpyRandomRGB((64, 3)).astype(nptype)
rgb_np = rgb_flat.reshape(4, 4, 4, 3)
hsv_np = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_np = hsv_np.reshape(4, 4, 4, 3)
with self.test_session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv_op = image_ops.rgb_to_hsv(placeholder)
hsv_tf = hsv_op.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
class AdjustContrastTest(XLATestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.test_session():
x = array_ops.placeholder(x_np.dtype, shape=x_np.shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = image_ops.adjust_contrast(flt_x, contrast_factor)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllClose(y_tf, y_np, 1e-6)
def testFloatContrast(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testBatchContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.test_session():
x = array_ops.placeholder(np.float32)
with self.test_scope():
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval({x: x_np})
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustHueTest(XLATestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.test_session():
x = array_ops.placeholder(dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(x, delta_h)
y_tf = y.eval({x: x_np})
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-4)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class AdjustSaturationTest(XLATestCase):
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
with self.test_scope():
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np =
|
np.array(x_data, dtype=np.uint8)
|
numpy.array
|
# read and prepare input data, execute model(s) and save forecasts
# importing python libraries and opening settings
try:
import os
import sys
import logging
import logging.handlers as handlers
import json
import datetime
import numpy as np
import pandas as pd
import itertools as it
import tensorflow as tf
from tensorflow.keras import backend as kb
from tensorflow.keras import losses, models
from tensorflow.keras.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error, mean_absolute_error
with open('./settings.json') as local_json_file:
local_script_settings = json.loads(local_json_file.read())
local_json_file.close()
sys.path.insert(1, local_script_settings['custom_library_path'])
from metaheuristic_module import tuning_metaheuristic
from model_analyzer import model_structure
from submission_evaluator import submission_tester
if local_script_settings['metaheuristic_optimization'] == "True":
with open(''.join(
[local_script_settings['metaheuristics_path'], 'organic_settings.json'])) as local_json_file:
local_script_settings = json.loads(local_json_file.read())
local_json_file.close()
metaheuristic_predict = tuning_metaheuristic()
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
tf.keras.backend.set_floatx('float32')
except Exception as ee1:
print('Error importing libraries or opening settings (predict module)')
print(ee1)
# log setup
current_script_name = os.path.basename(__file__).split('.')[0]
log_path_filename = ''.join([local_script_settings['log_path'], current_script_name, '.log'])
logging.basicConfig(filename=log_path_filename, level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
logHandler = handlers.RotatingFileHandler(log_path_filename, maxBytes=10485760, backupCount=5)
logger.addHandler(logHandler)
# keras session and random seed reset/fix, set epsilon keras backend
kb.clear_session()
np.random.seed(1)
tf.random.set_seed(2)
kb.set_epsilon(1) # needed while using "mape" as one of the metric at training model
# classes definitions
class modified_mape(losses.Loss):
@tf.function
def call(self, local_true, local_pred):
local_true = tf.cast(local_true, dtype=tf.float32)
local_pred = tf.cast(local_pred, dtype=tf.float32)
numerator = tf.abs(tf.add(local_pred, -local_true))
denominator = tf.add(tf.convert_to_tensor(1., dtype=tf.float32), tf.abs(local_true))
return tf.math.divide_no_nan(numerator, denominator)
class customized_loss(losses.Loss):
@tf.function
def call(self, local_true, local_pred):
local_true = tf.convert_to_tensor(local_true, dtype=tf.float32)
local_pred = tf.convert_to_tensor(local_pred, dtype=tf.float32)
factor_difference = tf.reduce_mean(tf.abs(tf.add(local_pred, -local_true)))
factor_true = tf.reduce_mean(tf.add(tf.convert_to_tensor(1., dtype=tf.float32), local_true))
return tf.math.multiply_no_nan(factor_difference, factor_true)
# functions definitions
def general_mean_scaler(local_array):
if len(local_array) == 0:
return "argument length 0"
mean_local_array = np.mean(local_array, axis=1)
mean_scaling = np.divide(local_array, 1 + mean_local_array)
return mean_scaling, mean_local_array
def window_based_normalizer(local_window_array):
if len(local_window_array) == 0:
return "argument length 0"
mean_local_array = np.mean(local_window_array, axis=1)
window_based_normalized_array = np.add(local_window_array, -mean_local_array)
return window_based_normalized_array, mean_local_array
def general_mean_rescaler(local_array, local_complete_array_unit_mean, local_forecast_horizon):
if len(local_array) == 0:
return "argument length 0"
local_array = local_array.clip(0)
local_complete_array_unit_mean = np.array([local_complete_array_unit_mean, ] * local_forecast_horizon).transpose()
mean_rescaling = np.multiply(local_array, 1 + local_complete_array_unit_mean)
return mean_rescaling
def window_based_denormalizer(local_window_array, local_last_window_mean, local_forecast_horizon):
if len(local_window_array) == 0:
return "argument length 0"
local_last_window_mean = np.array([local_last_window_mean, ] * local_forecast_horizon).transpose()
window_based_denormalized_array = np.add(local_window_array, local_last_window_mean)
return window_based_denormalized_array
def predict():
try:
print('\n~predict module~')
# from 1th june, here get real unit_sales for days d_1914 to d_1941,
# for model optimization, but avoiding overfitting
# open predict settings
with open(''.join([local_script_settings['test_data_path'], 'forecast_settings.json'])) as local_f_json_file:
forecast_settings = json.loads(local_f_json_file.read())
local_f_json_file.close()
# load clean data (divided in groups) and time_serie_group
scaled_unit_sales_g1 = np.load(''.join([local_script_settings['train_data_path'], 'group1.npy']),
allow_pickle=True)
scaled_unit_sales_g2 = np.load(''.join([local_script_settings['train_data_path'], 'group2.npy']),
allow_pickle=True)
scaled_unit_sales_g3 = np.load(''.join([local_script_settings['train_data_path'], 'group3.npy']),
allow_pickle=True)
groups_list = [scaled_unit_sales_g1, scaled_unit_sales_g2, scaled_unit_sales_g3]
time_series_group = np.load(''.join([local_script_settings['train_data_path'], 'time_serie_group.npy']),
allow_pickle=True)
# print(time_series_group.shape)
# print(time_series_group)
# store the number of time_series and max_selling_time of each group
if local_script_settings['automatic_time_series_number'] == 'True':
number_of_time_series_g1 = np.shape(scaled_unit_sales_g1)[0]
number_of_time_series_g2 = np.shape(scaled_unit_sales_g2)[0]
number_of_time_series_g3 = np.shape(scaled_unit_sales_g3)[0]
else:
# open forecast settings
with open(''.join([local_script_settings['test_data_path'],
'forecast_settings.json'])) as local_f_json_file:
forecast_settings = json.loads(local_f_json_file.read())
local_f_json_file.close()
if local_script_settings['model_analyzer'] == 'on':
analyzer = model_structure()
# requires hdf5 format
model_name = '_high_loss_time_serie_model_forecaster_in_block_.h5'
analysis_result = analyzer.analize(model_name, local_script_settings)
if analysis_result:
print('model_analysis successfully, json file saved')
else:
print('error at model_analysis submodule')
# open raw_data
raw_data_filename = 'sales_train_evaluation.csv'
raw_data_sales = pd.read_csv(''.join([local_script_settings['raw_data_path'], raw_data_filename]))
print('raw sales data accessed')
# extract data and check dimensions
raw_unit_sales = raw_data_sales.iloc[:, 6:].values
max_selling_time = np.shape(raw_unit_sales)[1]
local_settings_max_selling_time = local_script_settings['max_selling_time']
if local_settings_max_selling_time < max_selling_time:
raw_unit_sales = raw_unit_sales[:, :local_settings_max_selling_time]
elif max_selling_time != local_settings_max_selling_time:
print("settings doesn't match data dimensions, it must be rechecked before continue(_predict_module)")
logger.info(''.join(['\n', datetime.datetime.now().strftime("%d.%b %Y %H:%M:%S"),
' data dimensions does not match settings']))
return False
else:
if local_script_settings['competition_stage'] != 'submitting_after_June_1th_using_1941days':
print(''.join(['\x1b[0;2;41m', 'Warning', '\x1b[0m']))
print('please check: forecast horizon days was included within the training data')
print('It was expected that the last 28 days were not included..')
print('to avoid overfitting')
elif local_script_settings['competition_stage'] == 'submitting_after_June_1th_using_1941days':
print(''.join(['\x1b[0;2;41m', 'Straight end of the competition', '\x1b[0m']))
print('raw data input collected and check of data dimensions passed (predict_module)')
# make forecast --> '28 days future predictions for unit_sales', organized in groups
forecast_horizon_days = local_script_settings['forecast_horizon_days']
# populate with the previously normalized right input for forecasting,
# *here is the code to access the preprocessed data from _1_prepare_data.py*
# x_test_from_prepare = groups_list[group][:, -forecast_horizon_days:]
# x_test_from_prepare = x_test_from_prepare.reshape(1, x_test_from_prepare.shape[1],
# x_test_from_prepare.shape[0])
# in order to not carrie dependencies, _3_predict.py module preprocess again from raw data
# if needed, could be checked with: x_test_from_prepare == x_test // output --> [[True]] * shape
nof_groups = local_script_settings['number_of_groups']
# if only neural_network (Not stochastic_simulation as first approach), ALL time_series are not_improved...
nof_time_series = raw_unit_sales.shape[0]
print('number of time_series:', nof_time_series)
time_series_not_improved = [time_serie for time_serie in range(nof_time_series)]
if local_script_settings['skip_neural_network_forecaster_in_predict_module'] == "True":
print('by settings settled, skipping neural_network forecaster')
print('using only first model for forecasting')
# reading stochastic simulation forecasts
all_forecasts = np.load(''.join([local_script_settings['train_data_path'],
'stochastic_simulation_forecasts.npy']))
else:
all_forecasts = np.zeros(shape=(nof_time_series * 2, forecast_horizon_days))
time_steps_days = local_script_settings['time_steps_days']
if local_script_settings['first_train_approach'] == 'stochastic_simulation':
nof_groups = 1
time_series_not_improved = np.load(''.join([local_script_settings['models_evaluation_path'],
'time_series_not_improved.npy']), allow_pickle=True)
# managing that only 1 group will be used
time_series_group = np.array([[time_serie, 0] for time_serie in time_series_not_improved])
groups_list = [raw_unit_sales]
for group in range(nof_groups):
# print(time_series_group.shape)
time_series_in_group = time_series_group[:, [0]][time_series_group[:, [1]] == group]
# this commented code is replaced for "the managing that only 1 group will be used" about six line above
# if local_script_settings['first_train_approach'] == 'stochastic_simulation':
# time_series_in_group = time_series_not_improved
print('time_series group shape, len:', time_series_in_group.shape, len(time_series_in_group))
x_input = groups_list[group][time_series_in_group, -time_steps_days:]
x_input = x_input.reshape(1, x_input.shape[1], x_input.shape[0])
print('x_input shape: ', np.shape(x_input))
# load model and make forecast for the time serie
if nof_groups > 1:
forecaster = models.load_model(''.join([local_script_settings['models_path'],
'model_group_', str(group),
'_forecast_.h5']),
custom_objects={'modified_mape': modified_mape,
'customized_loss': customized_loss})
# this full-group model was not obtained better results
point_forecast_original = forecaster.predict(x_input)
print('forecast shape: ',
|
np.shape(point_forecast_original)
|
numpy.shape
|
import os, argparse, math
import pickle as pkl
import numpy as np
import matplotlib
from matplotlib import rc
import matplotlib.pyplot as plt
from scipy import misc
import tensorflow as tf
from utils import reordering
matplotlib.rcParams['text.latex.unicode']=True
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
img_name = '2d_mnist_app_'
##############################################
# mnist
#sample_idx=900000
#b_idx=0
#b_idx=1
#b_idx=2
sample_idx=801000
b_idx=0
img_name += str(sample_idx).zfill(7)+'_'+str(b_idx).zfill(2)+'.png'
prefix = '../logs_mnist_c/'
dirs = [
#'07-25,10:24:11.220550', # NP(h=128)
'07-25,10:24:15.632419', # ANP(h=128)
'10-22,16:12:26.898386', # SNP(h=128)
'12-28,08:07:05.227516', # SNP-Att(K=inf)
'07-25,10:24:09.806928', # SNP-RMRA(h=128,K=25)
]
for i in range(len(dirs)):
dirs[i] = prefix+dirs[i]
labels = [
#'NP',
'ANP',
'SNP',
#'SNP-W(K=25)',
#'SNP-RMR(K=25)',
#'ASNP-W(K=25)',
#'ASNP-RMR(K=25)',
'ASNP-W',
'ASNP-RMR',
]
# get data
data = []
h_x_list = []
for idx, direc in enumerate(dirs):
with open(os.path.join(direc,'data'+str(sample_idx).zfill(7)+'.pickle'),
'rb') as f:
pred = pkl.load(f)
std = pkl.load(f)
query = pkl.load(f)
target = pkl.load(f)
hyperparam = pkl.load(f)
#if idx == len(labels)-1:
# h_x_list.append(pkl.load(f))
if idx == 0:
canvas_size = int(math.sqrt(len(target[0][0])))
# [target_x, target_y, context_x, context_y, pred_y, std_y]
if 'SNP' in labels[idx]:
data.append(reordering(query, target, pred, std, temporal=True))
else:
data.append(reordering(query, target, pred, std, temporal=False))
# plotting
pqset_point = [1.0,0.0,0.0] # Red
T = np.arange(0,50,5)
plt.figure(figsize=(4.8*(2+len(labels)), 4.8*len(T))) # [context, target(withIm), models] * len(T)
for t_idx, t in enumerate(T):
for i in range(len(labels)):
target_x, target_y, context_x, context_y, pred_y, std = data[i]
tar_canvas = np.ones((canvas_size,canvas_size,3))
cont_canvas = np.ones((canvas_size,canvas_size,3))
cont_canvas[:,:,:] = 1.0 # default color: white
tar_y = target_y[t][b_idx] + 0.5
con_x = ((context_x[t][b_idx] + 1.0) / 2) * (canvas_size-1) + 0.5
con_y = context_y[t][b_idx] + 0.5
pred_canvas = np.ones((canvas_size,canvas_size,3))
std_canvas =
|
np.ones((canvas_size,canvas_size,3))
|
numpy.ones
|
import numpy as np
from PIL import Image
from hypergan.viewer import GlobalViewer
class BaseSampler:
def __init__(self, gan, samples_per_row=8, session=None):
self.gan = gan
self.samples_per_row = samples_per_row
def _sample(self):
raise "raw _sample method called. You must override this"
def sample(self, path, save_samples):
gan = self.gan
with gan.session.as_default():
sample = self._sample()
data = sample['generator']
width = min(gan.batch_size(), self.samples_per_row)
stacks = [np.hstack(data[i*width:i*width+width]) for i in range(gan.batch_size()//width)]
sample_data =
|
np.vstack(stacks)
|
numpy.vstack
|
# Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example:
This compute metrics for the table `vessel_char_vid_features_v20190509`, comparing
results with the known values in the table `char_info_v20190509`. This second table
is typically derived from the vessel database using train/create_train_info.py.
The results, and html file, are written to dest path.
python -m classification.metrics.compute_vessel_metrics \
--inference-table machine_learning_dev_ttl_120d.vessel_char_vid_features_v20190509 \
--label-table machine_learning_dev_ttl_120d.char_info_v20190509 \
--dest-path ./test_inference_metrics_0509.html
*Note: despite the table, name this table is in terms of vessel_id*
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import csv
import subprocess
import numpy as np
import dateutil.parser
import logging
import argparse
from collections import namedtuple, defaultdict
import sys
import yattag
from classification.metadata import VESSEL_CLASS_DETAILED_NAMES, VESSEL_CATEGORIES, TEST_SPLIT
from classification.metadata import raw_schema, schema, atomic
import gzip
import dateutil.parser
import datetime
import pytz
import pandas as pd
coarse_categories = [
'bunker_or_tanker', 'cargo_or_reefer', 'passenger', 'tug', 'research', 'other_not_fishing*',
'drifting_longlines', 'gear', 'purse_seines', 'set_gillnets', 'set_longlines', 'pots_and_traps',
'trawlers', 'squid_jigger', 'other_fishing*'
]
raw_names = [x[:-1] for x in raw_schema.split() if x.strip()]
names = [x for x in raw_names if x in VESSEL_CLASS_DETAILED_NAMES]
fine_mapping = [(x, set([x])) for x in names]
all_classes = set(VESSEL_CLASS_DETAILED_NAMES)
categories = dict(VESSEL_CATEGORIES)
is_fishing = set(categories['fishing'])
not_fishing = set(categories['non_fishing'])
coarse_mapping = defaultdict(set)
used = set()
for cat in coarse_categories:
if cat.endswith('*'):
coarse_mapping[cat] = set()
else:
atomic_cats = set(categories[cat])
assert not atomic_cats & used
used |= atomic_cats
coarse_mapping[cat] = atomic_cats
unused = all_classes - used
coarse_mapping['other_fishing*'] |= (is_fishing & unused)
coarse_mapping['other_not_fishing*'] |= (not_fishing & unused)
coarse_mapping = [(k, coarse_mapping[k]) for k in coarse_categories]
fishing_mapping = [
['fishing', set(atomic(schema['unknown']['fishing']))],
['non_fishing', set(atomic(schema['unknown']['non_fishing']))],
]
# Faster than using dateutil
def _parse(x):
if isinstance(x, datetime.datetime):
return x
# 2014-08-28T13:56:16+00:00
# TODO: fix generation to generate consistent datetimes
if x[-6:] == '+00:00':
x = x[:-6]
if x.endswith('.999999'):
x = x[:-7]
if x.endswith('Z'):
x = x[:-1]
try:
dt = datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S')
except:
logging.fatal('Could not parse "%s"', x)
raise
return dt.replace(tzinfo=pytz.UTC)
class InferenceResults(object):
_indexed_scores = None
def __init__(self, # TODO: Consider reordering args so that label_list is first
ids, inferred_labels, true_labels, start_dates, scores,
label_list,
all_ids=None, all_inferred_labels=None, all_true_labels=None, all_start_dates=None, all_scores=None):
self.label_list = label_list
#
self.all_ids = all_ids
self.all_inferred_labels = all_inferred_labels
self.all_true_labels = all_true_labels
self.all_start_dates = np.asarray(all_start_dates)
self.all_scores = all_scores
#
self.ids = ids
self.inferred_labels = inferred_labels
self.true_labels = true_labels
self.start_dates = np.asarray(start_dates)
self.scores = scores
#
def all_results(self):
return InferenceResults(self.all_ids, self.all_inferred_labels,
self.all_true_labels, self.all_start_dates,
self.all_scores, self.label_list)
@property
def indexed_scores(self):
if self._indexed_scores is None:
logging.debug('create index_scores')
iscores = np.zeros([len(self.ids), len(self.label_list)])
for i, id_ in enumerate(self.ids):
for j, lbl in enumerate(self.label_list):
iscores[i, j] = self.scores[i][lbl]
self._indexed_scores = iscores
logging.debug('done')
return self._indexed_scores
AttributeResults = namedtuple(
'AttributeResults',
['id', 'inferred_attrs', 'true_attrs', 'true_labels', 'start_dates'])
LocalisationResults = namedtuple('LocalisationResults',
['true_fishing_by_id',
'pred_fishing_by_id', 'label_map'])
ConfusionMatrix = namedtuple('ConfusionMatrix', ['raw', 'scaled'])
CLASSIFICATION_METRICS = [
('fishing', 'Is Fishing'),
('coarse', 'Coarse Labels'),
('fine', 'Fine Labels'),
]
css = """
table {
text-align: center;
border-collapse: collapse;
}
.confusion-matrix th.col {
height: 140px;
white-space: nowrap;
}
.confusion-matrix th.col div {
transform: translate(16px, 49px) rotate(315deg);
width: 30px;
}
.confusion-matrix th.col span {
border-bottom: 1px solid #ccc;
padding: 5px 10px;
text-align: left;
}
.confusion-matrix th.corner {
text-align: right;
vertical-align: bottom;
}
.confusion-matrix th.row {
text-align: right;
}
.confusion-matrix td.diagonal {
border: 1px solid black;
}
.confusion-matrix td.offdiagonal {
border: 1px dotted grey;
}
.unbreakable {
page-break-inside: avoid;
}
"""
# basic metrics
def precision_score(y_true, y_pred, sample_weights=None):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
true_pos = np.array([(x == y and x != 0) for (x, y) in zip(y_true, y_pred)], dtype=float)
all_pos = np.array([(x != 0) for x in y_pred], dtype=float)
if sample_weights is not None:
true_pos *= sample_weights
all_pos *= sample_weights
return true_pos.sum() / all_pos.sum()
def recall_score(y_true, y_pred, sample_weights=None):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
true_pos = np.array([(x == y and x != 0) for (x, y) in zip(y_true, y_pred)], dtype=float)
all_true = np.array([(x != 0) for x in y_true], dtype=float)
if sample_weights is not None:
true_pos *= sample_weights
all_true *= sample_weights
return true_pos.sum() / all_true.sum()
def f1_score(y_true, y_pred, sample_weights=None):
prec = precision_score(y_true, y_pred, sample_weights)
recall = recall_score(y_true, y_pred, sample_weights)
return 2 * prec * recall / (prec + recall + 1e-10)
def accuracy_score(y_true, y_pred, sample_weights=None):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
if sample_weights is None:
sample_weights = np.ones_like(y_pred).astype(float)
weights = np.asarray(sample_weights)
correct = (y_true == y_pred)
return (sample_weights * correct).sum() / sample_weights.sum()
def weights(labels, y_true, y_pred, max_weight=200):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
weights = np.zeros([len(y_true)])
for lbl in labels:
trues = (y_true == lbl)
if trues.sum():
wt = min(len(trues) / trues.sum(), max_weight)
weights += trues * wt
return weights / weights.sum()
def weights_by_class(label_weights, y_true, y_pred):
y_true = np.asarray(y_true)
weights = np.zeros([len(y_true)])
for lbl, wt in labels:
trues = (y_true == lbl)
if trues.sum():
weights[trues] = wt
return weights / weights.sum()
def base_confusion_matrix(y_true, y_pred, labels):
n = len(labels)
label_map = {lbl: i for i, lbl in enumerate(labels)}
cm = np.zeros([n, n], dtype=int)
for yt, yp in zip(y_true, y_pred):
if yt not in label_map:
logging.warn('%s not in label_map', yt)
continue
if yp not in label_map:
logging.warn('%s not in label_map', yp)
continue
cm[label_map[yp], label_map[yt]] += 1
return cm
# Helper function formatting as HTML (using yattag)
def ydump_confusion_matrix(doc, cm, labels, **kwargs):
"""Dump an sklearn confusion matrix to HTML using yatag
Args:
doc: yatag Doc instance
cm: ConfusionMatrix instance
labels: list of str
labels for confusion matrix
"""
doc, tag, text, line = doc.ttl()
with tag('table', klass='confusion-matrix', **kwargs):
with tag('corner'):
with tag('th'):
doc.asis('true→<br/>positive↓')
for x in labels:
with tag('th', klass='col'):
with tag('div'):
line('span', x)
for i, (l, row) in enumerate(zip(labels, cm.scaled)):
with tag('tr'):
line('th', l, klass='row')
for j, x in enumerate(row):
if i == j:
if x == -1:
# No values present in this row, column
color = '#FFFFFF'
elif x > 0.5:
cval = np.clip(int(round(512 * (x - 0.5))), 0, 255)
invhexcode = '{:02x}'.format(255 - cval)
color = '#{}FF00'.format(invhexcode)
else:
cval = np.clip(int(round(512 * x)), 0, 255)
hexcode = '{:02x}'.format(cval)
color = '#FF{}00'.format(hexcode)
klass = 'diagonal'
else:
cval = np.clip(int(round(255 * x)), 0, 255)
hexcode = '{:02x}'.format(cval)
invhexcode = '{:02x}'.format(255 - cval)
color = '#FF{}{}'.format(invhexcode, invhexcode)
klass = 'offdiagonal'
with tag('td', klass=klass, bgcolor=color):
raw = cm.raw[i, j]
with tag('font',
color='#000000',
title='{0:.3f}'.format(x)):
text(str(raw))
def ydump_table(doc, headings, rows, **kwargs):
"""Dump an html table using yatag
Args:
doc: yatag Doc instance
headings: [str]
rows: [[str]]
"""
doc, tag, text, line = doc.ttl()
with tag('table', **kwargs):
with tag('tr'):
for x in headings:
line('th', str(x))
for row in rows:
with tag('tr'):
for x in row:
line('td', str(x))
def ydump_attrs(doc, results):
"""dump metrics for `results` to html using yatag
Args:
doc: yatag Doc instance
results: InferenceResults instance
"""
doc, tag, text, line = doc.ttl()
def RMS(a, b):
return np.sqrt(np.square(a - b).mean())
def MAE(a, b):
return abs(a - b).mean()
# TODO: move computations out of loops for speed.
# true_mask = np.array([(x is not None) for x in results.true_attrs])
# infer_mask = np.array([(x is not None) for x in results.inferred_attrs])
true_mask = ~np.isnan(results.true_attrs)
infer_mask = ~np.isnan(results.inferred_attrs)
rows = []
for dt in np.unique(results.start_dates):
mask = true_mask & infer_mask & (results.start_dates == dt)
rows.append(
[dt, RMS(results.true_attrs[mask], results.inferred_attrs[mask]),
MAE(results.true_attrs[mask], results.inferred_attrs[mask])])
with tag('div', klass='unbreakable'):
line('h3', 'RMS Error by Date')
ydump_table(doc, ['Start Date', 'RMS Error', 'Abs Error'],
[(a.date(), '{:.2f}'.format(b), '{:.2f}'.format(c))
for (a, b, c) in rows])
logging.info(' Consolidating attributes')
consolidated = consolidate_attribute_across_dates(results)
true_mask = ~np.isnan(consolidated.true_attrs)
infer_mask = ~np.isnan(consolidated.inferred_attrs)
logging.info(' RMS Error')
with tag('div', klass='unbreakable'):
line('h3', 'Overall RMS Error')
text('{:.2f}'.format(
RMS(consolidated.true_attrs[true_mask & infer_mask],
consolidated.inferred_attrs[true_mask & infer_mask])))
logging.info(' ABS Error')
with tag('div', klass='unbreakable'):
line('h3', 'Overall Abs Error')
text('{:.2f}'.format(
MAE(consolidated.true_attrs[true_mask & infer_mask],
consolidated.inferred_attrs[true_mask & infer_mask])))
def RMS_MAE_by_label(true_attrs, pred_attrs, true_labels):
results = []
labels = sorted(set(true_labels))
for lbl in labels:
lbl_mask = np.array([(lbl == x) for x in true_labels])
mask = true_mask & infer_mask & lbl_mask
if mask.sum():
err = RMS(true_attrs[mask], pred_attrs[mask])
abs_err = MAE(true_attrs[mask], pred_attrs[mask])
count = mask.sum()
results.append(
(lbl, count, err, abs_err, true_attrs[mask].mean(),
true_attrs[mask].std()))
return results
logging.info(' Error by Label')
with tag('div', klass='unbreakable'):
line('h3', 'RMS Error by Label')
ydump_table(
doc,
['Label', 'Count', 'RMS Error', 'Abs Error', 'Mean', 'StdDev'
], # TODO: pass in length and units
[
(a, count, '{:.2f}'.format(b), '{:.2f}'.format(ab),
'{:.2f}'.format(c), '{:.2f}'.format(d))
for (a, count, b, ab, c, d) in RMS_MAE_by_label(
consolidated.true_attrs, consolidated.inferred_attrs,
consolidated.true_labels)
])
def ydump_metrics(doc, results, weights_map):
"""dump metrics for `results` to html using yatag
Args:
doc: yatag Doc instance
results: InferenceResults instance
"""
doc, tag, text, line = doc.ttl()
rows = [
(x, accuracy_score(results.true_labels, results.inferred_labels,
(results.start_dates == x)),
(results.start_dates == x).sum())
for x in np.unique(results.start_dates)
]
with tag('div', klass='unbreakable'):
line('h3', 'Accuracy by Date')
ydump_table(doc, ['Start Date', 'Count', 'Accuracy'],
[(a.date(), c, '{:.2f}'.format(b)) for (a, b, c) in rows])
consolidated = consolidate_across_dates(results)
with tag('div', klass='unbreakable'):
line('h3', 'Overall Accuracy')
text('{:.2f}'.format(
accuracy_score(consolidated.true_labels,
consolidated.inferred_labels)))
cm = confusion_matrix(consolidated)
with tag('div', klass='unbreakable'):
line('h3', 'Confusion Matrix')
ydump_confusion_matrix(doc, cm, results.label_list)
with tag('div', klass='unbreakable'):
line('h3', 'Metrics by Label')
weights = composite_weights(weights_map, results.mapping, consolidated.true_labels)
row_vals = precision_recall_f1(consolidated.label_list,
consolidated.true_labels,
consolidated.inferred_labels,
weights)
ydump_table(doc, ['Label', 'Count', 'Precision', 'Recall', 'F1-Score'], [
(a, e, '{:.2f}'.format(b), '{:.2f}'.format(c), '{:.2f}'.format(d))
for (a, b, c, d, e) in row_vals
])
# Helper functions for computing metrics
def clean_label(x):
x = x.strip()
return x.replace('_', ' ')
def precision_recall_f1(labels, y_true, y_pred, weights):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
results = []
for lbl in labels:
trues = (y_true == lbl)
positives = (y_pred == lbl)
if trues.sum() and positives.sum():
# Only return cases where there are least one vessel present in both cases
results.append(
(lbl, precision_score(trues, positives),
recall_score(trues, positives),
f1_score(trues, positives),
trues.sum()))
# Note that the micro-avereage precision/recall/F1 are the same
# as the accuracy for the vanilla case we have here. (Predictions
# in all cases, on prediction per case.)
results.append(('ALL (unweighted)', precision_score(y_true, y_pred),
recall_score(y_true, y_pred),
f1_score(y_true, y_pred),
len(y_true)))
results.append(('ALL (by prevalence)', precision_score(y_true, y_pred, sample_weights=weights),
recall_score(y_true, y_pred, sample_weights=weights),
f1_score(y_true, y_pred, sample_weights=weights),
len(y_true))
)
return results
def consolidate_across_dates(results, date_range=None):
"""Consolidate scores for each ID across available dates.
For each id, we take the scores at all available dates, sum
them and use argmax to find the predicted results.
Optionally accepts a date range, which specifies half open ranges
for the dates.
"""
inferred_ids = []
inferred_labels = []
true_labels = []
if date_range is None:
valid_date_mask = np.ones([len(results.ids)], dtype=bool)
else:
# TODO: write out end date as well, so that we avoid this hackery
end_dates = results.start_dates + datetime.timedelta(days=180)
valid_date_mask = (results.start_dates >= date_range[0]) & (
results.start_dates < date_range[1])
id_map = {}
id_indices = []
for i, m in enumerate(results.ids):
if valid_date_mask[i]:
if m not in id_map:
id_map[m] = len(inferred_ids)
inferred_ids.append(m)
true_labels.append(results.true_labels[i])
id_indices.append(id_map[m])
else:
id_indices.append(-1)
id_indices = np.array(id_indices)
scores = np.zeros([len(inferred_ids), len(results.label_list)])
counts = np.zeros([len(inferred_ids)])
for i, valid in enumerate(valid_date_mask):
if valid:
scores[id_indices[i]] += results.indexed_scores[i]
counts[id_indices[i]] += 1
inferred_labels = []
for i, s in enumerate(scores):
inferred_labels.append(results.label_list[np.argmax(s)])
if counts[i]:
scores[i] /= counts[i]
return InferenceResults(
np.array(inferred_ids), np.array(inferred_labels),
np.array(true_labels), None, scores, results.label_list)
def consolidate_attribute_across_dates(results):
"""Consolidate scores for each ID across available dates.
For each ID, we average the attribute across all available dates
"""
inferred_attributes = []
true_attributes = []
true_labels = []
indices = np.argsort(results.ids)
ids = np.unique(results.ids)
for id_ in np.unique(results.ids):
start = np.searchsorted(results.ids, id_, side='left', sorter=indices)
stop = np.searchsorted(results.ids, id_, side='right', sorter=indices)
attrs = results.inferred_attrs[indices[start:stop]]
if len(attrs):
inferred_attributes.append(attrs.mean())
else:
inferred_attributes.append(np.nan)
trues = results.true_attrs[indices[start:stop]]
has_true = ~np.isnan(trues)
if has_true.sum():
true_attributes.append(trues[has_true].mean())
else:
true_attributes.append(np.nan)
labels = results.true_labels[indices[start:stop]]
has_labels = (labels != "Unknown")
if has_labels.sum():
true_labels.append(labels[has_labels][0])
else:
true_labels.append("Unknown")
return AttributeResults(
ids, np.array(inferred_attributes), np.array(true_attributes),
np.array(true_labels), None)
def harmonic_mean(x, y):
return 2.0 * x * y / (x + y + 1e-10)
def confusion_matrix(results):
"""Compute raw and normalized confusion matrices based on results.
Args:
results: InferenceResults instance
Returns:
ConfusionMatrix instance, with raw and normalized (`scaled`)
attributes.
"""
EPS = 1e-10
cm_raw = base_confusion_matrix(results.true_labels,
results.inferred_labels, results.label_list)
# For off axis, normalize harmonic mean of row / col inverse errors.
# The idea here is that this average will go to 1 => BAD, as
# either the row error or column error approaches 1. That is, if this
# off diagonal element dominates eitehr the predicted values for this
# label OR the actual values for this label. A standard mean will only
# go to zero if it dominates both, but these can become decoupled with
# unbalanced classes.
row_totals = cm_raw.sum(axis=1, keepdims=True)
col_totals = cm_raw.sum(axis=0, keepdims=True)
inv_row_fracs = 1 - cm_raw / (row_totals + EPS)
inv_col_fracs = 1 - cm_raw / (col_totals + EPS)
cm_normalized = 1 - harmonic_mean(inv_col_fracs, inv_row_fracs)
# For on axis, use the F1-score (also a harmonic mean!)
for i in range(len(cm_raw)):
recall = cm_raw[i, i] / (row_totals[i, 0] + EPS)
precision = cm_raw[i, i] / (col_totals[0, i] + EPS)
if row_totals[i, 0] == col_totals[0, i] == 0:
cm_normalized[i, i] = -1 # Not values to compute from
else:
cm_normalized[i, i] = harmonic_mean(recall, precision)
return ConfusionMatrix(cm_raw, cm_normalized)
def load_inferred(inference_table, label_table, extractors):
"""Load inferred data and generate comparison data
"""
query = """
SELECT inference_table.* except (ssvid), ssvid as id FROM
`{}` label_table
JOIN
`{}*` inference_table
ON (cast(label_table.id as string) = inference_table.ssvid)
where split = "Test"
""".format(label_table, inference_table)
print(query)
df = pd.read_gbq(query, project_id='world-fishing-827', dialect='standard')
for row in df.itertuples():
for ext in extractors:
ext.extract(row)
for ext in extractors:
ext.finalize()
def load_class_weights(inference_table):
query = '''
with
core as (
select * from `{}*`
where max_label is not null
),
count as (
select count(*) as total from core
)
select max_label as label, count(*) / total as fraction
from core
cross join count
group by label, total
order by fraction desc
'''.format(inference_table)
df = pd.read_gbq(query, project_id='world-fishing-827', dialect='standard')
wt_map = {x.label : x.fraction for x in df.itertuples()}
return wt_map
def composite_weights(weight_map, class_map, y_true):
y_true = np.asarray(y_true)
new_weight_map = {}
for k, atomic_set in class_map.items():
new_weight_map[k] = sum([weight_map.get(atm, 0) for atm in atomic_set])
weights = np.zeros([len(y_true)])
for lbl, wt in new_weight_map.items():
try:
trues = (y_true == lbl)
except:
print(y_true)
print(lbl)
raise
if trues.sum():
weights[trues] = wt / trues.sum()
return weights / weights.sum()
def rescale_scores(scores, T):
keys = list(scores)
logits = [np.log(scores[k] + 1e-100) for k in keys]
new_scores = [np.exp(l / T) for l in logits]
total = sum(new_scores)
return {k: s / total for (k, s) in zip(keys, new_scores)}
class ClassificationExtractor(InferenceResults):
# Conceptually an InferenceResult
# TODO: fix to make true subclass or return true inference result at finalization time or something.
def __init__(self, label_map, T):
self.label_map = label_map
self.T = T
#
self.all_ids = []
self.all_inferred_labels = []
self.all_true_labels = []
self.all_start_dates = []
self.all_scores = []
#
self.ids = []
self.inferred_labels = []
self.true_labels = []
self.start_dates = []
self.scores = []
#
self.all_labels = set(label_map.values())
def extract(self, row):
id_ = row.id
lbl = self.label_map.get(id_)
raw_label_scores = {x['label'] : x['score'] for x in row.label_scores}
label_scores = rescale_scores(raw_label_scores, self.T)
self.all_labels |= set(label_scores.keys())
start_date = row.start_time
# TODO: write out TZINFO in inference
if start_date.tzinfo is None:
start_date = start_date.replace(tzinfo=pytz.utc)
inferred = row.max_label
# Every row that has inference values get stored in all_
self.all_ids.append(id_)
self.all_start_dates.append(start_date)
self.all_true_labels.append(lbl)
self.all_inferred_labels.append(inferred)
self.all_scores.append(label_scores)
# Only values that have a known component get stored in the not all_ arrays
if lbl is not None and not (isinstance(lbl, float) and np.isnan(lbl)):
self.ids.append(id_)
self.start_dates.append(start_date)
self.true_labels.append(lbl)
self.inferred_labels.append(inferred)
self.scores.append(label_scores)
def finalize(self):
self.inferred_labels = np.array(self.inferred_labels)
self.true_labels = np.array(self.true_labels)
self.start_dates = np.array(self.start_dates)
self.scores = np.array(self.scores)
self.label_list = sorted(
self.all_labels, key=VESSEL_CLASS_DETAILED_NAMES.index)
if len(self.true_labels) == 0:
raise ValueError('no true labels')
self.ids = np.array(self.ids)
for lbl in self.label_list:
true_count = (self.true_labels == lbl).sum()
inf_count = (self.inferred_labels == lbl).sum()
logging.info("%s true and %s inferred labels for %s", true_count,
inf_count, lbl)
def __nonzero__(self):
return len(self.ids) > 0
class AttributeExtractor(object):
def __init__(self, key, attr_map, label_map):
self.key = key
self.attr_map = attr_map
self.label_map = label_map
self.ids = []
self.inferred_attrs = []
self.true_attrs = []
self.true_labels = []
self.start_dates = []
def extract(self, row):
id_ = row.id
if getattr(row, self.key) is None:
return
self.ids.append(id_)
self.start_dates.append(row.start_time)
self.true_attrs.append(
float(self.attr_map[id_]) if (id_ in self.attr_map) else np.nan)
self.true_labels.append(self.label_map.get(id_, 'Unknown'))
self.inferred_attrs.append(getattr(row, self.key))
def finalize(self):
self.inferred_attrs = np.array(self.inferred_attrs)
self.true_attrs = np.array(self.true_attrs)
self.start_dates =
|
np.array(self.start_dates)
|
numpy.array
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import tempfile
import numpy as np
import tables
from astropy.table import Table
from .. import agasc
def test_multi_agasc():
tempdir = tempfile.mkdtemp()
# Make two custom agasc files from the miniagasc, using 20 stars from
# around the middle of the table
with tables.open_file(agasc.default_agasc_file()) as h5:
middle = int(len(h5.root.data) // 2)
stars1 = Table(h5.root.data[middle: middle + 20])
stars1.write(os.path.join(tempdir, 'stars1.h5'), path='data')
stars2 = Table(h5.root.data[middle + 20:middle + 60])
stars2.write(os.path.join(tempdir, 'stars2.h5'), path='data')
# Fetch all the stars from a custom agasc and make sure we have the right number of stars
# with no errors
all_stars2 = agasc.get_agasc_cone(0, 90, radius=180,
agasc_file=os.path.join(tempdir, 'stars2.h5'))
assert len(all_stars2) == len(stars2)
# Fetch all the stars from the other custom agasc and do the same. The point of the two files
# is to confirm that the caching behavior in agasc doesn't cause problems with fetches
all_stars1 = agasc.get_agasc_cone(0, 90, radius=180,
agasc_file=os.path.join(tempdir, 'stars1.h5'))
assert len(all_stars1) == len(stars1)
# Do a position filtered search using the first star in the table as a reference and make sure
# we get the same star from the reference agasc. Do this with the stars2 file as this confirms
# that we can switch back and forth between files and get the correct content.
cone2 = agasc.get_agasc_cone(all_stars2['RA'][0], all_stars2['DEC'][0], radius=0.000001,
agasc_file=os.path.join(tempdir, 'stars2.h5'))
# And this is a read of the default agasc file after the custom ones so should confirm that
# the custom files didn't break that access.
cone2_full = agasc.get_agasc_cone(all_stars2['RA'][0], all_stars2['DEC'][0], radius=0.000001)
assert cone2[0]['AGASC_ID'] == cone2_full[0]['AGASC_ID']
# Confirm that there is just one star in this test setup (not a module test, but confirms test
# setup is as intended).
assert len(cone2_full) == 1
assert len(cone2) == len(cone2_full)
def test_update_color1_func():
"""
Test code to update the COLOR1 column.
"""
color1 = [1.0, 1.0, 1.5, 1.5, 1.5, 1.5]
color2 =
|
np.array([1.0, 1.0, 1.5, 1.5, 1.75, 2.0])
|
numpy.array
|
"""
@brief test log(time=2s)
"""
import unittest
from logging import getLogger
from contextlib import redirect_stdout
from io import StringIO
import numpy
from scipy.sparse import coo_matrix, csr_matrix, SparseEfficiencyWarning
from scipy.special import ( # pylint: disable=E0611
expit as logistic_sigmoid,
erf)
from scipy.spatial.distance import cdist
from onnx import TensorProto
from onnx.helper import make_sparse_tensor, make_tensor
from onnx.defs import onnx_opset_version
from pyquickhelper.pycode import ExtTestCase
from sklearn.utils.extmath import softmax
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611
OnnxAbs, OnnxAdd, OnnxAnd, OnnxArgMax, OnnxArgMin, OnnxAtan,
OnnxBatchNormalization,
OnnxConcat, OnnxConv, OnnxConvTranspose,
OnnxCeil, OnnxClip,
OnnxConstant, OnnxConstant_9, OnnxConstant_11,
OnnxConstantOfShape,
OnnxDequantizeLinear,
OnnxDiv,
OnnxEinsum, OnnxEqual, OnnxErf, OnnxExp, OnnxEyeLike,
OnnxFlatten, OnnxFloor,
OnnxGreater, OnnxGreaterOrEqual, OnnxGemm, OnnxGlobalAveragePool,
OnnxIdentity, OnnxIsNaN,
OnnxLog, OnnxLpNormalization,
OnnxMatMul, OnnxMax, OnnxMean, OnnxMin, OnnxMul,
OnnxNeg, OnnxNot,
OnnxOr,
OnnxPow,
OnnxQuantizeLinear,
OnnxReciprocal,
OnnxReduceLogSumExp, OnnxReduceMax, OnnxReduceMean, OnnxReduceMin,
OnnxReduceProd, OnnxReduceSum, OnnxReduceSumSquare,
OnnxRelu, OnnxReshape,
OnnxShape, OnnxSlice, OnnxSigmoid, OnnxSign, OnnxSin,
OnnxSoftmax, OnnxSqueeze, OnnxSplit,
OnnxSqrt, OnnxSub, OnnxSum,
OnnxTopK, OnnxTranspose,
OnnxUnsqueeze,
)
try:
from skl2onnx.algebra.onnx_ops import OnnxCelu
except ImportError:
OnnxCelu = None
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType, DoubleTensorType
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt import OnnxInference
from mlprodict.tools.asv_options_helper import (
get_opset_number_from_onnx, get_ir_version_from_onnx)
from mlprodict.onnxrt.validate.validate_python import validate_python_inference
from mlprodict.onnxrt.ops_cpu.op_batch_normalization import _batchnorm_test_mode
from mlprodict.onnxrt.ops_cpu.op_global_average_pool import _global_average_pool
from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611
topk_element_min_double, topk_element_max_double, topk_element_fetch_double,
topk_element_min_float, topk_element_max_float, topk_element_fetch_float,
topk_element_min_int64, topk_element_max_int64, topk_element_fetch_int64)
from mlprodict.onnxrt.ops_cpu.op_celu import _vcelu1, pycelu
from mlprodict.onnxrt.ops_cpu.op_topk import topk_sorted_implementation
sparse_support = []
sparse_no_numpy = []
python_tested = []
def make_coo_matrix(*args, **kwargs):
coo = coo_matrix(*args, **kwargs)
coo.row = coo.row.astype(numpy.int64)
coo.col = coo.col.astype(numpy.int64)
return coo
class TestOnnxrtPythonRuntime(ExtTestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if __name__ == "__main__":
import pprint
print('-----------')
pprint.pprint(sparse_support)
print('-----------')
pprint.pprint(sparse_no_numpy)
print('-----------')
pprint.pprint(
list(sorted({_.__name__ for _ in python_tested})))
print('-----------')
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
@ignore_warnings(category=(RuntimeWarning, DeprecationWarning,
SparseEfficiencyWarning, PendingDeprecationWarning))
def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct,
op_version=None,
outputs=None, debug=False):
if op_version is None:
op_version = get_opset_number_from_onnx()
try:
onx = onnx_cl('X', output_names=['Y'], op_version=op_version)
except RuntimeError as e:
raise RuntimeError('onnx.opset={} op_version={}'.format(
get_opset_number_from_onnx(), op_version)) from e
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
model_def = onx.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version,
outputs=outputs)
if debug:
print(model_def)
# python code
python_tested.append(onnx_cl)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': X.astype(numpy.float32)})
# no inplace
oinf = OnnxInference(model_def, inplace=False)
all_names = "\n".join(
"%s>=v%d" % (op.ops_.__class__.__name__,
op.ops_._schema.since_version) # pylint: disable=W0212
for op in oinf.sequence_)
if debug:
got = oinf.run({'X': X}, verbose=1, fLOG=print)
else:
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
try:
self.assertEqualArray(np_fct(X), got['Y'], decimal=6)
except AssertionError as e:
raise AssertionError(
'onnx.opset={} op_version={}\n--ONNX--\n{}\n--NAMES--\n{}'.format(
get_opset_number_from_onnx(), op_version, model_def,
all_names)) from e
# inplace
oinf = OnnxInference(model_def, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(np_fct(X), got['Y'], decimal=6)
# inplace2
onx2 = OnnxIdentity(
onnx_cl('X', op_version=op_version),
output_names=['Y'], op_version=op_version)
model_def2 = onx2.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version,
outputs=outputs)
oinf = OnnxInference(model_def2, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(np_fct(X), got['Y'], decimal=6)
# input inplace
expe = np_fct(X)
oinf = OnnxInference(model_def, input_inplace=True, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(expe, got['Y'], decimal=6)
# sparse
row = numpy.array([0, 0, 1, 3, 1])
col =
|
numpy.array([0, 2, 1, 3, 1])
|
numpy.array
|
"""
DESHIMA Model signal_transmitter
Transmits a signal through all components of the ASTE telescope,
which corresponds to transmitting a signal through multiple objects in the model
"""
#import time
import math
from joblib import Parallel, delayed
import numpy as np
from pathlib import Path
import galspec
import gc
from .DESHIMA.desim import minidesim as dsm
from .Atmosphere import use_aris
from .Telescope import telescope_transmission as tt
from .DESHIMA import use_desim
from .DESHIMA.MKID import photon_noise as pn
# import DESHIMA.desim.minidesim as dsm
# import Atmosphere.use_aris as use_aris
# import Telescope.telescope_transmission as tt
# import DESHIMA.use_desim as use_desim
# import DESHIMA.MKID.photon_noise as pn
def unwrap_processInput_vec(st1, i, aris_instance, use_desim_instance, time_step, count):
"""
Wrapper function for processInput, in order to avoid bugs with joblib.parallel
"""
if st1.vecmode: st1.EL = st1.EL_vec[i]
output = st1.processInput(i=i, aris_instance=aris_instance, use_desim_instance=use_desim_instance, time_step=time_step, count=count)
gc.collect()
return output
def unwrap_processInput(st1, i, aris_instance, use_desim_instance, time_step, count):
"""
Wrapper function for processInput, in order to avoid bugs with joblib.parallel
"""
output = st1.processInput(i=i, aris_instance=aris_instance, use_desim_instance=use_desim_instance, time_step=time_step, count=count)
del st1
gc.collect()
return output
class signal_transmitter(object):
"Class that transmits the signal through all components of the model"
sampling_rate = 160
def __init__(self, input):
self.input = input
self.F_min = input['F_min']
self.num_bins = input['num_bins']
self.spec_res = input['spec_res']
self.f_spacing = input['f_spacing']
self.F0 = input['F_min']
self.time = input['time']
self.num_filters = input['num_filters']
self.windspeed = input['windspeed']
self.prefix_atm_data = input['prefix_atm_data']
self.grid = input['grid']
self.x_length_strip = input['x_length_strip']
self.beam_radius = input['beam_radius']
self.useDESIM = input['useDESIM']
self.inclAtmosphere = input['inclAtmosphere']
self.galaxy_on = input['galaxy_on']
self.luminosity = input['luminosity']
self.redshift = input['redshift']
self.linewidth = input['linewidth']
self.EL = input['EL']
self.EL_vec = input['EL_vec']
if type(self.EL_vec) == type(None): self.vecmode = False
else: self.vecmode = True
self.max_num_strips = input['max_num_strips']
self.save_name_data = input['save_name_data']
self.pwv_0 = input['pwv_0']
self.D1 = input['D1']
self.n_jobs = input['n_jobs']
self.path_model = Path(__file__).parent
self.save_path = input['savefolder']
if Path.exists(self.save_path) == False:
self.save_path.mkdir(parents = True)
self.sourcepath = input['sourcefolder']
self.F_max = self.F_min * (1 + 1/self.f_spacing)**(self.num_filters - 1)
F = np.logspace(
|
np.log10(self.F_min)
|
numpy.log10
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from builtins import str
import datetime
from astropy.io import fits
import numpy as np
from .._version import get_versions
version = get_versions()["version"]
del get_versions
def read_table(file, format):
"""
Read data from a txt file (sorted by columns), the type of data
(string, integer or float) MUST be given in "format".
This routine will ONLY read the columns for which "format" is defined.
E.g. for a txt file with 7 data columns, using format=["f", "f", "s"] will only read the 3 first columns.
Parameters
----------
file:
txt file to be read
format:
List with the format of each column of the data, using:\n
"i" for a integer\n
"f" for a float\n
"s" for a string (text)
Example
-------
>>> the_center,the_fnl,the_name = read_table("lineas_c89_python.dat", ["f", "f", "s"] )
"""
data_len = len(format)
data = [[] for x in range(data_len)]
for i in range(0, data_len):
if format[i] == "i":
data[i] = np.loadtxt(
file, skiprows=0, unpack=True, usecols=[i], dtype=int
)
if format[i] == "s":
data[i] = np.loadtxt(
file, skiprows=0, unpack=True, usecols=[i], dtype=str
)
if format[i] == "f":
data[i] = np.loadtxt(
file, skiprows=0, unpack=True, usecols=[i], dtype=float
)
return data
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def array_to_text_file(data, filename="array.dat"):
"""
Write array into a text file.
Parameters
----------
data: float
flux per wavelength
filename: string (default = "array.dat")
name of the text file where the data will be written.
Example
-------
>>> array_to_text_file(data, filename="data.dat" )
"""
f = open(filename, "w")
for i in range(len(data)):
escribe = np.str(data[i]) + " \n"
f.write(escribe)
f.close()
print("\n> Array saved in text file", filename, " !!")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def spectrum_to_text_file(wavelength, flux, filename="spectrum.txt"):
"""
Write given 1D spectrum into a text file.
Parameters
----------
wavelength: float
wavelength.
flux: float
flux per wavelength
filename: string (default = "spectrum.txt")
name of the text file where the data will be written.
Example
-------
>>> spectrum_to_text_file(wavelength, spectrum, filename="fantastic_spectrum.txt" )
"""
f = open(filename, "w")
for i in range(len(wavelength)):
escribe = np.str(wavelength[i]) + " " + np.str(flux[i]) + " \n"
f.write(escribe)
f.close()
print("\n> Spectrum saved in text file", filename, " !!")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def spectrum_to_fits_file(
wavelength,
flux,
filename="spectrum.fits",
name="spectrum",
exptime=1,
CRVAL1_CDELT1_CRPIX1=[0, 0, 0],
):
"""
Routine to save a given 1D spectrum into a fits file.
If CRVAL1_CDELT1_CRPIX1 it not given, it assumes a LINEAR dispersion,
with Delta_pix = (wavelength[-1]-wavelength[0])/(len(wavelength)-1).
Parameters
----------
wavelength: float
wavelength.
flux: float
flux per wavelength
filename: string (default = "spectrum.fits")
name of the fits file where the data will be written.
Example
-------
>>> spectrum_to_fits_file(wavelength, spectrum, filename="fantastic_spectrum.fits",
exptime=600,name="POX 4")
"""
hdu = fits.PrimaryHDU()
hdu.data = flux
hdu.header["ORIGIN"] = "Data from KOALA Python scripts"
# Wavelength calibration
hdu.header["NAXIS"] = 1
hdu.header["NAXIS1"] = len(wavelength)
hdu.header["CTYPE1"] = "Wavelength"
hdu.header["CUNIT1"] = "Angstroms"
if CRVAL1_CDELT1_CRPIX1[0] == 0:
hdu.header["CRVAL1"] = wavelength[0]
hdu.header["CRPIX1"] = 1.0
hdu.header["CDELT1"] = (wavelength[-1] - wavelength[0]) / (len(wavelength) - 1)
else:
hdu.header["CRVAL1"] = CRVAL1_CDELT1_CRPIX1[
0
] # 7.692370611909E+03 / Co-ordinate value of axis 1
hdu.header["CDELT1"] = CRVAL1_CDELT1_CRPIX1[1] # 1.575182431607E+00
hdu.header["CRPIX1"] = CRVAL1_CDELT1_CRPIX1[
2
] # 1024. / Reference pixel along axis 1
# Extra info
hdu.header["OBJECT"] = name
hdu.header["TOTALEXP"] = exptime
hdu.header["HISTORY"] = "Spectrum derived using the KOALA Python pipeline"
hdu.header[
"HISTORY"
] = "Developed by <NAME>, <NAME>, <NAME> et al."
hdu.header["HISTORY"] = version
now = datetime.datetime.now()
hdu.header["HISTORY"] = now.strftime("Created on %d %b %Y, %H:%M:%S")
hdu.header["DATE"] = now.strftime(
"%Y-%m-%dT%H:%M:%S"
) # '2002-09-16T18:52:44' # /Date of FITS file creation
hdu.writeto(filename, overwrite=True)
print("\n> Spectrum saved in fits file", filename, " !!")
if name == "spectrum":
print(" No name given to the spectrum, named 'spectrum'.")
if exptime == 1:
print(" No exposition time given, assumed exptime = 1")
if CRVAL1_CDELT1_CRPIX1[0] == 0:
print(
" CRVAL1_CDELT1_CRPIX1 values not given, using ",
wavelength[0],
"1",
(wavelength[-1] - wavelength[0]) / (len(wavelength) - 1)
)
def save_bluered_fits_file(
blue_cube,
red_cube,
fits_file,
fcalb=[0],
fcalr=[0],
ADR=False,
objeto="",
description="",
trimb=[0],
trimr=[0],
):
"""
Routine combine blue + red files and save result in a fits file fits file
Parameters
----------
Combined cube:
Combined cube
Header:
Header
"""
# Prepare the red+blue datacube
print("\n> Combining blue + red datacubes...")
if trimb[0] == 0:
lb = blue_cube.wavelength
b = blue_cube.data
else:
print(" Trimming blue cube in range [{},{}]".format(trimb[0], trimb[1]))
index_min = np.searchsorted(blue_cube.wavelength, trimb[0])
index_max = np.searchsorted(blue_cube.wavelength, trimb[1]) + 1
lb = blue_cube.wavelength[index_min:index_max]
b = blue_cube.data[index_min:index_max]
fcalb = fcalb[index_min:index_max]
if trimr[0] == 0:
lr = red_cube.wavelength
r = red_cube.data
else:
print(" Trimming red cube in range [{},{}]".format(trimr[0], trimr[1]))
index_min = np.searchsorted(red_cube.wavelength, trimr[0])
index_max = np.searchsorted(red_cube.wavelength, trimr[1]) + 1
lr = red_cube.wavelength[index_min:index_max]
r = red_cube.data[index_min:index_max]
fcalr = fcalr[index_min:index_max]
l = np.concatenate((lb, lr), axis=0)
blue_red_datacube = np.concatenate((b, r), axis=0)
if fcalb[0] == 0:
print(" No absolute flux calibration included")
else:
flux_calibration = np.concatenate((fcalb, fcalr), axis=0)
if objeto == "":
description = "UNKNOWN OBJECT"
fits_image_hdu = fits.PrimaryHDU(blue_red_datacube)
# errors = combined_cube.data*0 ### TO BE DONE
# error_hdu = fits.ImageHDU(errors)
wavelengths_hdu = fits.ImageHDU(l)
fits_image_hdu.header["ORIGIN"] = "Combined datacube from KOALA Python scripts"
fits_image_hdu.header["BITPIX"] = 16
fits_image_hdu.header["NAXIS"] = 3
fits_image_hdu.header["NAXIS1"] = len(l)
fits_image_hdu.header["NAXIS2"] = blue_red_datacube.shape[1] # CHECK !!!!!!!
fits_image_hdu.header["NAXIS2"] = blue_red_datacube.shape[2]
fits_image_hdu.header["OBJECT"] = objeto
fits_image_hdu.header["RAcen"] = blue_cube.RA_centre_deg
fits_image_hdu.header["DECcen"] = blue_cube.DEC_centre_deg
fits_image_hdu.header["PIXsize"] = blue_cube.pixel_size_arcsec
fits_image_hdu.header["Ncols"] = blue_cube.data.shape[2]
fits_image_hdu.header["Nrows"] = blue_cube.data.shape[1]
fits_image_hdu.header["PA"] = blue_cube.PA
# fits_image_hdu.header["CTYPE1"] = 'LINEAR '
# fits_image_hdu.header["CRVAL1"] = wavelength[0]
# fits_image_hdu.header["CRPIX1"] = 1.
# fits_image_hdu.header["CDELT1"] = (wavelength[-1]-wavelength[0])/len(wavelength)
# fits_image_hdu.header["CD1_1"] = (wavelength[-1]-wavelength[0])/len(wavelength)
# fits_image_hdu.header["LTM1_1"] = 1.
fits_image_hdu.header[
"COFILES"
] = blue_cube.number_of_combined_files # Number of combined files
fits_image_hdu.header["OFFSETS"] = blue_cube.offsets_files # Offsets
fits_image_hdu.header["ADRCOR"] = np.str(ADR)
if fcalb[0] == 0:
fits_image_hdu.header["FCAL"] = "False"
flux_correction_hdu = fits.ImageHDU(0 * l)
else:
flux_correction = flux_calibration
flux_correction_hdu = fits.ImageHDU(flux_correction)
fits_image_hdu.header["FCAL"] = "True"
if description == "":
description = flux_calibration.description
fits_image_hdu.header["DESCRIP"] = description
# hdu_list = fits.HDUList([fits_image_hdu, error_hdu])
hdu_list = fits.HDUList([fits_image_hdu, wavelengths_hdu, flux_correction_hdu])
hdu_list.writeto(fits_file, overwrite=True)
print("\n> Combined datacube saved to file ", fits_file)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def save_fits_file(combined_cube, fits_file, description="", ADR=False): # fcal=[0],
"""
Routine to save a fits file
Parameters
----------
Combined cube:
Combined cube
Header:
Header
"""
fits_image_hdu = fits.PrimaryHDU(combined_cube.data)
# errors = combined_cube.data*0 ### TO BE DONE
# error_hdu = fits.ImageHDU(errors)
# wavelength = combined_cube.wavelength
fits_image_hdu.header["HISTORY"] = "Combined datacube from KOALA Python pipeline"
fits_image_hdu.header[
"HISTORY"
] = "Developed by <NAME>, <NAME>, <NAME> et al."
fits_image_hdu.header["HISTORY"] = version # 'Version 0.10 - 12th February 2019'
now = datetime.datetime.now()
fits_image_hdu.header["HISTORY"] = now.strftime("Created on %d %b %Y, %H:%M:%S")
fits_image_hdu.header["DATE"] = now.strftime(
"%Y-%m-%dT%H:%M:%S"
) # '2002-09-16T18:52:44' # /Date of FITS file creation
fits_image_hdu.header["BITPIX"] = 16
fits_image_hdu.header["ORIGIN"] = "AAO" # / Originating Institution
fits_image_hdu.header["TELESCOP"] = "Anglo-Australian Telescope" # / Telescope Name
fits_image_hdu.header["ALT_OBS"] = 1164 # / Altitude of observatory in metres
fits_image_hdu.header["LAT_OBS"] = -31.27704 # / Observatory latitude in degrees
fits_image_hdu.header["LONG_OBS"] = 149.0661 # / Observatory longitude in degrees
fits_image_hdu.header["INSTRUME"] = "AAOMEGA-KOALA" # / Instrument in use
fits_image_hdu.header["GRATID"] = combined_cube.RSS.grating # / Disperser ID
if combined_cube.RSS.grating == "385R":
SPECTID = "RD"
if combined_cube.RSS.grating == "580V":
SPECTID = "BD"
if combined_cube.RSS.grating == "1000R":
SPECTID = "RD"
if combined_cube.RSS.grating == "1000I":
SPECTID = "RD"
fits_image_hdu.header["SPECTID"] = SPECTID # / Spectrograph ID
fits_image_hdu.header[
"DICHROIC"
] = "X5700" # / Dichroic name ---> CHANGE if using X6700!!
fits_image_hdu.header["OBJECT"] = combined_cube.object
fits_image_hdu.header["TOTALEXP"] = combined_cube.total_exptime
fits_image_hdu.header["NAXIS"] = 3 # / number of array dimensions
fits_image_hdu.header["NAXIS1"] = combined_cube.data.shape[1] # CHECK !!!!!!!
fits_image_hdu.header["NAXIS2"] = combined_cube.data.shape[2]
fits_image_hdu.header["NAXIS3"] = combined_cube.data.shape[0]
# WCS
fits_image_hdu.header["RADECSYS"] = "FK5" # / FK5 reference system
fits_image_hdu.header["EQUINOX"] = 2000 # / [yr] Equinox of equatorial coordinates
fits_image_hdu.header["WCSAXES"] = 3 # / Number of coordinate axes
fits_image_hdu.header["CRPIX1"] = (
combined_cube.data.shape[1] / 2.0
) # / Pixel coordinate of reference point
fits_image_hdu.header["CDELT1"] = (
-combined_cube.pixel_size_arcsec / 3600.0
) # / Coordinate increment at reference point
fits_image_hdu.header[
"CTYPE1"
] = "RA--TAN" # 'DEGREE' # / Coordinate type code
fits_image_hdu.header[
"CRVAL1"
] = combined_cube.RA_centre_deg # / Coordinate value at reference point
fits_image_hdu.header["CRPIX2"] = (
combined_cube.data.shape[2] / 2.0
) # / Pixel coordinate of reference point
fits_image_hdu.header["CDELT2"] = (
combined_cube.pixel_size_arcsec / 3600.0
) # Coordinate increment at reference point
fits_image_hdu.header[
"CTYPE2"
] = "DEC--TAN" # 'DEGREE' # / Coordinate type code
fits_image_hdu.header[
"CRVAL2"
] = combined_cube.DEC_centre_deg # / Coordinate value at reference point
fits_image_hdu.header["RAcen"] = combined_cube.RA_centre_deg
fits_image_hdu.header["DECcen"] = combined_cube.DEC_centre_deg
fits_image_hdu.header["PIXsize"] = combined_cube.pixel_size_arcsec
fits_image_hdu.header["Ncols"] = combined_cube.data.shape[2]
fits_image_hdu.header["Nrows"] = combined_cube.data.shape[1]
fits_image_hdu.header["PA"] = combined_cube.PA
# Wavelength calibration
fits_image_hdu.header["CTYPE3"] = "Wavelength" # / Label for axis 3
fits_image_hdu.header["CUNIT3"] = "Angstroms" # / Units for axis 3
fits_image_hdu.header["CRVAL3"] = combined_cube.CRVAL1_CDELT1_CRPIX1[
0
] # 7.692370611909E+03 / Co-ordinate value of axis 3
fits_image_hdu.header["CDELT3"] = combined_cube.CRVAL1_CDELT1_CRPIX1[
1
] # 1.575182431607E+00
fits_image_hdu.header["CRPIX3"] = combined_cube.CRVAL1_CDELT1_CRPIX1[
2
] # 1024. / Reference pixel along axis 3
fits_image_hdu.header["COFILES"] = (
len(combined_cube.offsets_files) + 1
) # Number of combined files
offsets_text = " "
for i in range(len(combined_cube.offsets_files)):
if i != 0:
offsets_text = offsets_text + " , "
offsets_text = (
offsets_text
+ np.str(np.around(combined_cube.offsets_files[i][0], 3))
+ " "
+ np.str(np.around(combined_cube.offsets_files[i][1], 3))
)
fits_image_hdu.header["OFFSETS"] = offsets_text # Offsets
fits_image_hdu.header["ADRCOR"] = np.str(ADR)
if np.nanmedian(combined_cube.data) > 1:
fits_image_hdu.header["FCAL"] = "False"
fits_image_hdu.header["F_UNITS"] = "Counts"
# flux_correction_hdu = fits.ImageHDU(0*wavelength)
else:
# flux_correction = fcal
# flux_correction_hdu = fits.ImageHDU(flux_correction)
fits_image_hdu.header["FCAL"] = "True"
fits_image_hdu.header["F_UNITS"] = "erg s-1 cm-2 A-1"
if description == "":
description = combined_cube.description
fits_image_hdu.header["DESCRIP"] = description
for file in range(len(combined_cube.rss_list)):
fits_image_hdu.header["HISTORY"] = (
"RSS file " + np.str(file + 1) + ":" + combined_cube.rss_list[file]
)
# hdu_list = fits.HDUList([fits_image_hdu, error_hdu])
# hdu_list = fits.HDUList([fits_image_hdu, wavelengths_hdu, flux_correction_hdu])
hdu_list = fits.HDUList([fits_image_hdu]) # , flux_correction_hdu])
hdu_list.writeto(fits_file, overwrite=True)
print("\n> Combined datacube saved to file:", fits_file)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def save_rss_fits(
rss, data=[[0], [0]], fits_file="RSS_rss.fits", description=""
): # fcal=[0], # TASK_save_rss_fits
"""
Routine to save RSS data as fits
Parameters
----------
rss is the rss
description = if you want to add a description
"""
if np.nanmedian(data[0]) == 0:
data = rss.intensity_corrected
print("\n> Using rss.intensity_corrected of given RSS file to create fits file...")
else:
if len(np.array(data).shape) != 2:
print("\n> The data provided are NOT valid, as they have a shape", data.shape)
print(" Using rss.intensity_corrected instead to create a RSS fits file !")
data = rss.intensity_corrected
else:
print("\n> Using the data provided + structure of given RSS file to create fits file...")
fits_image_hdu = fits.PrimaryHDU(data)
fits_image_hdu.header["HISTORY"] = "RSS from KOALA Python pipeline"
fits_image_hdu.header[
"HISTORY"
] = "Developed by <NAME>, <NAME>, <NAME> et al."
fits_image_hdu.header["HISTORY"] = version # 'Version 0.10 - 12th February 2019'
now = datetime.datetime.now()
fits_image_hdu.header["HISTORY"] = now.strftime("Created on %d %b %Y, %H:%M:%S")
fits_image_hdu.header["DATE"] = now.strftime(
"%Y-%m-%dT%H:%M:%S"
) # '2002-09-16T18:52:44' # /Date of FITS file creation
fits_image_hdu.header["BITPIX"] = 16
fits_image_hdu.header["ORIGIN"] = "AAO" # / Originating Institution
fits_image_hdu.header["TELESCOP"] = "Anglo-Australian Telescope" # / Telescope Name
fits_image_hdu.header["ALT_OBS"] = 1164 # / Altitude of observatory in metres
fits_image_hdu.header["LAT_OBS"] = -31.27704 # / Observatory latitude in degrees
fits_image_hdu.header["LONG_OBS"] = 149.0661 # / Observatory longitude in degrees
fits_image_hdu.header["INSTRUME"] = "AAOMEGA-KOALA" # / Instrument in use
fits_image_hdu.header["GRATID"] = rss.grating # / Disperser ID
if rss.grating == "385R":
SPECTID = "RD"
if rss.grating == "580V":
SPECTID = "BD"
if rss.grating == "1000R":
SPECTID = "RD"
if rss.grating == "1000I":
SPECTID = "RD"
fits_image_hdu.header["SPECTID"] = SPECTID # / Spectrograph ID
fits_image_hdu.header[
"DICHROIC"
] = "X5700" # / Dichroic name ---> CHANGE if using X6700!!
fits_image_hdu.header["OBJECT"] = rss.object
fits_image_hdu.header["EXPOSED"] = rss.exptime
fits_image_hdu.header["ZDSTART"] = rss.ZDSTART
fits_image_hdu.header["ZDEND"] = rss.ZDEND
fits_image_hdu.header["NAXIS"] = 2 # / number of array dimensions
fits_image_hdu.header["NAXIS1"] = rss.intensity_corrected.shape[0]
fits_image_hdu.header["NAXIS2"] = rss.intensity_corrected.shape[1]
fits_image_hdu.header["RAcen"] = rss.RA_centre_deg
fits_image_hdu.header["DECcen"] = rss.DEC_centre_deg
fits_image_hdu.header["TEL_PA"] = rss.PA
fits_image_hdu.header["CTYPE2"] = "Fibre number" # / Label for axis 2
fits_image_hdu.header["CUNIT2"] = " " # / Units for axis 2
fits_image_hdu.header["CTYPE1"] = "Wavelength" # / Label for axis 2
fits_image_hdu.header["CUNIT1"] = "Angstroms" # / Units for axis 2
fits_image_hdu.header["CRVAL1"] = rss.CRVAL1_CDELT1_CRPIX1[
0
] # / Co-ordinate value of axis 2
fits_image_hdu.header["CDELT1"] = rss.CRVAL1_CDELT1_CRPIX1[1] #
fits_image_hdu.header["CRPIX1"] = rss.CRVAL1_CDELT1_CRPIX1[
2
] # 1024. / Reference pixel along axis 2
fits_image_hdu.header[
"CRVAL2"
] = 5.000000000000e-01 # / Co-ordinate value of axis 2
fits_image_hdu.header[
"CDELT2"
] = 1.000000000000e00 # / Co-ordinate increment along axis 2
fits_image_hdu.header[
"CRPIX2"
] = 1.000000000000e00 # / Reference pixel along axis 2
if description == "":
description = rss.description
fits_image_hdu.header["DESCRIP"] = description
# TO BE DONE
errors = [0] # TO BE DONE
error_hdu = fits.ImageHDU(errors)
# Header 2 with the RA and DEC info!
header2_all_fibres = rss.header2_data
header2_good_fibre = []
header2_original_fibre = []
header2_new_fibre = []
header2_delta_RA = []
header2_delta_DEC = []
header2_2048 = []
header2_0 = []
fibre = 1
for i in range(len(header2_all_fibres)):
if header2_all_fibres[i][1] == 1:
header2_original_fibre.append(i + 1)
header2_new_fibre.append(fibre)
header2_good_fibre.append(1)
header2_delta_RA.append(header2_all_fibres[i][5])
header2_delta_DEC.append(header2_all_fibres[i][6])
header2_2048.append(2048)
header2_0.append(0)
fibre = fibre + 1
# header2_=[header2_new_fibre, header2_good_fibre, header2_good_fibre, header2_2048, header2_0, header2_delta_RA, header2_delta_DEC, header2_original_fibre]
# header2 = np.array(header2_).T.tolist()
# header2_hdu = fits.ImageHDU(header2)
col1 = fits.Column(name="Fibre", format="I", array=np.array(header2_new_fibre))
col2 = fits.Column(name="Status", format="I", array=np.array(header2_good_fibre))
col3 = fits.Column(name="Ones", format="I", array=np.array(header2_good_fibre))
col4 = fits.Column(name="Wavelengths", format="I", array=np.array(header2_2048))
col5 = fits.Column(name="Zeros", format="I", array=np.array(header2_0))
col6 = fits.Column(name="Delta_RA", format="D", array=np.array(header2_delta_RA))
col7 = fits.Column(name="Delta_Dec", format="D", array=
|
np.array(header2_delta_DEC)
|
numpy.array
|
# -*- coding: utf-8 -*-
import linecache
import numpy as np
import tensorflow as tf
import time
import pdb
class DataSet(object):
"""A data generator.
"""
def __init__(self,filename,batch_size=None,shuffle=True):
self.filename = filename
self.total_num = self._count_data_len(filename)
self.raw_data = linecache.getlines(filename)
self.all_data = None
if batch_size is not None:
self.batch_gen = self._batch_generator(batch_size,shuffle)
def get_batch(self):
return self.batch_gen.__next__()
def get_all_data(self):
if self.all_data is None:
feature_data = []
label_data = []
for data in self.raw_data:
_,feat,_,label = self._parse_line(data)
feature_data.append(feat)
label_data.append(label)
all_data = {"feature":np.array(feature_data),
"label":np.array(label_data)}
self.all_data = all_data
return self.all_data
def stats(self):
"""Get statistics of the dataset.
"""
m = n = 0
for data in self.raw_data:
field, feat, _, _ = self._parse_line(data)
m = max(m, np.max(field) + 1)
n = max(n, np.max(feat) + 1)
self.total_field = m
self.total_feature = n
print("-"*10 + ">Data Statistics<" + "-"*10)
print("Num of feature", n)
print("Num of field", m)
return
def _batch_generator(self,batch_size,shuffle=True):
indices = list(range(self.total_num))
if shuffle:
np.random.seed(724)
np.random.shuffle(indices)
batch_count = 0
while True:
if (batch_count + 1) * batch_size > self.total_num:
batch_count = 0
if shuffle:
np.random.shuffle(indices)
start_idx = batch_count * batch_size
end_idx = start_idx + batch_size
batch_count += 1
batch_data = self.raw_data[start_idx:end_idx]
batch_field, batch_feature, batch_val, batch_label = [],[],[],[]
for b in batch_data:
field,feat,val,label = self._parse_line(b)
batch_field.append(field)
batch_feature.append(feat)
batch_val.append(val)
batch_label.append(label)
yield np.array(batch_field),np.array(batch_feature),\
np.array(batch_val),np.array(batch_label)
def _parse_line(self,line):
line = line.strip().split()
label = int(line[0])
line_data = np.array([l.split(":") for l in line[1:]])
field_idx = line_data[:,0].astype(int)
feat_idx = line_data[:,1].astype(int)
vals = line_data[:,2].astype(np.float32)
return field_idx,feat_idx,vals,label
def _count_data_len(self,filename):
with open(filename) as f:
nr_of_lines = sum(1 for line in f)
return nr_of_lines
class DataSetTF(object):
"""A data generator based on tensorflow.data.Dataset API.
"""
def __init__(self,filename,batch_size=None,shuffle=True):
self.filename = filename
self.raw_data = linecache.getlines(filename)
# create numpy data and tf.data.Dataset
self._dataset_initialize()
self._dataset_transform(batch_size,shuffle)
def get_batch(self):
return self.iterator.get_next()
def init_iterator(self,sess):
sess.run(self.iterator.initializer, feed_dict = {
self.feat_plhd: self.all_data["feature"],
self.label_plhd: self.all_data["label"]})
def _dataset_initialize(self):
print("=> [INFO] Initializing dataset ... <=")
feature_data = []
label_data = []
for line in self.raw_data:
_,feat,_,label = self._parse_line(line)
feature_data.append(feat)
label_data.append(label)
self.all_data = { "feature" : np.array(feature_data), "label" : np.array(label_data)}
print("=> [INFO] Dataset is initialized ! <=")
def _dataset_transform(self,batch_size,shuffle):
# self.dataset = tf.data.Dataset.from_tensor_slices(self.all_data)
self.feat_plhd = tf.placeholder(tf.int32,self.all_data["feature"].shape)
self.label_plhd = tf.placeholder(tf.float32,self.all_data["label"].shape)
self.dataset = tf.data.Dataset.from_tensor_slices((self.feat_plhd,self.label_plhd))
if shuffle:
self.dataset = self.dataset.shuffle(buffer_size=100000)
if batch_size is not None: # for training
self.dataset = self.dataset.batch(batch_size)
self.dataset = self.dataset.repeat()
else: # for validation
self.dataset = self.dataset.batch(8192)
# self.iterator = self.dataset.make_one_shot_iterator()
self.iterator = self.dataset.make_initializable_iterator()
def _parse_line(self,line):
line = line.strip().split()
label =
|
np.float32(line[0])
|
numpy.float32
|
"""
Evaluates number of samples needed to reach some return or regret threshold.
"""
import argparse
import datetime
import os
import pickle
import subprocess
import time
from functools import partial
from textwrap import wrap
import matplotlib.pyplot as plt
import numpy as np
from frozendict import frozendict
from matplotlib.ticker import MaxNLocator
from active_reward_learning.common.constants import (
PLOTS_PATH,
color_cycle,
hatch_cycle,
marker_cycle,
)
from active_reward_learning.util.helpers import (
get_acquisition_function_label,
get_acquisition_function_label_clean,
get_swimmer_linear_reward_true_weight,
)
from active_reward_learning.util.plotting import (
plot_result_max,
plot_result_percentiles,
set_plot_style,
)
from active_reward_learning.util.results import FileExperimentResults
def load(results_folder, experiment_label, config_query=None):
if experiment_label is not None:
result = subprocess.check_output(
f"grep '{experiment_label}' -r {results_folder} "
"| grep config | cut -f 1 -d : | rev | cut -d / -f 2- | rev",
shell=True,
).decode()
subdirs = result.split("\n")
else:
subdirs = [x[0] for x in os.walk(results_folder)]
experiments = []
for i, subdir in enumerate(subdirs):
print(i, subdir)
try:
experiment = FileExperimentResults(subdir)
except Exception as e:
# print(e)
continue
valid = True
if config_query is not None:
for key, value in config_query.items():
if experiment.config[key] != value:
# print(f"{key}, {experiment.config[key]}, {value}")
valid = False
if valid:
experiments.append(experiment)
return experiments
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--results_folder", type=str, default=None)
parser.add_argument("--remote_results_folder", type=str, default=None)
parser.add_argument("--tmp_folder", type=str, default="/tmp")
parser.add_argument("--experiment_label", type=str, default=None)
parser.add_argument("--forbid_running", action="store_true")
parser.add_argument("--allow_failed", action="store_true")
parser.add_argument("--allow_interrupted", action="store_true")
parser.add_argument("--return_percentage", type=float, default=1.0)
parser.add_argument("--return_threshold", type=float, default=None)
parser.add_argument("--regret_threshold", type=float, default=None)
parser.add_argument("--sample_threshold", type=int, default=None)
parser.add_argument("--xmax", type=int, default=None)
parser.add_argument("--pickle_out_folder", type=str, default=None)
return parser.parse_args()
def get_next_style(color, hatch, marker):
color = (color + 1) % len(color_cycle)
hatch = (hatch + 1) % len(hatch_cycle)
marker = (marker + 1) % len(marker_cycle)
return color, hatch, marker
def get_sample_n(steps, values, threshold, lower=False, perc=1):
if lower:
done = np.array(values) <= threshold
else:
done = np.array(values) >= threshold
# print(1, done)
# count how many in the future are done
done = np.cumsum(done[::-1])[::-1]
# print(2, done)
# compute percentage of the future
done = done / (np.arange(len(done))[::-1] + 1)
# print(3, done)
# compute when percentage is above desired value
done = done >= perc
# print(4, done)
try:
done_idx = int(np.argwhere(done)[0])
except IndexError as e:
done_idx = len(done) - 1
print(done_idx, steps[done_idx], steps[-1])
return steps[done_idx]
def get_metric(ex, name, xmax):
steps, values, run_time = ex.get_metric(name, get_runtime=True)
steps, values, run_time = np.array(steps), np.array(values), np.array(run_time)
if xmax is None:
return steps, values, run_time
mask = steps <= xmax
return steps[mask], values[mask], run_time[mask]
def main():
global get_acquisition_function_label
args = parse_args()
assert (
args.return_threshold is not None
or args.regret_threshold is not None
or args.sample_threshold is not None
)
if args.results_folder is None and args.remote_results_folder is None:
raise ValueError("results_folder or remote_results_folder has to be given")
if args.remote_results_folder is not None:
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
tmp_result_folder = os.path.join(args.tmp_folder, timestamp)
subprocess.run(
[
"rsync",
"-av",
"-e ssh",
"--exclude='*.txt'",
"--exclude='*.pkl'",
args.remote_results_folder,
tmp_result_folder,
]
)
args.results_folder = tmp_result_folder
print("Loading '{}' from '{}'.".format(args.experiment_label, args.results_folder))
# list mdps
all_experiments = load(args.results_folder, args.experiment_label, None)
print("Found {} experiments.".format(len(all_experiments)))
all_experiments_by_mdp_af = dict()
all_acquisition_functions = set()
mdp_dict = dict()
mdps = []
for ex in all_experiments:
mdp = ex.config["mdp"]
mdp_str = str(mdp)
if mdp_str not in mdp_dict:
mdp_dict[mdp_str] = dict()
mdps.append(mdp)
af = ex.config["acquisition_function"]
all_acquisition_functions.add(frozendict(af))
af_str = str(af)
af_label = get_acquisition_function_label(af)
if af_label not in mdp_dict[mdp_str]:
mdp_dict[mdp_str][af_label] = [0, 0]
mdp_dict[mdp_str][af_label][0] += 1
if ex.status == "COMPLETED":
mdp_dict[mdp_str][af_label][1] += 1
idx = (frozenset(mdp.items()), frozenset(af.items()))
if idx in all_experiments_by_mdp_af:
all_experiments_by_mdp_af[idx].append(ex)
else:
all_experiments_by_mdp_af[idx] = [ex]
for mdp_str in mdp_dict.keys():
print(mdp_str)
for af, [count, completed] in mdp_dict[mdp_str].items():
print("\t{} COUNT {} COMPLETED {}".format(af, count, completed))
def clean_mdp(mdp):
new_mdp = dict(mdp)
if new_mdp["label"].startswith("gridworld"):
if "robots" in new_mdp:
del new_mdp["robots"]
if "use_feature_representation" in new_mdp:
del new_mdp["use_feature_representation"]
return new_mdp
#####################################
af_to_plot = sorted(
all_acquisition_functions, key=lambda x: get_acquisition_function_label(x)
)
#####################################
# remove for more informative labels in the legend
# get_acquisition_function_label = get_acquisition_function_label_clean
get_acquisition_function_label = partial(get_acquisition_function_label)
metrics_by_mdp_af = dict()
evaluation_return_by_mdp = dict()
results = dict()
for mdp in mdps:
print("Collecting results for {}".format(mdp))
for af in af_to_plot:
if (
"observation_batch_size" in af
and af["observation_batch_size"] is not None
):
observation_batch_size = af["observation_batch_size"]
else:
observation_batch_size = 1
print("\t{}".format(af))
af_label = get_acquisition_function_label(af)
mdp_frozen = frozenset(mdp.items())
af_frozen = frozenset(af.items())
idx = (mdp_frozen, af_frozen)
if idx not in all_experiments_by_mdp_af:
print("Warning experiment unavailable:", idx)
continue
experiments = all_experiments_by_mdp_af[idx]
n_samples_return, n_samples_regret = [], []
return_list, regret_list = [], []
run_time_list = []
for ex in experiments:
print(ex.status)
if (
ex.status == "COMPLETED"
or (not args.forbid_running and ex.status == "RUNNING")
or (args.allow_failed and ex.status == "FAILED")
or (args.allow_interrupted and ex.status == "INTERRUPTED")
):
print(ex.config["acquisition_function"]["label"])
try:
if args.return_threshold is not None:
if "return" in ex.metrics:
steps, values, run_time = get_metric(
ex, "return", args.xmax
)
else:
steps, values, run_time = get_metric(
ex,
"cand_policy_for_mean_of_model_return",
args.xmax,
)
steps *= observation_batch_size
n_samples = get_sample_n(
steps,
values,
args.return_threshold,
lower=False,
perc=args.return_percentage,
)
n_samples_return.append(n_samples)
if args.regret_threshold is not None:
steps, values, run_time = get_metric(
ex, "regret", args.xmax
)
if values[-1] > 0:
print(
f"Warning: Last regret at it {len(values)-1} is > 0"
)
# import pdb; pdb.set_trace()
# print(values)
steps *= observation_batch_size
n_samples = get_sample_n(
steps,
values,
args.regret_threshold,
lower=True,
perc=args.return_percentage,
)
n_samples_regret.append(n_samples)
if args.sample_threshold is not None:
steps, values, run_time = get_metric(
ex, "return", args.xmax
)
steps *= observation_batch_size
return_list.extend(values[steps == args.sample_threshold])
steps, values, run_time = get_metric(
ex, "regret", args.xmax
)
steps *= observation_batch_size
regret_list.extend(values[steps == args.sample_threshold])
run_time_list.extend(
run_time[steps == args.sample_threshold] / 3600.0
)
except Exception as e:
print(e)
if args.return_threshold is not None:
n_samples_return_mean = np.mean(n_samples_return)
n_samples_return_stderr = np.std(n_samples_return) / np.sqrt(
len(n_samples_return)
)
else:
n_samples_return_mean, n_samples_return_stderr = None, None
if args.regret_threshold is not None:
n_samples_regret_mean = np.mean(n_samples_regret)
n_samples_regret_stderr = np.std(n_samples_regret) / np.sqrt(
len(n_samples_regret)
)
n_samples_regret_median = np.median(n_samples_regret)
n_samples_regret_25 =
|
np.percentile(n_samples_regret, 25)
|
numpy.percentile
|
from . import _utils as utils
from ._libs import (string_funcs as _sf,
math as _math)
import re
import numpy as np
from numpy import nan, ndarray
from typing import (Union, Dict, List, Optional, Tuple, Callable, overload,
NoReturn, Set, Iterable, Any, TypeVar, Type, Generator)
from typing import Pattern
import textwrap
class StringClass(object):
def __init__(self, df: 'DataFrame') -> None:
self._df = df
self._dtype_acc = 'S'
self._2d = '_2d'
def _validate_columns(self, column):
if isinstance(column, str):
try:
dtype, loc = self._df._get_col_dtype_loc(column) # type: str, int
except KeyError:
raise KeyError(f'Column "{column}" does not exist in the DataFrame')
if dtype != 'S':
raise ValueError(f'Column name "{column}" is not a str column')
return [column], [loc]
elif isinstance(column, list):
locs = []
for col in column:
try:
dtype, loc = self._df._get_col_dtype_loc(col) # type: str, int
except KeyError:
raise KeyError(f'Column {col} does not exist in the DataFrame')
if dtype != 'S':
raise ValueError(f'Column name "{col}" is not a str column')
locs.append(self._df._column_info[col].loc)
if len(column) != len(set(column)):
raise ValueError('You cannot complete this operation with duplicate columns')
return column, locs
elif column is None:
locs = []
columns = []
for col, dtype, loc in self._df._col_info_iter(): # type: str, str, int
if dtype == 'S':
columns.append(col)
locs.append(loc)
return columns, locs
else:
raise TypeError('`column` must be a column name as a string, a list of string, or None')
def _validate_columns_others(self, column):
if isinstance(column, str):
column = [column]
str_cols = []
str_locs = []
for col, dtype, loc in self._df._col_info_iter(): # type: str, str, int
if dtype == 'S':
str_cols.append(col)
str_locs.append(loc)
if column is None:
return str_cols, str_locs, [], []
if isinstance(column, list):
locs = []
for col in column:
try:
dtype, loc = self._df._get_col_dtype_loc(col) # type: str, int
except KeyError:
raise KeyError(f'Column {col} does not exist in the DataFrame')
if dtype != 'S':
raise ValueError(f'Column name "{col}" is not a str column')
locs.append(self._df._column_info[col].loc)
col_set = set(column)
if len(column) != len(col_set):
raise ValueError('You cannot complete this operation with duplicate columns')
other_cols = []
other_locs = []
for col, loc in zip(str_cols, str_locs):
if col not in col_set:
other_cols.append(col)
other_locs.append(loc)
return column, locs, other_cols, other_locs
else:
raise TypeError('`column` must be a column name as a string, a list of string, or None')
def _create_df(self, arr, dtype, columns, str_reverse_map):
new_data = {dtype: arr}
new_column_info = {col: utils.Column(dtype, i, i) for i, col in enumerate(columns)}
return self._df._construct_from_new(new_data, new_column_info, np.array(columns), str_reverse_map)
def _create_df_all(self, arr, dtype, str_reverse_map):
new_data = {}
if dtype == 'S':
for old_dtype, old_data in self._df._data.items():
if old_dtype == 'S':
new_data['S'] = arr
else:
new_data[old_dtype] = old_data.copy('F')
else:
new_data = {}
add_loc = 0
if dtype in self._df._data:
add_loc = self._df._data[dtype].shape[1]
for old_dtype, old_data in self._df._data.items():
if dtype != 'S':
new_data[old_dtype] = old_data.copy('F')
if dtype in new_data:
new_data[dtype] = np.asfortranarray(
|
np.column_stack((new_data[dtype], arr))
|
numpy.column_stack
|
# https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/tutorials/nuscenes_lidarseg_tutorial.ipynb
import os
import numpy as np
import yaml
# import cv2
from pathlib import Path
from torch.utils import data
from PIL import Image
from pyquaternion import Quaternion
from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud # , Box
from nuscenes.nuscenes import NuScenes
from nuscenes.utils import splits
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
from nuscenes.lidarseg.lidarseg_utils import colormap_to_colors, plt_to_cv2, get_stats, \
get_labels_in_coloring, create_lidarseg_legend, paint_points_label
map_name_from_general_to_segmentation_class = {
'human.pedestrian.adult': 'pedestrian',
'human.pedestrian.child': 'pedestrian',
'human.pedestrian.wheelchair': 'ignore',
'human.pedestrian.stroller': 'ignore',
'human.pedestrian.personal_mobility': 'ignore',
'human.pedestrian.police_officer': 'pedestrian',
'human.pedestrian.construction_worker': 'pedestrian',
'animal': 'ignore',
'vehicle.car': 'car',
'vehicle.motorcycle': 'motorcycle',
'vehicle.bicycle': 'bicycle',
'vehicle.bus.bendy': 'bus',
'vehicle.bus.rigid': 'bus',
'vehicle.truck': 'truck',
'vehicle.construction': 'construction_vehicle',
'vehicle.emergency.ambulance': 'ignore',
'vehicle.emergency.police': 'ignore',
'vehicle.trailer': 'trailer',
'movable_object.barrier': 'barrier',
'movable_object.trafficcone': 'traffic_cone',
'movable_object.pushable_pullable': 'ignore',
'movable_object.debris': 'ignore',
'static_object.bicycle_rack': 'ignore',
'flat.driveable_surface': 'driveable_surface',
'flat.other': 'other_flat',
'flat.sidewalk': 'sidewalk',
'flat.terrain': 'terrain',
'static.manmade': 'manmade',
'static.vegetation': 'vegetation',
'noise': 'ignore',
'static.other': 'ignore',
'vehicle.ego': 'ignore'
}
map_name_from_segmentation_class_to_segmentation_index = {
'ignore': 0,
'barrier': 1,
'bicycle': 2,
'bus': 3,
'car': 4,
'construction_vehicle': 5,
'motorcycle': 6,
'pedestrian': 7,
'traffic_cone': 8,
'trailer': 9,
'truck': 10,
'driveable_surface': 11,
'other_flat': 12,
'sidewalk': 13,
'terrain': 14,
'manmade': 15,
'vegetation': 16
}
class Nuscenes(data.Dataset):
def __init__(self, root,
version='v1.0-trainval',
split='train',
return_ref=False,
has_image=True,
has_pcd=True,
has_label=True):
assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
if version == 'v1.0-trainval':
train_scenes = splits.train
elif version == 'v1.0-test':
train_scenes = splits.test
elif version == 'v1.0-mini':
train_scenes = splits.mini_train
else:
raise NotImplementedError
self.split = split
self.data_path = root
self.return_ref = return_ref
self.nusc = NuScenes(
version=version, dataroot=self.data_path, verbose=False)
self.has_image = has_image
self.map_name_from_general_index_to_segmentation_index = {}
for index in self.nusc.lidarseg_idx2name_mapping:
self.map_name_from_general_index_to_segmentation_index[index] = \
map_name_from_segmentation_class_to_segmentation_index[
map_name_from_general_to_segmentation_class[self.nusc.lidarseg_idx2name_mapping[index]]]
self.mapped_cls_name = {}
for v, k in map_name_from_segmentation_class_to_segmentation_index.items():
self.mapped_cls_name[k] = v
available_scenes = get_available_scenes(self.nusc)
available_scene_names = [s['name'] for s in available_scenes]
train_scenes = list(
filter(lambda x: x in available_scene_names, train_scenes))
train_scenes = set(
[available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes])
if self.has_image:
train_token_list, val_token_list = get_path_infos_cam_lidar(
self.nusc, train_scenes)
else:
train_token_list, val_token_list = get_path_infos_only_lidar(
self.nusc, train_scenes)
if self.split == "train" or self.split == "test":
self.token_list = train_token_list
elif self.split == "val":
self.token_list = val_token_list
else:
raise ValueError("invalid split mode: {}".format(self.split))
print("{}: {} sample: {}".format(
version, self.split, len(self.token_list)))
def __len__(self):
'Denotes the total number of samples'
return len(self.token_list)
def parsePathInfoByIndex(self, index):
return index, ''
def loadLabelByIndex(self, index):
if self.has_image:
lidar_sample_token = self.token_list[index]['lidar_token']
else:
lidar_sample_token = self.token_list[index]
if self.split == 'test':
self.lidarseg_path = None
annotated_data = None
else:
lidarseg_path = os.path.join(self.data_path,
self.nusc.get('lidarseg', lidar_sample_token)['filename'])
annotated_data = np.fromfile(
lidarseg_path, dtype=np.uint8).reshape((-1, 1)) # label
return annotated_data
def loadDataByIndex(self, index):
if self.has_image:
lidar_sample_token = self.token_list[index]['lidar_token']
else:
lidar_sample_token = self.token_list[index]
lidar_path = os.path.join(self.data_path,
self.nusc.get('sample_data', lidar_sample_token)['filename'])
raw_data = np.fromfile(lidar_path, dtype=np.float32).reshape((-1, 5))
if self.split == 'test':
self.lidarseg_path = None
annotated_data = np.expand_dims(
np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
else:
lidarseg_path = os.path.join(self.data_path,
self.nusc.get('lidarseg', lidar_sample_token)['filename'])
annotated_data = np.fromfile(
lidarseg_path, dtype=np.uint8).reshape((-1, 1)) # label
pointcloud = raw_data[:, :4]
sem_label = annotated_data
inst_label = np.zeros(pointcloud.shape[0], dtype=np.int32)
return pointcloud, sem_label, inst_label
def labelMapping(self, sem_label):
sem_label = np.vectorize(self.map_name_from_general_index_to_segmentation_index.__getitem__)(
sem_label) # n, 1
assert sem_label.shape[-1] == 1
sem_label = sem_label[:, 0]
return sem_label
def loadImage(self, index):
cam_sample_token = self.token_list[index]['cam_token']
cam = self.nusc.get('sample_data', cam_sample_token)
image = Image.open(os.path.join(self.nusc.dataroot, cam['filename']))
return image
def getColorMap(self):
'''
useage: coloring = colors[points_label]
:return: A numpy array which has length equal to the number of points in the pointcloud, and each value is
a RGBA array.
'''
colors = colormap_to_colors(
self.nusc.colormap, self.nusc.lidarseg_name2idx_mapping) # Shape: [num_class, 3]
return colors
def mapLidar2Camera(self,
index,
pointcloud,
img_h,
img_w,
min_dist: float = 1.0,
show_lidarseg: bool = True, # False
render_intensity: bool = True, # False
filter_lidarseg_labels=None,
vis_render_img=False
):
lidar_sample_token = self.token_list[index]['lidar_token']
pointsensor = self.nusc.get('sample_data', lidar_sample_token)
assert pointsensor['is_key_frame'], \
'Error: Only pointclouds which are keyframes have lidar segmentation labels. Rendering aborted.'
assert pointsensor['sensor_modality'] == 'lidar', 'Error: Can only render lidarseg labels for lidar, ' \
'not %s!' % pointsensor['sensor_modality']
# Projects a pointcloud into a camera image along with the lidarseg labels
cam_sample_token = self.token_list[index]['cam_token']
cam = self.nusc.get('sample_data', cam_sample_token)
pcl_path = os.path.join(self.nusc.dataroot, pointsensor['filename'])
pc = LidarPointCloud.from_file(pcl_path)
# Points live in the point sensor frame. So they need to be transformed via global to the image plane.
# First step: transform the pointcloud to the ego vehicle frame for the timestamp of the sweep.
cs_record = self.nusc.get(
'calibrated_sensor', pointsensor['calibrated_sensor_token'])
pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
pc.translate(np.array(cs_record['translation']))
# Second step: transform from ego to the global frame.
poserecord = self.nusc.get('ego_pose', pointsensor['ego_pose_token'])
pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
pc.translate(np.array(poserecord['translation']))
# Third step: transform from global into the ego vehicle frame for the timestamp of the image.
poserecord = self.nusc.get('ego_pose', cam['ego_pose_token'])
pc.translate(-np.array(poserecord['translation']))
pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
cs_record = self.nusc.get(
'calibrated_sensor', cam['calibrated_sensor_token'])
pc.translate(-np.array(cs_record['translation']))
pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = pc.points[2, :]
if depths.shape[0] < 10000:
print(depths.shape)
print(pc.points.shape)
print(pcl_path)
# Take the actual picture (matrix multiplication with camera-matrix + renormalization).
points = view_points(pc.points[:3, :], np.array(
cs_record['camera_intrinsic']), normalize=True)
# Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
# Also make sure points are at least 1m in front of the camera to avoid seeing the lidar points on the camera
# casing for non-keyframes which are slightly out of sync.
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > min_dist)
# tmp_mask = depths < min_dist
# print("tmp_mask sum: ", tmp_mask.sum(), depths.shape)
mask = np.logical_and(mask, points[0, :] > 1)
mask = np.logical_and(mask, points[0, :] < img_h - 1)
# mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
mask = np.logical_and(mask, points[1, :] > 1)
mask =
|
np.logical_and(mask, points[1, :] < img_w - 1)
|
numpy.logical_and
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.