| prompt | completion | api |
|---|---|---|
| string, lengths 19 to 879k | string, lengths 3 to 53.8k | string, lengths 8 to 59 |
from typing import Optional, Tuple
import numpy as np
from qtpy.QtCore import QEventLoop, Qt, QThread, QTimer, Signal, Slot
from qtpy.QtWidgets import (
QHBoxLayout,
QVBoxLayout,
QSizePolicy,
QWidget,
QLineEdit,
)
from ..components.dims import Dims
from ..components.dims_constants import DimsMode
from ..util.event import Event
from .qt_scrollbar import ModifiedScrollBar
class QtDims(QWidget):
"""Qt View for Dims model.
Parameters
----------
dims : Dims
Dims object to be passed to Qt object
parent : QWidget, optional
QWidget that will be the parent of this widget
Attributes
----------
dims : Dims
Dims object
sliders : list
List of slider widgets
"""
# Qt Signals for sending events to Qt thread
update_ndim = Signal()
update_axis = Signal(int)
update_range = Signal(int)
update_display = Signal()
update_axis_labels = Signal(int)
def __init__(self, dims: Dims, parent=None):
super().__init__(parent=parent)
self.SLIDERHEIGHT = 22
# We keep a reference to the view:
self.dims = dims
# list of sliders
self.sliders = []
self.axis_labels = []
# True / False if slider is or is not displayed
self._displayed_sliders = []
self._last_used = None
self._play_ready = True # False if currently awaiting a draw event
# Initialises the layout:
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
# Update the number of sliders now that the dims have been added
self._update_nsliders()
# The next lines connect events coming from the model to the Qt event
# system: We need to go through Qt signals so that these events are run
# in the Qt event loop thread. This is all about changing thread
# context for thread-safety purposes
# ndim change listener
def update_ndim_listener(event):
self.update_ndim.emit()
self.dims.events.ndim.connect(update_ndim_listener)
self.update_ndim.connect(self._update_nsliders)
# axis change listener
def update_axis_listener(event):
self.update_axis.emit(event.axis)
self.dims.events.axis.connect(update_axis_listener)
self.update_axis.connect(self._update_slider)
# range change listener
def update_range_listener(event):
self.update_range.emit(event.axis)
self.dims.events.range.connect(update_range_listener)
self.update_range.connect(self._update_range)
# display change listener
def update_display_listener(event):
self.update_display.emit()
self.dims.events.ndisplay.connect(update_display_listener)
self.dims.events.order.connect(update_display_listener)
self.update_display.connect(self._update_display)
# axis labels change listener
def update_axis_labels_listener(event):
self.update_axis_labels.emit(event.axis)
self.dims.events.axis_labels.connect(update_axis_labels_listener)
self.update_axis_labels.connect(self._update_axis_labels)
@property
def nsliders(self):
"""Returns the number of sliders displayed
Returns
-------
nsliders: int
Number of sliders displayed
"""
return len(self.sliders)
@property
def last_used(self):
"""int: Index of slider last used.
"""
return self._last_used
@last_used.setter
def last_used(self, last_used):
if last_used == self.last_used:
return
formerly_used = self.last_used
if formerly_used is not None:
sld = self.sliders[formerly_used]
sld.setProperty('last_used', False)
sld.style().unpolish(sld)
sld.style().polish(sld)
self._last_used = last_used
if last_used is not None:
sld = self.sliders[last_used]
sld.setProperty('last_used', True)
sld.style().unpolish(sld)
sld.style().polish(sld)
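# Note: toggling the dynamic 'last_used' property and re-polishing the widget
# is what lets a Qt style sheet restyle the active slider. A minimal,
# hypothetical QSS rule this pattern would enable (selector and color are
# illustrative, not napari's actual stylesheet):
#
#   ModifiedScrollBar[last_used="true"] { background: #44517e; }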
def _update_slider(self, axis: int):
"""Updates position for a given slider.
Parameters
----------
axis : int
Axis index.
"""
if axis >= len(self.sliders):
return
slider = self.sliders[axis]
mode = self.dims.mode[axis]
if mode == DimsMode.POINT:
slider.setValue(self.dims.point[axis])
self.last_used = axis
def _update_range(self, axis: int):
"""Updates range for a given slider.
Parameters
----------
axis : int
Axis index.
"""
if axis >= len(self.sliders):
return
slider = self.sliders[axis]
_range = self.dims.range[axis]
_range = (_range[0], _range[1] - _range[2], _range[2])
if _range not in (None, (None, None, None)):
if _range[1] == 0:
self._displayed_sliders[axis] = False
self.last_used = None
slider.hide()
else:
if (
not self._displayed_sliders[axis]
and axis not in self.dims.displayed
):
self._displayed_sliders[axis] = True
self.last_used = axis
slider.show()
slider.setMinimum(_range[0])
slider.setMaximum(_range[1])
slider.setSingleStep(_range[2])
slider.setPageStep(_range[2])
else:
self._displayed_sliders[axis] = False
slider.hide()
nsliders = np.sum(self._displayed_sliders)
self.setMinimumHeight(nsliders * self.SLIDERHEIGHT)
def _update_display(self):
"""Updates display for all sliders."""
slider_list = reversed(list(enumerate(self.sliders)))
label_list = reversed(self.axis_labels)
for (axis, slider), label in zip(slider_list, label_list):
if axis in self.dims.displayed:
# Displayed dimensions correspond to non-displayed sliders
self._displayed_sliders[axis] = False
self.last_used = None
slider.hide()
label.hide()
else:
# Non-displayed dimensions correspond to displayed sliders
self._displayed_sliders[axis] = True
self.last_used = axis
slider.show()
label.show()
nsliders = np.sum(self._displayed_sliders)
self.setMinimumHeight(nsliders * self.SLIDERHEIGHT)
def _update_nsliders(self):
"""Updates the number of sliders based on the number of dimensions."""
self._trim_sliders(0)
self._create_sliders(self.dims.ndim)
self._update_display()
for i in range(self.dims.ndim):
self._update_range(i)
if self._displayed_sliders[i]:
self._update_slider(i)
def _update_axis_labels(self, axis):
"""Updates the label for the given axis."""
self.axis_labels[axis].setText(self.dims.axis_labels[axis])
def _create_sliders(self, number_of_sliders: int):
"""Creates sliders to match new number of dimensions.
Parameters
----------
number_of_sliders : int
new number of sliders
"""
# add extra sliders so that `number_of_sliders` sliders are present in total
# add to the beginning of the list
for slider_num in range(self.nsliders, number_of_sliders):
dim_axis = number_of_sliders - slider_num - 1
axis_label = self._create_axis_label_widget(dim_axis)
slider = self._create_range_slider_widget(dim_axis)
# Hard-coded 1:50 ratio. Can be more dynamic as a function
# of the name of the label, but it might be a little bit
# over the top.
current_row = QHBoxLayout()
if axis_label.text() != '':
current_row.addWidget(axis_label, stretch=1)
current_row.addWidget(slider, stretch=50)
else:
current_row.addWidget(slider)
self.layout().addLayout(current_row)
self.axis_labels.insert(0, axis_label)
self.sliders.insert(0, slider)
self._displayed_sliders.insert(0, True)
nsliders = | np.sum(self._displayed_sliders) | numpy.sum |
import multiprocessing
import sys
import itertools as it
import warnings
import numpy as np
import mdtraj as md
from progressbar import ProgressBar
from itertools import combinations_with_replacement
from scattering.utils.utils import get_dt, get_unique_atoms
from scattering.utils.constants import get_form_factor
def compute_van_hove(
trj,
chunk_length,
parallel=False,
water=False,
r_range=(0, 1.0),
bin_width=0.005,
n_bins=None,
self_correlation=True,
periodic=True,
opt=True,
partial=False,
):
"""Compute the Van Hove function of a trajectory. Atom pairs
referenced in partial Van Hove functions are in alphabetical
order. If specific ordering of atom pairs are needed, user should
use compute_partial_van_hove then vhf_from_pvhf to compute total
Van Hove function.
Parameters
----------
trj : mdtraj.Trajectory
trajectory on which to compute the Van Hove function
chunk_length : int
length of time between restarting averaging
parallel : bool, default=False
Use parallel implementation with `multiprocessing`
water : bool
use X-ray form factors for water that account for polarization
r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
Minimum and maximum radii.
bin_width : float, optional, default=0.005
Width of the bins in nanometers.
n_bins : int, optional, default=None
The number of bins. If specified, this will override the `bin_width`
parameter.
self_correlation : bool, default=True
Whether or not to include the self-self correlations
partial : bool, default = False
Whether or not to return a dictionary including partial Van Hove function.
Returns
-------
r : numpy.ndarray
r positions generated by histogram binning
g_r_t : numpy.ndarray
Van Hove function at each time and position
"""
n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])
unique_elements = list(
set([a.element for a in trj.top.atoms if a.element.mass > 0])
)
if parallel:
data = []
for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
# Add a bool to check if self-correlations should be analyzed
self_bool = self_correlation
if elem1 != elem2:
self_bool = False
warnings.warn(
"Total VHF calculation: No self-correlations for {} and {}, setting `self_correlation` to `False`.".format(
elem1, elem2
)
)
data.append(
[
trj,
chunk_length,
"element {}".format(elem1.symbol),
"element {}".format(elem2.symbol),
r_range,
bin_width,
n_bins,
self_bool,
periodic,
opt,
]
)
manager = multiprocessing.Manager()
partial_dict = manager.dict()
jobs = []
version_info = sys.version_info
for d in data:
with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
if version_info.major == 3 and version_info.minor <= 7:
p = pool.Process(target=worker, args=(partial_dict, d))
elif version_info.major == 3 and version_info.minor >= 8:
ctx = multiprocessing.get_context()
p = pool.Process(ctx, target=worker, args=(partial_dict, d))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
r = partial_dict["r"]
del partial_dict["r"]
else:
partial_dict = dict()
for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
# Add a bool to check if self-correlations should be analyzed
self_bool = self_correlation
if elem1 != elem2:
self_bool = False
warnings.warn(
"Total VHF calculation: No self-correlations for {} and {}, setting `self_correlation` to `False`.".format(
elem1, elem2
)
)
if elem1.symbol > elem2.symbol:
temp = elem1
elem1 = elem2
elem2 = temp
print("doing {0} and {1} ...".format(elem1, elem2))
r, g_r_t_partial = compute_partial_van_hove(
trj=trj,
chunk_length=chunk_length,
selection1="element {}".format(elem1.symbol),
selection2="element {}".format(elem2.symbol),
r_range=r_range,
bin_width=bin_width,
n_bins=n_bins,
self_correlation=self_bool,
periodic=periodic,
opt=opt,
)
partial_dict[
("element {}".format(elem1.symbol), "element {}".format(elem2.symbol))
] = g_r_t_partial
if partial:
return partial_dict
norm = 0
g_r_t = None
for key, val in partial_dict.items():
elem1, elem2 = key
concentration1 = (
trj.atom_slice(trj.top.select(elem1)).n_atoms / n_physical_atoms
)
concentration2 = (
trj.atom_slice(trj.top.select(elem2)).n_atoms / n_physical_atoms
)
form_factor1 = get_form_factor(element_name=elem1.split()[1], water=water)
form_factor2 = get_form_factor(element_name=elem2.split()[1], water=water)
coeff = form_factor1 * concentration1 * form_factor2 * concentration2
if g_r_t is None:
g_r_t = | np.zeros_like(val) | numpy.zeros_like |
"""
Code from <NAME>'s IDP-Parrot tool from the Holehouse Lab.
All credit for this code should go to Dan.
See https://idptools-parrot.readthedocs.io/en/latest/api.html#module-parrot.encode_sequence
for more information.
"""
"""
File containing functions for encoding a string of amino acids into a numeric vector.
.............................................................................
parrot was developed by the Holehouse lab
Original release ---- 2020
Question/comments/concerns? Raise an issue on github:
https://github.com/idptools/parrot
Licensed under the MIT license.
"""
import sys
import os
import numpy as np
import torch
ONE_HOT = {'A': 0, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6, 'I': 7, 'K': 8, 'L': 9,
'M': 10, 'N': 11, 'P': 12, 'Q': 13, 'R': 14, 'S': 15, 'T': 16, 'V': 17, 'W': 18, 'Y': 19}
def one_hot(seq):
"""Convert an amino acid sequence to a PyTorch tensor of one-hot vectors
Each amino acid is represented by a length-20 vector with a single 1 and
19 0's. Inputting a sequence with a non-canonical amino acid letter will
cause a ValueError to be raised.
E.g. Glutamic acid (E) is encoded: [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
Parameters
----------
seq : str
An uppercase sequence of amino acids (single letter code)
Returns
-------
torch.IntTensor
a PyTorch tensor representing the encoded sequence
"""
l = len(seq)
m = np.zeros((l, 20))
try:
for i in range(l):
m[i, ONE_HOT[seq[i]]] = 1
except KeyError:
error_str = 'Invalid amino acid detected: ' + seq[i]
raise ValueError(error_str)
return torch.from_numpy(m)
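# Hedged usage sketch (assumed behavior, not from the original module):
#   enc = one_hot('ACD')          # tensor of shape (3, 20)
#   bool(enc[0, ONE_HOT['A']])    # True: first residue is alanine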
def rev_one_hot(seq_vectors):
"""Decode a list of one-hot sequence vectors into amino acid sequences
Parameters
----------
seq_vectors : list of numpy arrays
A list containing sequence vectors
Returns
-------
list
Strings of amino acid sequences
"""
REV_ONE_HOT = 'ACDEFGHIKLMNPQRSTVWY'
sequences = []
for seq_vector in seq_vectors:
seq = []
for residue in seq_vector:
seq.append(REV_ONE_HOT[ | np.argmax(residue) | numpy.argmax |
"""
gui/average
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for three-dimensional averaging of particles
:author: <NAME>, 2017
:copyright: Copyright (c) 2017 Jungmann Lab, Max Planck Institute of Biochemistry
"""
import os.path
import sys
import traceback
import colorsys
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy
from scipy import signal
from PyQt4 import QtCore, QtGui
from .. import io, lib, render
from numpy.lib.recfunctions import stack_arrays
from cmath import rect, phase
from tqdm import tqdm
import scipy.ndimage.filters
DEFAULT_OVERSAMPLING = 1.0
INITIAL_REL_MAXIMUM = 2.0
ZOOM = 10 / 7
N_GROUP_COLORS = 8
@numba.jit(nopython=True, nogil=True)
def render_hist(x, y, oversampling, t_min, t_max):
n_pixel = int(np.ceil(oversampling * (t_max - t_min)))
in_view = (x > t_min) & (y > t_min) & (x < t_max) & (y < t_max)
x = x[in_view]
y = y[in_view]
x = oversampling * (x - t_min)
y = oversampling * (y - t_min)
image = np.zeros((n_pixel, n_pixel), dtype=np.float32)
render._fill(image, x, y)
return len(x), image
@numba.jit(nopython=True, nogil=True)
def render_histxyz(a, b, oversampling, a_min, a_max, b_min, b_max):
n_pixel_a = int(np.ceil(oversampling * (a_max - a_min)))
n_pixel_b = int(np.ceil(oversampling * (b_max - b_min)))
in_view = (a > a_min) & (b > b_min) & (a < a_max) & (b < b_max)
a = a[in_view]
b = b[in_view]
a = oversampling * (a - a_min)
b = oversampling * (b - b_min)
image = np.zeros((n_pixel_b, n_pixel_a), dtype=np.float32)
render._fill(image, a, b)
return len(a), image
def rotate_axis(axis,vx,vy,vz,angle,pixelsize):
if axis == 'z':
vx_rot = np.cos(angle) * vx - np.sin(angle) * vy
vy_rot = np.sin(angle) * vx + np.cos(angle) * vy
vz_rot = vz
elif axis == 'y':
vx_rot = np.cos(angle) * vx + np.sin(angle) * np.divide(vz, pixelsize)
vy_rot = vy
vz_rot = -np.sin(angle) * vx * pixelsize + np.cos(angle) * vz
elif axis == 'x':
vx_rot = vx
vy_rot = np.cos(angle) * vy - np.sin(angle) * np.divide(vz, pixelsize)
vz_rot = np.sin(angle) * vy * pixelsize + np.cos(angle) * vz
return vx_rot, vy_rot, vz_rot
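# Hedged sanity check (illustrative values only): rotating the unit x vector
# by 90 degrees about z should land on the y axis, up to floating-point error.
#   vx, vy, vz = np.array([1.0]), np.array([0.0]), np.array([0.0])
#   rotate_axis('z', vx, vy, vz, np.pi / 2, pixelsize=130)
#   # -> (array([~0.]), array([1.]), array([0.]))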
def compute_xcorr(CF_image_avg, image):
F_image = np.fft.fft2(image)
xcorr = np.fft.fftshift(np.real(np.fft.ifft2((F_image * CF_image_avg))))
return xcorr
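# Note: `CF_image_avg` is expected to already hold the complex conjugate of the
# reference image's FFT (see rotate_groups below), so multiplying in Fourier
# space implements the standard FFT-based cross-correlation; fftshift moves the
# zero-lag term to the center of the output array.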
class ParametersDialog(QtGui.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle('Parameters')
self.setModal(False)
grid = QtGui.QGridLayout(self)
grid.addWidget(QtGui.QLabel('Oversampling:'), 0, 0)
self.oversampling = QtGui.QDoubleSpinBox()
self.oversampling.setRange(1, 200)
self.oversampling.setValue(DEFAULT_OVERSAMPLING)
self.oversampling.setDecimals(1)
self.oversampling.setKeyboardTracking(False)
self.oversampling.valueChanged.connect(self.window.updateLayout)
grid.addWidget(self.oversampling, 0, 1)
self.iterations = QtGui.QSpinBox()
self.iterations.setRange(1, 1)
self.iterations.setValue(1)
class View(QtGui.QLabel):
def __init__(self, window):
super().__init__()
self.window = window
self.setMinimumSize(1, 1)
self.setAlignment(QtCore.Qt.AlignCenter)
self.setAcceptDrops(True)
self._pixmap = None
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == '.hdf5':
self.open(path)
def resizeEvent(self, event):
if self._pixmap is not None:
self.set_pixmap(self._pixmap)
def set_image(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap('magma')(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype('uint8')
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order='C')
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
self._pixmap = QtGui.QPixmap.fromImage(qimage)
self.set_pixmap(self._pixmap)
def set_pixmap(self, pixmap):
self.setPixmap(pixmap.scaled(self.width(), self.height(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.FastTransformation))
def update_image(self, *args):
oversampling = self.window.parameters_dialog.oversampling.value()
t_min = -self.r
t_max = self.r
N_avg, image_avg = render.render_hist(self.locs, oversampling, t_min, t_min, t_max, t_max)
self.set_image(image_avg)
class DatasetDialog(QtGui.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle('Datasets')
self.setModal(False)
self.layout = QtGui.QVBoxLayout()
self.checks = []
self.setLayout(self.layout)
def add_entry(self,path):
c = QtGui.QCheckBox(path)
self.layout.addWidget(c)
self.checks.append(c)
self.checks[-1].setChecked(True)
class Window(QtGui.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Picasso: Average3')
self.resize(1024, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, 'icons', 'average.ico')
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setAcceptDrops(True)
self.parameters_dialog = ParametersDialog(self)
self.dataset_dialog = DatasetDialog(self)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu('File')
open_action = file_menu.addAction('Open')
open_action.setShortcut(QtGui.QKeySequence.Open)
open_action.triggered.connect(self.open)
file_menu.addAction(open_action)
save_action = file_menu.addAction('Save')
save_action.setShortcut(QtGui.QKeySequence.Save)
save_action.triggered.connect(self.save)
file_menu.addAction(save_action)
process_menu = menu_bar.addMenu('Process')
parameters_action = process_menu.addAction('Parameters')
parameters_action.setShortcut('Ctrl+P')
parameters_action.triggered.connect(self.parameters_dialog.show)
dataset_action = process_menu.addAction('Datasets')
dataset_action.triggered.connect(self.dataset_dialog.show)
self.status_bar = self.statusBar()
self._pixmap = None
self.locs = []
self.z_state = []
self.group_index = []
self.infos = []
self.locs_paths = []
self._mode = 'Zoom'
self._pan = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self.index_blocks = []
self._drift = []
# Define DisplaySettingsDialog
self.viewxy = QtGui.QLabel('')
self.viewxz = QtGui.QLabel('')
self.viewyz = QtGui.QLabel('')
self.viewcp = QtGui.QLabel('')
minsize = 512
self.viewxy.setFixedWidth(minsize)
self.viewxy.setFixedHeight(minsize)
self.viewxz.setFixedWidth(minsize)
self.viewxz.setFixedHeight(minsize)
self.viewyz.setFixedWidth(minsize)
self.viewyz.setFixedHeight(minsize)
self.viewcp.setFixedWidth(minsize)
self.viewcp.setFixedHeight(minsize)
# Define layout
display_groupbox = QtGui.QGroupBox('Display')
displaygrid = QtGui.QGridLayout(display_groupbox)
displaygrid.addWidget(QtGui.QLabel('XY'), 0, 0)
displaygrid.addWidget(self.viewxy, 1, 0)
displaygrid.addWidget(QtGui.QLabel('XZ'), 0, 1)
displaygrid.addWidget(self.viewxz, 1, 1)
displaygrid.addWidget(QtGui.QLabel('YZ'), 2, 0)
displaygrid.addWidget(self.viewyz, 3, 0)
displaygrid.addWidget(QtGui.QLabel('CP'), 2, 1)
displaygrid.addWidget(self.viewcp, 3, 1)
button_groupbox = QtGui.QGroupBox('Buttons')
buttongrid = QtGui.QGridLayout(button_groupbox)
rotation_groupbox = QtGui.QGroupBox('Rotation + Translation')
rotationgrid = QtGui.QGridLayout(rotation_groupbox)
centerofmassbtn = QtGui.QPushButton("Center of Mass XYZ")
axis_groupbox = QtGui.QGroupBox('Axis')
axisgrid = QtGui.QGridLayout(axis_groupbox)
self.x_axisbtn = QtGui.QRadioButton("X")
self.y_axisbtn = QtGui.QRadioButton("Y")
self.z_axisbtn = QtGui.QRadioButton("Z")
self.z_axisbtn.setChecked(True)
axisgrid.addWidget(self.x_axisbtn, 0, 0)
axisgrid.addWidget(self.y_axisbtn, 0, 1)
axisgrid.addWidget(self.z_axisbtn, 0, 2)
proj_groupbox = QtGui.QGroupBox('Projection')
projgrid = QtGui.QGridLayout(proj_groupbox)
self.xy_projbtn = QtGui.QRadioButton("XY")
self.yz_projbtn = QtGui.QRadioButton("YZ")
self.xz_projbtn = QtGui.QRadioButton("XZ")
self.xy_projbtn.setChecked(True)
projgrid.addWidget(self.xy_projbtn, 0, 0)
projgrid.addWidget(self.yz_projbtn, 0, 1)
projgrid.addWidget(self.xz_projbtn, 0, 2)
rotatebtn = QtGui.QPushButton("Rotate")
self.radio_sym = QtGui.QRadioButton("x symmetry")
self.symEdit = QtGui.QSpinBox()
self.symEdit.setRange(2, 50)
self.symEdit.setValue(8)
self.radio_sym_custom = QtGui.QRadioButton("custom symmetry")
self.symcustomEdit = QtGui.QLineEdit("90,180,270")
deg_groupbox = QtGui.QGroupBox('Degrees')
deggrid = QtGui.QGridLayout(deg_groupbox)
self.full_degbtn = QtGui.QRadioButton("Full")
self.part_degbtn = QtGui.QRadioButton("Part")
self.degEdit = QtGui.QTextEdit()
self.degEdit = QtGui.QSpinBox()
self.degEdit.setRange(1, 10)
self.degEdit.setValue(5)
deggrid.addWidget(self.full_degbtn, 0, 0)
deggrid.addWidget(self.part_degbtn, 0, 1)
deggrid.addWidget(self.degEdit, 0, 2)
self.full_degbtn.setChecked(True)
# Rotation Groupbox
rotationgrid.addWidget(axis_groupbox, 0, 0, 1, 2)
rotationgrid.addWidget(proj_groupbox, 1, 0, 1, 2)
rotationgrid.addWidget(deg_groupbox, 2, 0, 1, 2)
rotationgrid.addWidget(rotatebtn, 3, 0, 1, 2)
rotationgrid.addWidget(self.symEdit, 4, 0)
rotationgrid.addWidget(self.radio_sym, 4, 1)
rotationgrid.addWidget(self.radio_sym_custom, 5, 0)
rotationgrid.addWidget(self.symcustomEdit, 5, 1)
buttongrid.addWidget(centerofmassbtn, 0, 0)
buttongrid.addWidget(rotation_groupbox, 1, 0)
centerofmassbtn.clicked.connect(self.centerofmass)
rotatebtn.clicked.connect(self.rotate_groups)
self.translatebtn = QtGui.QCheckBox("Translate only")
self.flipbtn = QtGui.QCheckBox("Consider flipped structures")
self.alignxbtn = QtGui.QPushButton("Align X")
self.alignybtn = QtGui.QPushButton("Align Y")
self.alignzzbtn = QtGui.QPushButton("Align Z_Z")
self.alignzybtn = QtGui.QPushButton("Align Z_Y")
self.translatexbtn = QtGui.QPushButton("Translate X")
self.translateybtn = QtGui.QPushButton("Translate Y")
self.translatezbtn = QtGui.QPushButton("Translate Z")
self.rotatexy_convbtn = QtGui.QPushButton('Rotate XY - Convolution')
self.scorebtn = QtGui.QPushButton('Calculate Score')
operate_groupbox = QtGui.QGroupBox('Operate')
operategrid = QtGui.QGridLayout(operate_groupbox)
rotationgrid.addWidget(self.translatebtn, 7, 0)
rotationgrid.addWidget(self.flipbtn, 8, 0)
self.x_range = QtGui.QLineEdit('-3,3')
rotationgrid.addWidget(QtGui.QLabel('x-Range (Px)'), 9, 0)
rotationgrid.addWidget(self.x_range, 9, 1)
self.y_range = QtGui.QLineEdit('-3,3')
rotationgrid.addWidget(QtGui.QLabel('y-Range (Px)'), 10, 0)
rotationgrid.addWidget(self.y_range, 10, 1)
self.z_range = QtGui.QLineEdit('-1000,1000')
rotationgrid.addWidget(QtGui.QLabel('z-Range (nm)'), 11, 0)
rotationgrid.addWidget(self.z_range, 11, 1)
self.z_range.textChanged.connect(self.adjust_z)
self.x_range.textChanged.connect(self.adjust_xy)
self.y_range.textChanged.connect(self.adjust_xy)
operategrid.addWidget(self.alignxbtn, 0, 1)
operategrid.addWidget(self.alignybtn, 1, 1)
operategrid.addWidget(self.alignzzbtn, 2, 1)
operategrid.addWidget(self.alignzybtn, 3, 1)
operategrid.addWidget(self.translatexbtn, 0, 0)
operategrid.addWidget(self.translateybtn, 1, 0)
operategrid.addWidget(self.translatezbtn, 2, 0)
operategrid.addWidget(self.rotatexy_convbtn,4,0)
operategrid.addWidget(self.scorebtn,4,1)
self.rotatexy_convbtn.clicked.connect(self.rotatexy_convolution)
self.alignxbtn.clicked.connect(self.align_x)
self.alignybtn.clicked.connect(self.align_y)
self.alignzzbtn.clicked.connect(self.align_zz)
self.alignzybtn.clicked.connect(self.align_zy)
self.translatexbtn.clicked.connect(self.translate_x)
self.translateybtn.clicked.connect(self.translate_y)
self.translatezbtn.clicked.connect(self.translate_z)
self.scorebtn.clicked.connect(self.calculate_score)
buttongrid.addWidget(operate_groupbox, 2, 0)
self.contrastEdit = QtGui.QDoubleSpinBox()
self.contrastEdit.setDecimals(1)
self.contrastEdit.setRange(0, 10)
self.contrastEdit.setValue(0.5)
self.contrastEdit.setSingleStep(0.1)
self.contrastEdit.valueChanged.connect(self.updateLayout)
self.grid = QtGui.QGridLayout()
self.grid.addWidget(display_groupbox, 0, 0, 2, 1)
self.grid.addWidget(button_groupbox, 0, 1, 1, 1)
contrast_groupbox = QtGui.QGroupBox('Contrast')
contrastgrid = QtGui.QGridLayout(contrast_groupbox)
contrastgrid.addWidget(self.contrastEdit)
buttongrid.addWidget(contrast_groupbox)
MODEL_X_DEFAULT = '0,20,40,60,0,20,40,60,0,20,40,60'
MODEL_Y_DEFAULT = '0,20,40,0,20,40,0,20,40,0,20,40'
MODEL_Z_DEFAULT = '0,0,0,0,0,0,0,0,0,0,0,0'
self.modelchk = QtGui.QCheckBox("Use Model")
self.model_x = QtGui.QLineEdit(MODEL_X_DEFAULT)
self.model_y = QtGui.QLineEdit(MODEL_Y_DEFAULT)
self.model_z = QtGui.QLineEdit(MODEL_Z_DEFAULT)
self.model_preview_btn = QtGui.QPushButton('Preview')
self.model_preview_btn.clicked.connect(self.model_preview)
self.modelblurEdit = QtGui.QDoubleSpinBox()
self.modelblurEdit.setDecimals(1)
self.modelblurEdit.setRange(0, 10)
self.modelblurEdit.setValue(0.5)
self.modelblurEdit.setSingleStep(0.1)
self.pixelsizeEdit = QtGui.QSpinBox()
self.pixelsizeEdit.setRange(1,999)
self.pixelsizeEdit.setValue(130)
model_groupbox = QtGui.QGroupBox('Model')
modelgrid = QtGui.QGridLayout(model_groupbox)
modelgrid.addWidget(self.modelchk,0,0)
modelgrid.addWidget(QtGui.QLabel('X-Coordinates'),1,0)
modelgrid.addWidget(self.model_x,1,1)
modelgrid.addWidget(QtGui.QLabel('Y-Coordinates'),2,0)
modelgrid.addWidget(self.model_y,2,1)
modelgrid.addWidget(QtGui.QLabel('Z-Coordinates'),3,0)
modelgrid.addWidget(self.model_z,3,1)
modelgrid.addWidget(QtGui.QLabel('Blur:'),4, 0)
modelgrid.addWidget(self.modelblurEdit, 4, 1)
modelgrid.addWidget(QtGui.QLabel('Pixelsize:'),5,0)
modelgrid.addWidget(self.pixelsizeEdit, 5, 1)
modelgrid.addWidget(self.model_preview_btn, 6 ,0)
modelgrid.addWidget(self.modelchk, 6, 1)
buttongrid.addWidget(model_groupbox)
mainWidget = QtGui.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.status_bar.showMessage('Average3 ready.')
def open(self):
path = QtGui.QFileDialog.getOpenFileName(self, 'Open localizations', filter='*.hdf5')
if path:
self.add(path)
def save(self, path):
n_channels = len(self.locs)
for i in range(n_channels):
cx = self.infos[i][0]['Width'] / 2
cy = self.infos[i][0]['Height'] / 2
out_locs = self.locs[i].copy()
out_locs.x += cx
out_locs.y += cy
info = self.infos[i] + [{'Generated by': 'Picasso Average3'}]
if not self.z_state[i]:
out_locs = lib.remove_from_rec(out_locs, 'z')
out_path = os.path.splitext(self.locs_paths[i])[0] + '_avg3.hdf5'
path = QtGui.QFileDialog.getSaveFileName(self, 'Save localizations', out_path, filter='*.hdf5')
io.save_locs(path, out_locs, info)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == '.hdf5':
print('Opening {} ..'.format(path))
self.add(path)
def add(self, path, rendermode=True):
try:
locs, info = io.load_locs(path, qt_parent=self)
except io.NoMetadataFileError:
return
if len(self.locs) == 0:
self.pixelsize = 0
if not hasattr(locs, 'group'):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('Error')
msgBox.setText('Datafile does not contain group information. Please load file with picked localizations.')
msgBox.exec_()
else:
locs = lib.ensure_sanity(locs, info)
if not hasattr(locs, 'z'):
locs = lib.append_to_rec(locs, locs.x.copy(), 'z')
self.pixelsize = 1
has_z = False
else:
has_z = True
if self.pixelsize == 0:
pixelsize,ok = QtGui.QInputDialog.getInt(self,"Pixelsize Dialog","Please enter the pixelsize in nm", 130)
if ok:
self.pixelsize = pixelsize
else:
self.pixelsize = 130
self.locs.append(locs)
self.z_state.append(has_z)
self.infos.append(info)
self.locs_paths.append(path)
self.index_blocks.append(None)
self._drift.append(None)
self.dataset_dialog.add_entry(path)
self.dataset_dialog.checks[-1].stateChanged.connect(self.updateLayout)
cx = self.infos[-1][0]['Width'] / 2
cy = self.infos[-1][0]['Height'] / 2
self.locs[-1].x -= cx
self.locs[-1].y -= cy
if len(self.locs) == 1:
self.median_lp = np.mean([np.median(locs.lpx), np.median(locs.lpy)])
if hasattr(locs, 'group'):
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[groupcopy]
if rendermode:
self.fit_in_view(autoscale=True)
else:
if rendermode:
self.update_scene()
self.oversampling = 1
if len(self.locs) == 1:
self.t_min = np.min([np.min(locs.x),np.min(locs.y)])
self.t_max = np.max([np.max(locs.x),np.max(locs.y)])
self.z_min = np.min(locs.z)
self.z_max = np.max(locs.z)
else:
self.t_min = np.min([np.min(locs.x),np.min(locs.y),self.t_min])
self.t_max = np.max([np.max(locs.x),np.max(locs.y),self.t_max])
self.z_min = np.min([np.min(locs.z),self.z_min])
self.z_max = np.max([np.max(locs.z), self.z_max])
if len(self.locs) == 1:
print('Dataset loaded from {}.'.format(path))
else:
print('Dataset loaded from {}, Total number of datasets {}.'.format(path, len(self.locs)))
#CREATE GROUP INDEX
if hasattr(locs, 'group'):
groups = np.unique(locs.group)
n_groups = len(groups)
n_locs = len(locs)
group_index = scipy.sparse.lil_matrix((n_groups, n_locs), dtype=np.bool)
progress = lib.ProgressDialog('Creating group index', 0, len(groups), self)
progress.set_value(0)
for i, group in enumerate(groups):
index = np.where(locs.group == group)[0]
group_index[i, index] = True
progress.set_value(i+1)
self.group_index.append(group_index)
self.n_groups = n_groups
os.chdir(os.path.dirname(path))
self.calculate_radii()
self.oversampling = 4
self.updateLayout()
def updateLayout(self):
if len(self.locs) > 0:
pixmap1, pixmap2, pixmap3 = self.hist_multi_channel(self.locs)
self.viewxy.setPixmap(pixmap1)
self.viewxz.setPixmap(pixmap2)
self.viewyz.setPixmap(pixmap3)
def centerofmass_all(self):
#Align all by center of mass
n_channels = len(self.locs)
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
#stack arrays
sel_locs_x = self.locs[j].x
sel_locs_y = self.locs[j].y
sel_locs_z = self.locs[j].z
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
out_locs_x=stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y=stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z=stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
self.locs[j].x -= mean_x
self.locs[j].y -= mean_y
self.locs[j].z -= mean_z
def calculate_radii(self):
#CALCULATE PROPER R VALUES
n_channels = len(self.locs)
self.r = 0
self.r_z = 0
for j in range(n_channels):
self.r = np.max([3 * np.sqrt(np.mean(self.locs[j].x**2 + self.locs[j].y**2)),self.r])
self.r_z = np.max([5 * np.sqrt(np.mean(self.locs[j].z**2)),self.r_z])
self.t_min = -self.r
self.t_max = self.r
self.z_min = -self.r_z
self.z_max = self.r_z
self.z_min_load = self.z_min.copy()
self.z_max_load = self.z_max.copy()
def centerofmass(self):
print('Aligning by center of mass.. ', end='', flush=True)
n_groups = self.n_groups
n_channels = len(self.locs)
progress = lib.ProgressDialog('Aligning by center of mass', 0, n_groups, self)
progress.set_value(0)
for i in range(n_groups):
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
index = self.group_index[j][i, :].nonzero()[1]
# stack arrays
sel_locs_x = self.locs[j].x[index]
sel_locs_y = self.locs[j].y[index]
sel_locs_z = self.locs[j].z[index]
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
progress.set_value(i+1)
out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
index = self.group_index[j][i, :].nonzero()[1]
self.locs[j].x[index] -= mean_x
self.locs[j].y[index] -= mean_y
self.locs[j].z[index] -= mean_z
self.calculate_radii()
self.updateLayout()
print('Complete.')
def histtoImage(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap('magma')(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype('uint8')
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order='C')
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(self.viewxy.width(), np.round(self.viewxy.height()*Y/X), QtCore.Qt.KeepAspectRatioByExpanding)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def hist_multi_channel(self, locs):
oversampling = self.parameters_dialog.oversampling.value()
self.oversampling = oversampling
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
renderings = []
for i in range(n_channels):
if self.dataset_dialog.checks[i].isChecked():
renderings.append(render.render_hist3d(locs[i], oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize))
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
pixmap1 = self.pixmap_from_colors(images,colors,2)
pixmap2 = self.pixmap_from_colors(images,colors,0)
pixmap3 = self.pixmap_from_colors(images,colors,1)
return pixmap1, pixmap2, pixmap3
def pixmap_from_colors(self,images,colors,axisval):
if axisval == 2:
image = [np.sum(_, axis=axisval) for _ in images]
else:
image = [np.transpose(np.sum(_, axis=axisval)) for _ in images]
image = np.array([self.scale_contrast(_) for _ in image])
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
for color, image in zip(colors, image):
bgra[:, :, 0] += color[2] * image
bgra[:, :, 1] += color[1] * image
bgra[:, :, 2] += color[0] * image
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(self.viewxy.width(), np.round(self.viewxy.height()*Y/X), QtCore.Qt.KeepAspectRatioByExpanding)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def align_x(self):
print('Align X')
self.align_all('x')
def align_y(self):
print('Align Y')
self.align_all('y')
def align_zz(self):
print('Align Z')
self.align_all('zz')
def align_zy(self):
print('Align Z')
self.align_all('zy')
def translate_x(self):
print('Translate X')
self.translate('x')
def translate_y(self):
print('Translate Y')
self.translate('y')
def translate_z(self):
print('Translate Z')
self.translate('z')
def translate(self, translateaxis):
renderings = [render.render_hist3d(_, self.oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize) for _ in self.locs]
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
if translateaxis == 'x':
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
elif translateaxis == 'y':
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=1) for _ in image]
elif translateaxis == 'z':
image = [np.sum(_, axis=1) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1, 1 ,1)
for element in signalimg:
plt.plot(element)
n_groups = self.group_index[0].shape[0]
print('Translating..')
for i in tqdm(range(n_groups)):
self.status_bar.showMessage('Group {} / {}.'.format(i, n_groups))
self.translate_group(signalimg, i, translateaxis)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
self.centerofmass_all()
self.updateLayout()
self.status_bar.showMessage('Done!')
def translate_group(self, signalimg, group, translateaxis):
n_channels = len(self.locs)
all_xcorr = np.zeros((1, n_channels))
all_da = np.zeros((1, n_channels))
if translateaxis == 'x':
proplane = 'xy'
elif translateaxis == 'y':
proplane = 'xy'
elif translateaxis == 'z':
proplane = 'xz'
plotmode = 0
for j in range(n_channels):
if plotmode:
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
plt.plot(signalimg[j])
ax2 = fig.add_subplot(1, 3, 2)
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
xcorr_max = 0.0
plane = self.render_planes(x_rot, y_rot, z_rot, proplane, self.pixelsize) #
if translateaxis == 'x':
projection = np.sum(plane, axis=0)
elif translateaxis == 'y':
projection = np.sum(plane, axis=1)
elif translateaxis == 'z':
projection = np.sum(plane, axis=1)
if plotmode:
plt.plot(projection)
#print('Step X')
#ax3 = fig.add_subplot(1,3,3)
#plt.imshow(plane, interpolation='nearest', cmap=plt.cm.ocean)
corrval = np.max(signal.correlate(signalimg[j],projection))
shiftval = np.argmax(signal.correlate(signalimg[j], projection))-len(signalimg[j])+1
all_xcorr[0,j] = corrval
all_da[0,j] = shiftval/self.oversampling
if plotmode:
plt.show()
#value with biggest cc value from table
maximumcc = np.argmax(np.sum(all_xcorr,axis = 1))
dafinal = np.mean(all_da[maximumcc,:])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
if translateaxis == 'x':
self.locs[j].x[index] += dafinal
elif translateaxis == 'y':
self.locs[j].y[index] += dafinal
elif translateaxis == 'z':
self.locs[j].z[index] += dafinal*self.pixelsize
def adjust_z(self):
z_range_str = np.asarray((self.z_range.text()).split(","))
z_range = []
for element in z_range_str:
try:
z_range.append(float(element))
except ValueError:
pass
z_min = z_range[0]
z_max = z_range[1]
self.z_min = np.max([z_min, self.z_min_load])
self.z_max = np.min([z_max, self.z_max_load])
print('Z min {}, Z max {}'.format(self.z_min, self.z_max))
self.updateLayout()
def adjust_xy(self):
x_range_str = np.asarray((self.x_range.text()).split(","))
x_range = []
for element in x_range_str:
try:
x_range.append(float(element))
except ValueError:
pass
x_min = x_range[0]
x_max = x_range[1]
self.x_min = np.max([x_min, self.t_min])
self.x_max = np.min([x_max, self.t_max])
print('X min {}, X max {}'.format(self.x_min, self.x_max))
y_range_str = np.asarray((self.y_range.text()).split(","))
y_range = []
for element in y_range_str:
try:
y_range.append(float(element))
except ValueError:
pass
y_min = y_range[0]
y_max = y_range[1]
self.y_min = np.max([y_min, self.t_min])
self.y_max = np.min([y_max, self.t_max])
print('Y min {}, Y max {}'.format(self.y_min, self.y_max))
self.updateLayout()
def rotatexy_convolution_group(self, CF_image_avg, angles, group, rotaxis, proplane):
n_channels = len(self.locs)
allrot = []
alldx = []
alldy = []
alldz = []
n_angles = len(angles)
all_xcorr = np.zeros((n_angles,n_channels))
all_da = np.zeros((n_angles,n_channels))
all_db = np.zeros((n_angles,n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
xcorr_max = 0.0
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
# rotate locs
x_rot, y_rot, z_rot = rotate_axis(rotaxis, x_original, y_original, z_original, angle, self.pixelsize)
# render group image for plane
image = self.render_planes(x_rot, y_rot, z_rot, proplane, self.pixelsize)
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax1.set_aspect('equal')
plt.imshow(image, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = np.sum(np.multiply(CF_image_avg[j], image))
all_xcorr[k,j] = xcorr
#value with biggest cc value from table
maximumcc = np.argmax(np.sum(all_xcorr,axis = 1))
rotfinal = angles[maximumcc]
dafinal = np.mean(all_da[maximumcc,:])
dbfinal = np.mean(all_db[maximumcc,:])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(rotaxis, x_original, y_original, z_original, rotfinal, self.pixelsize)
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
def rotatexy_convolution(self):
#TODO: re-write this with kwargs at some point
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = 'x'
elif self.y_axisbtn.isChecked():
rotaxis = 'y'
elif self.z_axisbtn.isChecked():
rotaxis = 'z'
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2*np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(-degree/360*2*np.pi, degree/360*2*np.pi, a_step)
renderings = [render.render_hist3d(_, self.oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize) for _ in self.locs]
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
#DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = 'xy'
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = 'yz'
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = 'xz'
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
#Change CF image for symmetry
if self.radio_sym.isChecked():
print('Using symmetry.')
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for i in range(symmetry-1):
image[0] += scipy.ndimage.interpolation.rotate(imageold,((i+1)*360/symmetry) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.radio_sym_custom.isChecked():
print('Using custom symmetry.')
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(','))
print(symmetry_txt)
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(imageold, float(degree) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = image
# TODO: blur on average !!!
print('Convolving..')
for i in tqdm(range(n_groups)):
self.status_bar.showMessage('Group {} / {}.'.format(i,n_groups))
self.rotatexy_convolution_group(CF_image_avg, angles, i, rotaxis, proplane)
self.updateLayout()
self.status_bar.showMessage('Done!')
def rotate_groups(self):
#Read out values from radiobuttons
#TODO: maybe re-write this with kwargs
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = 'x'
elif self.y_axisbtn.isChecked():
rotaxis = 'y'
elif self.z_axisbtn.isChecked():
rotaxis = 'z'
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2*np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(-degree/360*2*np.pi, degree/360*2*np.pi, a_step)
renderings = [render.render_hist3d(_, self.oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize) for _ in self.locs]
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
#DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = 'xy'
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = 'yz'
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = 'xz'
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
if self.radio_sym.isChecked():
print('Radio sym')
fig = plt.figure(figsize = (5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for i in range(symmetry-1):
image[0] += scipy.ndimage.interpolation.rotate(imageold,((i+1)*360/symmetry) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
#TODO: Sort these functions out, combine with radio_sym / also for convolving.
if self.radio_sym_custom.isChecked():
print('Using custom symmetry.')
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(','))
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(imageold, float(degree) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = [np.conj(np.fft.fft2(_)) for _ in image]
#n_pixel, _ = image_avg.shape
#image_half = n_pixel / 2
# TODO: blur on average !!!
print('Rotating..')
for i in tqdm(range(n_groups)):
self.status_bar.showMessage('Group {} / {}.'.format(i,n_groups))
self.align_group(CF_image_avg, angles, i, rotaxis, proplane)
self.updateLayout()
self.status_bar.showMessage('Done!')
def getUIstate(self):
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = 'x'
elif self.y_axisbtn.isChecked():
rotaxis = 'y'
elif self.z_axisbtn.isChecked():
rotaxis = 'z'
proplane = []
if self.xy_projbtn.isChecked():
proplane = 'xy'
elif self.yz_projbtn.isChecked():
proplane = 'yz'
elif self.xz_projbtn.isChecked():
proplane = 'xz'
return rotaxis, proplane
def projectPlanes(self, images, proplane):
if proplane == 'xy':
image = [np.sum(_, axis=2) for _ in images]
elif proplane == 'yz':
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif proplane == 'xz':
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
return image
def generate_template(self):
model_x_str = np.asarray((self.model_x.text()).split(","))
model_y_str = np.asarray((self.model_y.text()).split(","))
model_z_str = np.asarray((self.model_z.text()).split(","))
model_x = []
model_y = []
model_z = []
for element in model_x_str:
try:
model_x.append(float(element))
except ValueError:
pass
for element in model_y_str:
try:
model_y.append(float(element))
except ValueError:
pass
for element in model_z_str:
try:
model_z.append(float(element))
except ValueError:
pass
pixelsize = self.pixelsizeEdit.value()
blur = self.modelblurEdit.value()
# Center of mass
model_x = np.array(model_x)/pixelsize
model_y = | np.array(model_y) | numpy.array |
# coding: utf-8
# In[ ]:
import csv
import math
import numpy as np
import pandas
import sys
import warnings
from collections import Counter
from csv import reader
from sklearn import tree
from sklearn import decomposition
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings("ignore")
def load_csv(fileName):
file = open(fileName, "r")
lines = reader(file)
dataset = list(lines)
return dataset
def stringColumnToFloat(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
def projection_simplex(v, z=1):
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
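# Hedged worked example (numbers are made up): projecting [0.5, 1.5] onto the
# unit simplex shifts both entries by theta = 0.5 and clips at zero,
#   projection_simplex(np.array([0.5, 1.5]), z=1)  # -> array([0., 1.])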
class MulticlassSVM(BaseEstimator, ClassifierMixin):
def __init__(self, C=1, max_iter=50, tol=0.05, random_state=None, verbose=0):
self.C = C
self.max_iter = max_iter
self.tol = tol
self.random_state = random_state
self.verbose = verbose
def _partial_gradient(self, X, y, i):
# Partial gradient for the ith sample.
g = np.dot(X[i], self.coef_.T) + 1
g[y[i]] -= 1
return g
def _violation(self, g, y, i):
# Optimality violation for the ith sample.
smallest = np.inf
for k in range(g.shape[0]):
if k == y[i] and self.dual_coef_[k, i] >= self.C:
continue
elif k != y[i] and self.dual_coef_[k, i] >= 0:
continue
smallest = min(smallest, g[k])
return g.max() - smallest
def _solve_subproblem(self, g, y, norms, i):
Ci = np.zeros(g.shape[0])
Ci[y[i]] = self.C
beta_hat = norms[i] * (Ci - self.dual_coef_[:, i]) + g / norms[i]
z = self.C * norms[i]
beta = projection_simplex(beta_hat, z)
return Ci - self.dual_coef_[:, i] - beta / norms[i]
def fit(self, X, y):
n_samples, n_features = X.shape
self._label_encoder = LabelEncoder()
y = self._label_encoder.fit_transform(y)
n_classes = len(self._label_encoder.classes_)
self.dual_coef_ = np.zeros((n_classes, n_samples), dtype=np.float64)
self.coef_ = np.zeros((n_classes, n_features))
norms = np.sqrt(np.sum(X ** 2, axis=1))
rs = check_random_state(self.random_state)
ind = np.arange(n_samples)
rs.shuffle(ind)
violation_init = None
for it in range(self.max_iter):
violation_sum = 0
for ii in range(n_samples):
i = ind[ii]
if norms[i] == 0:
continue
g = self._partial_gradient(X, y, i)
v = self._violation(g, y, i)
violation_sum += v
if v < 1e-12:
continue
delta = self._solve_subproblem(g, y, norms, i)
self.coef_ += (delta * X[i][:, np.newaxis]).T
self.dual_coef_[:, i] += delta
if it == 0:
violation_init = violation_sum
vratio = violation_sum / violation_init
if vratio < self.tol:
break
return self
def predict(self, X):
decision = np.dot(X, self.coef_.T)
pred = decision.argmax(axis=1)
# print(pred)
return pred
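# Hedged usage sketch (the feature matrix and labels below are invented):
#   X = np.random.rand(20, 5)
#   y = np.random.randint(0, 3, size=20)
#   clf = MulticlassSVM(C=0.1, tol=0.01, max_iter=50, random_state=0)
#   pred = clf.fit(X, y).predict(X)   # encoded class indices, shape (20,)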
def calculateTimeStampWeight(row, min_timestamp, max_timestamp):
return ((pandas.to_datetime(row['timestamp'])-min_timestamp).days + 1)/((max_timestamp-min_timestamp).days+1)
def TFIDFProductValue(row):
return row['tf']*row['idf']
def CalculateMovieTF(row):
return row['tag_weightage'] / row['total_movie_weightage']
def calculateIDFData(row, total_movies):
return math.log10(total_movies / row['count_of_movies'])
def calculateTFIDFData(tfdata, idfdata):
tfidfdata = tfdata.merge(idfdata, on='tagid')
tfidfdata['tfidf'] = tfidfdata.apply(TFIDFProductValue, axis=1)
return tfidfdata[['movieid','tagid','tfidf']]
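# Hedged numeric example (made-up counts): a tag appearing in 10 of 1000 movies
# gets idf = log10(1000 / 10) = 2; if its tf within one movie is 0.3, the tfidf
# entry for that (movie, tag) pair is 0.3 * 2 = 0.6.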
def fetchMoviesTagsData():
allmoviesTagsData =pandas.read_csv("data/mltags.csv")
min_timestamp = pandas.to_datetime(min(allmoviesTagsData['timestamp']))
max_timestamp = pandas.to_datetime(max(allmoviesTagsData['timestamp']))
allmoviesTagsData['timestamp_weightage'] = allmoviesTagsData.apply(calculateTimeStampWeight, axis=1, args=(min_timestamp, max_timestamp))
allmoviesTagsData['tag_weightage'] = allmoviesTagsData.groupby(['movieid','tagid'])['timestamp_weightage'].transform('sum')
allmoviesTagsData = allmoviesTagsData[['movieid','tagid','tag_weightage']].drop_duplicates(subset=['movieid','tagid'])
allmoviesTagsData['total_movie_weightage'] = allmoviesTagsData.groupby(['movieid'])['tag_weightage'].transform('sum')
allmoviesTagsData['tf'] = allmoviesTagsData.apply(CalculateMovieTF, axis=1)
taglist = allmoviesTagsData['tagid'].tolist()
alltagsdata = pandas.read_csv("data/mltags.csv")
specifictagsdata = alltagsdata[alltagsdata['tagid'].isin(taglist)]
specifictagsdata.drop_duplicates(subset=['tagid', 'movieid'], inplace=True)
specifictagsdata['count_of_movies'] = specifictagsdata.groupby('tagid')['movieid'].transform('count')
specifictagsdata.drop_duplicates(subset=['tagid'], inplace=True)
moviesdata = pandas.read_csv("data/mlmovies.csv")
total_movies = moviesdata.shape[0]
specifictagsdata['idf'] = specifictagsdata.apply(calculateIDFData, axis=1, total_movies=total_movies)
tfidfdata = calculateTFIDFData(allmoviesTagsData, specifictagsdata[['tagid', 'idf']])
return tfidfdata
def fetchMoviesDetails(movielist):
moviedetails = pandas.read_csv("data/mlmovies.csv")
moviedetails = moviedetails[moviedetails['movieid'].isin(movielist)]
movienamelist = moviedetails.values.tolist()
movienamelist = sorted(movienamelist, key=lambda x: x[0])
return movienamelist
def fetchLabelDetails():
labeldetails = pandas.read_csv("data/mllabels.csv")
labellist = labeldetails.values.tolist()
return labellist
def classificationUsingSVM():
print("Assigning labels using SVM classifier")
labelListDetails = fetchLabelDetails()
modifiedList =[]
labelNames = sorted(np.unique([item[1] for item in labelListDetails]))
for i in range (0, len(labelListDetails)):
modifiedList.append((labelListDetails[i][0],labelListDetails[i][1],labelNames.index(labelListDetails[i][1])))
modifiedList = sorted(modifiedList, key=lambda x: x[0])
LabelledListOfMovies = sorted(np.unique([item[0] for item in modifiedList]))
listOfLabelledIDs =[]
for ele in modifiedList:
listOfLabelledIDs.append(ele[2])
tempDataFrame = pandas.DataFrame(listOfLabelledIDs)
tempDataFrame.to_csv('ListOfLabelledIDs.csv', index=False, header=False)
moviesTagsData = fetchMoviesTagsData()
moviesList = sorted(np.unique(moviesTagsData['movieid'].tolist()))
movieTagMatrix = moviesTagsData.pivot_table(index='movieid', columns='tagid',values='tfidf', fill_value=0)
data = []
for movieID in LabelledListOfMovies:
i = moviesList.index(movieID)
data.append(movieTagMatrix.values[i])
tempDataFrame = pandas.DataFrame(data)
tempDataFrame.to_csv('LabelledMovieTagData.csv', index=False, header=False)
tempDataFrame = pandas.DataFrame(movieTagMatrix.values)
tempDataFrame.to_csv('MovieTagData.csv', index=False, header=False)
fileName = 'LabelledMovieTagData.csv'
data = load_csv(fileName)
for i in range(len(data[0])):
stringColumnToFloat(data, i)
fileName = 'ListOfLabelledIDs.csv'
labels = load_csv(fileName)
fileName = 'MovieTagData.csv'
testData = load_csv(fileName)
for i in range(len(testData[0])):
stringColumnToFloat(testData, i)
label = []
for i in labels:
label.append(i[0])
clf = MulticlassSVM(C=0.1, tol=0.01, max_iter=100, random_state=0, verbose=1)
clf.fit(np.array(data), label)
    labelledResults = clf.predict(np.array(testData))
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
"""
import os
from copy import deepcopy
from nibabel import load, funcs, Nifti1Image
import numpy as np
from ..interfaces.base import (
BaseInterface,
traits,
InputMultiPath,
OutputMultiPath,
TraitedSpec,
File,
BaseInterfaceInputSpec,
isdefined,
)
from ..utils.filemanip import ensure_list, save_json, split_filename
from ..utils.misc import find_indices, normalize_mc_params
from .. import logging, config
iflogger = logging.getLogger("nipype.interface")
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
    params : np.array (up to 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
"""
if source == "NIPY":
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
params = normalize_mc_params(params, source)
# process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)], [-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params) :]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ("AFNI", "FSFAST"):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
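# Illustrative sketch (not part of the original module): a quick sanity check of
# _get_affine_matrix.  With all-zero motion parameters the translation and
# rotation blocks reduce to the identity and the scales default to 1 via the
# padding vector q above; the exact behaviour still depends on
# normalize_mc_params for the chosen source, so this is a sketch, not a test.
def _demo_identity_affine():
    params = np.zeros(6)  # 3 translations (mm) + 3 rotations (radians)
    aff = _get_affine_matrix(params, "SPM")
    return np.allclose(aff, np.eye(4))  # expected: True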
def _calc_norm(mc, use_differences, source, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
mc : motion parameter estimates
[3 translation, 3 rotation (radians)]
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
norm : at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
"""
affines = [_get_affine_matrix(mc[i, :], source) for i in range(mc.shape[0])]
return _calc_norm_affine(affines, use_differences, brain_pts)
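# Illustrative sketch of how _calc_norm is typically driven: one row of six
# motion parameters per timepoint (translations in mm, rotations in radians).
# The values below are made up purely for illustration.
def _demo_calc_norm():
    mc = np.zeros((3, 6))
    mc[1, 0] = 1.0    # 1 mm translation along x at the second timepoint
    mc[2, 4] = 0.01   # small rotation about y at the third timepoint
    norm, displacement = _calc_norm(mc, use_differences=False, source="SPM")
    return norm       # one composite displacement value per timepoint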
def _calc_norm_affine(affines, use_differences, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
affines : list of [4 x 4] affine matrices
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
norm : at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
"""
if brain_pts is None:
respos = np.diag([70, 70, 75])
resneg = np.diag([-70, -110, -45])
all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
displacement = None
else:
all_pts = brain_pts
n_pts = all_pts.size - all_pts.shape[1]
newpos = np.zeros((len(affines), n_pts))
if brain_pts is not None:
displacement = np.zeros((len(affines), int(n_pts / 3)))
for i, affine in enumerate(affines):
newpos[i, :] = np.dot(affine, all_pts)[0:3, :].ravel()
if brain_pts is not None:
displacement[i, :] = np.sqrt(
np.sum(
np.power(
np.reshape(newpos[i, :], (3, all_pts.shape[1]))
- all_pts[0:3, :],
2,
),
axis=0,
)
)
# np.savez('displacement.npz', newpos=newpos, pts=all_pts)
normdata = np.zeros(len(affines))
if use_differences:
newpos = np.concatenate(
            (np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0
        )
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
from torch.distributions import MultivariateNormal
import time
import logz
import time
import inspect
import os
from wrappers import make_env
import matplotlib.pyplot as plt
import queue
from torch.distributions.categorical import Categorical
class Network(nn.Module):
"""
Construct conv net to recognize game image
arguments:
input_shape: shape of observation (n_channel, height, width)
n_actions: number of actions
returns:
for discrete action:
logits for each action
for gaussian action:
mean of the action
"""
def __init__(self, ob_dim, action_dim):
super(Network, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(ob_dim[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU()
)
conv_out_size = self._get_conv_out(ob_dim)
self.fc = nn.Sequential(
nn.Linear(conv_out_size, 512),
nn.ReLU(),
nn.Linear(512, action_dim)
)
self.conv.apply(self.init_weight)
self.fc.apply(self.init_weight)
def init_weight(self, m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def _get_conv_out(self, shape):
o = self.conv(torch.zeros(1, *shape))
return int(np.prod(o.size()))
def forward(self, x, is_softmax=False):
x = torch.as_tensor(x)
conv_out = self.conv(x).view(x.size()[0], -1)
if is_softmax:
return F.softmax(self.fc(conv_out), dim=-1)
return self.fc(conv_out)
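# Illustrative sketch: instantiating the network with an Atari-style observation
# shape (4 stacked 84x84 frames) and 6 discrete actions.  The shape and action
# count are assumptions made only for this example; any (channels, height,
# width) works as long as the conv stack yields a non-empty feature map.
def _demo_network_forward():
    net = Network((4, 84, 84), 6)
    dummy_obs = torch.zeros(1, 4, 84, 84)
    logits = net(dummy_obs)                  # shape: (1, 6)
    probs = net(dummy_obs, is_softmax=True)  # softmax over the 6 actions
    return logits.shape, probs.sum().item()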
class Agent:
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.learning_rate = computation_graph_args['learning_rate']
self.num_target_updates = computation_graph_args['num_target_updates']
self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
self.save_path = computation_graph_args['save_path']
self.load_path = computation_graph_args['load_path']
self.max_checkpoints = computation_graph_args['max_checkpoints']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
self.actor_nn = Network(self.ob_dim, self.ac_dim)
self.critic_nn = Network(self.ob_dim, 1)
self.log_std = nn.Parameter(torch.FloatTensor(self.ac_dim))
self.critic_optim = torch.optim.Adam(self.critic_nn.parameters())
if self.discrete:
actor_param_list = self.actor_nn.parameters()
else:
actor_param_list = list(self.actor_nn.parameters()) + list(self.log_std)
self.actor_optim = torch.optim.Adam(actor_param_list)
self.ckpt_paths = queue.Queue()
if self.load_path:
self.step, self.best_val = self.load_model(self.load_path)
else:
self.step = 0
self.best_val = -1
def get_policy_parameter(self, ob):
"""
Compute the parameters for action given this observation, which are parameters of the policy distribution p(a|s)
arguments:
ob: (bs, self.ob_dim)
return:
if discrete: logits of categorical distribution over action
action_logit: (bs, action_dim)
if continuous: tuple (mean, log_std) of a Gaussian
mean: (bs, action_dim)
log_std: (action_dim) (trainable variable, not output of nn)
"""
if self.discrete:
action_logit = self.actor_nn(ob, True)
return action_logit
else:
mean = self.actor_nn(ob)
log_std = self.log_std
return (mean, log_std)
def sample_action(self, policy_parameter):
"""
Sample a random action according to the distribution specified by policy_parameter
arguments:
for discrete action: logits of categorical distribution over actions
logits: (bs, action_dim)
for continuous action: (mean, log_std) of a Gaussian distribution over actions
mean: (bs, action_dim)
log_std: action_dim
returns:
sample_ac:
if discrete: (bs)
if continuous: (bs, action_dim)
"""
if self.discrete:
logits = policy_parameter
sampled_ac = torch.multinomial(F.softmax(logits, dim=1), 1)
else:
mean, log_std = policy_parameter
z = torch.randn(self.ac_dim)
sampled_ac = mean + torch.exp(log_std) * z
return sampled_ac
def get_log_prob(self, policy_parameter, taken_action):
"""
Compute the log probability of the taken_action under the current parameters of the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
logits: (bs, action_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
mean: (bs, action_dim)
log_std: (action_dim)
taken_action: (bs) if discrete, (bs, action_dim) if continuous
returns:
log_prob: (bs)
"""
if self.discrete:
logits = policy_parameter
bs, _ = logits.size()
log_prob_v = F.log_softmax(logits, dim=1)
log_prob = log_prob_v[range(bs), taken_action]
else:
mean, log_std = policy_parameter
cov = torch.eye(self.ac_dim)
cov[range(self.ac_dim), range(self.ac_dim)] = torch.exp(log_std) ** 2
m = MultivariateNormal(mean, cov)
log_prob = m.log_prob(taken_action)
return log_prob
def update_actor(self, obs, actions, adv):
"""
Update parameters of the policy.
arguments:
obs: (sum_of_path_lengths, ob_dim)
actions: (sum_of_path_lengths)
adv: (sum_of_path_lengths)
returns:
nothing
"""
self.actor_optim.zero_grad()
policy_parameter = self.get_policy_parameter(obs)
log_prob = self.get_log_prob(policy_parameter, actions)
loss = torch.mean(-log_prob * adv)
print('Previous log_prob: ' + str(-loss))
loss.backward()
self.actor_optim.step()
policy_parameter = self.get_policy_parameter(obs)
log_prob = self.get_log_prob(policy_parameter, actions)
loss = torch.mean(-log_prob * adv)
print('Updated log_prob: ' + str(-loss))
def update_critic(self, obs, next_obs, re, terminal):
"""
Update the parameters of the critic
arguments:
obs: (sum_of_path_lengths, ob_dim)
next_obs: (sum_of_path_lengths, ob_dim)
re: (sum_of_path_lengths)
terminal: (sum_of_path_lengths)
returns: nothing
"""
for _ in range(self.num_target_updates):
# recompute target values
v_s_next = torch.squeeze(self.critic_nn(next_obs))
target_value = re + self.gamma * v_s_next * (1 - terminal)
for _ in range(self.num_grad_steps_per_target_update):
self.critic_optim.zero_grad()
v_s_prediction = torch.squeeze(self.critic_nn(obs))
loss_fn = nn.MSELoss()
loss = loss_fn(v_s_prediction, target_value.detach())
loss.backward()
self.critic_optim.step()
def estimate_advantage(self, obs, next_obs, re, terminal):
"""
Estimates the advantage function value for each timestep.
arguments:
obs: (sum_of_path_lengths, ob_dim)
next_obs: (sum_of_path_lengths, ob_dim)
re: (sum_of_path_lengths)
terminal: (sum_of_path_lengths)
returns:
adv: (sum_of_path_lengths)
"""
v_s = torch.squeeze(self.critic_nn(obs))
v_s_next = torch.squeeze(self.critic_nn(next_obs))
q = re + self.gamma * v_s_next * (1 - terminal)
adv = q - v_s
if self.normalize_advantages:
mean = torch.mean(adv)
std = torch.std(adv)
adv = (adv - mean)/(std + 1e-8)
return adv
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
"""
        Sample a trajectory for one episode; finish when the environment returns done=True or when n_steps > self.max_path_length.
"""
ob = env.reset()
obs, acs, rewards, next_obs, terminals = [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
policy_parameter = self.get_policy_parameter(ob[None, :])
ac = self.sample_action(policy_parameter)
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
# add the observation after taking a step to next_obs
next_obs.append(ob)
rewards.append(rew)
steps += 1
            # If the episode ended, the corresponding terminal value is 1;
            # otherwise, it is 0.
# print(steps)
# plt.imshow(ob[1], cmap='gray')
# plt.savefig ('image/grafico01' + str(steps) + '.png')
# plt.show()
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
return path
def save_model(self, step, val):
ckpt_dict = {
'actor_state': self.actor_nn.cpu().state_dict(),
'critic_state': self.critic_nn.cpu().state_dict(),
'step': step,
'best_val': self.best_val
}
checkpoint_path = os.path.join(self.save_path, 'step_{}.pth.tar'.format(step))
torch.save(ckpt_dict, checkpoint_path)
self.ckpt_paths.put(checkpoint_path)
print('Saved checkpoint: {}'.format(checkpoint_path))
if self.best_val < val:
print('New best checkpoint at step {}...'.format(step))
self.best_val = val
# remove checkpoint with lower value
if self.ckpt_paths.qsize() > self.max_checkpoints:
worst_ckpt = self.ckpt_paths.get()
try:
os.remove(worst_ckpt)
print('Removed checkpoint: {}'.format(worst_ckpt))
except OSError:
# Avoid crashing if checkpoint has been removed or protected
pass
def load_model(self, checkpoint_path):
ckpt_dict = torch.load(checkpoint_path)
self.actor_nn.load_state_dict(ckpt_dict['actor_state'])
self.critic_nn.load_state_dict(ckpt_dict['critic_state'])
step = ckpt_dict['step']
best_val = ckpt_dict['best_val']
return step, best_val
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
    args = inspect.getfullargspec(train_AC)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
def train_AC(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
save_path,
load_path,
max_checkpoints,
save_every):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
# env = gym.make(env_name)
env = make_env()
# Set random seeds
torch.manual_seed(seed)
np.random.seed(seed)
# env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec['max_episode_steps']
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'learning_rate': learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
'save_path': save_path,
'load_path': load_path,
'max_checkpoints': max_checkpoints,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(agent.step + 1, agent.step + 1 + n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
obs = np.concatenate([path["observation"] for path in paths])
actions = np.concatenate([path["action"] for path in paths])
re = np.concatenate([path["reward"] for path in paths])
next_obs = np.concatenate([path["next_observation"] for path in paths])
terminal = np.concatenate([path["terminal"] for path in paths])
print(actions[:20])
# obs = torch.from_numpy(obs).type(torch.float32)
# actions = torch.from_numpy(actions).type(torch.int8)
# re = torch.from_numpy(re).type(torch.float32)
# next_obs = torch.from_numpy(next_obs).type(torch.float32)
        # Actor-critic update:
        # (1) update the critic, by calling agent.update_critic
        # (2) use the updated critic to compute the advantage, by calling agent.estimate_advantage
        # (3) use the estimated advantage values to update the actor, by calling agent.update_actor
obs = torch.as_tensor(obs)
actions = torch.as_tensor(actions).type(torch.long)
next_obs = torch.as_tensor(next_obs)
re = torch.as_tensor(re)
terminal = torch.as_tensor(terminal)
agent.update_critic(obs, next_obs, re, terminal)
adv = agent.estimate_advantage(obs, next_obs, re, terminal)
agent.update_actor(obs, actions, adv.detach())
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", | np.std(returns) | numpy.std |
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"
import unittest
import numpy as np
import time
import torch
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from autoPyTorch.utils.configspace_wrapper import ConfigWrapper
from autoPyTorch.pipeline.nodes.resampling_strategy_selector import ResamplingStrategySelector
from numpy.testing import assert_array_almost_equal
from autoPyTorch.components.preprocessing.resampling import TargetSizeStrategyUpsample, \
RandomOverSamplingWithReplacement, RandomUnderSamplingWithReplacement
class TestResamplingStrategySelector(unittest.TestCase):
def test_resampling_strategy_selector_only_train(self):
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Y = np.array([[0, 1], [1, 0], [1, 0]])
train_indices = np.array([0, 1, 2])
hyperparameter_config = {
ResamplingStrategySelector.get_name() + ConfigWrapper.delimiter + "over_sampling_method": "random_over_sampling",
ResamplingStrategySelector.get_name() + ConfigWrapper.delimiter + "under_sampling_method": "random_under_sampling",
ResamplingStrategySelector.get_name() + ConfigWrapper.delimiter + "target_size_strategy": "up",
}
resampler_node = ResamplingStrategySelector()
resampler_node.add_over_sampling_method("random_over_sampling", RandomOverSamplingWithReplacement)
resampler_node.add_under_sampling_method("random_under_sampling", RandomUnderSamplingWithReplacement)
resampler_node.add_target_size_strategy("up", TargetSizeStrategyUpsample)
pipeline_config = {"random_seed": 42, "shuffle": True}
fit_result = resampler_node.fit(pipeline_config=pipeline_config, hyperparameter_config=hyperparameter_config, X=X, Y=Y, train_indices=train_indices,
valid_indices=None)
assert_array_almost_equal(sorted(fit_result['train_indices']), np.array([0, 1, 2, 3]))
num_0 = 0
num_1 = 0
for i in range(fit_result['X'].shape[0]):
x = fit_result['X'][i, :]
y = fit_result['Y'][i, :]
            if np.all(y == np.array([0, 1])):
"""
NASBench-101 search space, rollout, controller, evaluator.
During development, we referred to
https://github.com/automl/nas_benchmarks/blob/master/tabular_benchmarks/nas_cifar10.py
"""
import abc
import copy
import os
import re
import random
import collections
import itertools
import yaml
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import nasbench
from nasbench import api
from nasbench.lib import graph_util, config
from aw_nas import utils
from aw_nas.ops import get_op, Identity
from aw_nas.utils.exception import expect
from aw_nas.common import SearchSpace
from aw_nas.rollout.base import BaseRollout
from aw_nas.controller.base import BaseController
from aw_nas.evaluator.base import BaseEvaluator
from aw_nas.rollout.compare import CompareRollout
from aw_nas.evaluator.arch_network import ArchEmbedder
from aw_nas.utils import DenseGraphConvolution, DenseGraphFlow
from aw_nas.weights_manager.shared import SharedCell, SharedOp
from aw_nas.weights_manager.base import CandidateNet, BaseWeightsManager
INPUT = 'input'
OUTPUT = 'output'
CONV1X1 = 'conv1x1-bn-relu'
CONV3X3 = 'conv3x3-bn-relu'
MAXPOOL3X3 = 'maxpool3x3'
OUTPUT_NODE = 6
VERTICES = 7
MAX_EDGES = 9
_nasbench_cfg = config.build_config()
def parent_combinations_old(adjacency_matrix, node, n_parents=2):
"""Get all possible parent combinations for the current node."""
if node != 1:
# Parents can only be nodes which have an index that is lower than the current index,
# because of the upper triangular adjacency matrix and because the index is also a
# topological ordering in our case.
return itertools.combinations(np.argwhere(adjacency_matrix[:node, node] == 0).flatten(),
n_parents) # (e.g. (0, 1), (0, 2), (1, 2), ...
else:
return [[0]]
def parent_combinations(node, num_parents):
if node == 1 and num_parents == 1:
return [(0,)]
else:
return list(itertools.combinations(list(range(int(node))), num_parents))
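# Illustrative check (not part of the original search space): parent_combinations
# enumerates every way of choosing `num_parents` predecessors for a node, where
# node indices follow the topological ordering of the upper-triangular matrix.
def _demo_parent_combinations():
    assert parent_combinations(3, 2) == [(0, 1), (0, 2), (1, 2)]
    assert parent_combinations(1, 1) == [(0,)]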
def upscale_to_nasbench_format(adjacency_matrix):
"""
The search space uses only 4 intermediate nodes, rather than 5 as used in nasbench
This method adds a dummy node to the graph which is never used to be compatible with nasbench.
:param adjacency_matrix:
:return:
"""
return np.insert(
np.insert(adjacency_matrix,
5, [0, 0, 0, 0, 0, 0], axis=1),
5, [0, 0, 0, 0, 0, 0, 0], axis=0)
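# Illustrative check: the 1-shot search spaces work on 6x6 adjacency matrices
# (input, 4 intermediate nodes, output); upscaling inserts an unused row/column
# at index 5 so the matrix matches NASBench-101's fixed 7-vertex format.  The
# toy graph below (input -> node 1 -> output) is only for illustration.
def _demo_upscale():
    small = np.zeros((6, 6), dtype=int)
    small[0, 1] = 1   # input -> node 1
    small[1, 5] = 1   # node 1 -> output (index 5 in the 6-node graph)
    big = upscale_to_nasbench_format(small)
    assert big.shape == (7, 7)
    assert big[:, 5].sum() == 0 and big[5, :].sum() == 0  # dummy node stays isolated
    return big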
def _literal_np_array(arr):
if arr is None:
return None
return "np.array({})".format(np.array2string(arr, separator=",").replace("\n", " "))
class _ModelSpec(api.ModelSpec):
def __repr__(self):
return "_ModelSpec({}, {}; pruned_matrix={}, pruned_ops={})".format(
_literal_np_array(self.original_matrix),
self.original_ops,
_literal_np_array(self.matrix),
self.ops,
)
def hash_spec(self, *args, **kwargs):
return super(_ModelSpec, self).hash_spec(_nasbench_cfg["available_ops"])
class NasBench101SearchSpace(SearchSpace):
NAME = "nasbench-101"
def __init__(
self,
multi_fidelity=False,
load_nasbench=True,
compare_reduced=True,
compare_use_hash=False,
validate_spec=True,
):
super(NasBench101SearchSpace, self).__init__()
self.ops_choices = ["conv1x1-bn-relu",
"conv3x3-bn-relu", "maxpool3x3", "none"]
awnas_ops = [
"conv_bn_relu_1x1",
"conv_bn_relu_3x3",
"max_pool_3x3",
"none",
]
self.op_mapping = {k: v for k, v in zip(self.ops_choices, awnas_ops)}
self.ops_choice_to_idx = {
choice: i for i, choice in enumerate(self.ops_choices)
}
# operations: "conv3x3-bn-relu", "conv1x1-bn-relu", "maxpool3x3"
self.multi_fidelity = multi_fidelity
self.load_nasbench = load_nasbench
self.compare_reduced = compare_reduced
self.compare_use_hash = compare_use_hash
self.num_vertices = VERTICES
self.max_edges = MAX_EDGES
self.none_op_ind = self.ops_choices.index("none")
self.num_possible_edges = self.num_vertices * \
(self.num_vertices - 1) // 2
self.num_op_choices = len(self.ops_choices) # 3 + 1 (none)
self.num_ops = self.num_vertices - 2 # 5
self.idx = np.triu_indices(self.num_vertices, k=1)
self.validate_spec = validate_spec
if self.load_nasbench:
self._init_nasbench()
def __getstate__(self):
state = super(NasBench101SearchSpace, self).__getstate__().copy()
del state["nasbench"]
return state
def __setstate__(self, state):
super(NasBench101SearchSpace, self).__setstate__(state)
if self.load_nasbench:
            # slow; comment this out if the nasbench API is not needed when unpickling from disk
self._init_nasbench()
def pad_archs(self, archs):
return [self._pad_arch(arch) for arch in archs]
def _pad_arch(self, arch):
# padding for batchify training
adj, ops = arch
        # always normalize to the reduced matrix form
spec = self.construct_modelspec(edges=None, matrix=adj, ops=ops)
adj, ops = spec.matrix, self.op_to_idx(spec.ops)
num_v = adj.shape[0]
if num_v < VERTICES:
padded_adj = np.concatenate(
(adj[:-1], np.zeros((VERTICES - num_v, num_v), dtype=np.int8), adj[-1:])
)
padded_adj = np.concatenate(
(
padded_adj[:, :-1],
np.zeros((VERTICES, VERTICES - num_v)),
padded_adj[:, -1:],
),
axis=1,
)
padded_ops = ops + [3] * (7 - num_v)
adj, ops = padded_adj, padded_ops
return (adj, ops)
def _random_sample_ori(self):
while 1:
matrix = np.random.choice(
[0, 1], size=(self.num_vertices, self.num_vertices)
)
matrix = np.triu(matrix, 1)
ops = np.random.choice(
self.ops_choices[:-1], size=(self.num_vertices)
).tolist()
ops[0] = "input"
ops[-1] = "output"
spec = _ModelSpec(matrix=matrix, ops=ops)
if self.validate_spec and not self.nasbench.is_valid(spec):
continue
return NasBench101Rollout(
spec.original_matrix,
ops=self.op_to_idx(spec.original_ops),
search_space=self,
)
def _random_sample_me(self):
while 1:
splits = np.array(
sorted(
[0]
+ list(
np.random.randint(
0, self.max_edges + 1, size=self.num_possible_edges - 1
)
)
+ [self.max_edges]
)
)
edges = np.minimum(splits[1:] - splits[:-1], 1)
matrix = self.edges_to_matrix(edges)
ops = np.random.randint(0, self.num_op_choices, size=self.num_ops)
rollout = NasBench101Rollout(matrix, ops, search_space=self)
try:
self.nasbench._check_spec(rollout.genotype)
except api.OutOfDomainError:
# ignore out-of-domain archs (disconnected)
continue
else:
return rollout
# optional API
def genotype_from_str(self, genotype_str):
return eval(genotype_str)
return eval(re.search("(_ModelSpec\(.+);", genotype_str).group(1) + ")")
# ---- APIs ----
def random_sample(self):
m, ops = self.sample(True)
if len(ops) < len(m) - 2:
ops.append("none")
return NasBench101Rollout(m, [self.ops_choices.index(op) for op in ops], search_space=self)
return self._random_sample_ori()
def genotype(self, arch):
# return the corresponding ModelSpec
# edges, ops = arch
matrix, ops = arch
return self.construct_modelspec(edges=None, matrix=matrix, ops=ops)
def rollout_from_genotype(self, genotype):
return NasBench101Rollout(
genotype.original_matrix,
ops=self.op_to_idx(genotype.original_ops),
search_space=self,
)
def plot_arch(self, genotypes, filename, label, plot_format="pdf", **kwargs):
graph = genotypes.visualize()
graph.format = "pdf"
graph.render(filename, view=False)
return filename + ".{}".format(plot_format)
def distance(self, arch1, arch2):
pass
# ---- helpers ----
def _init_nasbench(self):
# the arch -> performances dataset
self.base_dir = os.path.join(
utils.get_awnas_dir("AWNAS_DATA", "data"), "nasbench-101"
)
if self.multi_fidelity:
self.nasbench = api.NASBench(
os.path.join(self.base_dir, "nasbench_full.tfrecord")
)
else:
self.nasbench = api.NASBench(
os.path.join(self.base_dir, "nasbench_only108.tfrecord")
)
def edges_to_matrix(self, edges):
matrix = np.zeros(
[self.num_vertices, self.num_vertices], dtype=np.int8)
matrix[self.idx] = edges
return matrix
def op_to_idx(self, ops):
return [
self.ops_choice_to_idx[op] for op in ops if op not in {"input", "output"}
]
def matrix_to_edges(self, matrix):
return matrix[self.idx]
def matrix_to_connection(self, matrix):
edges = matrix[self.idx].astype(np.bool)
node_connections = {}
concat_nodes = []
for from_, to_ in zip(self.idx[0][edges], self.idx[1][edges]):
# index of nodes starts with 1 rather than 0
if to_ < len(matrix) - 1:
node_connections.setdefault(to_, []).append(from_)
else:
if from_ >= len(matrix) - 2:
continue
concat_nodes.append(from_)
return node_connections, concat_nodes
def construct_modelspec(self, edges, matrix, ops):
if matrix is None:
assert edges is not None
matrix = self.edges_to_matrix(edges)
# expect(graph_util.num_edges(matrix) <= self.max_edges,
# "number of edges could not exceed {}".format(self.max_edges))
labeling = [self.ops_choices[op_ind] for op_ind in ops]
labeling = ["input"] + list(labeling) + ["output"]
model_spec = _ModelSpec(matrix, labeling)
return model_spec
def random_sample_arch(self):
# not uniform, and could be illegal,
        # if there is no edge from the INPUT or no edge to the OUTPUT,
# Just check and reject for now
return self.random_sample().arch
def batch_rollouts(self, batch_size, shuffle=True, max_num=None):
len_ = ori_len_ = len(self.nasbench.fixed_statistics)
if max_num is not None:
len_ = min(max_num, len_)
list_ = list(self.nasbench.fixed_statistics.values())
indexes = np.arange(ori_len_)
np.random.shuffle(indexes)
ind = 0
while ind < len_:
end_ind = min(len_, ind + batch_size)
yield [
NasBench101Rollout(
list_[r_ind]["module_adjacency"],
self.op_to_idx(list_[r_ind]["module_operations"]),
search_space=self,
)
for r_ind in indexes[ind:end_ind]
]
ind = end_ind
@classmethod
def supported_rollout_types(cls):
return ["nasbench-101"]
class NasBench101OneShotSearchSpace(NasBench101SearchSpace):
# NAME = "nasbench-101-1shot"
def __init__(
self,
multi_fidelity=False,
load_nasbench=True,
compare_reduced=True,
compare_use_hash=False,
validate_spec=True,
num_cell_groups=2,
num_init_nodes=1,
cell_layout=None,
reduce_cell_groups=(1,),
num_layers=8,
):
super(NasBench101OneShotSearchSpace, self).__init__(
multi_fidelity,
load_nasbench,
compare_reduced,
compare_use_hash,
validate_spec,
)
self.num_init_nodes = num_init_nodes
self.num_cell_groups = num_cell_groups
self.reduce_cell_groups = reduce_cell_groups
self.num_layers = num_layers
if cell_layout is not None:
expect(
len(cell_layout) == self.num_layers,
"Length of `cell_layout` should equal `num_layers`",
)
expect(
np.max(cell_layout) == self.num_cell_groups - 1,
"Max of elements of `cell_layout` should equal `num_cell_groups-1`",
)
self.cell_layout = cell_layout
elif self.num_cell_groups == 2:
            # by default: cell 0: normal cell, cell 1: reduce cell
self.cell_layout = [0] * self.num_layers
self.cell_layout[self.num_layers // 3] = 1
self.cell_layout[(2 * self.num_layers) // 3] = 1
else:
raise ValueError
self.loose_end = False
self.num_steps = 4
self.concat_op = "concat"
self.concat_nodes = None
self.cellwise_primitives = False
self.shared_primitives = self.ops_choices
self.num_parents = None
if self.load_nasbench:
self._init_nasbench()
def _is_valid(self, matrix):
assert self.num_parents is not None, \
"Do no use nasbench-101-1shot directly, please use nasbench-101-1shot-1, "\
"nasbench-101-1shot-2 or nasbench-101-1shot-3 search space instead."
num_node = list(matrix.sum(0))
if len(num_node) == VERTICES - 1:
num_node.insert(-2, 0)
return all([p == k for p, k in zip(self.num_parents, num_node)])
@abc.abstractmethod
def create_nasbench_adjacency_matrix(self, parents, **kwargs):
"""Based on given connectivity pattern create the corresponding adjacency matrix."""
pass
def sample(self, with_loose_ends, upscale=True):
if with_loose_ends:
adjacency_matrix_sample = self._sample_adjacency_matrix_with_loose_ends()
else:
adjacency_matrix_sample = self._sample_adjacency_matrix_without_loose_ends(
adjacency_matrix=np.zeros(
[self.num_intermediate_nodes + 2, self.num_intermediate_nodes + 2]),
node=self.num_intermediate_nodes + 1)
assert self._check_validity_of_adjacency_matrix(
adjacency_matrix_sample), 'Incorrect graph'
if upscale and self.NAME[-1] in ["1", "2"]:
adjacency_matrix_sample = upscale_to_nasbench_format(
adjacency_matrix_sample)
return adjacency_matrix_sample, random.choices(self.ops_choices[:-1], k=self.num_intermediate_nodes)
def _sample_adjacency_matrix_with_loose_ends(self):
parents_per_node = [random.sample(list(itertools.combinations(list(range(int(node))), num_parents)), 1) for
node, num_parents in self.num_parents_per_node.items()][2:]
parents = {
'0': [],
'1': [0]
}
for node, node_parent in enumerate(parents_per_node, 2):
parents[str(node)] = node_parent
adjacency_matrix = self._create_adjacency_matrix_with_loose_ends(
parents)
return adjacency_matrix
def _sample_adjacency_matrix_without_loose_ends(self, adjacency_matrix, node):
req_num_parents = self.num_parents_per_node[str(node)]
current_num_parents = np.sum(adjacency_matrix[:, node], dtype=np.int)
num_parents_left = req_num_parents - current_num_parents
sampled_parents = \
random.sample(list(parent_combinations_old(
adjacency_matrix, node, n_parents=num_parents_left)), 1)[0]
for parent in sampled_parents:
adjacency_matrix[parent, node] = 1
adjacency_matrix = self._sample_adjacency_matrix_without_loose_ends(
adjacency_matrix, parent)
return adjacency_matrix
@abc.abstractmethod
def generate_adjacency_matrix_without_loose_ends(self, **kwargs):
"""Returns every adjacency matrix in the search space without loose ends."""
pass
def convert_config_to_nasbench_format(self, config):
parents = {node: config["choice_block_{}_parents".format(node)] for node in
list(self.num_parents_per_node.keys())[1:]}
parents['0'] = []
adjacency_matrix = self.create_nasbench_adjacency_matrix_with_loose_ends(
parents)
ops = [config["choice_block_{}_op".format(node)] for node in list(
self.num_parents_per_node.keys())[1:-1]]
return adjacency_matrix, ops
def generate_search_space_without_loose_ends(self):
# Create all possible connectivity patterns
for iter, adjacency_matrix in enumerate(self.generate_adjacency_matrix_without_loose_ends()):
print(iter)
# Print graph
# Evaluate every possible combination of node ops.
n_repeats = int(np.sum(np.sum(adjacency_matrix, axis=1)[1:-1] > 0))
for combination in itertools.product([CONV1X1, CONV3X3, MAXPOOL3X3], repeat=n_repeats):
# Create node labels
# Add some op as node 6 which isn't used, here conv1x1
ops = [INPUT]
combination = list(combination)
for i in range(5):
if np.sum(adjacency_matrix, axis=1)[i + 1] > 0:
ops.append(combination.pop())
else:
ops.append(CONV1X1)
assert len(combination) == 0, 'Something is wrong'
ops.append(OUTPUT)
# Create nested list from numpy matrix
nasbench_adjacency_matrix = adjacency_matrix.astype(
np.int).tolist()
# Assemble the model spec
model_spec = api.ModelSpec(
# Adjacency matrix of the module
matrix=nasbench_adjacency_matrix,
# Operations at the vertices of the module, matches order of matrix
ops=ops)
yield adjacency_matrix, ops, model_spec
def _generate_adjacency_matrix(self, adjacency_matrix, node):
if self._check_validity_of_adjacency_matrix(adjacency_matrix):
# If graph from search space then yield.
yield adjacency_matrix
else:
req_num_parents = self.num_parents_per_node[str(node)]
current_num_parents = np.sum(
adjacency_matrix[:, node], dtype=np.int)
num_parents_left = req_num_parents - current_num_parents
for parents in parent_combinations_old(adjacency_matrix, node, n_parents=num_parents_left):
# Make copy of adjacency matrix so that when it returns to this stack
# it can continue with the unmodified adjacency matrix
adjacency_matrix_copy = copy.copy(adjacency_matrix)
for parent in parents:
adjacency_matrix_copy[parent, node] = 1
for graph in self._generate_adjacency_matrix(adjacency_matrix=adjacency_matrix_copy, node=parent):
yield graph
def _create_adjacency_matrix(self, parents, adjacency_matrix, node):
if self._check_validity_of_adjacency_matrix(adjacency_matrix):
# If graph from search space then yield.
return adjacency_matrix
else:
for parent in parents[str(node)]:
adjacency_matrix[parent, node] = 1
if parent != 0:
adjacency_matrix = self._create_adjacency_matrix(parents=parents, adjacency_matrix=adjacency_matrix,
node=parent)
return adjacency_matrix
def _create_adjacency_matrix_with_loose_ends(self, parents):
# Create the adjacency_matrix on a per node basis
adjacency_matrix = np.zeros([len(parents), len(parents)])
for node, node_parents in parents.items():
for parent in node_parents:
adjacency_matrix[parent, int(node)] = 1
return adjacency_matrix
def _check_validity_of_adjacency_matrix(self, adjacency_matrix):
"""
Checks whether a graph is a valid graph in the search space.
1. Checks that the graph is non empty
2. Checks that every node has the correct number of inputs
3. Checks that if a node has outgoing edges then it should also have incoming edges
4. Checks that input node is connected
5. Checks that the graph has no more than 9 edges
:param adjacency_matrix:
:return:
"""
# Check that the graph contains nodes
num_intermediate_nodes = sum(
np.array(np.sum(adjacency_matrix, axis=1) > 0, dtype=int)[1:-1])
if num_intermediate_nodes == 0:
return False
# Check that every node has exactly the right number of inputs
col_sums = np.sum(adjacency_matrix[:, :], axis=0)
for col_idx, col_sum in enumerate(col_sums):
# important FIX!
if col_idx > 0:
if col_sum != self.num_parents_per_node[str(col_idx)]:
return False
# Check that if a node has outputs then it should also have incoming edges (apart from zero)
col_sums = np.sum(np.sum(adjacency_matrix, axis=0) > 0)
row_sums = np.sum(np.sum(adjacency_matrix, axis=1) > 0)
if col_sums != row_sums:
return False
# Check that the input node is always connected. Otherwise the graph is disconnected.
        row_sum = np.sum(adjacency_matrix, axis=1)
# Run Grtrans with rrjet model
# The rrjet model is defined in "fluid_model_rrjet.py"
# NOTE -- currently the power-law emissivity is very slow because parallelization is off
# First make grtrans with 'make'
# Then run this in python
import numpy as np
import grtrans_batch as gr
import matplotlib.pyplot as plt
import scipy.ndimage.filters as filt
ang=20.
name = 'rrjet'+str(ang)
mu = np.cos(ang*np.pi/180.)
size = 300.
uout = 1./(10*size)
npix = 100
ngeo = 5000
cmperMpc = 3.086e24
MBH = 6.7e9
DTOBH = 16.528*cmperMpc
RADPERUAS = np.pi/180./3600./1.e6
psize_rg = 2*size/npix
cmperrg = 147708.8 * MBH
psize_cm = psize_rg * cmperrg
psize_rad = psize_cm / DTOBH
psize_uas = psize_rad / RADPERUAS
pp= 2.001
RF = 43.e9
cfun = 'jet'
cfun2 = 'seismic'
RERUN = True
FNAME = 'grtrans_jet_compare.txt'
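# Note on the pixel-scale chain above (values rounded): one gravitational radius
# is cmperrg = 147708.8 * MBH ~ 9.9e14 cm for this 6.7e9 Msun black hole; with a
# 2*size = 600 rg field of view across npix = 100 pixels, each pixel spans 6 rg
# ~ 5.9e15 cm, which at DTOBH ~ 5.1e25 cm subtends ~1.16e-10 rad, i.e.
# psize_uas ~ 24 microarcseconds per pixel (a ~2400 uas field of view).
# A minimal standalone recomputation, for illustration only:
def _demo_pixel_scale():
    rg_cm = 147708.8 * MBH                 # gravitational radius in cm
    pix_rad = (2 * size / npix) * rg_cm / DTOBH
    return pix_rad / RADPERUAS             # ~24 uas, matches psize_uas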
def main():
# run grtrans
x=gr.grtrans()
x.write_grtrans_inputs(name+'.in', oname=name+'.out',
fname='RRJET',phi0=0.,
betaeconst=1.e-4, ximax=10.,
nfreq=1,fmin=RF,fmax=RF,
gmin=10., gmax=1.e35, p2=pp, p1=pp,
#ename='SYNCHPL',
ename='POLSYNCHPL',
nvals=4, fpositron=0,
spin=0., standard=1,
uout=uout,
mbh=MBH,
#epcoefindx=[1,1,1,1,1,1,1],
#epcoefindx=[1,1,1,1,0,0,0],
mdotmin=1.57e15,mdotmax=1.57e15,nmdot=1,
nmu=1,mumin=mu,mumax=mu,
gridvals=[-size,size,-size,size],
nn=[npix,npix,ngeo],
hindf=1,hnt=1,
muval=1.)
if RERUN:
x.run_grtrans()
# load image
x.read_grtrans_output()
x.convert_to_Jy(DTOBH)
#grt_obj=x
save_grtrans_image(x)
display_grtrans_image(x)
def save_grtrans_image(grt_obj):
"""quick save, not ehtim compatible"""
I_im = grt_obj.ivals[:,0,0].reshape(npix,npix).flatten()
Q_im = grt_obj.ivals[:,1,0].reshape(npix,npix).flatten()
U_im = grt_obj.ivals[:,2,0].reshape(npix,npix).flatten()
V_im = grt_obj.ivals[:,3,0].reshape(npix,npix).flatten()
# convert to Tb
factor = 3.254e13/(RF**2 * psize_rad**2)
I_im *= factor
Q_im *= factor
U_im *= factor
V_im *= factor
x = np.array([[i for i in range(npix)] for j in range(npix)]).flatten()
y = np.array([[j for i in range(npix)] for j in range(npix)]).flatten()
    # use integer centring to avoid in-place int/float casting errors
    x = x - npix // 2
    y = y - npix // 2
x = x*psize_uas
y = y*psize_uas
outdat = np.vstack((x.T,y.T,I_im.T,Q_im.T,U_im.T,V_im.T)).T
np.savetxt('../rrjet_and_riaf/'+FNAME,outdat)
#np.savetxt('../rrjet_and_riaf/grtrans_jet_compare_positron_noconv.txt',outdat)
return
def display_grtrans_image(grt_obj,nvec=20,veccut=0.005,blur_kernel=1.25):
plt.close('all')
I_im = grt_obj.ivals[:,0,0].reshape(npix,npix)
Q_im = grt_obj.ivals[:,1,0].reshape(npix,npix)
U_im = grt_obj.ivals[:,2,0].reshape(npix,npix)
V_im = grt_obj.ivals[:,3,0].reshape(npix,npix)
I_im = filt.gaussian_filter(I_im, (blur_kernel, blur_kernel))
Q_im = filt.gaussian_filter(Q_im, (blur_kernel, blur_kernel))
U_im = filt.gaussian_filter(U_im, (blur_kernel, blur_kernel))
V_im = filt.gaussian_filter(V_im, (blur_kernel, blur_kernel))
# convert to Tb
factor = 3.254e13/(RF**2 * psize_rad**2)
I_im *= factor
Q_im *= factor
U_im *= factor
V_im *= factor
# Polarization Vectors
P_im = np.abs(Q_im + 1j*U_im)
m_im = P_im/I_im
thin = npix//nvec
mask = I_im > veccut * np.max(I_im)
mask2 = mask[::thin, ::thin]
m = m_im[::thin, ::thin][mask2]
x = (np.array([[i for i in range(npix)] for j in range(npix)])[::thin, ::thin])
x = x[mask2]
y = (np.array([[j for i in range(npix)] for j in range(npix)])[::thin, ::thin])
y = y[mask2]
a = (-np.sin(np.angle(Q_im+1j*U_im)/2)[::thin, ::thin])
a = a[mask2]
#a = m*a
b = ( np.cos(np.angle(Q_im+1j*U_im)/2)[::thin, ::thin])
b = b[mask2]
#b = m*b
    P_im[np.logical_not(mask)] = 0.  # blank polarization outside the intensity cut
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Diagnostic script to reproduce Cox et al. (2018).
Description
-----------
Plot effective climate sensitivity ECS vs. temperature variability metric psi
to establish an emergent relationship for ECS.
Author
------
<NAME> (DLR, Germany)
Project
-------
CRESCENDO
Configuration options in recipe
-------------------------------
confidence_level : float, optional (default: 0.66)
Confidence level for ECS error estimation.
"""
import logging
import os
import iris
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import esmvaltool.diag_scripts.emergent_constraints as ec
import esmvaltool.diag_scripts.shared.iris_helpers as ih
from esmvaltool.diag_scripts.shared import (
ProvenanceLogger, get_diagnostic_filename, get_plot_filename,
group_metadata, io, plot, run_diagnostic, select_metadata)
logger = logging.getLogger(os.path.basename(__file__))
plt.style.use(plot.get_path_to_mpl_style())
COLOR_SMALL_LAMBDA = '#800060'
COLOR_LARGE_LAMBDA = '#009900'
(FIG, AXES) = plt.subplots()
ECS_ATTRS = {
'short_name': 'ecs',
'long_name': 'Effective Climate Sensitivity (ECS)',
'units': 'K',
}
TASA_ATTRS = {
'short_name': 'tasa',
'long_name': 'Near-Surface Air Temperature Anomaly',
'units': 'K',
}
PSI_ATTRS = {
'short_name': 'psi',
'long_name': 'Temperature variability metric',
'units': 'K',
}
def _get_ancestor_files(cfg, obs_name, projects=None):
"""Get ancestor files for provenance."""
if projects is None:
projects = _get_project(cfg)
if isinstance(projects, str):
projects = [projects]
datasets = []
for project in projects:
datasets.extend(
select_metadata(cfg['input_data'].values(), project=project))
datasets.extend(
select_metadata(cfg['input_data'].values(), dataset=obs_name))
return [d['filename'] for d in datasets]
def _get_model_color(model, lambda_cube):
"""Get color of model dependent on climate feedback parameter."""
clim_sens = lambda_cube.extract(iris.Constraint(dataset=model)).data
if clim_sens < 1.0:
col = COLOR_SMALL_LAMBDA
else:
col = COLOR_LARGE_LAMBDA
return col
def _plot_model_point(model, psi_cube, ecs_cube, lambda_cube):
"""Plot a single model point for emergent relationship."""
col = _get_model_color(model, lambda_cube)
style = plot.get_dataset_style(model, 'cox18nature')
AXES.plot(
psi_cube.extract(iris.Constraint(dataset=model)).data,
ecs_cube.extract(iris.Constraint(dataset=model)).data,
linestyle='none',
marker=style['mark'],
markeredgecolor=col,
markerfacecolor=col,
markersize=style['size'])
def _get_line_plot_legend():
"""Add legend for line plots."""
color_obs = plot.get_dataset_style('OBS', 'cox18nature')['color']
handles = [
mlines.Line2D([], [],
color=COLOR_SMALL_LAMBDA,
label=r'$\lambda < 1.0$ Wm$^{-2}$K$^{-1}$'),
mlines.Line2D([], [],
color=COLOR_LARGE_LAMBDA,
label=r'$\lambda > 1.0$ Wm$^{-2}$K$^{-1}$'),
mlines.Line2D([], [],
linestyle='none',
marker='o',
markeredgecolor=color_obs,
markerfacecolor=color_obs,
label='Observations'),
]
return AXES.legend(handles=handles, loc='upper left')
def _get_project(cfg):
"""Extract project from cfg."""
input_data = cfg['input_data'].values()
projects = list(group_metadata(input_data, 'project').keys())
projects = [p for p in projects if 'obs' not in p.lower()]
if len(projects) == 1:
return projects[0]
return projects
def _save_fig(cfg, basename, legend=None):
"""Save matplotlib figure."""
path = get_plot_filename(basename, cfg)
if legend is None:
legend = []
else:
legend = [legend]
FIG.savefig(
path,
additional_artists=legend,
bbox_inches='tight',
orientation='landscape')
logger.info("Wrote %s", path)
AXES.cla()
return path
def get_external_cubes(cfg):
"""Get external cubes for psi, ECS and lambda."""
cubes = iris.cube.CubeList()
for filename in ('psi.nc', 'ecs.nc', 'lambda.nc'):
filepath = io.get_ancestor_file(cfg, filename)
cube = iris.load_cube(filepath)
cube = cube.extract(
ih.iris_project_constraint(['OBS'], cfg, negate=True))
cubes.append(cube)
cubes = ih.intersect_dataset_coordinates(cubes)
return (cubes[0], cubes[1], cubes[2])
def get_provenance_record(caption, statistics, plot_type, ancestor_files):
"""Create a provenance record describing the diagnostic data and plot."""
record = {
'ancestors': ancestor_files,
'authors': ['schlund_manuel'],
'caption': caption,
'domains': ['global'],
'plot_type': plot_type,
'realms': ['atmos'],
'references': ['cox18nature'],
'statistics': statistics,
'themes': ['EC'],
}
return record
def plot_temperature_anomaly(cfg, tas_cubes, lambda_cube, obs_name):
"""Plot temperature anomaly versus time."""
for cube in tas_cubes.values():
cube.data -= np.mean(
cube.extract(
iris.Constraint(year=lambda cell: 1961 <= cell <= 1990)).data)
# Save netcdf file and provencance
filename = 'temperature_anomaly_{}'.format(obs_name)
netcdf_path = get_diagnostic_filename(filename, cfg)
io.save_1d_data(tas_cubes, netcdf_path, 'year', TASA_ATTRS)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"Simulated change in global temperature from {} models (coloured "
"lines), compared to the global temperature anomaly from the {} "
"dataset (black dots). The anomalies are relative to a baseline "
"period of 1961-1990.".format(project, obs_name), ['anomaly'],
['times'], _get_ancestor_files(cfg, obs_name))
# Plot
if cfg['write_plots']:
models = lambda_cube.coord('dataset').points
# Plot lines
for model in models:
cube = tas_cubes[model]
AXES.plot(
cube.coord('year').points,
cube.data,
color=_get_model_color(model, lambda_cube))
obs_style = plot.get_dataset_style('OBS', 'cox18nature')
obs_cube = tas_cubes[obs_name]
AXES.plot(
obs_cube.coord('year').points,
obs_cube.data,
linestyle='none',
marker='o',
markeredgecolor=obs_style['color'],
markerfacecolor=obs_style['color'])
# Plot appearance
AXES.set_title('Simulation of global warming record')
AXES.set_xlabel('Year')
AXES.set_ylabel('Temperature anomaly / K')
legend = _get_line_plot_legend()
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def plot_psi(cfg, psi_cubes, lambda_cube, obs_name):
"""Plot temperature variability metric psi versus time."""
filename = 'temperature_variability_metric_{}'.format(obs_name)
netcdf_path = get_diagnostic_filename(filename, cfg)
io.save_1d_data(psi_cubes, netcdf_path, 'year', PSI_ATTRS)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"Psi metric of variability versus time, from the {0} models "
"(coloured lines), and the {1} observational data (black circles). "
"The psi values are calculated for windows of width {2} yr, after "
"linear de-trending in each window. These {2}-yr windows are shown "
"for different end times.".format(project, obs_name,
cfg.get('window_length', 55)),
['corr', 'var'], ['times'], _get_ancestor_files(cfg, obs_name))
# Plot
if cfg['write_plots']:
models = lambda_cube.coord('dataset').points
# Plot lines
for model in models:
cube = psi_cubes[model]
AXES.plot(
cube.coord('year').points,
cube.data,
color=_get_model_color(model, lambda_cube))
obs_style = plot.get_dataset_style('OBS', 'cox18nature')
obs_cube = psi_cubes[obs_name]
AXES.plot(
obs_cube.coord('year').points,
obs_cube.data,
linestyle='none',
marker='o',
markeredgecolor=obs_style['color'],
markerfacecolor=obs_style['color'])
# Plot appearance
AXES.set_title('Metric of variability versus time')
AXES.set_xlabel('Year')
AXES.set_ylabel(r'$\Psi$ / K')
legend = _get_line_plot_legend()
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def plot_emergent_relationship(cfg, psi_cube, ecs_cube, lambda_cube, obs_cube):
"""Plot emergent relationship."""
filename = 'emergent_relationship_{}'.format(
obs_cube.attributes['dataset'])
cube = ecs_cube.copy()
cube.add_aux_coord(
iris.coords.AuxCoord(psi_cube.data, **ih.convert_to_iris(PSI_ATTRS)),
0)
netcdf_path = get_diagnostic_filename(filename, cfg)
io.iris_save(cube, netcdf_path)
provenance_record = get_provenance_record(
"Emergent relationship between ECS and the psi metric. The black dot-"
"dashed line shows the best-fit linear regression across the model "
"ensemble, with the prediction error for the fit given by the black "
"dashed lines. The vertical blue lines show the observational "
"constraint from the {} observations: the mean (dot-dashed line) and "
"the mean plus and minus one standard deviation (dashed lines).".
format(obs_cube.attributes['dataset']), ['mean', 'corr', 'var'],
['scatter'], _get_ancestor_files(cfg, obs_cube.attributes['dataset']))
# Plot
if cfg['write_plots']:
obs_mean = np.mean(obs_cube.data)
obs_std = np.std(obs_cube.data)
# Calculate regression line
lines = ec.regression_line(psi_cube.data, ecs_cube.data)
logger.info("Found emergent relationship with slope %.2f (r = %.2f)",
lines['slope'], lines['rvalue'])
# Plot points
for model in psi_cube.coord('dataset').points:
_plot_model_point(model, psi_cube, ecs_cube, lambda_cube)
# Plot lines
AXES.set_xlim(auto=False)
AXES.set_ylim(auto=False)
AXES.plot(
lines['x'],
lines['y_best_estim'],
color='black',
linestyle='dashdot',
label='Linear regression')
AXES.plot(
lines['x'],
lines['y_minus_err'],
color='black',
linestyle='dashed')
AXES.plot(
lines['x'], lines['y_plus_err'], color='black', linestyle='dashed')
AXES.axvline(
obs_mean,
color='blue',
linestyle='dashdot',
label='Observational constraint')
AXES.axvline(obs_mean - obs_std, color='blue', linestyle='dashed')
AXES.axvline(obs_mean + obs_std, color='blue', linestyle='dashed')
# Plot appearance
AXES.set_title('Emergent relationship fit')
AXES.set_xlabel(r'$\Psi$ / K')
AXES.set_ylabel('ECS / K')
legend = AXES.legend(loc='upper left')
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
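# Illustrative sketch (independent of the ec.* helpers used by plot_pdf below)
# of the idea behind the emergent-constraint PDF: regress ECS on psi across the
# model ensemble, then propagate a Gaussian observational estimate of psi
# through the fitted line.  The regression prediction error, which the real
# helper includes, is deliberately omitted here for brevity.
def _demo_emergent_constraint_pdf(psi_models, ecs_models, obs_mean, obs_std):
    slope, intercept = np.polyfit(psi_models, ecs_models, 1)
    psi_lin = np.linspace(obs_mean - 4.0 * obs_std, obs_mean + 4.0 * obs_std, 200)
    psi_pdf = np.exp(-0.5 * ((psi_lin - obs_mean) / obs_std) ** 2) / (
        obs_std * np.sqrt(2.0 * np.pi))
    ecs_lin = intercept + slope * psi_lin
    # Change of variables p(ECS) = p(psi) / |dECS/dpsi| for a linear mapping.
    ecs_pdf = psi_pdf / np.abs(slope)
    return ecs_lin, ecs_pdf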
def plot_pdf(cfg, psi_cube, ecs_cube, obs_cube):
"""Plot probability density function of ECS."""
obs_mean = np.mean(obs_cube.data)
obs_std = np.std(obs_cube.data)
(ecs_lin, ecs_pdf) = ec.gaussian_pdf(psi_cube.data, ecs_cube.data,
obs_mean, obs_std)
# Provenance
filename = 'pdf_{}'.format(obs_cube.attributes['dataset'])
netcdf_path = get_diagnostic_filename(filename, cfg)
cube = iris.cube.Cube(
ecs_pdf,
var_name='pdf',
long_name='Probability density function',
units='K-1')
cube.add_aux_coord(
iris.coords.AuxCoord(ecs_lin, **ih.convert_to_iris(ECS_ATTRS)), 0)
io.iris_save(cube, netcdf_path)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"The PDF for ECS. The orange histograms show the prior distributions "
"that arise from equal weighting of the {} models in 0.5 K bins.".
format(project), ['mean'], ['other'],
_get_ancestor_files(cfg, obs_cube.attributes['dataset']))
# Plot
if cfg['write_plots']:
AXES.plot(
ecs_lin,
ecs_pdf,
color='black',
linewidth=2.0,
label='Emergent constraint')
AXES.hist(
ecs_cube.data,
bins=6,
range=(2.0, 5.0),
density=True,
color='orange',
label='{} models'.format(project))
# Plot appearance
AXES.set_title('PDF of emergent constraint')
AXES.set_xlabel('ECS / K')
AXES.set_ylabel('Probability density')
legend = AXES.legend(loc='upper left')
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def plot_cdf(cfg, psi_cube, ecs_cube, obs_cube):
"""Plot cumulative distribution function of ECS."""
confidence_level = cfg.get('confidence_level', 0.66)
(ecs_lin, ecs_pdf) = ec.gaussian_pdf(psi_cube.data, ecs_cube.data,
np.mean(obs_cube.data),
np.std(obs_cube.data))
ecs_cdf = ec.cdf(ecs_lin, ecs_pdf)
# Provenance
filename = 'cdf_{}'.format(obs_cube.attributes['dataset'])
netcdf_path = get_diagnostic_filename(filename, cfg)
cube = iris.cube.Cube(
ecs_cdf,
var_name='cdf',
long_name='Cumulative distribution function',
units='1')
cube.add_aux_coord(
iris.coords.AuxCoord(ecs_lin, **ih.convert_to_iris(ECS_ATTRS)), 0)
io.iris_save(cube, netcdf_path)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"The CDF for ECS. The horizontal dot-dashed lines show the {}% "
"confidence limits. The orange histograms show the prior "
"distributions that arise from equal weighting of the {} models in "
"0.5 K bins.".format(int(confidence_level * 100), project), ['mean'],
['other'], _get_ancestor_files(cfg, obs_cube.attributes['dataset']))
# Plot
if cfg['write_plots']:
AXES.plot(
ecs_lin,
ecs_cdf,
color='black',
linewidth=2.0,
label='Emergent constraint')
AXES.hist(
ecs_cube.data,
bins=6,
range=(2.0, 5.0),
cumulative=True,
density=True,
color='orange',
label='{} models'.format(project))
AXES.axhline(
(1.0 - confidence_level) / 2.0, color='black', linestyle='dashdot')
AXES.axhline(
(1.0 + confidence_level) / 2.0, color='black', linestyle='dashdot')
# Plot appearance
AXES.set_title('CDF of emergent constraint')
AXES.set_xlabel('ECS / K')
AXES.set_ylabel('CDF')
legend = AXES.legend(loc='upper left')
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def get_ecs_range(cfg, psi_cube, ecs_cube, obs_cube):
"""Get constrained ecs range."""
confidence_level = cfg.get('confidence_level', 0.66)
conf_low = (1.0 - confidence_level) / 2.0
conf_high = (1.0 + confidence_level) / 2.0
# Calculate PDF and CDF
(ecs_lin, ecs_pdf) = ec.gaussian_pdf(psi_cube.data, ecs_cube.data,
                                         np.mean(obs_cube.data),
                                         np.std(obs_cube.data))
from __future__ import division
import glob
import numpy as NP
from functools import reduce
import numpy.ma as MA
import progressbar as PGB
import h5py
import healpy as HP
import warnings
import copy
import astropy.cosmology as CP
from astropy.time import Time, TimeDelta
from astropy.io import fits
from astropy import units as U
from astropy import constants as FCNST
from scipy import interpolate
from astroutils import DSP_modules as DSP
from astroutils import constants as CNST
from astroutils import nonmathops as NMO
from astroutils import mathops as OPS
from astroutils import lookup_operations as LKP
import prisim
from prisim import interferometry as RI
from prisim import primary_beams as PB
from prisim import delay_spectrum as DS
try:
from pyuvdata import UVBeam
except ImportError:
uvbeam_module_found = False
else:
uvbeam_module_found = True
prisim_path = prisim.__path__[0]+'/'
cosmoPlanck15 = CP.Planck15 # Planck 2015 cosmology
cosmo100 = cosmoPlanck15.clone(name='Modified Planck 2015 cosmology with h=1.0', H0=100.0) # Modified Planck 2015 cosmology with h=1.0, H= 100 km/s/Mpc
################################################################################
def write_PRISim_bispectrum_phase_to_npz(infile_prefix, outfile_prefix,
triads=None, bltriplet=None,
hdf5file_prefix=None, infmt='npz',
datakey='noisy', blltol=0.1):
"""
----------------------------------------------------------------------------
Write closure phases computed in a PRISim simulation to a NPZ file with
appropriate format for further analysis.
Inputs:
infile_prefix
[string] HDF5 file or NPZ file created by a PRISim simulation or
its replication respectively. If infmt is specified as 'hdf5',
then hdf5file_prefix will be ignored and all the observing
info will be read from here. If infmt is specified as 'npz',
then hdf5file_prefix needs to be specified in order to read the
observing parameters.
triads [list or numpy array or None] Antenna triads given as a list of
3-element lists or a ntriads x 3 array. Each element in the
inner list is an antenna label. They will be converted to
strings internally. If set to None, then all triads determined
by bltriplet will be used. If specified, then inputs in blltol
and bltriplet will be ignored.
bltriplet [numpy array or None] 3x3 numpy array containing the 3 baseline
vectors. The first axis denotes the three baselines, the second
axis denotes the East, North, Up coordinates of the baseline
vector. Units are in m. Will be used only if triads is set to
None.
outfile_prefix
[string] Prefix of the NPZ file. It will be appended by
'_noiseless', '_noisy', and '_noise' and further by extension
'.npz'
infmt [string] Format of the input file containing visibilities.
Accepted values are 'npz' (default), and 'hdf5'. If infmt is
specified as 'npz', then hdf5file_prefix also needs to be
specified for reading the observing parameters
datakey [string] Specifies which -- 'noiseless', 'noisy' (default), or
'noise' -- visibilities are to be written to the output. If set
to None, and infmt is 'hdf5', then all three sets of
visibilities are written. The datakey string will also be added
as a suffix in the output file.
blltol [scalar] Baseline length tolerance (in m) for matching baseline
vectors in triads. It must be a scalar. Default = 0.1 m. Will
be used only if triads is set to None and bltriplet is to be
used.
----------------------------------------------------------------------------
"""
if not isinstance(infile_prefix, str):
raise TypeError('Input infile_prefix must be a string')
if not isinstance(outfile_prefix, str):
raise TypeError('Input outfile_prefix must be a string')
if (triads is None) and (bltriplet is None):
raise ValueError('One of triads or bltriplet must be set')
if triads is None:
if not isinstance(bltriplet, NP.ndarray):
raise TypeError('Input bltriplet must be a numpy array')
if not isinstance(blltol, (int,float)):
raise TypeError('Input blltol must be a scalar')
if bltriplet.ndim != 2:
raise ValueError('Input bltriplet must be a 2D numpy array')
if bltriplet.shape[0] != 3:
raise ValueError('Input bltriplet must contain three baseline vectors')
if bltriplet.shape[1] != 3:
            raise ValueError('Input bltriplet must contain baseline vectors along three coordinates in the ENU frame')
else:
if not isinstance(triads, (list, NP.ndarray)):
raise TypeError('Input triads must be a list or numpy array')
triads = NP.asarray(triads).astype(str)
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input file format must be npz or hdf5')
if infmt.lower() == 'npz':
if not isinstance(hdf5file_prefix, str):
raise TypeError('If infmt is npz, then hdf5file_prefix needs to be specified for observing parameters information')
if datakey is None:
datakey = ['noisy']
if isinstance(datakey, str):
datakey = [datakey]
elif not isinstance(datakey, list):
raise TypeError('Input datakey must be a list')
for dkey in datakey:
if dkey.lower() not in ['noiseless', 'noisy', 'noise']:
raise ValueError('Invalid input found in datakey')
if infmt.lower() == 'hdf5':
fullfnames_with_extension = glob.glob(infile_prefix + '*' + infmt.lower())
fullfnames_without_extension = [fname.split('.hdf5')[0] for fname in fullfnames_with_extension]
else:
fullfnames_without_extension = [infile_prefix]
if len(fullfnames_without_extension) == 0:
raise IOError('No input files found with pattern {0}'.format(infile_prefix))
try:
if infmt.lower() == 'hdf5':
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[0])
else:
simvis = RI.InterferometerArray(None, None, None, init_file=hdf5file_prefix)
    except Exception:
raise IOError('Input PRISim file does not contain a valid PRISim output')
latitude = simvis.latitude
longitude = simvis.longitude
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
last = simvis.lst / 15.0 / 24.0 # from degrees to fraction of day
last = last.reshape(-1,1)
daydata = NP.asarray(simvis.timestamp[0]).ravel()
if infmt.lower() == 'npz':
simvisinfo = NP.load(fullfnames_without_extension[0]+'.'+infmt.lower())
skyvis = simvisinfo['noiseless'][0,...]
vis = simvisinfo['noisy']
noise = simvisinfo['noise']
n_realize = vis.shape[0]
else:
n_realize = len(fullfnames_without_extension)
cpdata = {}
outfile = {}
for fileind in range(n_realize):
if infmt.lower() == 'npz':
simvis.vis_freq = vis[fileind,...]
simvis.vis_noise_freq = noise[fileind,...]
else:
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[fileind])
if fileind == 0:
if triads is None:
triads, bltriplets = simvis.getThreePointCombinations(unique=False)
# triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3)
# bltriplets = NP.asarray(prisim_BSP_info['baseline_triplets'])
triads = NP.asarray(triads).reshape(-1,3)
bltriplets = NP.asarray(bltriplets)
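                # The block below matches each requested baseline vector to the
                # model baseline triplets within blltol; any baseline without a
                # match is flipped (negated) and matched again, and only the
                # triads common to all three matched baselines are retained.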
blinds = []
matchinfo = LKP.find_NN(bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
revind = []
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
revind += [blnum]
if len(revind) > 0:
flip_factor = NP.ones(3, dtype=NP.float)
flip_factor[NP.array(revind)] = -1
rev_bltriplet = bltriplet * flip_factor.reshape(-1,1)
matchinfo = LKP.find_NN(rev_bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
raise ValueError('Some baselines in the triplet are not found in the model triads')
triadinds = []
for blnum in NP.arange(bltriplet.shape[0]):
triadind, blind = NP.unravel_index(NP.asarray(matchinfo[0][blnum]), (bltriplets.shape[0], bltriplets.shape[1]))
triadinds += [triadind]
triadind_intersection = NP.intersect1d(triadinds[0], NP.intersect1d(triadinds[1], triadinds[2]))
if triadind_intersection.size == 0:
raise ValueError('Specified triad not found in the PRISim model. Try other permutations of the baseline vectors and/or reverse individual baseline vectors in the triad before giving up.')
triads = triads[triadind_intersection,:]
selected_bltriplets = bltriplets[triadind_intersection,:,:].reshape(-1,3,3)
prisim_BSP_info = simvis.getClosurePhase(antenna_triplets=triads.tolist(),
delay_filter_info=None,
specsmooth_info=None,
spectral_window_info=None,
unique=False)
if fileind == 0:
            triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # Re-establish the triads returned after the first iteration (to account for any order flips)
for outkey in datakey:
if fileind == 0:
outfile[outkey] = outfile_prefix + '_{0}.npz'.format(outkey)
if outkey == 'noiseless':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]), axis=0)
if outkey == 'noisy':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]), axis=0)
if outkey == 'noise':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:]
cpdata[outkey] = prisim_BSP_info['closure_phase_noise'][NP.newaxis,:,:]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_noise'][NP.newaxis,...]), axis=0)
for outkey in datakey:
cpdata[outkey] = NP.rollaxis(cpdata[outkey], 3, start=0)
flagsdata = NP.zeros(cpdata[outkey].shape, dtype=NP.bool)
NP.savez_compressed(outfile[outkey], closures=cpdata[outkey],
flags=flagsdata, triads=triads,
last=last+NP.zeros((1,n_realize)),
days=daydata+NP.arange(n_realize))
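# Illustrative usage sketch (not part of the original pipeline): the file
# prefixes and the ENU baseline triplet below are hypothetical placeholders.
# It converts a PRISim HDF5 simulation into the NPZ closure-phase format,
# selecting antenna triads whose baselines match the given triplet to 0.1 m.
def _example_write_PRISim_bispectrum_phase_to_npz():
    """Hedged usage sketch for write_PRISim_bispectrum_phase_to_npz()."""
    bltriplet = NP.asarray([[14.6, 0.0, 0.0],      # baseline 1 (East, North, Up) in m
                            [-7.3, 12.6, 0.0],     # baseline 2
                            [-7.3, -12.6, 0.0]])   # baseline 3
    write_PRISim_bispectrum_phase_to_npz('/path/to/prisim_simulation',   # HDF5 prefix (hypothetical)
                                         '/path/to/closure_phases',      # output NPZ prefix (hypothetical)
                                         triads=None, bltriplet=bltriplet,
                                         infmt='hdf5', datakey='noisy',
                                         blltol=0.1)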
################################################################################
def loadnpz(npzfile, longitude=0.0, latitude=0.0, lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
return a dictionary
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
Output:
    cpinfo [dictionary] Contains one top-level key, namely, 'raw'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan), and some other optional keys
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
    flags = flagsdata.astype(NP.bool)
    # Optional day-averaged and scatter arrays, if present in the NPZ file
    # (mirrors the handling in npz2hdf5() below); these are referenced later
    # in this function and would otherwise be undefined.
    if 'averaged_closures' in npzdata:
        cp_dayavg = npzdata['averaged_closures'].astype(NP.float64)
    if 'std_dev_triad' in npzdata:
        cp_std_triads = npzdata['std_dev_triad'].astype(NP.float64)
    if 'std_dev_lst' in npzdata:
        cp_std_lst = npzdata['std_dev_lst'].astype(NP.float64)
cpinfo = {}
datapool = ['raw']
for dpool in datapool:
cpinfo[dpool] = {}
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
if qty == 'cphase':
cpinfo[dpool][qty] = NP.copy(cp)
elif qty == 'triads':
cpinfo[dpool][qty] = NP.copy(triadsdata)
elif qty == 'flags':
cpinfo[dpool][qty] = NP.copy(flags)
elif qty == 'lst':
cpinfo[dpool][qty] = NP.copy(lstHA)
elif qty == 'lst-day':
cpinfo[dpool][qty] = NP.copy(lstday.jd)
elif qty == 'days':
cpinfo[dpool][qty] = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
cpinfo[dpool][qty] = NP.copy(cp_dayavg)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
cpinfo[dpool][qty] = NP.copy(cp_std_triads)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
cpinfo[dpool][qty] = NP.copy(cp_std_lst)
return cpinfo
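# Illustrative usage sketch (hypothetical file path and site coordinates,
# not taken from the original): load the NPZ closure phases into the nested
# 'raw' dictionary documented above and pull out a couple of arrays.
def _example_loadnpz():
    """Hedged usage sketch for loadnpz()."""
    cpinfo = loadnpz('/path/to/closure_phases_noisy.npz',
                     longitude=21.4278, latitude=-30.7224,
                     lst_format='fracday')
    cphase = cpinfo['raw']['cphase']   # shape (nlst, ndays, ntriads, nchan)
    flags = cpinfo['raw']['flags']     # boolean, same shape as cphase
    return cphase, flags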
################################################################################
def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0,
lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
save it to HDF5 format
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
                which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
hdf5file [string] Output HDF5 file including full path.
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
flags = flagsdata.astype(NP.bool)
if 'averaged_closures' in npzdata:
day_avg_cpdata = npzdata['averaged_closures']
cp_dayavg = day_avg_cpdata.astype(NP.float64)
if 'std_dev_triad' in npzdata:
std_triads_cpdata = npzdata['std_dev_triad']
cp_std_triads = std_triads_cpdata.astype(NP.float64)
if 'std_dev_lst' in npzdata:
std_lst_cpdata = npzdata['std_dev_lst']
cp_std_lst = std_lst_cpdata.astype(NP.float64)
with h5py.File(hdf5file, 'w') as fobj:
datapool = ['raw']
for dpool in datapool:
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
data = None
if qty == 'cphase':
data = NP.copy(cp)
elif qty == 'triads':
data = NP.copy(triadsdata)
elif qty == 'flags':
data = NP.copy(flags)
elif qty == 'lst':
data = NP.copy(lstHA)
elif qty == 'lst-day':
data = NP.copy(lstday.jd)
elif qty == 'days':
data = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
data = NP.copy(cp_dayavg)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
data = NP.copy(cp_std_triads)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
data = NP.copy(cp_std_lst)
if data is not None:
dset = fobj.create_dataset('{0}/{1}'.format(dpool, qty), data=data, compression='gzip', compression_opts=9)
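# Illustrative usage sketch (hypothetical paths): convert a CASA-style NPZ
# closure-phase file to the HDF5 layout ('raw/cphase', 'raw/flags', ...)
# written by npz2hdf5() above.
def _example_npz2hdf5():
    """Hedged usage sketch for npz2hdf5()."""
    npz2hdf5('/path/to/closure_phases_noisy.npz',
             '/path/to/closure_phases_noisy.hdf5',
             longitude=21.4278, latitude=-30.7224, lst_format='fracday')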
################################################################################
def save_CPhase_cross_power_spectrum(xcpdps, outfile):
"""
----------------------------------------------------------------------------
Save cross-power spectrum information in a dictionary to a HDF5 file
Inputs:
xcpdps [dictionary] This dictionary is essentially an output of the
member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
                matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
outfile [string] Full path to the external HDF5 file where the cross-
power spectrum information provided in xcpdps will be saved
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
with h5py.File(outfile, 'w') as fileobj:
hdrgrp = fileobj.create_group('header')
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
dset = hdrgrp.create_dataset(key, data=xcpdps[key])
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
smplgrp = fileobj.create_group(smplng)
for key in sampling_keys:
dset = smplgrp.create_dataset(key, data=xcpdps[smplng][key])
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
dpoolgrp = smplgrp.create_group(dpool)
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][key], dict):
subgrp = dpoolgrp.create_group(key)
for subkey in xcpdps[smplng][dpool][key]:
dset = subgrp.create_dataset(str(subkey), data=xcpdps[smplng][dpool][key][subkey])
else:
dset = dpoolgrp.create_dataset(key, data=xcpdps[smplng][dpool][key])
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][stat], list):
for ii in range(len(xcpdps[smplng][dpool][stat])):
dset = dpoolgrp.create_dataset(stat+'/diagcomb_{0}'.format(ii), data=xcpdps[smplng][dpool][stat][ii].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat][ii].si.unit)
else:
dset = dpoolgrp.create_dataset(stat, data=xcpdps[smplng][dpool][stat].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat].si.unit)
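# Illustrative usage sketch (hypothetical object and path): xcpdps is assumed
# to be the dictionary returned by compute_power_spectrum() of
# ClosurePhaseDelaySpectrum, as described in the docstring above.
def _example_save_CPhase_cross_power_spectrum(xcpdps):
    """Hedged usage sketch for save_CPhase_cross_power_spectrum()."""
    save_CPhase_cross_power_spectrum(xcpdps, '/path/to/xcpdps.hdf5')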
################################################################################
def read_CPhase_cross_power_spectrum(infile):
"""
----------------------------------------------------------------------------
Read information about cross power spectrum from an external HDF5 file into
    a dictionary. This is the counterpart to save_CPhase_cross_power_spectrum()
Input:
infile [string] Full path to the external HDF5 file that contains info
about cross-power spectrum.
Output:
xcpdps [dictionary] This dictionary has structure the same as output
of the member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
                matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
----------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
xcpdps = {}
with h5py.File(infile, 'r') as fileobj:
hdrgrp = fileobj['header']
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
xcpdps[key] = hdrgrp[key].value
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in fileobj:
smplgrp = fileobj[smplng]
xcpdps[smplng] = {}
for key in sampling_keys:
xcpdps[smplng][key] = smplgrp[key].value
for dpool in dpool_keys:
if dpool in smplgrp:
xcpdps[smplng][dpool] = {}
dpoolgrp = smplgrp[dpool]
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in dpoolgrp:
if isinstance(dpoolgrp[key], h5py.Group):
xcpdps[smplng][dpool][key] = {}
for subkey in dpoolgrp[key]:
xcpdps[smplng][dpool][key][int(subkey)] = dpoolgrp[key][subkey].value
elif isinstance(dpoolgrp[key], h5py.Dataset):
xcpdps[smplng][dpool][key] = dpoolgrp[key].value
else:
raise TypeError('Invalid h5py data type encountered')
for stat in ['mean', 'median']:
if stat in dpoolgrp:
if isinstance(dpoolgrp[stat], h5py.Dataset):
valunits = dpoolgrp[stat].attrs['units']
xcpdps[smplng][dpool][stat] = dpoolgrp[stat].value * U.Unit(valunits)
elif isinstance(dpoolgrp[stat], h5py.Group):
xcpdps[smplng][dpool][stat] = []
for diagcomb_ind in range(len(dpoolgrp[stat].keys())):
if 'diagcomb_{0}'.format(diagcomb_ind) in dpoolgrp[stat]:
valunits = dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].attrs['units']
xcpdps[smplng][dpool][stat] += [dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].value * U.Unit(valunits)]
return xcpdps
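# Illustrative usage sketch (hypothetical path): read a previously saved
# cross-power spectrum back into the nested dictionary; astropy units are
# reattached from the 'units' attributes stored alongside each dataset.
def _example_read_CPhase_cross_power_spectrum():
    """Hedged usage sketch for read_CPhase_cross_power_spectrum()."""
    xcpdps = read_CPhase_cross_power_spectrum('/path/to/xcpdps.hdf5')
    # e.g. mean delay power spectrum of the 'whole' data pool on the resampled
    # grid, if those keys are present in the file
    ps_mean = xcpdps.get('resampled', {}).get('whole', {}).get('mean', None)
    return ps_mean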
################################################################################
def incoherent_cross_power_spectrum_average(xcpdps, excpdps=None, diagoffsets=None):
"""
----------------------------------------------------------------------------
Perform incoherent averaging of cross power spectrum along specified axes
Inputs:
xcpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
                information possibly coming from different sources, and they
                will be averaged incoherently. If a single
dictionary is provided instead of a list of dictionaries, the
said averaging does not take place. Each dictionary is
essentially an output of the member function
compute_power_spectrum() of class ClosurePhaseDelaySpectrum. It
has the following key-value structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and
'residual' each of which is a dictionary. 'whole' contains power
spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained
as a difference between 'whole' and 'submodel'. It contains the
following keys and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
                matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
excpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
                information of subsample differences possibly coming from
                different sources, and they will be averaged
incoherently. This is optional. If not set (default=None), no
incoherent averaging happens. If a single dictionary is provided
instead of a list of dictionaries, the said averaging does not
take place. Each dictionary is essentially an output of the
member function compute_power_spectrum_uncertainty() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,)
array), 'dday' ((ndaycomb,) array), 'oversampled' and
'resampled' corresponding to whether resample was set to False
or True in call to member function FT(). Values under keys
'triads_ind' and 'lst_ind' are numpy array corresponding to
triad and time indices used in selecting the data. Values under
keys 'oversampled' and 'resampled' each contain a dictionary
with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of
the frequency subbands of the subband delay spectra. It
is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform.
It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary.
It contains information about power spectrum uncertainties
obtained from subsample differences. It contains the following
keys and values:
'mean' [numpy array] Delay power spectrum uncertainties
incoherently estimated over the axes specified in
xinfo['axes'] using the 'mean' key in input cpds or
attribute cPhaseDS['errinfo']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties
incoherently averaged over the axes specified in incohax
using the 'median' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends
on the combination of input parameters. See examples
below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets
for those axes. If 'avgcov' was set, those entries will
be removed from 'diagoffsets' since all the leading
diagonal elements have been collapsed (averaged) further.
Value under each key is a numpy array where each element
in the array corresponds to the index of that leading
diagonal. This should match the size of the output along
that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
diagoffsets [NoneType or dictionary or list of dictionaries] This info is
used for incoherent averaging along specified diagonals along
specified axes. This incoherent averaging is performed after
incoherently averaging multiple cross-power spectra (if any).
If set to None, this incoherent averaging is not performed.
Many combinations of axes and diagonals can be specified as
individual dictionaries in a list. If only one dictionary is
specified, then it assumed that only one combination of axes
and diagonals is requested. If a list of dictionaries is given,
each dictionary in the list specifies a different combination
for incoherent averaging. Each dictionary should have the
following key-value pairs. The key is the axis number (allowed
values are 1, 2, 3) that denote the axis type (1=LST, 2=Days,
                3=Triads to be averaged), and the value under these keys is a
list or numpy array of diagonals to be averaged incoherently.
These axes-diagonal combinations apply to both the inputs
xcpdps and excpdps, except axis=2 does not apply to excpdps
(since it is made of subsample differences already) and will be
skipped.
Outputs:
A tuple consisting of two dictionaries. The first dictionary contains the
incoherent averaging of xcpdps as specified by the inputs, while the second
    consists of the incoherent averaging of excpdps as specified by the inputs. The structure
of these dictionaries are practically the same as the dictionary inputs
xcpdps and excpdps respectively. The only differences in dictionary
structure are:
* Under key ['oversampled'/'resampled']['whole'/'submodel'/'residual'
      /'errinfo']['mean'/'median'] is a list of numpy arrays, where each
array in the list corresponds to the dictionary in the list in input
diagoffsets that defines the axes-diagonal combination.
----------------------------------------------------------------------------
"""
if isinstance(xcpdps, dict):
xcpdps = [xcpdps]
if not isinstance(xcpdps, list):
raise TypeError('Invalid data type provided for input xcpdps')
if excpdps is not None:
if isinstance(excpdps, dict):
excpdps = [excpdps]
if not isinstance(excpdps, list):
raise TypeError('Invalid data type provided for input excpdps')
if len(xcpdps) != len(excpdps):
raise ValueError('Inputs xcpdps and excpdps found to have unequal number of values')
out_xcpdps = {'triads': xcpdps[0]['triads'], 'triads_ind': xcpdps[0]['triads_ind'], 'lst': xcpdps[0]['lst'], 'lst_ind': xcpdps[0]['lst_ind'], 'dlst': xcpdps[0]['dlst'], 'days': xcpdps[0]['days'], 'day_ind': xcpdps[0]['day_ind'], 'dday': xcpdps[0]['dday']}
out_excpdps = None
if excpdps is not None:
out_excpdps = {'triads': excpdps[0]['triads'], 'triads_ind': excpdps[0]['triads_ind'], 'lst': excpdps[0]['lst'], 'lst_ind': excpdps[0]['lst_ind'], 'dlst': excpdps[0]['dlst'], 'days': excpdps[0]['days'], 'day_ind': excpdps[0]['day_ind'], 'dday': excpdps[0]['dday']}
for smplng in ['oversampled', 'resampled']:
if smplng in xcpdps[0]:
out_xcpdps[smplng] = {'z': xcpdps[0][smplng]['z'], 'kprll': xcpdps[0][smplng]['kprll'], 'lags': xcpdps[0][smplng]['lags'], 'freq_center': xcpdps[0][smplng]['freq_center'], 'bw_eff': xcpdps[0][smplng]['bw_eff'], 'shape': xcpdps[0][smplng]['shape'], 'freq_wts': xcpdps[0][smplng]['freq_wts'], 'lag_corr_length': xcpdps[0][smplng]['lag_corr_length']}
if excpdps is not None:
out_excpdps[smplng] = {'z': excpdps[0][smplng]['z'], 'kprll': excpdps[0][smplng]['kprll'], 'lags': excpdps[0][smplng]['lags'], 'freq_center': excpdps[0][smplng]['freq_center'], 'bw_eff': excpdps[0][smplng]['bw_eff'], 'shape': excpdps[0][smplng]['shape'], 'freq_wts': excpdps[0][smplng]['freq_wts'], 'lag_corr_length': excpdps[0][smplng]['lag_corr_length']}
for dpool in ['whole', 'submodel', 'residual']:
if dpool in xcpdps[0][smplng]:
out_xcpdps[smplng][dpool] = {'diagoffsets': xcpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': xcpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in xcpdps[0][smplng][dpool]:
out_xcpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(xcpdps)):
arr += [xcpdps[i][smplng][dpool][stat].si.value]
arr_units = xcpdps[i][smplng][dpool][stat].si.unit
if isinstance(xcpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(xcpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in xcpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[xcpdps[i][smplng][dpool]['axesmap'][ax]] = xcpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * xcpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(xcpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(xcpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_xcpdps[smplng][dpool][stat] = arr
out_xcpdps[smplng][dpool]['diagweights'] = diagweights
for dpool in ['errinfo']:
            if (excpdps is not None) and (dpool in excpdps[0][smplng]):
out_excpdps[smplng][dpool] = {'diagoffsets': excpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': excpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in excpdps[0][smplng][dpool]:
out_excpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(excpdps)):
arr += [excpdps[i][smplng][dpool][stat].si.value]
arr_units = excpdps[i][smplng][dpool][stat].si.unit
if isinstance(excpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(excpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in excpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[excpdps[i][smplng][dpool]['axesmap'][ax]] = excpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * excpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(excpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(excpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_excpdps[smplng][dpool][stat] = arr
out_excpdps[smplng][dpool]['diagweights'] = diagweights
if diagoffsets is not None:
if isinstance(diagoffsets, dict):
diagoffsets = [diagoffsets]
if not isinstance(diagoffsets, list):
raise TypeError('Input diagoffsets must be a list of dictionaries')
for ind in range(len(diagoffsets)):
for ax in diagoffsets[ind]:
if not isinstance(diagoffsets[ind][ax], (list, NP.ndarray)):
raise TypeError('Values in input dictionary diagoffsets must be a list or numpy array')
diagoffsets[ind][ax] = NP.asarray(diagoffsets[ind][ax])
for smplng in ['oversampled', 'resampled']:
if smplng in out_xcpdps:
for dpool in ['whole', 'submodel', 'residual']:
if dpool in out_xcpdps[smplng]:
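                        # For each requested axes-diagonal combination, build a
                        # mask that keeps (False) only the selected diagonal
                        # offsets along every specified axis and masks (True)
                        # everything else; the masked weights are then used for
                        # the weighted averaging further below.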
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_xcpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_xcpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_xcpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_xcpdps[smplng][dpool]['diagweights'])
out_xcpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_xcpdps[smplng][dpool]:
arr = NP.copy(out_xcpdps[smplng][dpool][stat].si.value)
arr_units = out_xcpdps[smplng][dpool][stat].si.unit
out_xcpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_xcpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind]])
out_xcpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_xcpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_xcpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
if excpdps is not None:
for smplng in ['oversampled', 'resampled']:
if smplng in out_excpdps:
for dpool in ['errinfo']:
if dpool in out_excpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_excpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
if ax != 2:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_excpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_excpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_excpdps[smplng][dpool]['diagweights'])
out_excpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_excpdps[smplng][dpool]:
arr = NP.copy(out_excpdps[smplng][dpool][stat].si.value)
arr_units = out_excpdps[smplng][dpool][stat].si.unit
out_excpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_excpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind] if ax!=2])
out_excpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_excpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_excpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
return (out_xcpdps, out_excpdps)
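# Illustrative usage sketch (hypothetical inputs): incoherently average two
# cross-power spectra (e.g. from different observing sessions) and then
# average over selected diagonals of the LST (axis 1) and triad (axis 3)
# covariance axes. The diagonal offsets chosen below are placeholders.
def _example_incoherent_cross_power_spectrum_average(xcpdps1, xcpdps2,
                                                     excpdps1, excpdps2):
    """Hedged usage sketch for incoherent_cross_power_spectrum_average()."""
    diagoffsets = [{1: NP.asarray([0]), 3: NP.asarray([0, 1, 2])}]
    avg_xcpdps, avg_excpdps = incoherent_cross_power_spectrum_average(
        [xcpdps1, xcpdps2], excpdps=[excpdps1, excpdps2],
        diagoffsets=diagoffsets)
    return avg_xcpdps, avg_excpdps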
################################################################################
def incoherent_kbin_averaging(xcpdps, kbins=None, num_kbins=None, kbintype='log'):
"""
----------------------------------------------------------------------------
Averages the power spectrum incoherently by binning in bins of k. Returns
the power spectrum in units of both standard power spectrum and \Delta^2
Inputs:
xcpdps [dictionary] A dictionary that contains the incoherent averaged
power spectrum along LST and/or triads axes. This dictionary is
essentially the one(s) returned as the output of the function
incoherent_cross_power_spectrum_average()
kbins [NoneType, list or numpy array] Bins in k. If set to None
(default), it will be determined automatically based on the
inputs in num_kbins, and kbintype. If num_kbins is None and
kbintype='linear', the negative and positive values of k are
folded into a one-sided power spectrum. In this case, the
bins will approximately have the same resolution as the k-values
in the input power spectrum for all the spectral windows.
num_kbins [NoneType or integer] Number of k-bins. Used only if kbins is
set to None. If kbintype is set to 'linear', the negative and
positive values of k are folded into a one-sided power spectrum.
In this case, the bins will approximately have the same
resolution as the k-values in the input power spectrum for all
the spectral windows.
kbintype [string] Specifies the type of binning, used only if kbins is
set to None. Accepted values are 'linear' and 'log' for linear
and logarithmic bins respectively.
Outputs:
Dictionary containing the power spectrum information. At the top level, it
contains keys specifying the sampling to be 'oversampled' or 'resampled'.
Under each of these keys is another dictionary containing the following
keys:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
        The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
or one key named 'errinfo' each of which is a dictionary. 'whole'
contains power spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained as a
difference between 'whole' and 'submodel'. 'errinfo' contains power
spectrum information about the subsample differences. There is also
another dictionary under key 'kbininfo' that contains information about
k-bins. These dictionaries contain the following keys and values:
'whole'/'submodel'/'residual'/'errinfo'
[dictionary] It contains the following keys and values:
'mean' [dictionary] Delay power spectrum information under the
'mean' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'median'
[dictionary] Delay power spectrum information under the
'median' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'kbininfo'
[dictionary] Contains the k-bin information. It contains the
following key-value pairs:
'counts'
[list] List of numpy arrays where each numpy array stores
the counts in the determined k-bins. Each numpy array in the
list corresponds to a spectral window (redshift subband). The
shape of each numpy array is (nkbins,)
'kbin_edges'
[list] List of numpy arrays where each numpy array contains the
k-bin edges. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nkbins+1,).
'kbinnum'
[list] List of numpy arrays containing the bin number under
which the k value falls. Each array in the list corresponds to
a spectral window (redshift subband). The shape of each array
is (nlags,).
'ri'
[list] List of numpy arrays containing the reverse indices for
each k-bin. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nlags+nkbins+1,).
'whole'/'submodel'/'residual' or 'errinfo' [dictionary] k-bin info
estimated for the different datapools under different stats
and PS definitions. It has the keys 'mean' and 'median' for the
mean and median statistics respectively. Each of them contains a
dictionary with the following key-value pairs:
'PS' [list] List of numpy arrays where each numpy array
contains a standard power spectrum typically in units of
'K2 Mpc3'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
'Del2' [list] List of numpy arrays where each numpy array
contains a Delta^2 power spectrum typically in units of
'K2'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
if kbins is not None:
if not isinstance(kbins, (list,NP.ndarray)):
raise TypeError('Input kbins must be a list or numpy array')
else:
if not isinstance(kbintype, str):
raise TypeError('Input kbintype must be a string')
if kbintype.lower() not in ['linear', 'log']:
raise ValueError('Input kbintype must be set to "linear" or "log"')
if kbintype.lower() == 'log':
if num_kbins is None:
num_kbins = 10
psinfo = {}
keys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in keys:
psinfo[key] = xcpdps[key]
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
psinfo[smplng] = {}
for key in sampling_keys:
psinfo[smplng][key] = xcpdps[smplng][key]
kprll = xcpdps[smplng]['kprll']
lags = xcpdps[smplng]['lags']
eps = 1e-10
if kbins is None:
dkprll = NP.max(NP.mean(NP.diff(kprll, axis=-1), axis=-1))
if kbintype.lower() == 'linear':
bins_kprll = NP.linspace(eps, NP.abs(kprll).max()+eps, num=kprll.shape[1]/2+1, endpoint=True)
else:
bins_kprll = NP.geomspace(eps, NP.abs(kprll).max()+eps, num=num_kbins+1, endpoint=True)
bins_kprll = NP.insert(bins_kprll, 0, -eps)
else:
bins_kprll = NP.asarray(kbins)
num_kbins = bins_kprll.size - 1
psinfo[smplng]['kbininfo'] = {'counts': [], 'kbin_edges': [], 'kbinnum': [], 'ri': []}
for spw in range(kprll.shape[0]):
counts, kbin_edges, kbinnum, ri = OPS.binned_statistic(NP.abs(kprll[spw,:]), statistic='count', bins=bins_kprll)
counts = counts.astype(NP.int)
psinfo[smplng]['kbininfo']['counts'] += [NP.copy(counts)]
psinfo[smplng]['kbininfo']['kbin_edges'] += [kbin_edges / U.Mpc]
psinfo[smplng]['kbininfo']['kbinnum'] += [NP.copy(kbinnum)]
psinfo[smplng]['kbininfo']['ri'] += [NP.copy(ri)]
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
psinfo[smplng][dpool] = {}
psinfo[smplng]['kbininfo'][dpool] = {}
keys = ['diagoffsets', 'diagweights', 'axesmap']
for key in keys:
psinfo[smplng][dpool][key] = xcpdps[smplng][dpool][key]
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
psinfo[smplng][dpool][stat] = {'PS': [], 'Del2': []}
psinfo[smplng]['kbininfo'][dpool][stat] = []
for combi in range(len(xcpdps[smplng][dpool][stat])):
outshape = NP.asarray(xcpdps[smplng][dpool][stat][combi].shape)
outshape[-1] = num_kbins
tmp_dps = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit)
tmp_Del2 = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit / U.Mpc**3)
tmp_kprll = NP.full(tuple(outshape), NP.nan, dtype=NP.float) / U.Mpc
for spw in range(kprll.shape[0]):
counts = NP.copy(psinfo[smplng]['kbininfo']['counts'][spw])
ri = NP.copy(psinfo[smplng]['kbininfo']['ri'][spw])
print('Processing datapool={0}, stat={1}, LST-Day-Triad combination={2:0d}, spw={3:0d}...'.format(dpool, stat, combi, spw))
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} k-bins '.format(num_kbins), PGB.ETA()], maxval=num_kbins).start()
for binnum in range(num_kbins):
if counts[binnum] > 0:
ind_kbin = ri[ri[binnum]:ri[binnum+1]]
tmp_dps[spw,...,binnum] = NP.nanmean(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1)
k_shape = NP.ones(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1).ndim, dtype=NP.int)
k_shape[-1] = -1
tmp_Del2[spw,...,binnum] = NP.nanmean(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc)**3 * NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) / (2*NP.pi**2)
tmp_kprll[spw,...,binnum] = NP.nansum(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc) * NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) / NP.nansum(NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1)
progress.update(binnum+1)
progress.finish()
psinfo[smplng][dpool][stat]['PS'] += [copy.deepcopy(tmp_dps)]
psinfo[smplng][dpool][stat]['Del2'] += [copy.deepcopy(tmp_Del2)]
psinfo[smplng]['kbininfo'][dpool][stat] += [copy.deepcopy(tmp_kprll)]
return psinfo
################################################################################
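# ------------------------------------------------------------------------
# Illustrative sketch (comments only, so the module stays importable):
# the k-binning above builds bin edges either linearly or geometrically in
# |k_parallel| and then gathers, per bin, the indices returned through the
# reverse indices of OPS.binned_statistic(). A minimal numpy-only
# equivalent of that idea, with hypothetical values and NP.digitize
# standing in for the reverse-index lookup, is:
#
#     import numpy as np
#     kprll = np.linspace(-0.5, 0.5, 33)     # hypothetical k_parallel (Mpc^-1)
#     eps = 1e-10
#     # linear bins in |k_parallel|, plus an extra edge at -eps to catch k=0
#     bins = np.linspace(eps, np.abs(kprll).max()+eps,
#                        num=kprll.size//2+1, endpoint=True)
#     bins = np.insert(bins, 0, -eps)
#     binnum = np.digitize(np.abs(kprll), bins) - 1   # bin index per sample
#     counts = np.bincount(binnum, minlength=bins.size-1)
#     # indices falling in bin b (what ri[ri[b]:ri[b+1]] provides above):
#     ind_in_bin = [np.where(binnum == b)[0] for b in range(bins.size-1)]
# ------------------------------------------------------------------------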
class ClosurePhase(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase information.
It has the following attributes and member functions.
Attributes:
extfile [string] Full path to external file containing information
of ClosurePhase instance. The file is in HDF5 format
cpinfo [dictionary] Contains the following top level keys,
namely, 'raw', 'processed', and 'errinfo'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan).
Under the 'processed' key are more subkeys, namely,
'native', 'prelim', and optionally 'submodel' and 'residual'
each holding a dictionary.
Under 'native' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)), and 'wts' (masked
array: (nlst,ndays,ntriads,nchan)).
Under 'prelim' dictionary, the subsubkeys for further
dictionaries are 'tbins' (numpy array of tbin centers
after smoothing), 'dtbins' (numpy array of tbin
intervals), 'wts' (masked array:
(ntbins,ndays,ntriads,nchan)), 'eicp' and 'cphase'.
The dictionaries under 'eicp' are indexed by keys
'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
'median' (masked array: (ntbins,ndays,ntriads,nchan)),
'rms' (masked array: (ntbins,ndays,ntriads,nchan)), and
'mad' (masked array: (ntbins,ndays,ntriads,nchan)). The
last one denotes Median Absolute Deviation.
Under 'submodel' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), and 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)).
Under 'residual' dictionary, the subsubkeys for further
dictionaries are 'cphase' and 'eicp'. These are
dictionaries too. The dictionaries under 'eicp' are
indexed by keys 'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
and 'median' (masked array:
(ntbins,ndays,ntriads,nchan)).
Under key 'errinfo', it contains the following keys and
values:
'list_of_pair_of_pairs'
List of pair of pairs for which differences of
complex exponentials have been computed, where the
elements are bins of days. The number of elements
in the list is ncomb, and each element is a 4-element
list specifying the pair of pairs of day-bin indices
'eicp_diff'
Difference of complex exponentials between pairs
of day bins. This will be used in evaluating noise
properties in power spectrum. It is a dictionary
with two keys '0' and '1' where each contains the
difference from a pair of subsamples. Each of these
keys contains a numpy array of shape
(nlstbins,ncomb,2,ntriads,nchan)
'wts' Weights in difference of complex exponentials
obtained by sum of squares of weights that are
associated with the pair that was used in the
differencing. It is a dictionary with two keys '0'
and '1' where each contains the weights associated with
the corresponding subsample difference. Each is of shape
(nlstbins,ncomb,2,ntriads,nchan)
Member functions:
__init__() Initialize an instance of class ClosurePhase
expicp() Compute and return complex exponential of the closure phase
as a masked array
smooth_in_tbins()
Smooth the complex exponentials of closure phases in LST
bins. Both mean and median smoothed estimates are produced.
subtract() Subtract complex exponential of the bispectrum phase
from the current instance and updates the cpinfo attribute
subsample_differencing()
Create subsamples and differences between subsamples to
evaluate noise properties from the data set.
save() Save contents of attribute cpinfo in external HDF5 file
----------------------------------------------------------------------------
"""
def __init__(self, infile, freqs, infmt='npz'):
"""
------------------------------------------------------------------------
Initialize an instance of class ClosurePhase
Inputs:
infile [string] Input file including full path. It could be a NPZ
with raw data, or a HDF5 file that could contain raw or
processed data. The input file format is specified in the
input infmt. If it is a NPZ file, it must contain the
following keys/files:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA
units, i.e., MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is
(nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard
deviation of closure phases across days. Shape
is (nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard
deviation of closure phases across triads.
Shape is (nlst,ndays,nchan)
freqs [numpy array] Frequencies (in Hz) in the input. Size is
nchan.
infmt [string] Input file format. Accepted values are 'npz'
(default) and 'hdf5'.
------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
if not isinstance(freqs, NP.ndarray):
raise TypeError('Input freqs must be a numpy array')
freqs = freqs.ravel()
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input infmt must be "npz" or "hdf5"')
if infmt.lower() == 'npz':
infilesplit = infile.split('.npz')
infile_noext = infilesplit[0]
self.cpinfo = loadnpz(infile)
# npz2hdf5(infile, infile_noext+'.hdf5')
self.extfile = infile_noext + '.hdf5'
else:
# if not isinstance(infile, h5py.File):
# raise TypeError('Input infile is not a valid HDF5 file')
self.extfile = infile
self.cpinfo = NMO.load_dict_from_hdf5(self.extfile)
if freqs.size != self.cpinfo['raw']['cphase'].shape[-1]:
raise ValueError('Input frequencies do not match with dimensions of the closure phase data')
self.f = freqs
self.df = freqs[1] - freqs[0]
force_expicp = False
if 'processed' not in self.cpinfo:
force_expicp = True
else:
if 'native' not in self.cpinfo['processed']:
force_expicp = True
self.expicp(force_action=force_expicp)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['errinfo'] = {}
############################################################################
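# ------------------------------------------------------------------------
# Illustrative usage sketch (comments only, so the module stays
# importable). It assumes an NPZ file laid out as described in the
# __init__() docstring; the file name and array sizes below are
# hypothetical.
#
#     import numpy as NP
#     nlst, ndays, ntriads, nchan = 10, 5, 3, 64
#     NP.savez('closure_data.npz',
#              closures=NP.random.uniform(-NP.pi, NP.pi,
#                                         (nlst, ndays, ntriads, nchan)),
#              triads=NP.asarray([(0, 1, 2), (1, 2, 3), (2, 3, 4)]),
#              flags=NP.zeros((nlst, ndays, ntriads, nchan), dtype=bool),
#              last=NP.random.uniform(0.0, 0.5, (nlst, ndays)),
#              days=NP.arange(ndays, dtype=float))
#     freqs = 100e6 + 100e3 * NP.arange(nchan)
#     cpObj = ClosurePhase('closure_data.npz', freqs, infmt='npz')
# ------------------------------------------------------------------------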
def expicp(self, force_action=False):
"""
------------------------------------------------------------------------
Compute the complex exponential of the closure phase as a masked array
Inputs:
force_action [boolean] If set to False (default), the complex
exponential is computed only if it has not been done so
already. Otherwise the computation is forced.
------------------------------------------------------------------------
"""
if 'processed' not in self.cpinfo:
self.cpinfo['processed'] = {}
force_action = True
if 'native' not in self.cpinfo['processed']:
self.cpinfo['processed']['native'] = {}
force_action = True
if 'cphase' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['cphase'] = MA.array(self.cpinfo['raw']['cphase'].astype(NP.float64), mask=self.cpinfo['raw']['flags'])
force_action = True
if not force_action:
if 'eicp' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
else:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
############################################################################
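# ------------------------------------------------------------------------
# Note on expicp(): it essentially evaluates eicp = exp(1j * cphase) on the
# masked closure phases and stores unit weights wherever the data are
# unflagged. A minimal standalone sketch of the same operation (comments
# only; raw_cphase and raw_flags are hypothetical arrays):
#
#     import numpy as NP
#     import numpy.ma as MA
#     cphase = MA.array(raw_cphase, mask=raw_flags)
#     eicp = NP.exp(1j * cphase)
#     wts = MA.array((~raw_flags).astype(float), mask=raw_flags)
# ------------------------------------------------------------------------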
def smooth_in_tbins(self, daybinsize=None, ndaybins=None, lstbinsize=None):
"""
------------------------------------------------------------------------
Smooth the complex exponentials of closure phases in time bins. Both
mean and median smoothed estimates are produced.
Inputs:
daybinsize [NoneType or scalar] Day bin size (in days) over which mean
and median are estimated across different days for a fixed
LST bin. If set to None, it will look for value in input
ndaybins. If both are None, no smoothing is performed. Only
one of daybinsize or ndaybins must be set to non-None value.
ndaybins [NoneType or integer] Number of bins along day axis. Used only
if daybinsize is set to None. It produces bins that roughly
consist of equal number of days in each bin regardless of
how much the days in each bin are separated from each other.
If both are None, no smoothing is performed. Only one of
daybinsize or ndaybins must be set to non-None value.
lstbinsize [NoneType or scalar] LST bin size (in seconds) over which
mean and median are estimated across the LST. If set to
None, no smoothing is performed
------------------------------------------------------------------------
"""
if (ndaybins is not None) and (daybinsize is not None):
raise ValueError('Only one of daybinsize or ndaybins should be set')
if (daybinsize is not None) or (ndaybins is not None):
if daybinsize is not None:
if not isinstance(daybinsize, (int,float)):
raise TypeError('Input daybinsize must be a scalar')
dres = NP.diff(self.cpinfo['raw']['days']).min() # in days
dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days
if daybinsize > dres:
daybinsize = NP.clip(daybinsize, dres, dextent)
eps = 1e-10
daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize)
ndaybins = daybins.size
daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps]))
if ndaybins > 1:
daybinintervals = daybins[1:] - daybins[:-1]
daybincenters = daybins[:-1] + 0.5 * daybinintervals
else:
daybinintervals = NP.asarray(daybinsize).reshape(-1)
daybincenters = daybins[0] + 0.5 * daybinintervals
counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins)
counts = counts.astype(NP.int)
# if 'prelim' not in self.cpinfo['processed']:
# self.cpinfo['processed']['prelim'] = {}
# self.cpinfo['processed']['prelim']['eicp'] = {}
# self.cpinfo['processed']['prelim']['cphase'] = {}
# self.cpinfo['processed']['prelim']['daybins'] = daybincenters
# self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
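# Loop over day bins: ri holds the reverse indices from
# OPS.binned_statistic(), so ri[ri[binnum]:ri[binnum+1]] lists the day
# indices falling in this bin. The mean/median of the complex exponentials
# are renormalized to unit amplitude (circular mean/median of the phases),
# while rms and MAD are computed on the closure phases directly.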
for binnum in xrange(counts.size):
ind_daybin = ri[ri[binnum]:ri[binnum+1]]
wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1)
eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1)))
eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1)))
cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data
cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data
# mask = wts_daybins <= 0.0
# self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
else:
if not isinstance(ndaybins, int):
raise TypeError('Input ndaybins must be an integer')
if ndaybins <= 0:
raise ValueError('Input ndaybins must be positive')
days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins)
daybincenters = NP.asarray([NP.mean(days) for days in days_split])
daybinintervals = NP.asarray([days.max()-days.min() for days in days_split])
counts = NP.asarray([days.size for days in days_split])
wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1)
# mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1)
wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan
wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan
mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1)
eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1)
eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean))
eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan
eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian))
eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1)
cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['eicp'] = {}
self.cpinfo['processed']['prelim']['cphase'] = {}
self.cpinfo['processed']['prelim']['daybins'] = daybincenters
self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
mask = wts_daybins <= 0.0
self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities
if NP.any(rawlst > 24.0):
rawlst -= 24.0
if rawlst.shape[0] > 1: # LST bin only if there are multiple LST
if lstbinsize is not None:
if not isinstance(lstbinsize, (int,float)):
raise TypeError('Input lstbinsize must be a scalar')
lstbinsize = lstbinsize / 3.6e3 # in hours
tres = NP.diff(rawlst[:,0]).min() # in hours
textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours
eps = 1e-10
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
no_change_in_lstbins = False
if lstbinsize > tres:
lstbinsize = NP.clip(lstbinsize, tres, textent)
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize)
nlstbins = lstbins.size
lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps]))
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
lstbincenters = lstbins[0] + 0.5 * lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
no_change_in_lstbins = False
else:
# Perform no binning and keep the current LST resolution, data and weights
warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
lstbinsize = tres
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
nlstbins = lstbins.size - 1
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbins[:-1]
# Ensure that the LST bins are inside the min/max envelope to
# error-free interpolation later
self.cpinfo['processed']['prelim']['lstbins'][0] += eps
self.cpinfo['processed']['prelim']['lstbins'][-1] -= eps
no_change_in_lstbins = True
counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins)
counts = counts.astype(NP.int)
if 'wts' not in self.cpinfo['processed']['prelim']:
outshape = (counts.size, self.cpinfo['processed']['native']['eicp'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
else:
outshape = (counts.size, self.cpinfo['processed']['prelim']['wts'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
wts_lstbins = NP.zeros(outshape)
eicp_tmean = NP.zeros(outshape, dtype=NP.complex128)
eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128)
cp_trms = NP.zeros(outshape)
cp_tmad = NP.zeros(outshape)
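# Loop over LST bins: when no rebinning is needed each bin maps to a single
# LST sample; otherwise the reverse indices select all samples in the bin.
# Depending on whether day-binned ('prelim') products already exist, the
# circular statistics are computed either from the native complex
# exponentials or from the previously day-averaged closure phases.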
for binnum in xrange(counts.size):
if no_change_in_lstbins:
ind_lstbin = [binnum]
else:
ind_lstbin = ri[ri[binnum]:ri[binnum+1]]
if 'wts' not in self.cpinfo['processed']['prelim']:
indict = self.cpinfo['processed']['native']
else:
indict = self.cpinfo['processed']['prelim']
wts_lstbins[binnum,:,:,:] = NP.sum(indict['wts'][ind_lstbin,:,:,:].data, axis=0)
if 'wts' not in self.cpinfo['processed']['prelim']:
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(indict['eicp'][ind_lstbin,:,:,:], axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(indict['eicp'][ind_lstbin,:,:,:].real, axis=0) + 1j * MA.median(indict['eicp'][ind_lstbin,:,:,:].imag, axis=0)))
cp_trms[binnum,:,:,:] = MA.std(indict['cphase'][ind_lstbin,:,:,:], axis=0).data
cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data
else:
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*indict['cphase']['mean'][ind_lstbin,:,:,:]), axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0)))
cp_trms[binnum,:,:,:] = MA.std(indict['cphase']['mean'][ind_lstbin,:,:,:], axis=0).data
cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase']['median'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data
mask = wts_lstbins <= 0.0
self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_lstbins, mask=mask)
if 'eicp' not in self.cpinfo['processed']['prelim']:
self.cpinfo['processed']['prelim']['eicp'] = {}
if 'cphase' not in self.cpinfo['processed']['prelim']:
self.cpinfo['processed']['prelim']['cphase'] = {}
self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_tmean, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_tmedian, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_tmean), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_tmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_trms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_tmad, mask=mask)
# else:
# # Perform no binning and keep the current LST resolution, data and weights
# warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
# lstbinsize = tres
# lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
# nlstbins = lstbins.size - 1
# if nlstbins > 1:
# lstbinintervals = lstbins[1:] - lstbins[:-1]
# lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
# else:
# lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
# lstbincenters = lstbins[0] + 0.5 * lstbinintervals
# if 'prelim' not in self.cpinfo['processed']:
# self.cpinfo['processed']['prelim'] = {}
# self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters
# self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
if (rawlst.shape[0] <= 1) or (lstbinsize is None):
nlstbins = rawlst.shape[0]
lstbins = NP.mean(rawlst, axis=1)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['lstbins'] = lstbins
if lstbinsize is not None:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1)
else:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.zeros(1)
############################################################################
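# ------------------------------------------------------------------------
# Illustrative usage sketch (comments only; values are hypothetical):
# smooth the closure phases into 2 day bins and 10-minute LST bins, then
# read off the binned products from cpinfo['processed']['prelim'].
#
#     cpObj.smooth_in_tbins(ndaybins=2, lstbinsize=600.0)
#     cp_mean = cpObj.cpinfo['processed']['prelim']['cphase']['mean']
#     cp_mad = cpObj.cpinfo['processed']['prelim']['cphase']['mad']
# ------------------------------------------------------------------------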
def subtract(self, cphase):
"""
------------------------------------------------------------------------
Subtract complex exponential of the bispectrum phase from the current
instance and updates the cpinfo attribute
Inputs:
cphase [masked array] Bispectrum phase array as a masked array. It
must be broadcastable to the shape of the processed closure
phases, with its last axis matching the size of freqs.
Action: Updates 'submodel' and 'residual' keys under attribute
cpinfo under key 'processed'
------------------------------------------------------------------------
"""
if not isinstance(cphase, NP.ndarray):
raise TypeError('Input cphase must be a numpy array')
if not isinstance(cphase, MA.MaskedArray):
cphase = MA.array(cphase, mask=NP.isnan(cphase))
if not OPS.is_broadcastable(cphase.shape, self.cpinfo['processed']['prelim']['cphase']['median'].shape):
raise ValueError('Input cphase has shape incompatible with that in instance attribute')
else:
minshape = tuple(NP.ones(self.cpinfo['processed']['prelim']['cphase']['median'].ndim - cphase.ndim, dtype=NP.int)) + cphase.shape
cphase = cphase.reshape(minshape)
# cphase = NP.broadcast_to(cphase, minshape)
eicp = NP.exp(1j*cphase)
self.cpinfo['processed']['submodel'] = {}
self.cpinfo['processed']['submodel']['cphase'] = cphase
self.cpinfo['processed']['submodel']['eicp'] = eicp
self.cpinfo['processed']['residual'] = {'eicp': {}, 'cphase': {}}
for key in ['mean', 'median']:
eicpdiff = self.cpinfo['processed']['prelim']['eicp'][key] - eicp
eicpratio = self.cpinfo['processed']['prelim']['eicp'][key] / eicp
self.cpinfo['processed']['residual']['eicp'][key] = eicpdiff
self.cpinfo['processed']['residual']['cphase'][key] = MA.array(NP.angle(eicpratio.data), mask=self.cpinfo['processed']['residual']['eicp'][key].mask)
############################################################################
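# ------------------------------------------------------------------------
# Illustrative usage sketch (comments only): subtract a model bispectrum
# phase whose trailing axes are broadcastable to the 'prelim' closure
# phases. The model array below is hypothetical.
#
#     model_cphase = NP.zeros((ntriads, nchan))   # e.g. a flat-phase model
#     cpObj.subtract(model_cphase)
#     resid_mean = cpObj.cpinfo['processed']['residual']['cphase']['mean']
# ------------------------------------------------------------------------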
def subsample_differencing(self, daybinsize=None, ndaybins=4, lstbinsize=None):
"""
------------------------------------------------------------------------
Create subsamples and differences between subsamples to evaluate noise
properties from the data set.
Inputs:
daybinsize [NoneType or scalar] Day bin size (in days) over which mean
and median are estimated across different days for a fixed
LST bin. If set to None, it will look for value in input
ndaybins. If both are None, no smoothing is performed. Only
one of daybinsize or ndaybins must be set to non-None value.
Must yield greater than or equal to 4 bins
ndaybins [NoneType or integer] Number of bins along day axis. Used only
if daybinsize is set to None. It produces bins that roughly
consist of equal number of days in each bin regardless of
how much the days in each bin are separated from each other.
If both are None, no smoothing is performed. Only one of
daybinsize or ndaybins must be set to non-None value. If set,
it must be set to greater than or equal to 4
lstbinsize [NoneType or scalar] LST bin size (in seconds) over which
mean and median are estimated across the LST. If set to
None, no smoothing is performed
------------------------------------------------------------------------
"""
if (ndaybins is not None) and (daybinsize is not None):
raise ValueError('Only one of daybinsize or ndaybins should be set')
if (daybinsize is not None) or (ndaybins is not None):
if daybinsize is not None:
if not isinstance(daybinsize, (int,float)):
raise TypeError('Input daybinsize must be a scalar')
dres = NP.diff(self.cpinfo['raw']['days']).min() # in days
dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days
if daybinsize > dres:
daybinsize = NP.clip(daybinsize, dres, dextent)
eps = 1e-10
daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize)
ndaybins = daybins.size
daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps]))
if ndaybins >= 4:
daybinintervals = daybins[1:] - daybins[:-1]
daybincenters = daybins[:-1] + 0.5 * daybinintervals
else:
raise ValueError('Could not find at least 4 bins along repeating days. Adjust binning interval.')
counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins)
counts = counts.astype(NP.int)
wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
for binnum in xrange(counts.size):
ind_daybin = ri[ri[binnum]:ri[binnum+1]]
wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1)
eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1)))
eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1)))
cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data
cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data
else:
if not isinstance(ndaybins, int):
raise TypeError('Input ndaybins must be an integer')
if ndaybins < 4:
raise ValueError('Input ndaybins must be greater than or equal to 4')
days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins)
daybincenters = NP.asarray([NP.mean(days) for days in days_split])
daybinintervals = NP.asarray([days.max()-days.min() for days in days_split])
counts = NP.asarray([days.size for days in days_split])
wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1)
# mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1)
wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan
wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan
mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1)
eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1)
eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean))
eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan
eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian))
eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1)
cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan
mask = wts_daybins <= 0.0
wts_daybins = MA.array(wts_daybins, mask=mask)
cp_dmean = MA.array(NP.angle(eicp_dmean), mask=mask)
cp_dmedian = MA.array(NP.angle(eicp_dmedian), mask=mask)
self.cpinfo['errinfo']['daybins'] = daybincenters
self.cpinfo['errinfo']['diff_dbins'] = daybinintervals
self.cpinfo['errinfo']['wts'] = {'{0}'.format(ind): None for ind in range(2)}
self.cpinfo['errinfo']['eicp_diff'] = {'{0}'.format(ind): {} for ind in range(2)}
rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities
if NP.any(rawlst > 24.0):
rawlst -= 24.0
if rawlst.shape[0] > 1: # LST bin only if there are multiple LST
if lstbinsize is not None:
if not isinstance(lstbinsize, (int,float)):
raise TypeError('Input lstbinsize must be a scalar')
lstbinsize = lstbinsize / 3.6e3 # in hours
tres = NP.diff(rawlst[:,0]).min() # in hours
textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours
eps = 1e-10
no_change_in_lstbins = False
if lstbinsize > tres:
lstbinsize = NP.clip(lstbinsize, tres, textent)
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize)
nlstbins = lstbins.size
lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps]))
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
lstbincenters = lstbins[0] + 0.5 * lstbinintervals
self.cpinfo['errinfo']['lstbins'] = lstbincenters
self.cpinfo['errinfo']['dlstbins'] = lstbinintervals
no_change_in_lstbins = False
else:
# Perform no binning and keep the current LST resolution
warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
lstbinsize = tres
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
nlstbins = lstbins.size - 1
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
self.cpinfo['errinfo']['dlstbins'] = lstbinintervals
self.cpinfo['errinfo']['lstbins'] = lstbins[:-1]
# Ensure that the LST bins are inside the min/max envelope to
# error-free interpolation later
self.cpinfo['errinfo']['lstbins'][0] += eps
self.cpinfo['errinfo']['lstbins'][-1] -= eps
no_change_in_lstbins = True
counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins)
counts = counts.astype(NP.int)
outshape = (counts.size, wts_daybins.shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
wts_lstbins = NP.zeros(outshape)
eicp_tmean = NP.zeros(outshape, dtype=NP.complex128)
eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128)
cp_trms = NP.zeros(outshape)
cp_tmad = NP.zeros(outshape)
for binnum in xrange(counts.size):
if no_change_in_lstbins:
ind_lstbin = [binnum]
else:
ind_lstbin = ri[ri[binnum]:ri[binnum+1]]
wts_lstbins[binnum,:,:,:] = NP.sum(wts_daybins[ind_lstbin,:,:,:].data, axis=0)
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*cp_dmean[ind_lstbin,:,:,:]), axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(cp_dmedian[ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(cp_dmedian[ind_lstbin,:,:,:]), axis=0)))
mask = wts_lstbins <= 0.0
wts_lstbins = MA.array(wts_lstbins, mask=mask)
eicp_tmean = MA.array(eicp_tmean, mask=mask)
eicp_tmedian = MA.array(eicp_tmedian, mask=mask)
else:
wts_lstbins = MA.copy(wts_daybins)
mask = wts_lstbins.mask
eicp_tmean = MA.array(NP.exp(1j*NP.angle(NP.exp(1j*cp_dmean))), mask=mask)
eicp_tmedian = MA.array(NP.exp(1j*NP.angle(NP.cos(cp_dmedian) + 1j * NP.sin(cp_dmedian))), mask=mask)
if (rawlst.shape[0] <= 1) or (lstbinsize is None):
nlstbins = rawlst.shape[0]
lstbins = NP.mean(rawlst, axis=1)
self.cpinfo['errinfo']['lstbins'] = lstbins
if lstbinsize is not None:
self.cpinfo['errinfo']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1)
else:
self.cpinfo['errinfo']['dlstbins'] = NP.zeros(1)
ncomb = NP.sum(NP.asarray([(ndaybins-i-1)*(ndaybins-i-2)*(ndaybins-i-3)/2 for i in range(ndaybins-3)])).astype(int)
diff_outshape = (nlstbins, ncomb, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
for diffind in range(2):
self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['mean'] = MA.empty(diff_outshape, dtype=NP.complex)
self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['median'] = MA.empty(diff_outshape, dtype=NP.complex)
self.cpinfo['errinfo']['wts']['{0}'.format(diffind)] = MA.empty(diff_outshape, dtype=NP.float)
ind = -1
self.cpinfo['errinfo']['list_of_pair_of_pairs'] = []
list_of_pair_of_pairs = []
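# Enumerate all unordered pairs of disjoint day-bin pairs {i,j} and {k,m}.
# Duplicates (the same pair of pairs in reverse order) are skipped, so the
# total number of combinations equals ncomb computed above, e.g. 3 for
# ndaybins=4 and 15 for ndaybins=5.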
for i in range(ndaybins-1):
for j in range(i+1,ndaybins):
for k in range(ndaybins-1):
if (k != i) and (k != j):
for m in range(k+1,ndaybins):
if (m != i) and (m != j):
pair_of_pairs = [set([i,j]), set([k,m])]
if (pair_of_pairs not in list_of_pair_of_pairs) and (pair_of_pairs[::-1] not in list_of_pair_of_pairs):
ind += 1
list_of_pair_of_pairs += [copy.deepcopy(pair_of_pairs)]
self.cpinfo['errinfo']['list_of_pair_of_pairs'] += [[i,j,k,m]]
for stat in ['mean', 'median']:
if stat == 'mean':
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,j,:,:].data - eicp_tmean[:,i,:,:].data), mask=NP.logical_or(eicp_tmean[:,j,:,:].mask, eicp_tmean[:,i,:,:].mask))
self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,m,:,:].data - eicp_tmean[:,k,:,:].data), mask=NP.logical_or(eicp_tmean[:,m,:,:].mask, eicp_tmean[:,k,:,:].mask))
self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,j,:,:].data**2 + wts_lstbins[:,i,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,j,:,:].mask, wts_lstbins[:,i,:,:].mask))
self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,m,:,:].data**2 + wts_lstbins[:,k,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,m,:,:].mask, wts_lstbins[:,k,:,:].mask))
# self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,j,:,:] - eicp_tmean[:,i,:,:])
# self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,m,:,:] - eicp_tmean[:,k,:,:])
# self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,j,:,:]**2 + wts_lstbins[:,i,:,:]**2)
# self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,m,:,:]**2 + wts_lstbins[:,k,:,:]**2)
else:
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,j,:,:].data - eicp_tmedian[:,i,:,:].data), mask=NP.logical_or(eicp_tmedian[:,j,:,:].mask, eicp_tmedian[:,i,:,:].mask))
self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,m,:,:].data - eicp_tmedian[:,k,:,:].data), mask=NP.logical_or(eicp_tmedian[:,m,:,:].mask, eicp_tmedian[:,k,:,:].mask))
# self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,j,:,:] - eicp_tmedian[:,i,:,:])
# self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,m,:,:] - eicp_tmedian[:,k,:,:])
mask0 = self.cpinfo['errinfo']['wts']['0'] <= 0.0
mask1 = self.cpinfo['errinfo']['wts']['1'] <= 0.0
self.cpinfo['errinfo']['eicp_diff']['0'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['0'][stat], mask=mask0)
self.cpinfo['errinfo']['eicp_diff']['1'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['1'][stat], mask=mask1)
self.cpinfo['errinfo']['wts']['0'] = MA.array(self.cpinfo['errinfo']['wts']['0'], mask=mask0)
self.cpinfo['errinfo']['wts']['1'] = MA.array(self.cpinfo['errinfo']['wts']['1'], mask=mask1)
############################################################################
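# ------------------------------------------------------------------------
# Illustrative usage sketch (comments only): form subsample differences
# from 4 day bins. With ndaybins=4 there are ncomb=3 unordered pairs of
# disjoint day-bin pairs, so the differenced products have 3 entries along
# the combination axis under each difference key '0'/'1'.
#
#     cpObj.subsample_differencing(ndaybins=4, lstbinsize=600.0)
#     eicp_diff0 = cpObj.cpinfo['errinfo']['eicp_diff']['0']['median']
# ------------------------------------------------------------------------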
def save(self, outfile=None):
"""
------------------------------------------------------------------------
Save contents of attribute cpinfo in external HDF5 file
Inputs:
outfile [NoneType or string] Output file (HDF5) to save contents to.
If set to None (default), it will be saved in the file
pointed to by the extfile attribute of class ClosurePhase
------------------------------------------------------------------------
"""
if outfile is None:
outfile = self.extfile
NMO.save_dict_to_hdf5(self.cpinfo, outfile, compressinfo={'compress_fmt': 'gzip', 'compress_opts': 9})
################################################################################
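# ------------------------------------------------------------------------
# Illustrative usage sketch (comments only): persist the accumulated
# cpinfo to HDF5, either to the default extfile or to an explicit path
# (the path below is hypothetical).
#
#     cpObj.save()                                  # writes to cpObj.extfile
#     cpObj.save(outfile='closure_data_processed.hdf5')
# ------------------------------------------------------------------------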
class ClosurePhaseDelaySpectrum(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase information.
It has the following attributes and member functions.
Attributes:
cPhase [instance of class ClosurePhase] Instance of class
ClosurePhase
f [numpy array] Frequencies (in Hz) in closure phase spectra
df [float] Frequency resolution (in Hz) in closure phase
spectra
cPhaseDS [dictionary] Possibly oversampled Closure Phase Delay
Spectrum information.
cPhaseDS_resampled
[dictionary] Resampled Closure Phase Delay Spectrum
information.
Member functions:
__init__() Initialize instance of class ClosurePhaseDelaySpectrum
FT() Fourier transform of complex closure phase spectra mapping
from frequency axis to delay axis.
subset() Return triad and time indices to select a subset of
processed data
compute_power_spectrum()
Compute power spectrum of closure phase data. It is in units
of Mpc/h.
rescale_power_spectrum()
Rescale power spectrum to dimensional quantity by converting
the ratio given visibility amplitude information
average_rescaled_power_spectrum()
Average the rescaled power spectrum with physical units
along certain axes with inverse variance or regular
averaging
beam3Dvol() Compute three-dimensional volume of the antenna power
pattern along two transverse axes and one LOS axis.
----------------------------------------------------------------------------
"""
def __init__(self, cPhase):
"""
------------------------------------------------------------------------
Initialize instance of class ClosurePhaseDelaySpectrum
Inputs:
cPhase [class ClosurePhase] Instance of class ClosurePhase
------------------------------------------------------------------------
"""
if not isinstance(cPhase, ClosurePhase):
raise TypeError('Input cPhase must be an instance of class ClosurePhase')
self.cPhase = cPhase
self.f = self.cPhase.f
self.df = self.cPhase.df
self.cPhaseDS = None
self.cPhaseDS_resampled = None
############################################################################
def FT(self, bw_eff, freq_center=None, shape=None, fftpow=None, pad=None,
datapool='prelim', visscaleinfo=None, method='fft', resample=True,
apply_flags=True):
"""
------------------------------------------------------------------------
Fourier transform of complex closure phase spectra mapping from
frequency axis to delay axis.
Inputs:
bw_eff [scalar or numpy array] effective bandwidths (in Hz) on the
selected frequency windows for subband delay transform of
closure phases. If a scalar value is provided, the same
will be applied to all frequency windows
freq_center [scalar, list or numpy array] frequency centers (in Hz) of
the selected frequency windows for subband delay transform
of closure phases. The value can be a scalar, list or numpy
array. If a scalar is provided, the same will be applied to
all frequency windows. Default=None uses the center
frequency of the band stored in the class attribute f
shape [string] frequency window shape for subband delay transform
of closure phases. Accepted values for the string are
'rect' or 'RECT' (for rectangular), 'bnw' and 'BNW' (for
Blackman-Nuttall), and 'bhw' or 'BHW' (for
Blackman-Harris). Default=None sets it to 'rect'
(rectangular window)
fftpow [scalar] the power to which the FFT of the window will be
raised. The value must be a positive scalar. Default = 1.0
pad [scalar] padding fraction relative to the number of
frequency channels for closure phases. Value must be a
non-negative scalar. For e.g., a pad of 1.0 pads the
frequency axis with zeros of the same width as the number
of channels. After the delay transform, the transformed
closure phases are downsampled by a factor of 1+pad. If a
negative value is specified, delay transform will be
performed with no padding. Default=None sets the padding
factor to 1.0
datapool [string] Specifies which data set is to be Fourier
transformed
visscaleinfo
[dictionary] Dictionary containing reference visibilities
based on which the closure phases will be scaled to units
of visibilities. It contains the following keys and values:
'vis' [numpy array or instance of class
InterferometerArray] Reference visibilities from the
baselines that form the triad. It can be an instance
of class RI.InterferometerArray or a numpy array.
If an instance of class InterferometerArray, the
baseline triplet must be set in key 'bltriplet'
and value in key 'lst' will be ignored. If the
value under this key 'vis' is set to a numpy array,
it must be of shape (nbl=3, nlst_vis, nchan). In
this case the value under key 'bltriplet' will be
ignored. The nearest LST will be looked up and
applied after smoothing along LST based on the
smoothing parameter 'smooth'
'bltriplet'
[Numpy array] Will be used in searching for matches
to these three baseline vectors if the value under
key 'vis' is set to an instance of class
InterferometerArray. However, if value under key
'vis' is a numpy array, this key 'bltriplet' will
be ignored.
'lst' [numpy array] Reference LST (in hours). It is of
shape (nlst_vis,). It will be used only if value
under key 'vis' is a numpy array, otherwise it will
be ignored and read from the instance of class
InterferometerArray passed under key 'vis'. If the
specified LST range does not cover the data LST
range, those LST will contain NaN in the delay
spectrum
'smoothinfo'
[dictionary] Dictionary specifying smoothing and/or
interpolation parameters. It has the following keys
and values:
'op_type' [string] Specifies the interpolating
operation. Must be specified (no
default). Accepted values are
'interp1d' (scipy.interpolate),
'median' (skimage.filters), 'tophat'
(astropy.convolution) and 'gaussian'
(astropy.convolution)
'interp_kind' [string (optional)] Specifies the
interpolation kind (if 'op_type' is
set to 'interp1d'). For accepted
values, see
scipy.interpolate.interp1d()
'window_size' [integer (optional)] Specifies the
size of the interpolating/smoothing
kernel. Only applies when 'op_type'
is set to 'median', 'tophat' or
'gaussian' The kernel is a tophat
function when 'op_type' is set to
'median' or 'tophat'. It refers to the
FWHM when 'op_type' is set to
'gaussian'
resample [boolean] If set to True (default), resample the delay
spectrum axis to independent samples along delay axis. If
set to False, return the results as is even if they may be
oversampled and not all samples may be independent
method [string] Specifies the Fourier transform method to be used.
Accepted values are 'fft' (default) for FFT and 'nufft' for
non-uniform FFT
apply_flags [boolean] If set to True (default), weights determined from
flags will be applied. If False, no weights from flagging
will be applied, and thus even flagged data will be included
Outputs:
A dictionary that contains the oversampled (if resample=False) or
resampled (if resample=True) delay spectrum information. It has the
following keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is roughly
equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed. It
is of size n_win. It is roughly equivalent to width
in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window was
raised. The value is a positive scalar with
default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform. It
is of size nlags=nchan+npad if resample=False, where
npad is the number of frequency channels padded as
specified under the key 'npad'. If resample=True,
nlags equals the number of independent delays retained
after resampling. The lags roughly correspond to
k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_win x nlst x ndays x ntriads x nlags.
nlags=nchan+npad if resample=False, where npad is the
number of frequency channels padded as specified under
the key 'npad'. If resample=True, nlags equals the number
of independent delays retained after resampling.
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth. It
is of size n_win. The unit size of a pixel is
determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth of
the subband specified in bw_eff
'whole' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'prelim' key of attribute cpinfo.
Contains the following keys and values:
'dspec' [dictionary] Contains the following keys and
values:
'twts' [numpy array] Weights from time-based
flags that went into time-averaging.
Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure
phases based on their mean across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'median'
[numpy array] Delay spectrum of closure
phases based on their median across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'submodel' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'submodel' key of attribute cpinfo.
Contains the following keys and values:
'dspec' [numpy array] Delay spectrum of closure phases
Shape=(nspw,nlst,ndays,ntriads,nlags)
'residual' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'residual' key of attribute cpinfo
after subtracting 'submodel' bispectrum phase from that
of 'prelim'. It contains the following keys and values:
'dspec' [dictionary] Contains the following keys and
values:
'twts' [numpy array] Weights from time-based
flags that went into time-averaging.
Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure
phases based on their mean across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'median'
[numpy array] Delay spectrum of closure
phases based on their median across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'errinfo' [dictionary] It has two keys 'dspec0' and 'dspec1' each
of which are dictionaries with the following keys and
values:
'twts' [numpy array] Weights for the subsample
difference. It is of shape (nlst, ndays,
ntriads, nchan)
'mean' [numpy array] Delay spectrum of the
subsample difference obtained by using the
mean statistic. It is of shape (nspw, nlst,
ndays, ntriads, nlags)
'median'
[numpy array] Delay spectrum of the subsample
difference obtained by using the median
statistic. It is of shape (nspw, nlst, ndays,
ntriads, nlags)
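Example:
An illustrative call (a sketch only; cpDSobj is a placeholder for an
instance of this class, and the subband parameters below are arbitrary):
    import numpy as NP
    bw_eff = 10e6 * NP.ones(2)                    # two 10 MHz subbands
    freq_center = NP.asarray([155e6, 165e6])      # subband centers in Hz
    ds = cpDSobj.FT(bw_eff, freq_center=freq_center, shape='bhw', fftpow=2.0,
                    pad=1.0, datapool='prelim', method='fft', resample=True,
                    apply_flags=True)
    lags = ds['lags']                             # delay axis in seconds
    dspec_mean = ds['whole']['dspec']['mean']     # (nspw,nlst,ndays,ntriads,nlags)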
------------------------------------------------------------------------
"""
try:
bw_eff
except NameError:
raise NameError('Effective bandwidth must be specified')
else:
if not isinstance(bw_eff, (int, float, list, NP.ndarray)):
raise TypeError('Value of effective bandwidth must be a scalar, list or numpy array')
bw_eff = NP.asarray(bw_eff).reshape(-1)
if NP.any(bw_eff <= 0.0):
raise ValueError('All values in effective bandwidth must be strictly positive')
if freq_center is None:
freq_center = NP.asarray(self.f[self.f.size//2]).reshape(-1) # default to the center channel of the band
elif isinstance(freq_center, (int, float, list, NP.ndarray)):
freq_center = NP.asarray(freq_center).reshape(-1)
if NP.any((freq_center <= self.f.min()) | (freq_center >= self.f.max())):
raise ValueError('Value(s) of frequency center(s) must lie strictly inside the observing band')
else:
raise TypeError('Value(s) of frequency center must be scalar, list or numpy array')
if (bw_eff.size == 1) and (freq_center.size > 1):
bw_eff = NP.repeat(bw_eff, freq_center.size)
elif (bw_eff.size > 1) and (freq_center.size == 1):
freq_center = NP.repeat(freq_center, bw_eff.size)
elif bw_eff.size != freq_center.size:
raise ValueError('Effective bandwidth(s) and frequency center(s) must have same number of elements')
if shape is not None:
if not isinstance(shape, str):
raise TypeError('Window shape must be a string')
if shape not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']:
raise ValueError('Invalid value for window shape specified.')
else:
shape = 'rect'
if fftpow is None:
fftpow = 1.0
else:
if not isinstance(fftpow, (int, float)):
raise TypeError('Power to raise window FFT by must be a scalar value.')
if fftpow < 0.0:
raise ValueError('Power for raising FFT of window by must be positive.')
if pad is None:
pad = 1.0
else:
if not isinstance(pad, (int, float)):
raise TypeError('pad fraction must be a scalar value.')
if pad < 0.0:
pad = 0.0
if verbose:
print('\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).')
if not isinstance(datapool, str):
raise TypeError('Input datapool must be a string')
if datapool.lower() not in ['prelim']:
raise ValueError('Specified datapool not supported')
if visscaleinfo is not None:
if not isinstance(visscaleinfo, dict):
raise TypeError('Input visscaleinfo must be a dictionary')
if 'vis' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "vis"')
if not isinstance(visscaleinfo['vis'], RI.InterferometerArray):
if 'lst' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "lst"')
lst_vis = visscaleinfo['lst'] * 15.0
if not isinstance(visscaleinfo['vis'], (NP.ndarray,MA.MaskedArray)):
raise TypeError('Input visibilities must be a numpy or a masked array')
if not isinstance(visscaleinfo['vis'], MA.MaskedArray):
visscaleinfo['vis'] = MA.array(visscaleinfo['vis'], mask=NP.isnan(visscaleinfo['vis']))
vistriad = MA.copy(visscaleinfo['vis'])
else:
if 'bltriplet' not in visscaleinfo:
raise KeyError('Input dictionary visscaleinfo does not contain key "bltriplet"')
blind, blrefind, dbl = LKP.find_1NN(visscaleinfo['vis'].baselines, visscaleinfo['bltriplet'], distance_ULIM=0.2, remove_oob=True)
if blrefind.size != 3:
blind_missing = NP.setdiff1d(NP.arange(3), blind, assume_unique=True)
blind_next, blrefind_next, dbl_next = LKP.find_1NN(visscaleinfo['vis'].baselines, -1*visscaleinfo['bltriplet'][blind_missing,:], distance_ULIM=0.2, remove_oob=True)
if blind_next.size + blind.size != 3:
raise ValueError('Exactly three baselines were not found in the reference baselines')
else:
blind = NP.append(blind, blind_missing[blind_next])
blrefind = NP.append(blrefind, blrefind_next)
else:
blind_missing = []
vistriad = NP.transpose(visscaleinfo['vis'].skyvis_freq[blrefind,:,:], (0,2,1))
if len(blind_missing) > 0:
vistriad[-blrefind_next.size:,:,:] = vistriad[-blrefind_next.size:,:,:].conj()
vistriad = MA.array(vistriad, mask=NP.isnan(vistriad))
lst_vis = visscaleinfo['vis'].lst
viswts = MA.array(NP.ones_like(vistriad.data), mask=vistriad.mask, dtype=NP.float)
lst_out = self.cPhase.cpinfo['processed']['prelim']['lstbins'] * 15.0
if lst_vis.size == 1: # Apply the visibility scaling from one reference LST to all LST
vis_ref = vistriad * NP.ones(lst_out.size).reshape(1,-1,1)
wts_ref = viswts * NP.ones(lst_out.size).reshape(1,-1,1)
else:
vis_ref, wts_ref = OPS.interpolate_masked_array_1D(vistriad, viswts, 1, visscaleinfo['smoothinfo'], inploc=lst_vis, outloc=lst_out)
if not isinstance(method, str):
raise TypeError('Input method must be a string')
if method.lower() not in ['fft', 'nufft']:
raise ValueError('Specified FFT method not supported')
if not isinstance(apply_flags, bool):
raise TypeError('Input apply_flags must be boolean')
flagwts = 1.0
visscale = 1.0
if datapool.lower() == 'prelim':
if method.lower() == 'fft':
freq_wts = NP.empty((bw_eff.size, self.f.size), dtype=NP.float_) # nspw x nchan
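# Construct one spectral weighting window per requested subband. The
# fractional width of the chosen window shape (raised to fftpow) sets a
# window loss factor, whose product with bw_eff/df gives the number of
# channels (n_window) each window must span to deliver the requested
# effective bandwidth.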
frac_width = DSP.window_N2width(n_window=None, shape=shape, fftpow=fftpow, area_normalize=False, power_normalize=True)
window_loss_factor = 1 / frac_width
n_window = NP.round(window_loss_factor * bw_eff / self.df).astype(NP.int)
ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.f.reshape(-1,1), freq_center.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sortind = NP.argsort(ind_channels)
ind_freq_center = ind_freq_center[sortind]
ind_channels = ind_channels[sortind]
dfrequency = dfrequency[sortind]
n_window = n_window[sortind]
for i,ind_chan in enumerate(ind_channels):
window = NP.sqrt(frac_width * n_window[i]) * DSP.window_fftpow(n_window[i], shape=shape, fftpow=fftpow, centering=True, peak=None, area_normalize=False, power_normalize=True)
window_chans = self.f[ind_chan] + self.df * (NP.arange(n_window[i]) - int(n_window[i]/2))
ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.f.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sind = NP.argsort(ind_window_chans)
ind_window_chans = ind_window_chans[sind]
ind_chans = ind_chans[sind]
dfreq = dfreq[sind]
window = window[ind_window_chans]
window = NP.pad(window, ((ind_chans.min(), self.f.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0)))
freq_wts[i,:] = window
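# Zero-pad the band by a fraction 'pad' of the number of channels and
# set up the delay (lag) axis of the padded spectrum; these lags map
# roughly to k_parallel in the power spectrum stage.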
npad = int(self.f.size * pad)
lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=True)
result = {'freq_center': freq_center, 'shape': shape, 'freq_wts': freq_wts, 'bw_eff': bw_eff, 'fftpow': fftpow, 'npad': npad, 'lags': lags, 'lag_corr_length': self.f.size / NP.sum(freq_wts, axis=-1), 'whole': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'residual': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'errinfo': {'dspec0': {'twts': self.cPhase.cpinfo['errinfo']['wts']['0']}, 'dspec1': {'twts': self.cPhase.cpinfo['errinfo']['wts']['1']}}, 'submodel': {}}
if visscaleinfo is not None:
visscale = NP.nansum(NP.transpose(vis_ref[NP.newaxis,NP.newaxis,:,:,:], axes=(0,3,1,2,4)) * freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) / NP.nansum(freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x (ndays=1) x (nbl=3) x (nchan=1)
visscale = NP.sqrt(1.0/NP.nansum(1/NP.abs(visscale)**2, axis=-2, keepdims=True)) # nspw x nlst x (ndays=1) x (ntriads=1) x (nchan=1)
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in range(2):
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['errinfo']['wts']['{0}'.format(diffind)].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
for stat in self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)]:
eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].filled(0.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndayscomb x ntriads x nchan --> (nspw=1) x nlst x ndayscomb x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec{0}'.format(diffind)][stat] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
if dpool in self.cPhase.cpinfo['processed']:
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['processed'][datapool]['wts'].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
if dpool == 'submodel':
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].filled(1.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo['processed'][datapool]['eicp']['mean'].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec'] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
for key in self.cPhase.cpinfo['processed'][dpool]['eicp']:
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].data)
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].filled(1.0))
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
if dpool == 'prelim':
result['whole']['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
result[dpool]['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
result['lag_kernel'] = DSP.FT1D(NP.pad(flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
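# The 'lag_kernel' computed above is the delay transform of the
# flag-weighted spectral windows alone (no data); it characterizes the
# delay-domain response and hence the correlation length of each subband.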
self.cPhaseDS = result
if resample:
result_resampled = copy.deepcopy(result)
downsample_factor = NP.min((self.f.size + npad) * self.df / bw_eff)
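# The zero-padded band oversamples the delay axis; downsampling by the
# ratio of the padded bandwidth to the effective subband bandwidth
# leaves approximately independent delay samples.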
result_resampled['lags'] = DSP.downsampler(result_resampled['lags'], downsample_factor, axis=-1, method='interp', kind='linear')
result_resampled['lag_kernel'] = DSP.downsampler(result_resampled['lag_kernel'], downsample_factor, axis=-1, method='interp', kind='linear')
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in self.cPhase.cpinfo[dpool]['eicp_diff']:
for key in self.cPhase.cpinfo[dpool]['eicp_diff'][diffind]:
result_resampled[dpool]['dspec'+diffind][key] = DSP.downsampler(result_resampled[dpool]['dspec'+diffind][key], downsample_factor, axis=-1, method='FFT')
if dpool in self.cPhase.cpinfo['processed']:
if dpool == 'submodel':
result_resampled[dpool]['dspec'] = DSP.downsampler(result_resampled[dpool]['dspec'], downsample_factor, axis=-1, method='FFT')
else:
for key in self.cPhase.cpinfo['processed'][datapool]['eicp']:
if dpool == 'prelim':
result_resampled['whole']['dspec'][key] = DSP.downsampler(result_resampled['whole']['dspec'][key], downsample_factor, axis=-1, method='FFT')
else:
result_resampled[dpool]['dspec'][key] = DSP.downsampler(result_resampled[dpool]['dspec'][key], downsample_factor, axis=-1, method='FFT')
self.cPhaseDS_resampled = result_resampled
return result_resampled
else:
return result
############################################################################
def subset(self, selection=None):
"""
------------------------------------------------------------------------
Return triad and time indices to select a subset of processed data
Inputs:
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
Outputs:
Tuple (triad_ind, lst_ind, day_ind, day_ind_eicpdiff) containing the
triad, LST, day, and day-pair (for subsample differences) indices,
each as a numpy array
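Example:
An illustrative selection (a sketch; cpDSobj is a placeholder for an
instance of this class and the triads listed must exist in the data):
    selection = {'triads': [(0,1,2), (0,1,3)], 'lst': NP.arange(4),
                 'days': None}
    triad_ind, lst_ind, day_ind, day_ind_eicpdiff = cpDSobj.subset(selection=selection)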
------------------------------------------------------------------------
"""
if selection is None:
selection = {}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
triads = list(map(tuple, self.cPhase.cpinfo['raw']['triads']))
if 'triads' not in selection:
selection['triads'] = triads
if selection['triads'] is None:
selection['triads'] = triads
triad_ind = [triads.index(triad) for triad in selection['triads']]
triad_ind = NP.asarray(triad_ind)
lst_ind = None
if 'lst' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
else:
if selection['lst'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
elif isinstance(selection['lst'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = selection['lst']
if NP.any(NP.logical_or(lst_ind < 0, lst_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])):
raise ValueError('Input processed lst indices out of bounds')
else:
raise TypeError('Wrong type for processed lst indices')
if lst_ind is None:
raise ValueError('LST index selection could not be performed')
day_ind = None
day_ind_eicpdiff = None
if 'days' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
else:
if selection['days'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
elif isinstance(selection['days'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = selection['days']
if NP.any(NP.logical_or(day_ind < 0, day_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])):
raise ValueError('Input processed day indices out of bounds')
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = [i for i,item in enumerate(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']) if len(set(item)-set(selection['days']))==0]
else:
raise TypeError('Wrong type for processed day indices')
if day_ind is None:
raise ValueError('Day index selection could not be performed')
return (triad_ind, lst_ind, day_ind, day_ind_eicpdiff)
############################################################################
def compute_power_spectrum(self, cpds=None, selection=None, autoinfo=None,
xinfo=None, cosmo=cosmo100, units='K', beamparms=None):
"""
------------------------------------------------------------------------
Compute power spectrum of closure phase data. The result is in
cosmological units (Jy^2 Mpc or K^2 Mpc^3, with comoving distances in
units of Mpc/h)
Inputs:
cpds [dictionary] A dictionary that contains the 'oversampled' (if
resample=False) and/or 'resampled' (if resample=True) delay
spectrum information. If it is not specified the attributes
cPhaseDS['processed'] and cPhaseDS_resampled['processed'] are
used. Under each of these keys, it holds a dictionary that has
the following keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is
roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband
delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed.
It is of size n_win. It is roughly equivalent to
width in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window
was raised. The value must be a positive scalar
with default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform.
It is of size nlags. The lags roughly correspond
to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_win x nlst x ndays x ntriads x nlags.
'lag_corr_length'
[numpy array] It is the correlation timescale
(in pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth.
It is of size n_win. The unit size of a pixel is
determined by the difference between adjacent
pixels in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth
of the subband specified in bw_eff
'processed' [dictionary] Contains the following keys and
values:
'dspec' [dictionary] Contains the following keys
and values:
'twts' [numpy array] Weights from
time-based flags that went into
time-averaging.
Shape=(ntriads,npol,nchan,nt)
'mean' [numpy array] Delay spectrum of
closure phases based on their
mean across time intervals.
Shape=(nspw,npol,nt,ntriads,nlags)
'median'
[numpy array] Delay spectrum of
closure phases based on their
median across time intervals.
Shape=(nspw,npol,nt,ntriads,nlags)
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
autoinfo
[NoneType or dictionary] Specifies parameters for processing
before power spectrum in auto or cross modes. If set to None,
a dictionary will be created with the default values as
described below. The dictionary must have the following keys
and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes that will
be averaged coherently before squaring (for auto) or
cross-multiplying (for cross) power spectrum. If set
to None (default), no axes are averaged coherently.
If set to int, list, tuple or numpy array, those axes
will be averaged coherently after applying the weights
specified under key 'wts' along those axes. 1=lst,
2=days, 3=triads.
'wts' [NoneType/list/numpy array] If not provided (equivalent
to setting it to None) or set to None (default), it is
set to a one element list which is a one element numpy
array of unity. Otherwise, it must be a list of same
number of elements as in key 'axes' and each of these
must be a numpy broadcast compatible array corresponding
to each of the axis specified in 'axes'
xinfo [NoneType or dictionary] Specifies parameters for processing
cross power spectrum. If set to None, a dictionary will be
created with the default values as described below. The
dictionary must have the following keys and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes over which
power spectrum will be computed incoherently by cross-
multiplication. If set to None (default), no cross-
power spectrum is computed. If set to int, list, tuple
or numpy array, cross-power over those axes will be
computed incoherently by cross-multiplication. The
cross-spectrum over these axes will be computed after
applying the pre- and post- cross-multiplication
weights specified in key 'wts'. 1=lst, 2=days,
3=triads.
'collapse_axes'
[list] The axes that will be collapsed after the
cross-power matrix is produced by cross-multiplication.
If this key is not set, it will be initialized to an
empty list (default), in which case none of the axes
is collapsed and the full cross-power matrix will be
output. It must be a subset of values under key 'axes'.
This will reduce it from a square matrix along that axis
to collapsed values along each of the leading diagonals.
1=lst, 2=days, 3=triads.
'dlst' [scalar] LST interval (in mins) or difference between LST
pairs which will be determined and used for
cross-power spectrum. Will only apply if values under
'axes' contain the LST axis (=1).
'dlst_range'
[scalar, numpy array, or NoneType] Specifies the LST
difference(s) in minutes that are to be used in the
computation of cross-power spectra. If a scalar, only
the diagonal consisting of pairs with that LST
difference will be computed. If a numpy array, those
diagonals consisting of pairs with that LST difference
will be computed. If set to None (default), the main
diagonal (LST difference of 0) and the first off-main
diagonal (LST difference of 1 unit) corresponding to
pairs with 0 and 1 unit LST difference are computed.
Applies only if key 'axes' contains LST axis (=1).
'avgcov'
[boolean] It specifies if the square covariance
matrix is to be collapsed further to a single
number after applying 'postX' weights. If not set or
set to False (default), this late stage collapse will
not be performed. Otherwise, it will be averaged in a
weighted average sense where the 'postX' weights would
have already been applied during the collapsing
operation
'wts' [NoneType or Dictionary] If not set, a default
dictionary (see default values below) will be created.
It must have the following keys and values:
'preX' [list of numpy arrays] It contains pre-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'preX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
is done after the application of 'preX' weights. If
set to True, the delay spectrum will be
normalized by the sum of the weights.
'postX' [list of numpy arrays] It contains post-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'postX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'postXnorm'
[boolean] If False (default), no normalization
is done after the application of postX weights.
If set to True, the delay cross power spectrum
will be normalized by the sum of the weights.
cosmo [instance of cosmology class from astropy] An instance of class
FLRW or default_cosmology of astropy cosmology module. Default
uses Planck 2015 cosmology, with H0=100 h km/s/Mpc
units [string] Specifies the units of output power spectrum. Accepted
values are 'Jy' and 'K' (default), and the power spectrum will
be in corresponding squared units.
Output:
Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array),
'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday'
((ndays,) array), 'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function FT().
Values under keys 'triads_ind' and 'lst_ind' are numpy array
corresponding to triad and time indices used in selecting the data.
Values under keys 'oversampled' and 'resampled' each contain a
dictionary with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to
'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
each of which is a dictionary. 'whole' contains power spectrum info
about the input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure phase) from
the 'whole' model. 'residual' contains power spectrum info about the
closure phases obtained as a difference between 'whole' and 'submodel'.
It contains the following keys and values:
'mean' [numpy array] Delay power spectrum incoherently estimated over
the axes specified in xinfo['axes'] using the 'mean' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged over
the axes specified in xinfo['axes'] using the 'median' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets for
those axes. If 'avgcov' was set, those entries will be removed
from 'diagoffsets' since all the leading diagonal elements have
been collapsed (averaged) further. Value under each key is a
numpy array where each element in the array corresponds to the
index of that leading diagonal. This should match the size of
the output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in collapse_axes and
the value is a numpy array of weights corresponding to the
diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated but is
not collapsed, the number of dimensions in the output will have
changed. This parameter tracks where the original axis is now
placed. The keys are the original axes that are involved in
incoherent cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the power
spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the power
spectrum
Examples:
(1)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False}}
Output delay power spectrum has shape (Nspw, Nlst, 1, Ntriads, Nlags)
(2)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False},
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, 1, Ntriads, Ntriads, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)},
axesmap = {1: [1,2], 3: [4,5]}
(3)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
'dlst_range': [0.0, 1.0, 2.0]}
Output delay power spectrum has shape
(Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1,2], 3: [4]}
(4)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3],
'dlst_range': [1.0, 2.0, 3.0, 4.0]}
Output delay power spectrum has shape
(Nspw, 4, Ndays, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1], 3: [3]}
(5)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3],
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndays, 1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]}
(6)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []}
Output delay power spectrum has shape
(Nspw, 1, Ndays, 1, Nlags)
diagoffsets = {}, axesmap = {1: [1], 3: [3]}
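(7)
Illustrative call (a sketch only; cpDSobj and beamparms are placeholder
names, and the contents of beamparms must match what beam3Dvol()
expects for the instrument and beam model in use):
    autoinfo = {'axes': None, 'wts': None}
    xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
             'dlst_range': None}
    xcpdps = cpDSobj.compute_power_spectrum(selection=None, autoinfo=autoinfo,
                                            xinfo=xinfo, units='K',
                                            beamparms=beamparms)
    pk = xcpdps['resampled']['whole']['mean']  # delay cross-power (K2 Mpc3)
    kprll = xcpdps['resampled']['kprll']       # corresponding k_parallel (h/Mpc)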
------------------------------------------------------------------------
"""
if not isinstance(units,str):
raise TypeError('Input parameter units must be a string')
if units.lower() == 'k':
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if 'freqs' not in beamparms:
beamparms['freqs'] = self.f
beamparms_orig = copy.deepcopy(beamparms)
if autoinfo is None:
autoinfo = {'axes': None, 'wts': [NP.ones(1, dtype=NP.float)]}
elif not isinstance(autoinfo, dict):
raise TypeError('Input autoinfo must be a dictionary')
if 'axes' not in autoinfo:
autoinfo['axes'] = None
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array')
else:
autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1)
if 'wts' not in autoinfo:
if autoinfo['axes'] is not None:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes'])
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['wts'], list):
raise TypeError('wts in input autoinfo must be a list of numpy arrays')
else:
if len(autoinfo['wts']) != len(autoinfo['axes']):
raise ValueError('Input list of wts must be same as length of autoinfo axes')
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
if xinfo is None:
xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtype=NP.float)], 'postX': [NP.ones(1, dtype=NP.float)], 'preXnorm': False, 'postXnorm': False}}
elif not isinstance(xinfo, dict):
raise TypeError('Input xinfo must be a dictionary')
if 'axes' not in xinfo:
xinfo['axes'] = None
else:
if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array')
else:
xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1)
if 'wts' not in xinfo:
xinfo['wts'] = {}
for xkey in ['preX', 'postX']:
if xinfo['axes'] is not None:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes'])
else:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
xinfo['wts']['preXnorm'] = False
xinfo['wts']['postXnorm'] = False
else:
if xinfo['axes'] is not None:
if not isinstance(xinfo['wts'], dict):
raise TypeError('wts in input xinfo must be a dictionary')
for xkey in ['preX', 'postX']:
if not isinstance(xinfo['wts'][xkey], list):
raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey))
else:
if len(xinfo['wts'][xkey]) != len(xinfo['axes']):
raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey))
else:
for xkey in ['preX', 'postX']:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
if 'preXnorm' not in xinfo['wts']:
xinfo['wts']['preXnorm'] = False
if 'postXnorm' not in xinfo['wts']:
xinfo['wts']['postXnorm'] = False
if not isinstance(xinfo['wts']['preXnorm'], NP.bool):
raise TypeError('preXnorm in input xinfo must be a boolean')
if not isinstance(xinfo['wts']['postXnorm'], NP.bool):
raise TypeError('postXnorm in input xinfo must be a boolean')
if 'avgcov' not in xinfo:
xinfo['avgcov'] = False
if not isinstance(xinfo['avgcov'], NP.bool):
raise TypeError('avgcov under input xinfo must be boolean')
if 'collapse_axes' not in xinfo:
xinfo['collapse_axes'] = []
if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)):
raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array')
else:
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1)
if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None):
if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0:
raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection")
cohax = autoinfo['axes']
if cohax is None:
cohax = []
incohax = xinfo['axes']
if incohax is None:
incohax = []
if selection is None:
selection = {'triads': None, 'lst': None, 'days': None}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
if cpds is None:
cpds = {}
sampling = ['oversampled', 'resampled']
for smplng in sampling:
if smplng == 'oversampled':
cpds[smplng] = copy.deepcopy(self.cPhaseDS)
else:
cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled)
triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection)
result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['processed']['prelim']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['processed']['prelim']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['processed']['prelim']['daybins'][day_ind], 'day_ind': day_ind, 'dday': self.cPhase.cpinfo['processed']['prelim']['diff_dbins'][day_ind]}
dlstbin = NP.mean(self.cPhase.cpinfo['processed']['prelim']['dlstbins'])
if 'dlst_range' in xinfo:
if xinfo['dlst_range'] is None:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
else:
dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours)
if dlst_range.size == 1:
dlst_range = NP.insert(dlst_range, 0, 0.0)
lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size]))
else:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance
for smplng in sampling:
result[smplng] = {}
wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz)
z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1
dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff']
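# z and dz above map the band centers and effective bandwidths to HI
# redshifts and redshift widths; dkprll_deta below converts delay (eta)
# into k_parallel at those redshifts for the chosen cosmology.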
dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo)
kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags']
rz_los = cosmo.comoving_distance(z) # in Mpc/h
drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h
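# Jacobians to convert the squared delay spectrum (in Jy Hz squared)
# into a cosmological power spectrum: jacobian1 divides out the
# bandwidth (or the beam-bandwidth volume when units='K'), jacobian2
# maps the frequency interval to a comoving distance (and transverse
# area for 'K') element, and temperature_from_fluxdensity applies the
# Rayleigh-Jeans flux density to brightness temperature conversion
# when units='K'.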
if units == 'Jy':
jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz)
jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = 1.0
elif units == 'K':
beamparms = copy.deepcopy(beamparms_orig)
omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts'])
jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned
jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B)
else:
raise ValueError('Input value for units invalid')
factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2
result[smplng]['z'] = z
result[smplng]['kprll'] = kprll
result[smplng]['lags'] = NP.copy(cpds[smplng]['lags'])
result[smplng]['freq_center'] = cpds[smplng]['freq_center']
result[smplng]['bw_eff'] = cpds[smplng]['bw_eff']
result[smplng]['shape'] = cpds[smplng]['shape']
result[smplng]['freq_wts'] = cpds[smplng]['freq_wts']
result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length']
for dpool in ['whole', 'submodel', 'residual']:
if dpool in cpds[smplng]:
result[smplng][dpool] = {}
inpshape = list(cpds[smplng]['whole']['dspec']['mean'].shape)
inpshape[1] = lst_ind.size
inpshape[2] = day_ind.size
inpshape[3] = triad_ind.size
if len(cohax) > 0:
nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)])
else:
nsamples_coh = 1
if len(incohax) > 0:
nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)])
nsamples_incoh = nsamples * (nsamples - 1)
else:
nsamples_incoh = 1
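# nsamples_coh counts samples combined coherently (before squaring or
# cross-multiplying) along autoinfo['axes']; nsamples_incoh counts the
# ordered pairs (excluding self-pairs) cross-multiplied incoherently
# along xinfo['axes']. Both are reported in the output for bookkeeping.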
twts_multidim_idx = NP.ix_(lst_ind,day_ind,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1)
dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan)
max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2)))
select_chan = NP.argmax(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2)))
twts = NP.copy(cpds[smplng]['whole']['dspec']['twts'].data[:,:,:,[select_chan]]) # shape=(nlst,ndays,ntriads,nlags=1)
if nsamples_coh > 1:
awts_shape = tuple(NP.ones(cpds[smplng]['whole']['dspec']['mean'].ndim, dtype=NP.int))
awts = NP.ones(awts_shape, dtype=NP.complex)
awts_shape = NP.asarray(awts_shape)
for caxind,caxis in enumerate(cohax):
curr_awts_shape = NP.copy(awts_shape)
curr_awts_shape[caxis] = -1
awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape))
for stat in ['mean', 'median']:
if dpool == 'submodel':
dspec = NP.copy(cpds[smplng][dpool]['dspec'][dspec_multidim_idx])
else:
dspec = NP.copy(cpds[smplng][dpool]['dspec'][stat][dspec_multidim_idx])
if nsamples_coh > 1:
if stat == 'mean':
dspec = NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts * dspec[dspec_multidim_idx], axis=cohax, keepdims=True) / NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
else:
dspec = NP.median(dspec[dspec_multidim_idx], axis=cohax, keepdims=True)
if nsamples_incoh > 1:
expandax_map = {}
wts_shape = tuple(NP.ones(dspec.ndim, dtype=NP.int))
preXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for incaxind,incaxis in enumerate(xinfo['axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[incaxis] = -1
preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape))
dspec1 = NP.copy(dspec)
dspec2 = NP.copy(dspec)
preXwts1 = NP.copy(preXwts)
preXwts2 = NP.copy(preXwts)
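# Cross-multiplication setup: two copies of the delay spectrum (and of
# the pre-X weights) are made; each incoherent axis is expanded into a
# pair of axes to form an outer product, and for the LST axis (=1) the
# second copy is additionally rolled by the allowed LST offsets
# (lstshifts) so that only the requested LST-difference diagonals are
# populated.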
for incax in NP.sort(incohax)[::-1]:
dspec1 = NP.expand_dims(dspec1, axis=incax)
preXwts1 = NP.expand_dims(preXwts1, axis=incax)
if incax == 1:
preXwts1_outshape = list(preXwts1.shape)
preXwts1_outshape[incax+1] = dspec1.shape[incax+1]
preXwts1_outshape = tuple(preXwts1_outshape)
preXwts1 = NP.broadcast_to(preXwts1, preXwts1_outshape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
preXwts2_tmp = NP.expand_dims(preXwts2, axis=incax)
preXwts2_shape = NP.asarray(preXwts2_tmp.shape)
preXwts2_shape[incax] = lstshifts.size
preXwts2_shape[incax+1] = preXwts1_outshape[incax+1]
preXwts2_shape = tuple(preXwts2_shape)
preXwts2 = NP.broadcast_to(preXwts2_tmp, preXwts2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
dspec2_tmp = NP.expand_dims(dspec2, axis=incax)
dspec2_shape = NP.asarray(dspec2_tmp.shape)
dspec2_shape[incax] = lstshifts.size
# dspec2_shape = NP.insert(dspec2_shape, incax, lstshifts.size)
dspec2_shape = tuple(dspec2_shape)
dspec2 = NP.broadcast_to(dspec2_tmp, dspec2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
for lstshiftind, lstshift in enumerate(lstshifts):
dspec2[:,lstshiftind,...] = NP.roll(dspec2_tmp[:,0,...], lstshift, axis=incax)
dspec2[:,lstshiftind,:lstshift,...] = NP.nan
preXwts2[:,lstshiftind,...] = NP.roll(preXwts2_tmp[:,0,...], lstshift, axis=incax)
preXwts2[:,lstshiftind,:lstshift,...] = NP.nan
else:
dspec2 = NP.expand_dims(dspec2, axis=incax+1)
preXwts2 = NP.expand_dims(preXwts2, axis=incax+1)
expandax_map[incax] = incax + NP.arange(2)
for ekey in expandax_map:
if ekey > incax:
expandax_map[ekey] += 1
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec1.ndim-1, dtype=NP.int))) * (dspec1*U.Unit('Jy Hz') * preXwts1) * (dspec2*U.Unit('Jy Hz') * preXwts2).conj()
if xinfo['wts']['preXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(preXwts1 * preXwts2.conj(), axis=NP.union1d(NP.where(NP.logical_or(NP.asarray(preXwts1.shape)>1, NP.asarray(preXwts2.shape)>1))), keepdims=True) # Normalize by summing the weights over the expanded axes
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# if any one of collapsing of incoherent axes or
# averaging of full covariance is requested
diagoffsets = {} # Stores the correlation index difference along each axis.
diagweights = {} # Stores the number of points summed in the trace along the offset diagonal
for colaxind, colax in enumerate(xinfo['collapse_axes']):
if colax == 1:
shp = NP.ones(dspec.ndim, dtype=NP.int)
shp[colax] = lst_ind.size
multdim_idx = tuple([NP.arange(axdim) for axdim in shp])
diagweights[colax] = NP.sum(NP.logical_not(NP.isnan(dspec[multdim_idx]))) - lstshifts
# diagweights[colax] = result[smplng][dpool][stat].shape[expandax_map[colax][-1]] - lstshifts
if stat == 'mean':
result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
else:
result[smplng][dpool][stat] = NP.nanmedian(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
diagoffsets[colax] = lstshifts
else:
pspec_unit = result[smplng][dpool][stat].si.unit
result[smplng][dpool][stat], offsets, diagwts = OPS.array_trace(result[smplng][dpool][stat].si.value, offsets=None, axis1=expandax_map[colax][0], axis2=expandax_map[colax][1], outaxis='axis1')
diagwts_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagwts_shape[expandax_map[colax][0]] = diagwts.size
diagoffsets[colax] = offsets
diagweights[colax] = NP.copy(diagwts)
result[smplng][dpool][stat] = result[smplng][dpool][stat] * pspec_unit / diagwts.reshape(diagwts_shape)
for ekey in expandax_map:
if ekey > colax:
expandax_map[ekey] -= 1
expandax_map[colax] = NP.asarray(expandax_map[colax][0]).ravel()
wts_shape = tuple(NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int))
postXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for colaxind, colax in enumerate(xinfo['collapse_axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[expandax_map[colax]] = -1
postXwts = postXwts * xinfo['wts']['postX'][colaxind].reshape(tuple(curr_wts_shape))
result[smplng][dpool][stat] = result[smplng][dpool][stat] * postXwts
axes_to_sum = tuple(NP.asarray([expandax_map[colax] for colax in xinfo['collapse_axes']]).ravel()) # for post-X normalization and collapse of covariance matrix
if xinfo['wts']['postXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(postXwts, axis=axes_to_sum, keepdims=True) # Normalize by summing the weights over the collapsed axes
if xinfo['avgcov']:
# collapse the axes further (postXwts have already
# been applied)
diagoffset_weights = 1.0
for colaxind in list(zip(*sorted(zip(NP.arange(xinfo['collapse_axes'].size), xinfo['collapse_axes']), reverse=True)))[0]:
# It is important to sort the collapsable axes in
# reverse order before deleting elements below,
# otherwise the axes ordering may be get messed up
diagoffset_weights_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagoffset_weights_shape[expandax_map[xinfo['collapse_axes'][colaxind]][0]] = diagweights[xinfo['collapse_axes'][colaxind]].size
diagoffset_weights = diagoffset_weights * diagweights[xinfo['collapse_axes'][colaxind]].reshape(diagoffset_weights_shape)
del diagoffsets[xinfo['collapse_axes'][colaxind]]
result[smplng][dpool][stat] = NP.nansum(result[smplng][dpool][stat]*diagoffset_weights, axis=axes_to_sum, keepdims=True) / NP.nansum(diagoffset_weights, axis=axes_to_sum, keepdims=True)
else:
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec.ndim-1, dtype=NP.int))) * NP.abs(dspec * U.Jy)**2
diagoffsets = {}
expandax_map = {}
if units == 'Jy':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('Jy2 Mpc')
elif units == 'K':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('K2 Mpc3')
else:
raise ValueError('Input value for units invalid')
result[smplng][dpool]['diagoffsets'] = diagoffsets
result[smplng][dpool]['diagweights'] = diagweights
result[smplng][dpool]['axesmap'] = expandax_map
result[smplng][dpool]['nsamples_incoh'] = nsamples_incoh
result[smplng][dpool]['nsamples_coh'] = nsamples_coh
return result
############################################################################
def compute_power_spectrum_uncertainty(self, cpds=None, selection=None,
autoinfo=None, xinfo=None,
cosmo=cosmo100, units='K',
beamparms=None):
"""
------------------------------------------------------------------------
Compute uncertainty in the power spectrum of closure phase data. The
result is in cosmological units (Jy^2 Mpc or K^2 Mpc^3, with comoving
distances in units of Mpc/h)
Inputs:
cpds [dictionary] A dictionary that contains the 'oversampled' (if
resample=False) and/or 'resampled' (if resample=True) delay
spectrum information on the key 'errinfo'. If it is not
specified the attributes cPhaseDS['errinfo'] and
cPhaseDS_resampled['errinfo'] are used. Under each of these
sampling keys, it holds a dictionary that has the following
keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is
roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband
delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed.
It is of size n_win. It is roughly equivalent to
width in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window
was raised. The value must be a positive scalar
with default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform.
It is of size nlags. The lags roughly correspond
to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_win x nlst x ndays x ntriads x nlags.
'lag_corr_length'
[numpy array] It is the correlation timescale
(in pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth.
It is of size n_win. The unit size of a pixel is
determined by the difference between adjacent
pixels in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth
of the subband specified in bw_eff
'errinfo' [dictionary] It has two keys 'dspec0' and
'dspec1' each of which are dictionaries with
the following keys and values:
'twts' [numpy array] Weights for the subsample
difference. It is of shape (nlst, ndays,
ntriads, nchan)
'mean' [numpy array] Delay spectrum of the
subsample difference obtained by using
the mean statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
'median'
[numpy array] Delay spectrum of the
subsample difference obtained by using
the median statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
autoinfo
[NoneType or dictionary] Specifies parameters for processing
before power spectrum in auto or cross modes. If set to None,
a dictionary will be created with the default values as
described below. The dictionary must have the following keys
and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes that will
be averaged coherently before squaring (for auto) or
cross-multiplying (for cross) power spectrum. If set
to None (default), no axes are averaged coherently.
If set to int, list, tuple or numpy array, those axes
will be averaged coherently after applying the weights
specified under key 'wts' along those axes. 1=lst,
3=triads. Value of 2 for axes is not allowed since
that denotes repeated days and it is along this axis
that cross-power is computed regardless.
'wts' [NoneType/list/numpy array] If not provided (equivalent
to setting it to None) or set to None (default), it is
set to a one element list which is a one element numpy
array of unity. Otherwise, it must be a list of same
number of elements as in key 'axes' and each of these
must be a numpy broadcast compatible array corresponding
to each of the axis specified in 'axes'
xinfo [NoneType or dictionary] Specifies parameters for processing
cross power spectrum. If set to None, a dictionary will be
created with the default values as described below. The
dictionary must have the following keys and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes over which
power spectrum will be computed incoherently by cross-
multiplication. If set to None (default), no cross-
power spectrum is computed. If set to int, list, tuple
or numpy array, cross-power over those axes will be
computed incoherently by cross-multiplication. The
cross-spectrum over these axes will be computed after
applying the pre- and post- cross-multiplication
weights specified in key 'wts'. 1=lst, 3=triads. Value
of 2 for axes is not allowed since that denotes
repeated days and it is along this axis that
cross-power is computed regardless.
'collapse_axes'
[list] The axes that will be collapsed after the
cross-power matrix is produced by cross-multiplication.
If this key is not set, it will be initialized to an
empty list (default), in which case none of the axes
is collapsed and the full cross-power matrix will be
output. It must be a subset of values under key 'axes'.
This will reduce it from a square matrix along that axis
to collapsed values along each of the leading diagonals.
1=lst, 3=triads.
'dlst' [scalar] LST interval (in mins) or difference between LST
pairs which will be determined and used for
cross-power spectrum. Will only apply if values under
'axes' contain the LST axis (=1).
'dlst_range'
[scalar, numpy array, or NoneType] Specifies the LST
difference(s) in minutes that are to be used in the
computation of cross-power spectra. If a scalar, only
the diagonal consisting of pairs with that LST
difference will be computed. If a numpy array, those
diagonals consisting of pairs with that LST difference
will be computed. If set to None (default), the main
diagonal (LST difference of 0) and the first off-main
diagonal (LST difference of 1 unit) corresponding to
pairs with 0 and 1 unit LST difference are computed.
Applies only if key 'axes' contains LST axis (=1).
'avgcov'
[boolean] It specifies if the square covariance
matrix is to be collapsed further to a single
number after applying 'postX' weights. If not set or
set to False (default), this late stage collapse will
not be performed. Otherwise, it will be averaged in a
weighted average sense where the 'postX' weights would
have already been applied during the collapsing
operation
'wts' [NoneType or Dictionary] If not set, a default
dictionary (see default values below) will be created.
It must have the following keys and values:
'preX' [list of numpy arrays] It contains pre-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set to None, 'preX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
is done after the application of weights. If
set to True, the delay spectrum will be
normalized by the sum of the weights.
'postX' [list of numpy arrays] It contains post-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set to None, 'postX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
is done after the application of 'preX' weights.
If set to True, the delay spectrum will be
normalized by the sum of the weights.
'postXnorm'
[boolean] If False (default), no normalization
is done after the application of postX weights.
If set to True, the delay cross power spectrum
will be normalized by the sum of the weights.
cosmo [instance of cosmology class from astropy] An instance of class
FLRW or default_cosmology of astropy cosmology module. Default
uses Planck 2015 cosmology, with H0=100 h km/s/Mpc
units [string] Specifies the units of the output power spectrum. Accepted
values are 'Jy' and 'K' (default), and the power spectrum will
be in the corresponding squared units.
Output:
Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array),
'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,) array), 'dday'
((ndaycomb,) array), 'oversampled' and 'resampled' corresponding to
whether resample was set to False or True in call to member function
FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array
corresponding to triad and time indices used in selecting the data.
Values under keys 'oversampled' and 'resampled' each contain a
dictionary with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to
'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary. It
contains information about power spectrum uncertainties obtained from
subsample differences. It contains the following keys and values:
'mean' [numpy array] Delay power spectrum uncertainties incoherently
estimated over the axes specified in xinfo['axes'] using the
'mean' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends on the
combination of input parameters. See examples below. If both
collapse_axes and avgcov are not set, those axes will be
replaced with square covariance matrices. If collapse_axes is
provided but avgcov is False, those axes will be of shape
2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties incoherently
averaged over the axes specified in incohax using the 'median'
key in input cpds or attribute cPhaseDS['errinfo']['dspec'].
It has shape that depends on the combination of input
parameters. See examples below. If both collapse_axes and
avgcov are not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets for
those axes. If 'avgcov' was set, those entries will be removed
from 'diagoffsets' since all the leading diagonal elements have
been collapsed (averaged) further. Value under each key is a
numpy array where each element in the array corresponds to the
index of that leading diagonal. This should match the size of
the output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in collapse_axes and
the value is a numpy array of weights corresponding to the
diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated but is
not collapsed, the number of dimensions in the output will have
changed. This parameter tracks where the original axis is now
placed. The keys are the original axes that are involved in
incoherent cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the power
spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the power
spectrum
Examples:
(1)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False}}
This will not do anything because axes cannot include the value 2, which
denotes the 'days' axis; the uncertainties are obtained through
subsample differencing along the days axis regardless.
Output delay power spectrum has shape (Nspw, Nlst, Ndaycomb, Ntriads,
Nlags)
(2)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False},
'dlst_range': None}
This will not do anything about coherent averaging along axis=2 because
axes cannot include the value 2, which denotes the 'days' axis; the
uncertainties are obtained through subsample differencing along the days
axis regardless.
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndaycomb, Ntriads, Ntriads, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)},
axesmap = {1: [1,2], 3: [4,5]}
(3)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
'dlst_range': [0.0, 1.0, 2.0]}
This will not do anything about coherent averaging along axis=2 because
axes cannot include the value 2, which denotes the 'days' axis; the
uncertainties are obtained through subsample differencing along the days
axis regardless.
Output delay power spectrum has shape
(Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1,2], 3: [4]}
(4)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3],
'dlst_range': [1.0, 2.0, 3.0, 4.0]}
Output delay power spectrum has shape
(Nspw, 4, Ndaycomb, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1], 3: [3]}
(5)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3],
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndays, 1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]}
(6)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []}
Output delay power spectrum has shape
(Nspw, 1, Ndays, 1, Nlags)
diagoffsets = {}, axesmap = {1: [1], 3: [3]}
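A sketch of how the returned dictionary may be indexed (illustrative only;
'dps' below denotes the dictionary returned by this method):
    pserr_mean = dps['resampled']['errinfo']['mean']      # uncertainty estimates
    offsets = dps['resampled']['errinfo']['diagoffsets']  # offsets per collapsed axis
    kprll = dps['resampled']['kprll']                     # shape (nspw, nlags)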
------------------------------------------------------------------------
"""
if not isinstance(units,str):
raise TypeError('Input parameter units must be a string')
if units.lower() == 'k':
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if 'freqs' not in beamparms:
beamparms['freqs'] = self.f
beamparms_orig = copy.deepcopy(beamparms)
if autoinfo is None:
autoinfo = {'axes': None, 'wts': [NP.ones(1, dtype=NP.float)]}
elif not isinstance(autoinfo, dict):
raise TypeError('Input autoinfo must be a dictionary')
if 'axes' not in autoinfo:
autoinfo['axes'] = None
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array')
else:
autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1)
if 'wts' not in autoinfo:
if autoinfo['axes'] is not None:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes'])
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['wts'], list):
raise TypeError('wts in input autoinfo must be a list of numpy arrays')
else:
if len(autoinfo['wts']) != len(autoinfo['axes']):
raise ValueError('Input list of wts must be same as length of autoinfo axes')
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
if xinfo is None:
xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtype=NP.float)], 'postX': [NP.ones(1, dtype=NP.float)], 'preXnorm': False, 'postXnorm': False}}
elif not isinstance(xinfo, dict):
raise TypeError('Input xinfo must be a dictionary')
if 'axes' not in xinfo:
xinfo['axes'] = None
else:
if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array')
else:
xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1)
if 'wts' not in xinfo:
xinfo['wts'] = {}
for xkey in ['preX', 'postX']:
if xinfo['axes'] is not None:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes'])
else:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
xinfo['wts']['preXnorm'] = False
xinfo['wts']['postXnorm'] = False
else:
if xinfo['axes'] is not None:
if not isinstance(xinfo['wts'], dict):
raise TypeError('wts in input xinfo must be a dictionary')
for xkey in ['preX', 'postX']:
if not isinstance(xinfo['wts'][xkey], list):
raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey))
else:
if len(xinfo['wts'][xkey]) != len(xinfo['axes']):
raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey))
else:
for xkey in ['preX', 'postX']:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
if 'preXnorm' not in xinfo['wts']:
xinfo['wts']['preXnorm'] = False
if 'postXnorm' not in xinfo['wts']:
xinfo['wts']['postXnorm'] = False
if not isinstance(xinfo['wts']['preXnorm'], NP.bool):
raise TypeError('preXnorm in input xinfo must be a boolean')
if not isinstance(xinfo['wts']['postXnorm'], NP.bool):
raise TypeError('postXnorm in input xinfo must be a boolean')
if 'avgcov' not in xinfo:
xinfo['avgcov'] = False
if not isinstance(xinfo['avgcov'], NP.bool):
raise TypeError('avgcov under input xinfo must be boolean')
if 'collapse_axes' not in xinfo:
xinfo['collapse_axes'] = []
if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)):
raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array')
else:
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1)
if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None):
if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0:
raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection")
cohax = autoinfo['axes']
if cohax is None:
cohax = []
if 2 in cohax: # Remove axis=2 from cohax
if isinstance(cohax, list):
cohax.remove(2)
if isinstance(cohax, NP.ndarray):
cohax = cohax.tolist()
cohax.remove(2)
cohax = NP.asarray(cohax)
incohax = xinfo['axes']
if incohax is None:
incohax = []
if 2 in incohax: # Remove axis=2 from incohax
if isinstance(incohax, list):
incohax.remove(2)
if isinstance(incohax, NP.ndarray):
incohax = incohax.tolist()
incohax.remove(2)
incohax = NP.asarray(incohax)
if selection is None:
selection = {'triads': None, 'lst': None, 'days': None}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
if cpds is None:
cpds = {}
sampling = ['oversampled', 'resampled']
for smplng in sampling:
if smplng == 'oversampled':
cpds[smplng] = copy.deepcopy(self.cPhaseDS)
else:
cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled)
triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection)
result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['errinfo']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['errinfo']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['errinfo']['daybins'][day_ind], 'day_ind': day_ind_eicpdiff, 'dday': self.cPhase.cpinfo['errinfo']['diff_dbins'][day_ind]}
dlstbin = NP.mean(self.cPhase.cpinfo['errinfo']['dlstbins'])
if 'dlst_range' in xinfo:
if xinfo['dlst_range'] is None:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
else:
dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours)
if dlst_range.size == 1:
dlst_range = NP.insert(dlst_range, 0, 0.0)
lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size]))
else:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance
for smplng in sampling:
result[smplng] = {}
wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz)
z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1
dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff']
dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo)
kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags']
rz_los = cosmo.comoving_distance(z) # in Mpc/h
drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h
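# Conversion factors applied below: jacobian1 normalizes by the effective bandwidth
# (or by the beam-bandwidth volume omega_bw when units='K'), jacobian2 converts the
# bandwidth to a comoving line-of-sight extent using rz_los and drz_los, and
# temperature_from_fluxdensity applies the Rayleigh-Jeans factor wl**2 / (2 k_B) to
# go from flux density (Jy) to temperature (K). Their product (with the temperature
# term squared) forms the overall scaling 'factor' used on the cross-power below.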
if units == 'Jy':
jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz)
jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = 1.0
elif units == 'K':
beamparms = copy.deepcopy(beamparms_orig)
omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts'])
jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned
jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B)
else:
raise ValueError('Input value for units invalid')
factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2
result[smplng]['z'] = z
result[smplng]['kprll'] = kprll
result[smplng]['lags'] = NP.copy(cpds[smplng]['lags'])
result[smplng]['freq_center'] = cpds[smplng]['freq_center']
result[smplng]['bw_eff'] = cpds[smplng]['bw_eff']
result[smplng]['shape'] = cpds[smplng]['shape']
result[smplng]['freq_wts'] = cpds[smplng]['freq_wts']
result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length']
dpool = 'errinfo'
if dpool in cpds[smplng]:
result[smplng][dpool] = {}
inpshape = list(cpds[smplng][dpool]['dspec0']['mean'].shape)
inpshape[1] = lst_ind.size
inpshape[2] = day_ind_eicpdiff.size
inpshape[3] = triad_ind.size
if len(cohax) > 0:
nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)])
else:
nsamples_coh = 1
if len(incohax) > 0:
nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)])
nsamples_incoh = nsamples * (nsamples - 1)
else:
nsamples_incoh = 1
twts_multidim_idx = NP.ix_(lst_ind,day_ind_eicpdiff,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1)
dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind_eicpdiff,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan)
max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['errinfo']['dspec0']['twts'].data, axis=(0,1,2,3)))
select_chan = NP.argmax(NP.sum(cpds[smplng]['errinfo']['dspec0']['twts'].data, axis=(0,1,2,3)))
twts = {'0': NP.copy(cpds[smplng]['errinfo']['dspec0']['twts'].data[:,:,:,[select_chan]]), '1': NP.copy(cpds[smplng]['errinfo']['dspec1']['twts'].data[:,:,:,[select_chan]])}
if nsamples_coh > 1:
awts_shape = tuple(NP.ones(cpds[smplng]['errinfo']['dspec']['mean'].ndim, dtype=NP.int))
awts = NP.ones(awts_shape, dtype=NP.complex)
awts_shape = NP.asarray(awts_shape)
for caxind,caxis in enumerate(cohax):
curr_awts_shape = NP.copy(awts_shape)
curr_awts_shape[caxis] = -1
awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape))
for stat in ['mean', 'median']:
dspec0 = NP.copy(cpds[smplng][dpool]['dspec0'][stat][dspec_multidim_idx])
dspec1 = NP.copy(cpds[smplng][dpool]['dspec1'][stat][dspec_multidim_idx])
if nsamples_coh > 1:
if stat == 'mean':
dspec0 = NP.sum(twts['0'][NP.newaxis,...] * awts * dspec0, axis=cohax, keepdims=True) / NP.sum(twts['0'][twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
dspec1 = NP.sum(twts['1'][NP.newaxis,...] * awts * dspec1, axis=cohax, keepdims=True) / NP.sum(twts['1'][twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
else:
dspec0 = NP.median(dspec0, axis=cohax, keepdims=True)
dspec1 = NP.median(dspec1, axis=cohax, keepdims=True)
if nsamples_incoh > 1:
expandax_map = {}
wts_shape = tuple(NP.ones(dspec0.ndim, dtype=NP.int))
preXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for incaxind,incaxis in enumerate(xinfo['axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[incaxis] = -1
preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape))
preXwts0 = NP.copy(preXwts)
preXwts1 = NP.copy(preXwts)
for incax in NP.sort(incohax)[::-1]:
dspec0 = NP.expand_dims(dspec0, axis=incax)
preXwts0 = NP.expand_dims(preXwts0, axis=incax)
if incax == 1:
preXwts0_outshape = list(preXwts0.shape)
preXwts0_outshape[incax+1] = dspec0.shape[incax+1]
preXwts0_outshape = tuple(preXwts0_outshape)
preXwts0 = NP.broadcast_to(preXwts0, preXwts0_outshape)
#!/usr/bin/env python
# coding: utf-8
# In[21]:
#https://stackoverflow.com/questions/10884668/two-sample-kolmogorov-smirnov-test-in-python-scipy
#https://www.machinelearningplus.com/machine-learning/evaluation-metrics-classification-models-r/
# Essentials
import pandas as pd
import numpy as np
import time
import sys
import math
from scipy.spatial import distance
from scipy.stats import ks_2samp
from scipy import spatial
# Ignore certain warnings
import warnings
warnings.filterwarnings(action="ignore")
pd.options.display.max_seq_items = 8000
pd.options.display.max_rows = 8000
#arguments of the console
#first_arg = sys.argv[1]
#first_arg = float(first_arg)
# Noise addition filter
def noisy_filter(data, variable, a):
lista_variable = data[variable].tolist()
std = np.std(lista_variable, axis=0)
noisy_ratings = list()
for rating in lista_variable:
new_rating = rating +a*std*abs(np.random.normal())
noisy_ratings.append(new_rating)
b = "NOISY_" + variable
data[b] = noisy_ratings
return data
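# Illustrative sketch (not part of the original pipeline): applying noisy_filter
# to a toy DataFrame. The column names and the perturbation level 'a' below are
# made-up values for demonstration only.
def _example_noisy_filter():
    toy = pd.DataFrame({'SALI': [34.1, 34.5, 35.0, 34.8],
                        'TSM': [18.2, 19.0, 18.7, 18.9]})
    toy = noisy_filter(toy, 'SALI', a=0.1)   # adds a 'NOISY_SALI' column
    toy = noisy_filter(toy, 'TSM', a=0.1)    # adds a 'NOISY_TSM' column
    return toy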
def AplicarNoiseFilter(a):
# Import the original data
df = pd.read_csv('dataParaPrivacidad.csv')
# Apply the filter to the corresponding variables
variablesParaNoise = df.columns[2:-1]
for i in variablesParaNoise:
df = noisy_filter(df,i,a)
# Prepare the data for prediction
DF_NF = df.iloc[:,[0,1,7,8,9,10]]
DF_NF.iloc[:,:] = df.iloc[:,[0,1,7,8,9,10]]
# Prepare for use
for i in range(2,6):
DF_NF.iloc[:,i] = np.log1p(DF_NF.iloc[:,i])
DF_NF.columns = ['LONGI', 'LATIT', 'Salinidad', 'TC', 'Clorofila', 'TSM']
# Import the learners
import pickle
with open('XGBModel.pkl', 'rb') as f:
xgbModel = pickle.load(f)
with open('RidgeModel.pkl', 'rb') as f:
ridgeModel = pickle.load(f)
with open('SVRModel.pkl', 'rb') as f:
supportVectorRegresorModel = pickle.load(f)
with open('LGBMRModel.pkl', 'rb') as f:
LGBMRModel = pickle.load(f)
with open('StackedModel.pkl', 'rb') as f:
stack_genModel = pickle.load(f)
# Set up the learner
def votingPredictions(X):
return ((0.30 * xgbModel.predict(X)) + (0.05 * ridgeModel.predict(X)) + (0.05 * supportVectorRegresorModel.predict(X)) + (0.25 * LGBMRModel.predict(X)) + (0.35 * stack_genModel.predict(np.array(X))))
# Results
df['MontoPescaNoisy'] = votingPredictions(DF_NF)
# Get the Euclidean distance between vectors of real feature vs private vectors
df["SquaredDifference"] = (df.NOISY_SALI - df.SALI)**2 + (df.NOISY_TCL - df.TCL)**2 + (df.NOISY_CLO - df.CLO)**2 + (df.NOISY_TSM - df.TSM)**2
df['EuclideanDistance'] = np.sqrt(df[['SquaredDifference']].sum(axis=1))
# Cosine similarity
r = []
for i in range(df.shape[0]):
r.append(spatial.distance.cosine(df.loc[i,["SALI","TCL","CLO","TSM"]], df.loc[i,["NOISY_SALI","NOISY_TCL","NOISY_CLO","NOISY_TSM"]]))
# IL_EucDistance:
IL_EucDistance = sum(df.EuclideanDistance)
# IL_Cosimilitud:
IL_Cosimilitud = sum(r)
# DR Jensen-Shannon: (1 - sum(abs(P_true - P_computed)))/n
DR_JS = (1 - distance.jensenshannon(df.MontoPescaOriginal, df.MontoPescaNoisy))
# DR Kolmogorov-Smirnov
# DR1: (1 - sum(P_true - P_computed))/n
DR_KS = (1 - ks_2samp(df.MontoPescaOriginal, df.MontoPescaNoisy)[0])
# Params
params = [a]
# Results
d = {'Params':[params],'IL_EucDistance': [IL_EucDistance], 'IL_Cosimilitud': [IL_Cosimilitud], 'DR_JS': [DR_JS], 'DR_KS':[DR_KS]}
d = pd.DataFrame(data=d)
Results = str(params)+'NoiseAdditionFilter.csv'
d.to_csv(Results, index = False)
# For executing
#if __name__ == "__main__":
# AplicarNoiseFilter(first_arg)
#**
# @file cmnedata.py
# @author <NAME> <<EMAIL>>
# @version 1.0
# @date September, 2017
#
# @section LICENSE
#
# Copyright (C) 2017, <NAME>. All rights reserved.
#
# @brief CMNEData contains, e.g., data loader
#**
#%%
import os
import sys
sys.path.append('../helpers') #Add relative path to include modules
import numpy as np
import random
import mne
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from helpers.cmnesettings import CMNESettings
###################################################################################################
# Standardize
###################################################################################################
def standardize(mat, mean=None, std=None):
"""
0 center and scale data
Standardize an np.array to the array mean and standard deviation or specified parameters
See https://en.wikipedia.org/wiki/Feature_scaling
"""
if mean is None:
mean = np.mean(mat, axis=1)
if std is None:
std = np.std(mat, axis=1)
data_normalized = (mat.transpose() - mean).transpose()
data = (data_normalized.transpose() / std).transpose()
return data
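# Illustrative usage sketch (not from the original module): standardizing a toy
# source-estimate matrix so that each row (dipole) has zero mean and unit variance.
def _example_standardize():
    mat = np.random.randn(5, 100) * 3.0 + 7.0          # 5 sources x 100 time samples
    mat_std = standardize(mat)                          # per-row mean/std taken from mat itself
    return np.allclose(np.mean(mat_std, axis=1), 0.0)   # True: rows are zero-centered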
###################################################################################################
# Reshape with lookback value for LSTM
###################################################################################################
# creates lookback values using helper, then reshapes for LSTM
def reshape_stc_data(stc, look_back, start=0, step_size=1):
samples,n = stc.shape
stop = samples - look_back - 1
feature_parts, label_parts = [], []
for i in range(start,stop,step_size):
feature_parts.append(stc[i:(i + look_back), :])
# Take just the last estimate as label
label_parts.append(stc[i + look_back, :])
return feature_parts, label_parts
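# Illustrative sketch (not from the original module): shapes produced by the
# look-back reshaping. With 100 samples and look_back=40 this yields 59 windows,
# each of shape (40, n_dipoles), and one label vector per window.
def _example_reshape_stc_data():
    stc = np.random.randn(100, 8)                            # samples x dipoles
    feats, labels = reshape_stc_data(stc, look_back=40)
    return np.array(feats).shape, np.array(labels).shape     # (59, 40, 8), (59, 8)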
###################################################################################################
# Reshape with lookback value for LSTM
###################################################################################################
# creates lookback values using helper, then reshapes for LSTM
def reshape_epoch_stc_data(epoch, stc, look_back):
samples_epoch,n = epoch.shape
samples_stc,n = stc.shape
epoch_parts, stc_parts = [], []
# Make sure that samples are equaly long
if samples_epoch != samples_stc:
return epoch_parts, stc_parts
for i in range(samples_epoch - look_back):
epoch_parts.append(epoch[i:(i + look_back), :])
# Take the whole estimate sequence as label
#stc_parts.append(stc[i:(i + look_back), :])
# Take just the last estimate as label
stc_parts.append(stc[i + look_back - 1, :])
return epoch_parts, stc_parts
###################################################################################################
# Reshape with lookback and look into future value for LSTM
###################################################################################################
def reshape_future_data(stc, look_back, future_steps=1, start=0, step_size=1):
samples,n = stc.shape
stop = samples - look_back - 1 - future_steps + 1
feature_parts, label_parts = [], []
for i in range(start,stop,step_size):
feature_parts.append(stc[i:(i + look_back), :])
# Take just the last estimate as label
label_parts.append(stc[i + look_back : i + look_back + future_steps:1, :].flatten())
return feature_parts, label_parts
###################################################################################################
# Create Sequence Parts
###################################################################################################
def create_sequence_parts(stc, look_back, start=0, step_size=1):
samples,n = stc.shape
stop = samples - look_back - 1
stc_sequence_parts = []
for i in range(start,stop,step_size):
stc_sequence_parts.append(stc[i:(i + look_back), :])
stc_sequences = np.array(stc_sequence_parts)
return stc_sequences
###################################################################################################
# CMNEData class
###################################################################################################
class CMNEData(object):
"""the cmne data object
Attributes:
_cmne_settings: CMNE settings object.
_inv_op: The loaded inverse operator.
_epochs: The loaded epochs.
_num_epochs: Number of available epochs.
_train_idcs: Indices which should be used for training
_test_idcs: Indices which should be used for testing
"""
_mag_th = 4e-12 #Simulation: 4e-11
_grad_th = 4000e-13 #Simulation: 4000e-12
# Using the same inverse operator when inspecting single trials Vs. evoked
_snr = 3.0 # Standard assumption for average data but using it for single trial
_lambda2 = 1.0 / _snr ** 2
_method = "dSPM"
###############################################################################################
# Constructor
###############################################################################################
def __init__(self, cmne_settings):
"""Return a new CMNEData object."""
self._cmne_settings = cmne_settings
###############################################################################################
# Load Data
###############################################################################################
def load_data(self, event_id=1, tmin=-0.2, tmax=0.5, train_percentage = 0.85):
# Load data
inverse_operator = read_inverse_operator(self._cmne_settings.fname_inv())
raw = mne.io.read_raw_fif(self._cmne_settings.fname_raw())
events = mne.read_events(self._cmne_settings.fname_event())
# Set up pick list
include = []
# set EEG average reference
# raw.set_eeg_reference()
# Add a bad channel
# raw.info['bads'] += ['EEG 053'] # bads + 1 more
# pick MEG channels
picks = mne.pick_types( raw.info, meg=True, eeg=self._cmne_settings.meg_and_eeg(), stim=False, eog=False, include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs( raw, events, event_id, tmin, tmax, baseline=(None, 0),
picks=picks, preload=self._cmne_settings.large_memory(), reject=dict(mag=self._mag_th, grad=self._grad_th))#, eog=150e-5))#eog=150e-6))
epochs.drop_bad()
self._inv_op = inverse_operator
self._epochs = epochs
#Count epochs - since they are not preloaded it has to be done with a for loop
num_epochs = 0
for epoch in epochs:
num_epochs = num_epochs + 1
self._num_epochs = num_epochs
whole_list = list(range(num_epochs))
if os.path.isfile(self._cmne_settings.fname_test_idcs()):
self._test_idcs = []
with open(self._cmne_settings.fname_test_idcs(), "r") as f:
for line in f:
self._test_idcs.append(int(line.strip()))
self._train_idcs = [item for item in whole_list if item not in self._test_idcs]
else:
#split train and test
random.seed(42)
self._train_idcs = random.sample(range(num_epochs), (int)(num_epochs*train_percentage))
self._test_idcs = [item for item in whole_list if item not in self._train_idcs]
with open(self._cmne_settings.fname_test_idcs(), "w") as f:
for idx in self._test_idcs:
f.write(str(idx) +"\n")
###############################################################################################
# Generate Input
###############################################################################################
def generate_normalized_input(self, look_back=40, batch_size=20):
"""
Create the normalized input
"""
nave = 1
#Count epochs
num_epochs = 0
for epoch in self._epochs:
num_epochs = num_epochs + 1
while True:
# select random epochs
idx = random.sample(range(num_epochs), batch_size)
sel_epochs = self.epochs[idx]
stcs = apply_inverse_epochs(sel_epochs, self._inv_op, self._lambda2, self._method, pick_ori="normal", nave=nave)
# Attention - just an approximation, since not all stc are considered for the mean and the std
stc_data = np.hstack([stc.data for stc in stcs])
stc_mean = np.mean(stc_data, axis=1)
stc_std = np.std(stc_data, axis=1)
stc_data = None
#Attention end
for stc in stcs:
stc_normalized = standardize(stc.data, mean=stc_mean, std=stc_std)
stc_normalized_T = stc_normalized.transpose()
feature_list, label_list = reshape_stc_data(stc = stc_normalized_T, look_back = look_back)
features = np.array(feature_list)
labels = np.array(label_list)
yield features, labels
###############################################################################################
# Getters and setters
###############################################################################################
def epochs(self, idx=None):
"""
Returns selected epochs; if idx is None, all epochs are returned
"""
if idx is None:
return self._epochs
else:
return self._epochs[idx]
def test_idcs(self):
"""
Returns selected test indeces
"""
return self._test_idcs
def test_epochs(self, idx=None):
"""
Returns selected test epochs; if idx is None, all test epochs are returned
"""
if idx is None:
return self._epochs[self._test_idcs]
else:
return self._epochs[self._test_idcs][idx]
def train_idcs(self):
"""
Returns the selected training indices
"""
return self._train_idcs
def train_epochs(self, idx=None):
"""
Returns selected training epochs; if idx is None, all training epochs are returned
"""
if idx is None:
return self._epochs[self._train_idcs]
else:
return self._epochs[self._train_idcs][idx]
def inv_op(self):
"""
Returns the loaded inverse operator
"""
return self._inv_op
def method(self):
"""
Returns the inverse method
"""
return self._method
def snr(self):
"""
Returns the data's SNR
"""
return self._snr
def lambda2(self):
"""
Returns the regularization parameter lambda2 (= 1 / SNR**2)
"""
return self._lambda2
def num_epochs(self):
"""
Returns the number of loaded epochs
"""
return self._num_epochs
###################################################################################################
# Create all data at once
###################################################################################################
def create_lstm_data(epochs, inverse_operator, lambda2, method, look_back = 1):
"""
Create the dataset for testing regression models in the CNTK format
Y = GQ + E -> features = stc, labels = stc
"""
nave = 2
# Compute inverse solution and stcs for each epoch
# Use the same inverse operator as with evoked data (i.e., set nave)
# If you use a different nave, dSPM just scales by a factor sqrt(nave)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", nave=nave)
###############################################################################
# Standardize Label and Mean Data
feature_data = np.hstack([stc.data for stc in stcs])
feature_mean = np.mean(feature_data, axis=1)
feature_std = np.std(feature_data, axis=1)
features_normalized = []
labels_normalized = []
for stc in stcs:
stc_normalized = standardize(stc.data,mean=feature_mean,std=feature_std)
stc_normalized_T = stc_normalized.transpose()
feature_parts, label_parts = reshape_stc_data(stc = stc_normalized_T, look_back = look_back)
features_normalized.extend(feature_parts)
labels_normalized.extend(label_parts)
features= np.array(features_normalized)
labels = np.array(labels_normalized)
return features, labels
def create_epoch_stc_data(epochs, inverse_operator, lambda2, method, look_back = 1):
"""
Create the dataset for testing regression models in the CNTK format
Y = GQ + E
"""
nave = 2
# Compute inverse solution and stcs for each epoch
# Use the same inverse operator as with evoked data (i.e., set nave)
# If you use a different nave, dSPM just scales by a factor sqrt(nave)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", nave=nave)
###############################################################################
# Standardize Label and Mean Data
label_data = np.hstack([stc.data for stc in stcs])
label_mean = np.mean(label_data, axis=1)
label_std = np.std(label_data, axis=1)
feature_data = np.hstack([epoch.data for epoch in epochs])
feature_mean = np.mean(feature_data, axis=1)
feature_std = np.std(feature_data, axis=1)
epochs_normalized = []
stcs_normalized = []
for epoch,stc in zip(epochs,stcs):
stc_normalized = standardize(stc.data,mean=label_mean,std=label_std)
stc_normalized_T = stc_normalized.transpose()
epoch_normalized = standardize(epoch,mean=feature_mean,std=feature_std)
epoch_normalized_T = epoch_normalized.transpose()
epoch_parts, stc_parts = reshape_epoch_stc_data(epoch = epoch_normalized_T, stc = stc_normalized_T, look_back = look_back)
epochs_normalized.extend(epoch_parts)
stcs_normalized.extend(stc_parts)
features = np.array(epochs_normalized)
labels = np.array(stcs_normalized)
return features, labels
###################################################################################################
# Generate DNN Batches
###################################################################################################
def generate_dnn_batches(epochs, inverse_operator, lambda2, method, batch_size=20):
"""
Create the DNN Training Batches
"""
nave = 1
# Compute inverse solution and stcs for each epoch
# Use the same inverse operator as with evoked data (i.e., set nave)
# If you use a different nave, dSPM just scales by a factor sqrt(nave)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", nave=nave, verbose=None)
#Count epochs
num_epochs = 0
for epoch in epochs:
num_epochs = num_epochs + 1
while True:
# select random epochs
idx = random.sample(range(num_epochs), batch_size)
sel_epochs = epochs[idx]
sel_stcs = [stcs[i] for i in idx]
###############################################################################
# Standardize Label and Mean Data
feature_data = np.hstack([epoch.data for epoch in sel_epochs])
feature_mean = np.mean(feature_data, axis=1)
feature_std = np.std(feature_data, axis=1)
label_data = np.hstack([stc.data for stc in sel_stcs])
import numpy as np
import time
import copy
import math
import scipy
import logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("debug.log"),
logging.StreamHandler()
]
)
# included modules
from pynumdiff.finite_difference import first_order as finite_difference
import pynumdiff.smooth_finite_difference
from pynumdiff.utils import utility as utility
from pynumdiff import smooth_finite_difference
__friedrichs_kernel__ = utility.__friedrichs_kernel__
__gaussian_kernel__ = utility.__gaussian_kernel__
KERNELS = {'friedrichs': __friedrichs_kernel__,
'gaussian': __gaussian_kernel__}
# optional packages
warned = False
try:
import pychebfun
except:
logging.info('Import Error\nCould not import pychebfun.\nInstall pychebfun (https://github.com/pychebfun/pychebfun/) to use chebfun derivatives.\n')
warned = True
try:
import pydmd.dmdc
except:
logging.info('Import Error\nCould not import pydmd.\nInstall pydmd (florisvb fork: https://github.com/florisvb/PyDMD) to use dmd derivatives.\n')
warned = True
try:
import cvxpy
except:
logging.info('Import Error\nCould not import cvxpy.\nInstall cvxpy (http://www.cvxpy.org/install/index.html) to use lineardiff.\nRecommended solver: MOSEK, free academic license available: https://www.mosek.com/products/academic-licenses/ \n')
warned = True
if warned == True:
logging.info('Import Error\nDespite these import errors, you can still use many of the methods without additional installations.\n')
####################################################################################################################################################
# Helper functions
####################################################################################################################################################
def __slide_function__(func, x, dt, params, window_size, step_size, kernel_name):
'''
Slide a smoothing derivative function across a timeseries with specified window size.
Inputs
------
func : (function) name of the function to slide
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) see func for requirements
window_size : (int) size of the sliding window
step_size : (int) step size for slide (e.g. 1 means slide by 1 step)
kernel_name : (string) name of the smoothing kernel
(e.g. 'friedrichs' or 'gaussian')
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
# get smoothing kernel
if not window_size%2: # then make odd
window_size += 1
ker = KERNELS[kernel_name](window_size)
x_hat_list = []
dxdt_hat_list = []
weight_list = []
for p in range(0, len(x), step_size):
# deal with end points
start = p- int((window_size-1)/2)
end = p+ int((window_size-1)/2)+1
ker_start = 0
ker_end = window_size
ker_middle = int((window_size-1)/2)
if start < 0:
ker_start = np.abs(start)
ker_middle = ker_middle - np.abs(start)
start = 0
if end > len(x):
ker_end = window_size - (end-len(x))
end = len(x)
# weights
w = ker[ker_start:ker_end]
w = w/np.sum(w)
# run the function on the window
_x = x[start:end]
x_hat, dxdt_hat = func(_x, dt, params, options={'weights': w})
# stack results
z_x_hat = np.zeros([len(x)])
z_x_hat[start:end] = x_hat
x_hat_list.append(z_x_hat)
z_dxdt_hat = np.zeros([len(x)])
z_dxdt_hat[start:end] = dxdt_hat
dxdt_hat_list.append(z_dxdt_hat)
z_weights = np.zeros([len(x)])
z_weights[start:end] = w
weight_list.append(z_weights)
# column norm weights
weights = np.vstack(weight_list)
for col in range(weights.shape[1]):
weights[:, col] = weights[:, col] / np.sum(weights[:, col])
# stack and weight x_hat and dxdt_hat
x_hat = np.vstack(x_hat_list)
dxdt_hat = np.vstack(dxdt_hat_list)
x_hat = np.sum(weights*x_hat, axis=0)
dxdt_hat = np.sum(weights*dxdt_hat, axis=0)
return x_hat, dxdt_hat
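# Illustrative sketch (not part of the original module): sliding a toy smoother
# across a noisy ramp. '_toy_func' is a stand-in with the (x, dt, params, options)
# signature expected by __slide_function__; it is not one of the library's methods.
def _example_slide_function():
    def _toy_func(x, dt, params, options={}):
        w = options.get('weights', np.ones_like(x) / len(x))
        x_hat = np.ones_like(x) * np.sum(w * x)   # weighted-mean smoothing of the window
        dxdt_hat = np.gradient(x_hat, dt)         # derivative of the smoothed window
        return x_hat, dxdt_hat
    dt = 0.01
    x = np.linspace(0, 1, 200) + 0.05 * np.random.randn(200)
    x_hat, dxdt_hat = __slide_function__(_toy_func, x, dt, params=[],
                                         window_size=21, step_size=5,
                                         kernel_name='friedrichs')
    return x_hat, dxdt_hat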
####################################################################################################################################################
# Savitzky-Golay filter
####################################################################################################################################################
def savgoldiff(x, dt, params, options={'smooth': True}):
'''
Use the Savitzky-Golay filter to smooth the data and calculate the first derivative.
Uses scipy.signal.savgol_filter
The Savitzky-Golay filter is very similar to the sliding polynomial fit, but slightly noisier, and much faster.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N, : (int) order of the polynomial
window_size, : (int) size of the sliding window, must be odd (if not, 1 is added)
smoothing_win] : (int) size of the window used for gaussian smoothing, a good default is = window_size, but smaller for high freq data
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
N, window_size, smoothing_win = params
if window_size > len(x)-1:
window_size = len(x)-1
if smoothing_win > len(x)-1:
smoothing_win = len(x)-1
if window_size <= N:
window_size = N+1
if not window_size%2: # then make odd
window_size += 1
dxdt_hat = scipy.signal.savgol_filter(x, window_size, N, deriv=1) / dt
if 1: #options['smooth']:
kernel = __gaussian_kernel__(smoothing_win)
dxdt_hat = pynumdiff.smooth_finite_difference.__convolutional_smoother__(dxdt_hat, kernel, 1)
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x0 = utility.estimate_initial_condition(x, x_hat)
x_hat = x_hat + x0
return x_hat, dxdt_hat
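# Illustrative usage sketch (not part of the original module): differentiating a
# noisy sine with savgoldiff. The parameter values below are arbitrary examples.
def _example_savgoldiff():
    dt = 0.01
    t = np.arange(0, 2 * np.pi, dt)
    x = np.sin(t) + 0.05 * np.random.randn(len(t))
    x_hat, dxdt_hat = savgoldiff(x, dt, params=[3, 51, 51])  # [poly order, window, smoothing window]
    return x_hat, dxdt_hat                                   # dxdt_hat approximates cos(t)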
####################################################################################################################################################
# Polynomial fitting
####################################################################################################################################################
def __polydiff__(x, dt, params, options={}):
'''
Fit polynomials to the timeseries, and differentiate the polynomials.
Inputs
------
x : (np.array of floats, 1xN) time series to differentiate
dt : (float) time step
Parameters
----------
params : (list) [N] : (int) order of the polynomial
options : (dict) {'weights'} : (np.array, optional) weights applied to each point in calculating the polynomial fit. Defaults to 1s if missing.
Outputs
-------
x_hat : estimated (smoothed) x
dxdt_hat : estimated derivative of x
'''
if 'weights' in options.keys():
w = options['weights']
else:
w = np.ones_like(x)
# -*- coding: utf-8 -*-
"""
Authors: <NAME> and <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
<EMAIL>
Repository: https://github.com/wateraccounting/watools
Module: Collect/ALEXI
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team due to data restrictions of the ALEXI developers.
Description:
This script collects ALEXI data from the UNESCO-IHE FTP server. The data has a
monthly temporal resolution and a spatial resolution of 0.05 degree. The
resulting tiff files are in the WGS84 projection.
The data is available between 2003-01-01 till 2014-12-31.
Example:
from watools.Collect import ALEXI
ALEXI.monthly(Dir='C:/Temp/', Startdate='2003-02-24', Enddate='2003-03-09',
latlim=[50,54], lonlim=[3,7])
"""
from __future__ import print_function
# General modules
from builtins import str
import numpy as np
import os
import pandas as pd
from ftplib import FTP
import datetime
import math
import glob
# Water Accounting Modules
import watools.WebAccounts as WebAccounts
import watools.General.raster_conversions as RC
import watools.General.data_conversions as DC
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, TimeStep, Waitbar):
"""
This scripts downloads ALEXI ET data from the UNESCO-IHE ftp server.
The output files display the total ET in mm for a period of one week.
The name of the file corresponds to the first day of the week.
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
TimeStep -- 'daily' or 'weekly' (by using here monthly, an older dataset will be used)
latlim -- [ymin, ymax] (values must be between -60 and 70)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
"""
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -60 or latlim[1] > 70:
print('Latitude above 70N or below 60S is not possible. Value set to maximum')
latlim[0] = np.max([latlim[0], -60])
latlim[1] = np.min([latlim[1], 70])
if lonlim[0] < -180 or lonlim[1] > 180:
print ('Longitude must be between 180E and 180W. Now value is set to maximum')
lonlim[0] = np.max([lonlim[0], -180])
lonlim[1] = np.min([lonlim[1], 180])
# Check Startdate and Enddate
if not Startdate:
if TimeStep == 'weekly':
Startdate = pd.Timestamp('2003-01-01')
if TimeStep == 'daily':
Startdate = pd.Timestamp('2005-01-01')
if not Enddate:
if TimeStep == 'weekly':
Enddate = pd.Timestamp('2015-12-31')
if TimeStep == 'daily':
Enddate = pd.Timestamp('2016-12-31')
# Make a panda timestamp of the date
try:
Enddate = pd.Timestamp(Enddate)
except:
Enddate = Enddate
if TimeStep == 'weekly':
# Define the Startdate of ALEXI
DOY = datetime.datetime.strptime(Startdate,
'%Y-%m-%d').timetuple().tm_yday
Year = datetime.datetime.strptime(Startdate,
'%Y-%m-%d').timetuple().tm_year
# Change the startdate so it includes an ALEXI date
DOYstart = int(math.ceil(DOY/7.0)*7+1)
DOYstart = str('%s-%s' %(DOYstart, Year))
Day = datetime.datetime.strptime(DOYstart, '%j-%Y')
Month = '%02d' % Day.month
Day = '%02d' % Day.day
Date = (str(Year) + '-' + str(Month) + '-' + str(Day))
DOY = datetime.datetime.strptime(Date,
'%Y-%m-%d').timetuple().tm_yday
# The new Startdate
Date = pd.Timestamp(Date)
# amount of Dates weekly
Dates = pd.date_range(Date, Enddate, freq = '7D')
# Define directory and create it if not exists
output_folder = os.path.join(Dir, 'Evaporation', 'ALEXI', 'Weekly')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if TimeStep == 'daily':
# Define Dates
Dates = pd.date_range(Startdate, Enddate, freq = 'D')
# Define directory and create it if not exists
output_folder = os.path.join(Dir, 'Evaporation', 'ALEXI', 'Daily')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Create Waitbar
total_amount = len(Dates)
if Waitbar == 1:
import watools.Functions.Start.WaitbarConsole as WaitbarConsole
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
if TimeStep == 'weekly':
ALEXI_weekly(Date, Enddate, output_folder, latlim, lonlim, Year, Waitbar, total_amount, TimeStep)
if TimeStep == 'daily':
ALEXI_daily(Dates, output_folder, latlim, lonlim, Waitbar, total_amount, TimeStep)
def ALEXI_weekly(Date, Enddate, output_folder, latlim, lonlim, Year, Waitbar, total_amount, TimeStep):
# Define the stop conditions
Stop = Enddate.toordinal()
End_date=0
amount = 0
while End_date == 0:
# Date as printed in filename
Datesname=Date+pd.DateOffset(days=-7)
DirFile= os.path.join(output_folder,'ETa_ALEXI_CSFR_mm-week-1_weekly_%s.%02s.%02s.tif' %(Datesname.strftime('%Y'), Datesname.strftime('%m'), Datesname.strftime('%d')))
# Define end filename
filename = "ALEXI_weekly_mm_%s_%s.tif" %(Date.strftime('%j'), Date.strftime('%Y'))
# Temporary filename for the downloaded global file
local_filename = os.path.join(output_folder, filename)
# Create the new date for the next download
Datename = (str(Date.strftime('%Y')) + '-' + str(Date.strftime('%m')) + '-' + str(Date.strftime('%d')))
# Define IDs
yID = 3000 - np.int16(np.array([np.ceil((latlim[1]+60)*20),np.floor((latlim[0]+60)*20)]))
xID = np.int16(np.array([np.floor((lonlim[0])*20),np.ceil((lonlim[1])*20)])+3600)
# Download the data from FTP server if the file not exists
if not os.path.exists(DirFile):
try:
Download_ALEXI_from_WA_FTP(local_filename, DirFile, filename, lonlim, latlim, yID, xID, TimeStep)
except:
print("Was not able to download file with date %s" %Date)
# Current DOY
DOY = datetime.datetime.strptime(Datename,
'%Y-%m-%d').timetuple().tm_yday
# Define next day
DOY_next = int(DOY + 7)
if DOY_next >= 366:
DOY_next = 8
Year += 1
DOYnext = str('%s-%s' %(DOY_next, Year))
DayNext = datetime.datetime.strptime(DOYnext, '%j-%Y')
Month = '%02d' % DayNext.month
Day = '%02d' % DayNext.day
Date = (str(Year) + '-' + str(Month) + '-' + str(Day))
# Adjust waitbar
if Waitbar == 1:
import watools.Functions.Start.WaitbarConsole as WaitbarConsole
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# Check if this file must be downloaded
Date = pd.Timestamp(Date)
if Date.toordinal() > Stop:
End_date = 1
def ALEXI_daily(Dates, output_folder, latlim, lonlim, Waitbar, total_amount, TimeStep):
amount = 0
for Date in Dates:
# Date as printed in filename
DirFile= os.path.join(output_folder,'ETa_ALEXI_CSFR_mm-day-1_daily_%d.%02d.%02d.tif' %(Date.year, Date.month, Date.day))
DOY = Date.timetuple().tm_yday
# Define end filename
filename = "EDAY_CERES_%d%03d.dat.gz" %(Date.year, DOY)
# Temporary filename for the downloaded global file
local_filename = os.path.join(output_folder, filename)
# Define IDs
yID = 3000 - np.int16(np.array([np.ceil((latlim[1]+60)*20),np.floor((latlim[0]+60)*20)]))
from __future__ import division
import itertools
import warnings
import numpy as np
scipy_gaussian_filter = None # expensive
from .base import ndfeature, winitfeature, imgfeature
from ._gradient import gradient_cython
from .windowiterator import WindowIterator, WindowIteratorResult
def _np_gradient(pixels):
"""
This method is used in the case of multi-channel images (not 2D images).
The output ordering is identical to the gradient() method, returning
a 2 * n_channels image with gradients in order of the first axis derivative
over all the channels, then the second etc. For example, in the case of
a 3D image with 2 channels, the ordering would be:
I[:, 0, 0, 0] = [A_0, B_0, A_1, B_1, A_2, B_2]
where A and B are the 'channel' labels (synonymous with RGB for a colour
image) and 0,1,2 are the axis labels (synonymous with y,x for a 2D image).
"""
n_dims = pixels.ndim - 1
grad_per_dim_per_channel = [np.gradient(g, edge_order=1)
for g in pixels]
# Flatten out the separate dims
grad_per_channel = list(itertools.chain.from_iterable(
grad_per_dim_per_channel))
# Add a channel axis for broadcasting
grad_per_channel = [g[None, ...] for g in grad_per_channel]
# Permute the list so it is first axis, second axis, etc
grad_per_channel = [grad_per_channel[i::n_dims]
for i in range(n_dims)]
grad_per_channel = list(itertools.chain.from_iterable(grad_per_channel))
# Concatenate gradient list into an array (the new_image)
return np.concatenate(grad_per_channel, axis=0)
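# Illustrative sketch (not from the original module): output ordering of
# _np_gradient for a 2-channel 2D image. The result stacks the first-axis
# derivatives of every channel, then the second-axis derivatives, as described
# in the docstring above.
def _example_np_gradient():
    pixels = np.random.rand(2, 4, 5)   # (n_channels, height, width)
    grad = _np_gradient(pixels)
    return grad.shape                  # (4, 4, 5): ordering [A_y, B_y, A_x, B_x]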
@ndfeature
def gradient(pixels):
r"""
Calculates the gradient of an input image. The image is assumed to have
channel information on the first axis. In the case of multiple channels,
it returns the gradient over each axis over each channel as the first axis.
The gradient is computed using second order accurate central differences in
the interior and first order accurate one-side (forward or backwards)
differences at the boundaries.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array where the first dimension
is interpreted as channels. This means an N-dimensional image is
represented by an N+1 dimensional array.
If the image is 2-dimensional the pixels should be of type
float/double (int is not supported).
Returns
-------
gradient : `ndarray`
The gradient over each axis over each channel. Therefore, the
first axis of the gradient of a 2D, single channel image, will have
length `2`. The first axis of the gradient of a 2D, 3-channel image,
will have length `6`, the ordering being
``I[:, 0, 0] = [R0_y, G0_y, B0_y, R0_x, G0_x, B0_x]``. To be clear,
all the ``y``-gradients are returned over each channel, then all
the ``x``-gradients.
"""
if (pixels.ndim - 1) == 2: # 2D Image
return gradient_cython(pixels)
else:
return _np_gradient(pixels)
@ndfeature
def gaussian_filter(pixels, sigma):
r"""
Calculates the convolution of the input image with a multidimensional
Gaussian filter.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
sigma : `float` or `list` of `float`
The standard deviation for Gaussian kernel. The standard deviations of
the Gaussian filter are given for each axis as a `list`, or as a single
`float`, in which case it is equal for all axes.
Returns
-------
output_image : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The filtered image has the same type and size as the input ``pixels``.
"""
global scipy_gaussian_filter
if scipy_gaussian_filter is None:
from scipy.ndimage import gaussian_filter as scipy_gaussian_filter
output = np.empty(pixels.shape, dtype=pixels.dtype)
for dim in range(pixels.shape[0]):
scipy_gaussian_filter(pixels[dim], sigma, output=output[dim])
return output
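# Illustrative sketch (not from the original module): channel-wise Gaussian
# smoothing of a random 3-channel image with an isotropic sigma, assuming raw
# ndarrays are accepted as stated in the docstring above.
def _example_gaussian_filter():
    pixels = np.random.rand(3, 32, 32)              # (n_channels, height, width)
    smoothed = gaussian_filter(pixels, sigma=2.0)   # same shape as the input
    return smoothed.shape                           # (3, 32, 32)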
@winitfeature
def hog(pixels, mode='dense', algorithm='dalaltriggs', num_bins=9,
cell_size=8, block_size=2, signed_gradient=True, l2_norm_clip=0.2,
window_height=1, window_width=1, window_unit='blocks',
window_step_vertical=1, window_step_horizontal=1,
window_step_unit='pixels', padding=True, verbose=False):
r"""
Extracts Histograms of Oriented Gradients (HOG) features from the input
image.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : {``dense``, ``sparse``}, optional
The ``sparse`` case refers to the traditional usage of HOGs, so
predefined parameters values are used.
The ``sparse`` case of ``dalaltriggs`` algorithm sets
``window_height = window_width = block_size`` and
``window_step_horizontal = window_step_vertical = cell_size``.
The ``sparse`` case of ``zhuramanan`` algorithm sets
``window_height = window_width = 3 * cell_size`` and
``window_step_horizontal = window_step_vertical = cell_size``.
In the ``dense`` case, the user can choose values for `window_height`,
`window_width`, `window_unit`, `window_step_vertical`,
`window_step_horizontal`, `window_step_unit` and `padding` to customize
the HOG calculation.
window_height : `float`, optional
Defines the height of the window. The metric unit is defined by
`window_unit`.
window_width : `float`, optional
Defines the width of the window. The metric unit is defined by
`window_unit`.
window_unit : {``blocks``, ``pixels``}, optional
Defines the metric unit of the `window_height` and `window_width`
parameters.
window_step_vertical : `float`, optional
Defines the vertical step by which the window is moved, thus it
controls the features' density. The metric unit is defined by
`window_step_unit`.
window_step_horizontal : `float`, optional
Defines the horizontal step by which the window is moved, thus it
controls the features' density. The metric unit is defined by
`window_step_unit`.
window_step_unit : {``pixels``, ``cells``}, optional
Defines the metric unit of the `window_step_vertical` and
`window_step_horizontal` parameters.
padding : `bool`, optional
If ``True``, the output image is padded with zeros to match the input
image's size.
algorithm : {``dalaltriggs``, ``zhuramanan``}, optional
Specifies the algorithm used to compute HOGs. ``dalaltriggs`` is the
implementation of [1] and ``zhuramanan`` is the implementation of [2].
cell_size : `float`, optional
Defines the cell size in pixels. This value is set to both the width
and height of the cell. This option is valid for both algorithms.
block_size : `float`, optional
Defines the block size in cells. This value is set to both the width
and height of the block. This option is valid only for the
``dalaltriggs`` algorithm.
num_bins : `float`, optional
Defines the number of orientation histogram bins. This option is
valid only for the ``dalaltriggs`` algorithm.
signed_gradient : `bool`, optional
Flag that defines whether we use signed or unsigned gradient angles.
This option is valid only for the ``dalaltriggs`` algorithm.
l2_norm_clip : `float`, optional
Defines the clipping value of the gradients' L2-norm. This option is
valid only for the ``dalaltriggs`` algorithm.
verbose : `bool`, optional
Flag to print HOG related information.
Returns
-------
hog : :map:`Image` or subclass or ``(X, Y, ..., Z, K)`` `ndarray`
The HOG features image. It has the same type as the input ``pixels``.
The output number of channels in the case of ``dalaltriggs`` is
``K = num_bins * block_size * block_size`` and ``K = 31`` in the case of
``zhuramanan``.
Raises
------
ValueError
HOG features mode must be either dense or sparse
ValueError
Algorithm must be either dalaltriggs or zhuramanan
ValueError
Number of orientation bins must be > 0
ValueError
Cell size (in pixels) must be > 0
ValueError
Block size (in cells) must be > 0
ValueError
Value for L2-norm clipping must be > 0.0
ValueError
Window height must be >= block size and <= image height
ValueError
Window width must be >= block size and <= image width
ValueError
Window unit must be either pixels or blocks
ValueError
Horizontal window step must be > 0
ValueError
Vertical window step must be > 0
ValueError
Window step unit must be either pixels or cells
References
----------
.. [1] <NAME> and <NAME>, "Histograms of oriented gradients for human
detection", Proceedings of the IEEE Conference on Computer Vision and
Pattern Recognition (CVPR), 2005.
.. [2] <NAME>, <NAME>. "Face detection, pose estimation and landmark
localization in the wild", Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition (CVPR), 2012.
"""
# TODO: This is a temporary fix
# flip axis
pixels = np.rollaxis(pixels, 0, len(pixels.shape))
# Parse options
if mode not in ['dense', 'sparse']:
raise ValueError("HOG features mode must be either dense or sparse")
if algorithm not in ['dalaltriggs', 'zhuramanan']:
raise ValueError("Algorithm must be either dalaltriggs or zhuramanan")
if num_bins <= 0:
raise ValueError("Number of orientation bins must be > 0")
if cell_size <= 0:
raise ValueError("Cell size (in pixels) must be > 0")
if block_size <= 0:
raise ValueError("Block size (in cells) must be > 0")
if l2_norm_clip <= 0.0:
raise ValueError("Value for L2-norm clipping must be > 0.0")
if mode == 'dense':
if window_unit not in ['pixels', 'blocks']:
raise ValueError("Window unit must be either pixels or blocks")
window_height_temp = window_height
window_width_temp = window_width
if window_unit == 'blocks':
window_height_temp = window_height * block_size * cell_size
window_width_temp = window_width * block_size * cell_size
if (window_height_temp < block_size * cell_size or
window_height_temp > pixels.shape[0]):
raise ValueError("Window height must be >= block size and <= "
"image height")
if (window_width_temp < block_size*cell_size or
window_width_temp > pixels.shape[1]):
raise ValueError("Window width must be >= block size and <= "
"image width")
if window_step_horizontal <= 0:
raise ValueError("Horizontal window step must be > 0")
if window_step_vertical <= 0:
raise ValueError("Vertical window step must be > 0")
if window_step_unit not in ['pixels', 'cells']:
raise ValueError("Window step unit must be either pixels or cells")
# Correct input image_data
pixels = np.asfortranarray(pixels)
pixels *= 255.
# Dense case
if mode == 'dense':
# Iterator parameters
if algorithm == 'dalaltriggs':
algorithm = 1
if window_unit == 'blocks':
block_in_pixels = cell_size * block_size
window_height = np.uint32(window_height * block_in_pixels)
window_width = np.uint32(window_width * block_in_pixels)
if window_step_unit == 'cells':
window_step_vertical = np.uint32(window_step_vertical *
cell_size)
window_step_horizontal = np.uint32(window_step_horizontal *
cell_size)
elif algorithm == 'zhuramanan':
algorithm = 2
if window_unit == 'blocks':
block_in_pixels = 3 * cell_size
window_height = np.uint32(window_height * block_in_pixels)
window_width = np.uint32(window_width * block_in_pixels)
if window_step_unit == 'cells':
window_step_vertical = np.uint32(window_step_vertical *
cell_size)
window_step_horizontal = np.uint32(window_step_horizontal *
cell_size)
iterator = WindowIterator(pixels, window_height, window_width,
window_step_horizontal,
window_step_vertical, padding)
# Sparse case
else:
# Create iterator
if algorithm == 'dalaltriggs':
algorithm = 1
window_size = cell_size * block_size
step = cell_size
else:
algorithm = 2
window_size = 3 * cell_size
step = cell_size
iterator = WindowIterator(pixels, window_size, window_size, step,
step, False)
# Print iterator's info
if verbose:
print(iterator)
# Compute HOG
hog_descriptor = iterator.HOG(algorithm, num_bins, cell_size, block_size,
signed_gradient, l2_norm_clip, verbose)
# TODO: This is a temporary fix
# flip axis
hog_descriptor = WindowIteratorResult(
np.ascontiguousarray(np.rollaxis(hog_descriptor.pixels, -1)),
hog_descriptor.centres)
return hog_descriptor
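# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). The helper below is hypothetical and is never called
# --- at import time; it assumes a small random channel-first array is an
# --- acceptable input, as described in the docstring above.
def _example_hog_usage():
    img = np.random.rand(1, 64, 64)  # (C, X, Y) single-channel image
    # Sparse mode reproduces the classic Dalal & Triggs configuration.
    return hog(img, mode='sparse', algorithm='dalaltriggs')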
@ndfeature
def igo(pixels, double_angles=False, verbose=False):
r"""
Extracts Image Gradient Orientation (IGO) features from the input image.
The output image has ``N * C`` number of channels, where ``N`` is the
number of channels of the original image and ``C = 2`` or ``C = 4``
depending on whether double angles are used.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
double_angles : `bool`, optional
Assume that ``phi`` represents the gradient orientations.
If this flag is ``False``, the features image is the concatenation of
``cos(phi)`` and ``sin(phi)``, thus 2 channels.
If ``True``, the features image is the concatenation of
``cos(phi)``, ``sin(phi)``, ``cos(2 * phi)``, ``sin(2 * phi)``, thus 4
channels.
verbose : `bool`, optional
Flag to print IGO related information.
Returns
-------
igo : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The IGO features image. It has the same type and shape as the input
``pixels``. The output number of channels depends on the
``double_angles`` flag.
Raises
------
ValueError
Image has to be 2D in order to extract IGOs.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, "Subspace learning
from image gradient orientations", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 34, num. 12, p. 2454--2466, 2012.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError('IGOs only work on 2D images. Expects image data '
'to be 3D, channels + shape.')
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_chnls = 2
if double_angles:
feat_chnls = 4
# compute gradients
grad = gradient(pixels)
# compute angles
grad_orient = np.angle(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute igo image
igo_pixels = np.empty((n_img_chnls * feat_chnls,
pixels.shape[1], pixels.shape[2]),
dtype=pixels.dtype)
if double_angles:
dbl_grad_orient = 2 * grad_orient
# y angles
igo_pixels[:n_img_chnls] = np.sin(grad_orient)
igo_pixels[n_img_chnls:n_img_chnls*2] = np.sin(dbl_grad_orient)
# x angles
igo_pixels[n_img_chnls*2:n_img_chnls*3] = np.cos(grad_orient)
igo_pixels[n_img_chnls*3:] = np.cos(dbl_grad_orient)
else:
igo_pixels[:n_img_chnls] = np.sin(grad_orient) # y
igo_pixels[n_img_chnls:] = np.cos(grad_orient) # x
# print information
if verbose:
info_str = "IGO Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls)
info_str = "{} - Double angles are {}.\n".format(
info_str, 'enabled' if double_angles else 'disabled')
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, igo_pixels.shape[2], igo_pixels.shape[1], igo_pixels.shape[0])
print(info_str)
return igo_pixels
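# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). Hypothetical helper, never called at import time.
def _example_igo_usage():
    img = np.random.rand(1, 32, 32)  # (C, X, Y) single-channel image
    # With double_angles=True the output has 4 channels per input channel.
    return igo(img, double_angles=True)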
@ndfeature
def es(pixels, verbose=False):
r"""
Extracts Edge Structure (ES) features from the input image. The output image
has ``N * C`` number of channels, where ``N`` is the number of channels of
the original image and ``C = 2``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either an image object itself or an array where the first axis
represents the number of channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
verbose : `bool`, optional
Flag to print ES related information.
Returns
-------
es : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The ES features image. It has the same type and shape as the input
``pixels``. The output number of channels is ``C = 2``.
Raises
------
ValueError
Image has to be 2D in order to extract ES features.
References
----------
.. [1] <NAME>, <NAME>, "On representing edge structure for model
matching", Proceedings of the IEEE Conference on Computer Vision and
Pattern Recognition (CVPR), 2001.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError('ES features only work on 2D images. Expects '
'image data to be 3D, channels + shape.')
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_channels = 2
# compute gradients
grad = gradient(pixels)
# compute magnitude
grad_abs = np.abs(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute es image
grad_abs = grad_abs + np.median(grad_abs)
es_pixels = np.empty((pixels.shape[0] * feat_channels,
pixels.shape[1], pixels.shape[2]),
dtype=pixels.dtype)
es_pixels[:n_img_chnls] = grad[:n_img_chnls] / grad_abs
es_pixels[n_img_chnls:] = grad[n_img_chnls:] / grad_abs
# print information
if verbose:
info_str = "ES Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls)
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, es_pixels.shape[2], es_pixels.shape[1], es_pixels.shape[0])
print(info_str)
return es_pixels
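# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). Hypothetical helper, never called at import time.
def _example_es_usage():
    img = np.random.rand(1, 32, 32)  # (C, X, Y) single-channel image
    # The ES image holds the x and y gradient components normalised by the
    # median-offset gradient magnitude, i.e. 2 channels per input channel.
    return es(img)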
@ndfeature
def daisy(pixels, step=1, radius=15, rings=2, histograms=2, orientations=8,
normalization='l1', sigmas=None, ring_radii=None, verbose=False):
r"""
Extracts Daisy features from the input image. The output image has ``N * C``
number of channels, where ``N`` is the number of channels of the original
image and ``C`` is the feature channels determined by the input options.
Specifically, ``C = (rings * histograms + 1) * orientations``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
step : `int`, optional
The sampling step that defines the density of the output image.
radius : `int`, optional
The radius (in pixels) of the outermost ring.
rings : `int`, optional
The number of rings to be used.
histograms : `int`, optional
The number of histograms sampled per ring.
orientations : `int`, optional
The number of orientations (bins) per histogram.
normalization : [ 'l1', 'l2', 'daisy', None ], optional
It defines how to normalize the descriptors
If 'l1' then L1-normalization is applied at each descriptor.
If 'l2' then L2-normalization is applied at each descriptor.
If 'daisy' then L2-normalization is applied at individual histograms.
If None then no normalization is employed.
sigmas : `list` of `float` or ``None``, optional
Standard deviation of spatial Gaussian smoothing for the centre
histogram and for each ring of histograms. The `list` of sigmas should
be sorted from the centre and out. I.e. the first sigma value defines
the spatial smoothing of the centre histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the `rings` parameter by setting ``rings = len(sigmas) - 1``.
ring_radii : `list` of `float` or ``None``, optional
Radius (in pixels) for each ring. Specifying `ring_radii` overrides the
`rings` and `radius` parameters by setting ``rings = len(ring_radii)``
and ``radius = ring_radii[-1]``.
If both sigmas and ring_radii are given, they must satisfy ::
len(ring_radii) == len(sigmas) - 1
since no radius is needed for the centre histogram.
verbose : `bool`
Flag to print Daisy related information.
Returns
-------
daisy : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The Daisy features image. It has the same type and shape as the input
``pixels``. The output number of channels is
``C = (rings * histograms + 1) * orientations``.
Raises
------
ValueError
len(sigmas)-1 != len(ring_radii)
ValueError
Invalid normalization method.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, "Daisy: An efficient dense descriptor
applied to wide-baseline stereo", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 32, num. 5, p. 815-830, 2010.
"""
from menpo.external.skimage._daisy import _daisy
# Parse options
if sigmas is not None and ring_radii is not None \
and len(sigmas) - 1 != len(ring_radii):
raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization is None:
normalization = 'off'
if normalization not in ['l1', 'l2', 'daisy', 'off']:
raise ValueError('Invalid normalization method.')
# Compute daisy features
daisy_descriptor = _daisy(pixels, step=step, radius=radius, rings=rings,
histograms=histograms, orientations=orientations,
normalization=normalization, sigmas=sigmas,
ring_radii=ring_radii)
# print information
if verbose:
info_str = "Daisy Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], pixels.shape[0])
info_str = "{} - Sampling step is {}.\n".format(info_str, step)
info_str = "{} - Radius of {} pixels, {} rings and {} histograms " \
"with {} orientations.\n".format(
info_str, radius, rings, histograms, orientations)
if not normalization == 'off':
info_str = "{} - Using {} normalization.\n".format(info_str,
normalization)
else:
info_str = "{} - No normalization employed.\n".format(info_str)
info_str = "{}Output image size {}W x {}H x {}.".format(
info_str, daisy_descriptor.shape[2], daisy_descriptor.shape[1],
daisy_descriptor.shape[0])
print(info_str)
return daisy_descriptor
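# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). Hypothetical helper, never called at import time.
def _example_daisy_usage():
    img = np.random.rand(1, 64, 64)  # (C, X, Y) single-channel image
    # With the defaults (rings=2, histograms=2, orientations=8) the output
    # has (2 * 2 + 1) * 8 = 40 channels per input channel.
    return daisy(img, step=4)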
# TODO: Needs fixing ...
@winitfeature
def lbp(pixels, radius=None, samples=None, mapping_type='riu2',
window_step_vertical=1, window_step_horizontal=1,
window_step_unit='pixels', padding=True, verbose=False,
skip_checks=False):
r"""
Extracts Local Binary Pattern (LBP) features from the input image. The
output image has ``N * C`` number of channels, where ``N`` is the number of
channels of the original image and ``C`` is the number of radius/samples
values combinations that are used in the LBP computation.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
radius : `int` or `list` of `int` or ``None``, optional
It defines the radius of the circle (or circles) at which the sampling
points will be extracted. The radius (or radii) values must be greater
than zero. There must be a radius value for each samples value, thus
they both need to have the same length. If ``None``, then
``[1, 2, 3, 4]`` is used.
samples : `int` or `list` of `int` or ``None``, optional
It defines the number of sampling points that will be extracted at each
circle. The samples value (or values) must be greater than zero. There
must be a samples value for each radius value, thus they both need to
have the same length. If ``None``, then ``[8, 8, 8, 8]`` is used.
mapping_type : {``u2``, ``ri``, ``riu2``, ``none``}, optional
It defines the mapping type of the LBP codes. Select ``u2`` for
uniform-2 mapping, ``ri`` for rotation-invariant mapping, ``riu2`` for
uniform-2 and rotation-invariant mapping and ``none`` to use no mapping
and only the decimal values instead.
window_step_vertical : `float`, optional
Defines the vertical step by which the window is moved, thus it controls
the features density. The metric unit is defined by `window_step_unit`.
window_step_horizontal : `float`, optional
Defines the horizontal step by which the window is moved, thus it
controls the features density. The metric unit is defined by
`window_step_unit`.
window_step_unit : {``pixels``, ``window``}, optional
Defines the metric unit of the `window_step_vertical` and
`window_step_horizontal` parameters.
padding : `bool`, optional
If ``True``, the output image is padded with zeros to match the input
image's size.
verbose : `bool`, optional
Flag to print LBP related information.
skip_checks : `bool`, optional
If ``True``, do not perform any validation of the parameters.
Returns
-------
lbp : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The LBP features image. It has the same type and shape as the input
``pixels``. The output number of channels is
``C = len(radius) * len(samples)``.
Raises
------
ValueError
Radius and samples must both be either integers or lists
ValueError
Radius and samples must have the same length
ValueError
Radius must be > 0
ValueError
Radii must be > 0
ValueError
Samples must be > 0
ValueError
Mapping type must be u2, ri, riu2 or none
ValueError
Horizontal window step must be > 0
ValueError
Vertical window step must be > 0
ValueError
Window step unit must be either pixels or window
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "Multiresolution gray-scale
and rotation invariant texture classification with local binary
patterns", IEEE Transactions on Pattern Analysis and Machine
Intelligence, vol. 24, num. 7, p. 971-987, 2002.
"""
if radius is None:
radius = range(1, 5)
if samples is None:
samples = [8]*4
# TODO: This is a temporary fix
# flip axis
pixels = np.rollaxis(pixels, 0, len(pixels.shape))
if not skip_checks:
# Check parameters
if ((isinstance(radius, int) and isinstance(samples, list)) or
(isinstance(radius, list) and isinstance(samples, int))):
raise ValueError("Radius and samples must both be either integers "
"or lists")
elif isinstance(radius, list) and isinstance(samples, list):
if len(radius) != len(samples):
raise ValueError("Radius and samples must have the same "
"length")
if isinstance(radius, int) and radius < 1:
raise ValueError("Radius must be > 0")
elif isinstance(radius, list) and sum(r < 1 for r in radius) > 0:
raise ValueError("Radii must be > 0")
if isinstance(samples, int) and samples < 1:
raise ValueError("Samples must be > 0")
elif isinstance(samples, list) and sum(s < 1 for s in samples) > 0:
raise ValueError("Samples must be > 0")
if mapping_type not in ['u2', 'ri', 'riu2', 'none']:
raise ValueError("Mapping type must be u2, ri, riu2 or "
"none")
if window_step_horizontal <= 0:
raise ValueError("Horizontal window step must be > 0")
if window_step_vertical <= 0:
raise ValueError("Vertical window step must be > 0")
if window_step_unit not in ['pixels', 'window']:
raise ValueError("Window step unit must be either pixels or "
"window")
# Correct input image_data
pixels = np.asfortranarray(pixels)
# Parse options
radius = np.asfortranarray(radius)
samples = np.asfortranarray(samples)
window_height = np.uint32(2 * radius.max() + 1)
window_width = window_height
if window_step_unit == 'window':
window_step_vertical = np.uint32(window_step_vertical * window_height)
window_step_horizontal = np.uint32(window_step_horizontal *
window_width)
if mapping_type == 'u2':
mapping_type = 1
elif mapping_type == 'ri':
mapping_type = 2
elif mapping_type == 'riu2':
mapping_type = 3
else:
mapping_type = 0
# Create iterator object
iterator = WindowIterator(pixels, window_height, window_width,
window_step_horizontal, window_step_vertical,
padding)
# Print iterator's info
if verbose:
print(iterator)
# Compute LBP
lbp_descriptor = iterator.LBP(radius, samples, mapping_type, verbose)
# TODO: This is a temporary fix
# flip axis
lbp_descriptor = WindowIteratorResult(
np.ascontiguousarray( | np.rollaxis(lbp_descriptor.pixels, -1) | numpy.rollaxis |
'''
Created on: see version log.
@author: rigonz
coding: utf-8
IMPORTANT: requires py3.6 (rasterio)
Script that:
1) reads a series of raster files,
2) runs some checks,
3) makes charts showing the results.
The input data corresponds to a region of the world (ESP) and represents
the population density (pop/km2).
Each file comes from a different data provider, or from different calculation conditions.
The checks consist in verifying that the input files refer to the same region
and to some intercomparison indicators.
The charts show the correlation among the different input data, as tuples
associated with the same geographical location.
Version log.
R0 (20210512):
First trials, seems to work well.
'''
# %% Imports.
import rasterio # IMPORTANT: requires py3.6
import numpy as np
from matplotlib import pyplot as plt
# %% Directories.
RootDirIn = 'D:/0 DOWN/zz EXTSave/GIS/POP/EUR/SHP/'
# Filenames:
FileNameI1 = RootDirIn + 'WP/ESP_clip_pd_2020_1km_UNadj.tif'
FileNameI2 = RootDirIn + 'WP/ESP_clip_ppp_2020_1km_Aggregated_UNadj_d.tif'
FileNameI3 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_rev11_2020_30_sec.tif'
FileNameI4 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_adjusted_to_2015_unwpp_country_totals_rev11_2020_30_sec.tif'
# %% Read data.
# Open files:
print('Opening and reading the files...')
ds1 = rasterio.open(FileNameI1)
ds2 = rasterio.open(FileNameI2)
ds3 = rasterio.open(FileNameI3)
ds4 = rasterio.open(FileNameI4)
# Read data:
band1 = ds1.read(1)
band2 = ds2.read(1)
band3 = ds3.read(1)
band4 = ds4.read(1)
# %% Check the datasets.
print('Checking the data...')
# Bounds:
if not(ds1.bounds == ds2.bounds and ds2.bounds == ds3.bounds and
ds3.bounds == ds4.bounds):
print('WARNING: bounds are not the same:')
print(ds1.bounds)
print(ds2.bounds)
print(ds3.bounds)
print(ds4.bounds)
# Width and height:
if not(ds1.width == ds2.width and ds2.width == ds3.width and
ds3.width == ds4.width):
print('WARNING: widths are not the same:')
print(ds1.width)
print(ds2.width)
print(ds3.width)
print(ds4.width)
if not(ds1.height == ds2.height and ds2.height == ds3.height and
ds3.height == ds4.height):
print('WARNING: heights are not the same:')
print(ds1.height)
print(ds2.height)
print(ds3.height)
print(ds4.height)
# Bands:
if not(ds1.indexes[0] == ds2.indexes[0] and ds2.indexes[0] == ds3.indexes[0]
and ds3.indexes[0] == ds4.indexes[0]):
print('WARNING: bands are not the same:')
print(ds1.indexes[0])
print(ds2.indexes[0])
print(ds3.indexes[0])
print(ds4.indexes[0])
# Dimensions:
if not(ds1.shape == ds2.shape and ds2.shape == ds3.shape and
ds3.shape == ds4.shape):
print('WARNING: shapes are not the same:')
print(ds1.shape)
print(ds2.shape)
print(ds3.shape)
print(ds4.shape)
# CRS:
try:
if (ds1.crs.data['init'] != 'epsg:4326' or
ds2.crs.data['init'] != 'epsg:4326' or
ds3.crs.data['init'] != 'epsg:4326' or
ds4.crs.data['init'] != 'epsg:4326'):
print('WARNING: CRS is not EPSG:4326.')
except Exception:
print('WARNING: CRS is not available or is not EPSG:4326:')
# %% Create new bands.
print('Checking the new bands...')
# Remain within the boundaries of data:
left = max(ds1.bounds.left, ds2.bounds.left, ds3.bounds.left, ds4.bounds.left)
top = min(ds1.bounds.top, ds2.bounds.top, ds3.bounds.top, ds4.bounds.top)
right = min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right)
bottom = max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom)
res = 1 / 120. # 30 arc-sec, approx 1 km; should be min() etc.
height = int(np.ceil((top - bottom) / res + 1))
width = int(np.ceil((right - left) / res + 1))
res_x = (right - left) / (width - 1)
res_y = (top - bottom) / (height - 1)
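# For reference (illustrative note, added for clarity): with the grid defined
# above, the centre of the cell at row i, column j of the new bands lies at
#   lon = left + j * res_x, lat = top - i * res_y
# which is the natural convention for sampling each dataset onto the common grid.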
# Check (valid for east + north hemispheres only!):
if right > min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right):
print('WARNING: right boundary exceeded.')
if bottom > max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom):
print('WARNING: bottom boundary exceeded.')
# Create new bands:
print('Creating the new bands...')
b1 = np.full((height, width), 0.)
b2 = np.full((height, width), 0.)
b3 = np.full((height, width), 0.)
b4 = | np.full((height, width), 0.) | numpy.full |
import numpy as np
import tensorflow as tf
def build_laplace(n,boundary='0'):
if n==1:
return np.zeros((1,1),dtype=np.float32)
d1 = -2 * | np.ones((n,),dtype=np.float32) | numpy.ones |
from abc import ABC, abstractmethod
import numpy as np
from janus.system import Buffer
from copy import deepcopy
import mdtraj as md
import mendeleev as mdlv
class Partition(ABC):
nm_to_angstrom = 10.0000000
def __init__(self, trajectory, topology, class_type):
self.traj = trajectory
self.topology = topology
self.class_type = class_type
def compute_COM(self, atoms):
"""
Computes the center of mass of a specified group
Parameters
----------
atoms : list
indices defining the group to compute the COM for
Returns
-------
numpy array
COM xyz coordinates
dict
the indices of each atom in the group and the atomic weight for each atom
dict
the indices of each atom in the group and the weight ratio of each atom to
the total weight of the group (sum of all atomic weights)
"""
xyz = | np.zeros(3) | numpy.zeros |
## HEAT DIFFUSION
import numpy as np
import matplotlib.pyplot as plt
import pylan as pn
## grid
dx = 0.1
Nx = int(1/dx+1)
x = | np.arange(0,1+dx,dx) | numpy.arange |
"""
Copyright (c) 2010-2018 CNRS / Centre de Recherche Astrophysique de Lyon
Copyright (c) 2012-2017 <NAME> <<EMAIL>>
Copyright (c) 2014-2019 <NAME> <<EMAIL>>
Copyright (c) 2016 <NAME> <<EMAIL>>
Copyright (c) 2016-2019 <NAME> <<EMAIL>>
Copyright (c) 2018-2019 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import types
import astropy.units as u
from astropy.io import fits
from astropy.stats import gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma
from astropy.convolution import convolve, Box1DKernel
from os.path import join, abspath, dirname
from scipy import interpolate, signal
from scipy.optimize import leastsq
from . import ABmag_filters, wavelet1D
from .arithmetic import ArithmeticMixin
from .data import DataArray
from .fitting import Gauss1D
from .objs import flux2mag
__all__ = ('Spectrum', 'vactoair', 'airtovac')
def vactoair(vacwl):
"""Calculate the approximate wavelength in air for vacuum wavelengths.
Parameters
----------
vacwl : ndarray
Vacuum wavelengths.
This uses an approximate formula from the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/vactoair.pro
"""
wave2 = vacwl * vacwl
n = 1.0 + 2.735182e-4 + 131.4182 / wave2 + 2.76249e8 / (wave2 * wave2)
# Do not extrapolate to very short wavelengths.
if not isinstance(vacwl, np.ndarray):
if vacwl < 2000:
n = 1.0
else:
ignore = np.where(vacwl < 2000)
n[ignore] = 1.0
return vacwl / n
def airtovac(airwl):
"""Convert air wavelengths to vacuum wavelengths.
Parameters
----------
airwl : ndarray
Air wavelengths.
This uses the IAU standard as implemented in the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/airtovac.pro
"""
sigma2 = (1e4 / airwl)**2. # Convert to wavenumber squared
n = 1.0 + (6.4328e-5 + 2.94981e-2 / (146. - sigma2) +
2.5540e-4 / (41. - sigma2))
if not isinstance(airwl, np.ndarray):
if airwl < 2000:
n = 1.0
else:
ignore = np.where(airwl < 2000)
n[ignore] = 1.0
return airwl * n
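# --- Illustrative round-trip sketch (added for documentation; not part of the
# --- original module). Hypothetical helper, never called at import time.
def _example_air_vacuum_roundtrip():
    wl_vac = np.array([4000.0, 5000.0, 6562.8])  # wavelengths in Angstroms
    wl_air = vactoair(wl_vac)
    # airtovac(vactoair(x)) recovers x to within the accuracy of the two
    # approximate formulas (a small fraction of an Angstrom).
    return airtovac(wl_air) - wl_vac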
class Spectrum(ArithmeticMixin, DataArray):
"""Spectrum objects contain 1D arrays of numbers, optionally
accompanied by corresponding variances. These numbers represent
sample fluxes along a regularly spaced grid of wavelengths.
The spectral pixel values and their variances, if any, are
available as arrays that can be accessed via properties of the
Spectrum object called .data and .var, respectively. These arrays
are usually masked arrays, which share a boolean masking array
that can be accessed via a property called .mask. In principle,
these arrays can also be normal numpy arrays without masks, in
which case the .mask property holds the value,
numpy.ma.nomask. However non-masked arrays are only supported by a
subset of mpdaf functions at this time, so masked arrays should be
used where possible.
When a new Spectrum object is created, the data, variance and mask
arrays can either be specified as arguments, or the name of a FITS
file can be provided to load them from.
Parameters
----------
filename : string
An optional FITS file name from which to load the spectrum.
None by default. This argument is ignored if the data
argument is not None.
ext : int or (int,int) or string or (string,string)
The optional number/name of the data extension
or the numbers/names of the data and variance extensions.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
unit : str or `astropy.units.Unit`
The physical units of the data values. Defaults to
`astropy.units.dimensionless_unscaled`.
data : float array
An optional 1 dimensional array containing the values of each
pixel of the spectrum, stored in ascending order of wavelength
(None by default). Where given, this array should be 1
dimensional.
var : float array
An optional 1 dimensional array containing the estimated
variances of each pixel of the spectrum, stored in ascending
order of wavelength (None by default).
Attributes
----------
filename : string
The name of the originating FITS file, if any. Otherwise None.
unit : `astropy.units.Unit`
The physical units of the data values.
primary_header : `astropy.io.fits.Header`
The FITS primary header instance, if a FITS file was provided.
data_header : `astropy.io.fits.Header`
The FITS header of the DATA extension.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
"""
# Tell the DataArray base-class that Spectrum objects require 1 dimensional
# data arrays and wavelength coordinates.
_ndim_required = 1
_has_wave = True
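    # Example construction (illustrative only; the file name and array names
    # below are hypothetical):
    #
    #     sp = Spectrum(filename='spectrum.fits')             # load from FITS
    #     sp = Spectrum(data=flux, var=variance, wave=wavecoord)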
def subspec(self, lmin, lmax=None, unit=u.angstrom):
"""Return the flux at a given wavelength, or the sub-spectrum
of a specified wavelength range.
A single flux value is returned if the lmax argument is None
(the default), or if the wavelengths assigned to the lmin and
lmax arguments are both within the same pixel. The value that
is returned is the value of the pixel whose wavelength is
closest to the wavelength specified by the lmin argument.
Note that if a wavelength range is asked for, a view on the original
spectrum is returned and both will be modified at the same time. If
you need to modify only the sub-spectrum, you'll need to copy() it
before.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : float or `~mpdaf.obj.Spectrum`
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
if lmax is None:
lmax = lmin
# Are lmin and lmax array indexes?
if unit is None:
pix_min = max(0, int(lmin + 0.5))
pix_max = min(self.shape[0], int(lmax + 0.5))
# Convert wavelengths to the nearest spectrum array indexes.
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True, unit=unit))
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True, unit=unit) + 1)
# If the start and end of the wavelength range select the same pixel,
# return just the value of that pixel.
if (pix_min + 1) == pix_max:
return self[pix_min]
# Otherwise return a sub-spectrum.
else:
return self[pix_min:pix_max]
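    # Example (illustrative; `sp` is a hypothetical Spectrum instance):
    #
    #     flux = sp.subspec(6562.8)            # flux of the nearest pixel
    #     sub = sp.subspec(6500.0, 6600.0)     # view of the 6500-6600 A range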
def get_step(self, unit=None):
"""Return the wavelength step size.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned step-size.
Returns
-------
out : float
The width of a spectrum pixel.
"""
if self.wave is not None:
return self.wave.get_step(unit)
def get_start(self, unit=None):
"""Return the wavelength value of the first pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the first pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_start(unit)
def get_end(self, unit=None):
"""Return the wavelength of the last pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the final pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_end(unit)
def get_range(self, unit=None):
"""Return the wavelength range (Lambda_min, Lambda_max) of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelengths.
Returns
-------
out : float array
The minimum and maximum wavelengths.
"""
if self.wave is not None:
return self.wave.get_range(unit)
def mask_region(self, lmin=None, lmax=None, inside=True, unit=u.angstrom):
"""Mask spectrum pixels inside or outside a wavelength range, [lmin,lmax].
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
inside : bool
If True, pixels inside the range [lmin,lmax] are masked.
If False, pixels outside the range [lmin,lmax] are masked.
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
else:
if lmin is None:
pix_min = 0
else:
if unit is None:
pix_min = max(0, int(lmin + 0.5))
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True,
unit=unit))
if lmax is None:
pix_max = self.shape[0]
else:
if unit is None:
pix_max = min(self.shape[0], int(lmax + 0.5))
else:
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True,
unit=unit) + 1)
if inside:
self.data[pix_min:pix_max] = np.ma.masked
else:
self.data[:pix_min] = np.ma.masked
self.data[pix_max + 1:] = np.ma.masked
def _wavelengths_to_slice(self, lmin, lmax, unit):
"""Return the slice that selects a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : slice
The slice needed to select pixels within the specified wavelength
range.
"""
if unit is not None and self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
# Get the pixel index that corresponds to the minimum wavelength.
if lmin is None:
i1 = 0
else:
if unit is None:
if lmin > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = max(0, int(lmin + 0.5))
else:
i1 = self.wave.pixel(lmin, nearest=False, unit=unit)
if i1 > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = self.wave.pixel(lmin, nearest=True, unit=unit)
# Get the pixel index that corresponds to the maximum wavelength.
if lmax is None:
i2 = self.shape[0]
else:
if unit is None:
if lmax < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = min(self.shape[0], int(lmax + 0.5))
else:
i2 = self.wave.pixel(lmax, nearest=False, unit=unit)
if i2 < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = self.wave.pixel(lmax, nearest=True, unit=unit) + 1
return slice(i1, i2)
def _interp(self, wavelengths, spline=False):
"""return the interpolated values corresponding to the wavelength
array.
Parameters
----------
wavelengths : array of float
wavelength values
unit : `astropy.units.Unit`
Type of the wavelength coordinates
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
lbda = self.wave.coord()
data = np.pad(self.data.compressed(), 1, 'edge')
w = np.concatenate(([self.get_start() - 0.5 * self.get_step()],
np.compress(~self._mask, lbda),
[self.get_end() + 0.5 * self.get_step()]))
if spline:
if self._var is not None:
_weight = 1. / np.sqrt(np.abs(self.var.filled(np.inf)))
if self.mask is np.ma.nomask:
weight = np.empty(self.shape[0] + 2, dtype=float)
weight[1:-1] = _weight
else:
ksel = np.where(self.mask == False)
weight = np.empty(np.shape(ksel)[1] + 2)
weight[1:-1] = _weight[ksel]
weight[0] = weight[1]
weight[-1] = weight[-2]
else:
weight = None
tck = interpolate.splrep(w, data, w=weight)
return interpolate.splev(wavelengths, tck, der=0)
else:
f = interpolate.interp1d(w, data)
return f(wavelengths)
def _interp_data(self, spline=False):
"""Return data array with interpolated values for masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
if np.count_nonzero(self._mask) in (0, self.shape[0]):
return self._data
lbda = self.wave.coord()
wnew = lbda[self._mask]
data = self._data.copy()
data[self._mask] = self._interp(wnew, spline)
return data
def interp_mask(self, spline=False):
"""Interpolate masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
self.data = np.ma.masked_invalid(self._interp_data(spline))
def rebin(self, factor, margin='center', inplace=False):
"""Combine neighboring pixels to reduce the size of a spectrum by an
integer factor.
Each output pixel is the mean of n pixels, where n is the
specified reduction factor.
Parameters
----------
factor : int
The integer reduction factor by which the spectrum should
be shrunk.
margin : string in 'center'|'right'|'left'|'origin'
When the dimension of the input spectrum is not an integer
multiple of the reduction factor, the spectrum is
truncated to remove just enough pixels that its length is
a multiple of the reduction factor. This sub-spectrum is
then rebinned in place of the original spectrum. The
margin parameter determines which pixels of the input
spectrum are truncated, and which remain.
The options are:
'origin' or 'left':
The start of the output spectrum is coincident
with the start of the input spectrum.
'center':
The center of the output spectrum is aligned
with the center of the input spectrum, within
one pixel.
'right':
The end of the output spectrum is coincident
with the end of the input spectrum.
inplace : bool
If False, return a rebinned copy of the spectrum (the default).
If True, rebin the original spectrum in-place, and return that.
Returns
-------
out : Spectrum
"""
# Delegate the rebinning to the generic DataArray function.
return self._rebin(factor, margin, inplace)
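    # Example (illustrative; `sp` is a hypothetical Spectrum instance):
    #
    #     sp3 = sp.rebin(3, margin='origin')   # average every 3 pixels while
    #                                          # keeping the start wavelength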
def _decimation_filter(self, newstep, atten, unit=None):
"""This is a private function Spectrum.resample(), used to apply
a decimation filter prior to resampling.
Parameters
----------
step : float
The new pixel size along the wavelength axis of the spectrum.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. A good value
to choose is 40dB, which produces a response that is very
similar to a blackman filter applied within the Fourier
plane, but with less ringing in the image plane.
unit : `astropy.units.Unit`
The wavelength units of the step argument. A value of None
is equivalent to specifying self.wave.unit.
"""
# Convert the attenuation from dB to a linear scale factor.
gcut = 10.0**(-atten / 20.0)
# Calculate the Nyquist folding frequency of the new pixel size.
nyquist_folding_freq = 0.5 / newstep
# Calculate the standard deviation of a Gaussian whose Fourier
# transform drops from unity at the center to gcut at the Nyquist
# folding frequency.
sigma = (0.5 / np.pi / nyquist_folding_freq *
np.sqrt(-2.0 * np.log(gcut)))
# Convert the standard deviation from wavelength units to input pixels.
sigma /= self.get_step(unit=unit)
# Choose dimensions for the gaussian filtering kernel. Choose an
# extent from -4*sigma to +4*sigma. This truncates the gaussian
# where it drops to about 3e-4 of its peak. The following
# calculation ensures that the dimensions of the array are odd, so
# that the gaussian will be symmetrically sampled either side of a
# central pixel. This prevents spectral shifts.
gshape = int(np.ceil(4.0 * sigma)) * 2 + 1
# fftconvolve requires that the kernel be no larger than the array
# that it is convolving, so reduce the size of the kernel array if
# needed. Be careful to choose an odd sized array.
n = self.shape[0]
if gshape > n:
gshape = n if n % 2 != 0 else (n - 1)
# Sample the gaussian filter symmetrically around the central pixel.
gx = np.arange(gshape, dtype=float) - gshape // 2
gy = np.exp(-0.5 * (gx / sigma)**2)
# Area-normalize the gaussian profile.
gy /= gy.sum()
# Filter the spectrum with the gaussian filter.
self.fftconvolve(gy, inplace=True)
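    # Numerical illustration (added for clarity): with the default atten=40 dB,
    # gcut = 10**(-40/20) = 0.01, so sigma is chosen such that the Gaussian's
    # Fourier transform has fallen to 1% of its peak at the Nyquist folding
    # frequency 0.5/newstep of the new pixel size.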
def resample(self, step, start=None, shape=None, unit=u.angstrom,
inplace=False, atten=40.0, cutoff=0.25):
"""Resample a spectrum to have a different wavelength interval.
Parameters
----------
step : float
The new pixel size along the wavelength axis of the spectrum.
start : float
The wavelength at the center of the first pixel of the resampled
spectrum. If None (the default) the center of the first pixel
has the same wavelength before and after resampling.
unit : `astropy.units.Unit`
The wavelength units of the step and start arguments.
The default is u.angstrom.
shape : int
The dimension of the array of the new spectrum (ie. the number
of spectral pixels). If this is not specified, the shape is
selected to encompass the wavelength range from the chosen
start wavelength to the ending wavelength of the input spectrum.
inplace : bool
If False, return a resampled copy of the spectrum (the default).
If True, resample the original spectrum in-place, and return that.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. The default
attenuation is 40.0 dB. To disable antialiasing, specify
atten=0.0.
cutoff : float
Mask each output pixel for which at least this fraction of the
pixel was interpolated from masked input pixels.
Returns
-------
out : Spectrum
"""
out = self if inplace else self.copy()
# Don't allow the spectrum to be started beyond the far end of
# the spectrum, because this would result in an empty spectrum.
if start is not None and start > self.get_end(unit):
raise ValueError('The start value is past the end of the '
'spectrum range')
# Get wavelength world coordinates of the output spectrum.
newwave = self.wave.resample(step, start, unit)
# How many pixels should there be in the resampled spectrum?
# If the user didn't specify this, use newwave.shape, which
# holds the number of pixels of size 'step' needed to sample
# from 'start' to the end of the current wavelength range.
if shape is not None:
newwave.shape = shape
# Get the existing wavelength step size in the new units.
oldstep = self.wave.get_step(unit)
# If the spectrum is being resampled to a larger pixel size,
# then a decimation filter should be applied before
# resampling, to ensure that the new pixel size doesn't
# undersample rapidly changing features in the spectrum.
if step > oldstep and atten > 0.0:
out._decimation_filter(step, atten, unit=unit)
# Get the data, mask (and variance) arrays, and replace bad pixels with
# zeros.
if out._mask is not None: # Is out.data a masked array?
data = out.data.filled(0.0)
if out._var is not None:
var = out.var.filled(0.0)
else:
var = None
mask = out._mask
else: # Is out.data just a numpy array?
mask = ~np.isfinite(out._data)
data = out._data.copy()
data[mask] = 0.0
if out.var is not None:
var = out.var.copy()
var[mask] = 0.0
else:
var = None
# Get the coordinates of the pixels of the input and output spectra.
xi = self.wave.coord()
xo = newwave.coord()
# Get a resampled versions of the data array, optionally the variance
# array, and a floating point version of the mask array. Note that the
# choice of linear interpolation is required to preserve flux.
data = interpolate.griddata(xi, data, xo, method="linear",
fill_value=np.nan)
if var is not None:
var = interpolate.griddata(xi, var, xo, method="linear",
fill_value=np.nan)
mask = interpolate.griddata(xi, mask.astype(float), xo,
method="linear", fill_value=1.0)
# Create a new boolean mask in which all pixels that had an integrated
# contribution of more than 'cutoff' originally masked pixels are
# masked. Note that setting the cutoff to the "obvious" value of zero
# results in lots of pixels being masked that are far away from any
# masked pixels, due to precision errors in the griddata()
# function. Limit the minimum value of the cutoff to avoid this.
mask = np.greater(mask, max(cutoff, 1.0e-6))
# If masked arrays were not in use in the original spectrum, fill
# bad pixels with NaNs.
if out._mask is None:
data[mask] = np.nan
if var is not None:
var[mask] = np.nan
mask = None
# Install the resampled arrays.
out._data = data
out._var = var
out._mask = mask
# Install the new wavelength world coordinates.
out.wave = newwave
# When up-sampling, decimation filter the output spectrum. The
# combination of this and the linear interpolation of the preceding
# griddata() produces a much better interpolation than a cubic spline
# filter can. In particular, a spline interpolation does not conserve
# flux, whereas linear interpolation plus decimation filtering does.
if step < oldstep and atten > 0.0:
out._decimation_filter(step, atten, unit=unit)
return out
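    # Example (illustrative; `sp` is a hypothetical Spectrum instance):
    #
    #     sp2 = sp.resample(1.25, unit=u.angstrom)   # 1.25 A pixels, with the
    #                                                # default anti-alias filter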
def mean(self, lmin=None, lmax=None, weight=True, unit=u.angstrom):
"""Compute the mean flux over a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
weight : bool
If weight is True, compute the weighted mean, inversely
weighting each pixel by its variance.
Returns
-------
out : (float, float)
The mean flux and its error.
"""
# Don't attempt to perform a weighted mean if there are no variances.
if self._var is None:
weight = False
# Get the slice that selects the specified wavelength range.
try:
lambda_slice = self._wavelengths_to_slice(lmin, lmax, unit)
except ValueError:
return (0.0, np.inf)
# Obtain the mean flux of the sub-spectrum.
if weight:
weights = 1.0 / self.var[lambda_slice].filled(np.inf)
flux, wsum = np.ma.average(self.data[lambda_slice],
weights=weights, returned=True)
if self.var is not None:
err_flux = np.sqrt(
np.ma.sum(self.var[lambda_slice] * weights**2) / wsum**2)
else:
err_flux = np.inf
else:
flux, wsum = np.ma.average(self.data[lambda_slice], returned=True)
if self.var is not None:
err_flux = np.sqrt( | np.ma.sum(self.var[lambda_slice]) | numpy.ma.sum |
import os
import sys
from glob import glob
from tqdm import tqdm
import numpy as np
import pandas as pd
import SimpleITK as sitk
from torch.utils.data import Dataset, DataLoader
import nibabel
from scipy import ndimage
import time
import torch
import torch.nn as nn
import fire
import time
import pydicom
import shutil
def read_config_file(config_file):
'''
config_file: '../data/config/肝穿病人给放射科和合作者.xlsx'
Column headers: ['编号', '住院号', '姓名', 'series uid', 'Unnamed: 4', '性别1男2女', '年龄', 'MRS脂肪峰面积', '水峰面积', '脂肪含量', 'Fat', 'necrosisfoci', 'ballooning', 'NAS(total)', 'fibrosis', 'NAS大于4', '进展性纤维化', '脂肪肝病理评分'] (the spreadsheet's original column names)
Only the 'series uid' and 'fibrosis' columns are used here
debug cmd: read_config_file('../data/config/肝穿病人给放射科和合作者.xlsx')
'''
df = pd.read_excel(config_file)
series_fib_dict = {}
for index, row in df.iterrows():
series_fib_dict[row['series uid']] = int(row['fibrosis'])
return series_fib_dict
def read_dcm_file(in_dcm_path):
series_reader = sitk.ImageSeriesReader()
dicomfilenames = series_reader.GetGDCMSeriesFileNames(in_dcm_path)
series_reader.SetFileNames(dicomfilenames)
series_reader.MetaDataDictionaryArrayUpdateOn()
series_reader.LoadPrivateTagsOn()
image = series_reader.Execute()
return image
def split_data_to_two_phase_one_case(series_path, out_dir):
'''
debug cmd: split_data_to_two_phase_one_case('../data/images_mr_filtered/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0', '')
'''
in_files1 = glob(os.path.join(series_path, '*.dcm'))
in_files2 = glob(os.path.join(series_path, '*.DCM'))
in_files = in_files1 + in_files2
echo_1_files = []
echo_2_files = []
for infile in in_files:
metadata = pydicom.dcmread(infile)
if 1 == metadata.EchoNumbers:
echo_1_files.append(infile)
elif 2 == metadata.EchoNumbers:
echo_2_files.append(infile)
series_uid = os.path.basename(series_path)
out_series_path = os.path.join(out_dir, series_uid)
out_echo_1_path = os.path.join(out_series_path, 'echo_1')
out_echo_2_path = os.path.join(out_series_path, 'echo_2')
os.makedirs(out_series_path, exist_ok=True)
os.makedirs(out_echo_1_path, exist_ok=True)
os.makedirs(out_echo_2_path, exist_ok=True)
assert len(echo_1_files) == len(echo_2_files)
for src_file in echo_1_files:
dst_file = os.path.join(out_echo_1_path, os.path.basename(src_file))
shutil.copyfile(src_file, dst_file)
print('====> copy from {} to {}'.format(src_file, dst_file))
for src_file in echo_2_files:
dst_file = os.path.join(out_echo_2_path, os.path.basename(src_file))
shutil.copyfile(src_file, dst_file)
print('====> copy from {} to {}'.format(src_file, dst_file))
def split_data_to_two_phase_singletask(in_dir, out_dir, config_file):
'''
indir: ../data/images_mr_filtered
outdir: ../data/experiment_0/0.ori
config_file: '../data/config/肝穿病人给放射科和合作者.xlsx' -- the config file determines which series proceed to the later steps, guarding against non-series subdirectories mixed into the folder
debug cmd: split_data_to_two_phase_singletask('../data/images_mr_filtered', '../data/experiment_0/0.ori', '../data/config/肝穿病人给放射科和合作者.xlsx')
invoke cmd: python FattyLiverDatasets.py split_data_to_two_phase_singletask '../data/images_mr_filtered' '../data/experiment_0/0.ori' '../data/config/肝穿病人给放射科和合作者.xlsx'
'''
series_fib_dict = read_config_file(config_file)
series_uids = os.listdir(in_dir)
series_paths = []
for series_uid in series_uids:
if not series_uid in series_fib_dict:
continue
series_path = os.path.join(in_dir, series_uid)
series_paths.append(series_path)
split_data_to_two_phase_one_case(series_path, out_dir)
def resample_sitkImage_by_spacing(sitkImage, newSpacing, vol_default_value='min', interpolator=sitk.sitkNearestNeighbor):
"""
:param sitkImage:
:param newSpacing:
:return:
"""
if sitkImage == None:
return None
if newSpacing is None:
return None
dim = sitkImage.GetDimension()
if len(newSpacing) != dim:
return None
# determine the default value
vol_value = 0.0
if vol_default_value == 'min':
vol_value = float(np.ndarray.min(sitk.GetArrayFromImage(sitkImage)))
elif vol_default_value == 'zero':
vol_value = 0.0
elif str(vol_default_value).isnumeric():
vol_value = float(vol_default_value)
# calculate new size
np_oldSize = np.array(sitkImage.GetSize())
np_oldSpacing = np.array(sitkImage.GetSpacing())
np_newSpacing = np.array(newSpacing)
np_newSize = np.divide(np.multiply(np_oldSize, np_oldSpacing), np_newSpacing)
newSize = tuple(np_newSize.astype(np.uint).tolist())
# resample sitkImage into new specs
transform = sitk.Transform()
return sitk.Resample(sitkImage, newSize, transform, interpolator, sitkImage.GetOrigin(),
newSpacing, sitkImage.GetDirection(), vol_value, sitkImage.GetPixelID())
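# --- Illustrative usage sketch (added for documentation; not part of the
# --- original script). The path below is hypothetical.
def _example_resample_isotropic(in_dcm_path='/path/to/series'):
    image = read_dcm_file(in_dcm_path)
    # Resample to isotropic 1 mm voxels with linear interpolation.
    return resample_sitkImage_by_spacing(image, (1.0, 1.0, 1.0),
                                         interpolator=sitk.sitkLinear)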
def resample_data_one_case(series_path, out_dir, z_mul:int):
'''
series_path: ../data/experiment_0/0.ori/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0/11
resample_data_one_case('../data/experiment_0/0.ori/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0/echo_1', '../data/experiment_0/0.ori/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0', 1)
'''
beg = time.time()
print('====> processing {}'.format(series_path))
image = read_dcm_file(series_path)
basename = os.path.basename(series_path)
# 1. Save the original-resolution data as nii.gz
out_raw_file = os.path.join(out_dir, '{}.nii.gz'.format(basename))
sitk.WriteImage(image, out_raw_file)  # the caller reads this file back, so it must exist on disk
# 2. resample, base x-spacing
# spc = image.GetSpacing()
# mults = [1,2,4,8]
# for z_mul in mults:
# out_resampled_file = os.path.join(out_dir, '{}_z_mul{}.nii.gz'.format(basename, z_mul))
# new_spc = [spc[0]] + [spc[0]] + [spc[0]*z_mul]
# resampled_img = resample_sitkImage_by_spacing(image, new_spc, interpolator=sitk.sitkLinear)
# sitk.WriteImage(resampled_img, out_resampled_file)
end = time.time()
print('=====> finish {}, time elapsed is {:.3f}s'.format(series_path, end-beg))
return out_raw_file
def resample_data_singletask(series_paths):
'''
indir: ../data/experiment_0/0.ori
debug cmd: resample_data_singletask('../data/experiment_0/0.ori')
invoke cmd: python FattyLiverDatasets.py resample_data_singletask '../data/experiment_0/0.ori'
'''
print(series_paths)
for series_path in tqdm(series_paths):
if not os.path.isdir(series_path):
continue
echo_1_path = os.path.join(series_path, 'echo_1')
echo_2_path = os.path.join(series_path, 'echo_2')
out_dir = series_path
if not os.path.isdir(echo_1_path):
print('{} echo 1 data not exist!'.format(series_path))
continue
if not os.path.isdir(echo_2_path):
print('{} echo 2 data not exist!'.format(series_path))
continue
out_echo_1_file = resample_data_one_case(echo_1_path, out_dir, 1)
out_echo_2_file = resample_data_one_case(echo_2_path, out_dir, 1)
echo_1_image = sitk.ReadImage(out_echo_1_file)
echo_2_image = sitk.ReadImage(out_echo_2_file)
echo_1_arr = sitk.GetArrayFromImage(echo_1_image)
echo_2_arr = sitk.GetArrayFromImage(echo_2_image)
echo_1_arr = np.array(echo_1_arr, dtype=np.int16)
echo_2_arr = np.array(echo_2_arr, dtype=np.int16)
diff_1_2_arr = echo_1_arr - echo_2_arr
diff_1_2_image = sitk.GetImageFromArray(diff_1_2_arr)
diff_1_2_image.CopyInformation(echo_1_image)
out_diff_file = os.path.join(os.path.dirname(out_echo_1_file), 'diff_1_2.nii.gz')
sitk.WriteImage(diff_1_2_image, out_diff_file)
def resample_data_multiprocessing(indir, process_num=12):
'''
indir: ../data/experiment_0/0.ori
invoke cmd: python FattyLiverDatasets.py resample_data_multiprocessing '../data/experiment_0/0.ori' 12
'''
series_uids = os.listdir(indir)
series_paths = [os.path.join(indir, i) for i in series_uids]
import multiprocessing
from multiprocessing import Process
multiprocessing.freeze_support()
pool = multiprocessing.Pool()
results = []
num_per_process = (len(series_paths) + process_num - 1)//process_num
resample_data_singletask(series_paths)
# for i in range(process_num):
# sub_infiles = series_paths[num_per_process*i:min(num_per_process*(i+1), len(series_paths))]
# print(sub_infiles)
# result = pool.apply_async(resample_data_singletask, args=(sub_infiles))
# results.append(result)
# pool.close()
# pool.join()
def split_data_to_train_val_test(data_root, config_file, outdir, train_ratio, val_ratio):
'''
debug cmd: split_data_to_train_val_test('../data/images_mr_filtered', '../data/config/肝穿病人给放射科和合作者.xlsx', '../data/config', 0.7, 0.1)
invoke cmd: python FattyLiverDatasets.py split_data_to_train_val_test '../data/images_mr_filtered' '../data/config/肝穿病人给放射科和合作者.xlsx' '../data/config' 0.7 0.1
debug cmd: split_data_to_train_val_test('../data/images_mr_filtered', '../data/config/肝穿病人给放射科和合作者宋筛重复序列终版.xlsx', '../data/config', 0.7, 0.1)
invoke cmd: python FattyLiverDatasets.py split_data_to_train_val_test '../data/images_mr_filtered' '../data/config/肝穿病人给放射科和合作者宋筛重复序列终版.xlsx' '../data/config' 0.7 0.1
'''
series_fib_dict = read_config_file(config_file)
series_uids = os.listdir(data_root)
pairs = []
for series_uid in series_uids:
if not series_uid in series_fib_dict:
continue
pairs.append([series_uid, series_fib_dict[series_uid]])
np.random.shuffle(pairs)
train_pos = int(len(pairs)*train_ratio)
val_pos = int(len(pairs)*(train_ratio+val_ratio))
train_pairs = pairs[:train_pos]
val_pairs = pairs[train_pos:val_pos]
test_pairs = pairs[val_pos:]
out_config_train_file = os.path.join(outdir, 'config_train.txt')
out_config_val_file = os.path.join(outdir, 'config_val.txt')
out_config_test_file = os.path.join(outdir, 'config_test.txt')
with open(out_config_train_file, 'w') as f:
for pair in train_pairs:
f.write('{}\t{}\n'.format(pair[0], pair[1]))
with open(out_config_val_file, 'w') as f:
for pair in val_pairs:
f.write('{}\t{}\n'.format(pair[0], pair[1]))
with open(out_config_test_file, 'w') as f:
for pair in test_pairs:
f.write('{}\t{}\n'.format(pair[0], pair[1]))
def split_data_to_train_val_test_ratio(data_root, config_file, outdir, train_ratio, val_ratio):
'''
debug cmd: split_data_to_train_val_test('../data/images_mr_filtered', '../data/config/肝穿病人给放射科和合作者.xlsx', '../data/config_ratio', 0.7, 0.1)
invoke cmd: python FattyLiverDatasets.py split_data_to_train_val_test '../data/images_mr_filtered' '../data/config/肝穿病人给放射科和合作者.xlsx' '../data/config_ratio' 0.7 0.1
debug cmd: split_data_to_train_val_test('../data/images_mr_filtered', '../data/config/肝穿病人给放射科和合作者宋筛重复序列终版.xlsx', '../data/config_ratio', 0.7, 0.1)
invoke cmd: python FattyLiverDatasets.py split_data_to_train_val_test '../data/images_mr_filtered' '../data/config/肝穿病人给放射科和合作者宋筛重复序列终版.xlsx' '../data/config_ratio' 0.7 0.1
====> train pairs label 0 count is 13
====> train pairs label 1 count is 39
====> train pairs label 2 count is 41
====> train pairs label 3 count is 19
====> train pairs label 4 count is 4
====> val pairs label 0 count is 2
====> val pairs label 1 count is 5
====> val pairs label 2 count is 6
====> val pairs label 3 count is 3
====> val pairs label 4 count is 1
====> test pairs label 0 count is 4
====> test pairs label 1 count is 12
====> test pairs label 2 count is 12
====> test pairs label 3 count is 6
====> test pairs label 4 count is 2
'''
series_fib_dict = read_config_file(config_file)
series_uids = os.listdir(data_root)
pairs_0 = []
pairs_1 = []
pairs_2 = []
pairs_3 = []
pairs_4 = []
for series_uid in series_uids:
if not series_uid in series_fib_dict:
continue
if series_fib_dict[series_uid] == 0:
pairs_0.append([series_uid, 0])
elif series_fib_dict[series_uid] == 1:
pairs_1.append([series_uid, 1])
elif series_fib_dict[series_uid] == 2:
pairs_2.append([series_uid, 2])
elif series_fib_dict[series_uid] == 3:
pairs_3.append([series_uid, 3])
elif series_fib_dict[series_uid] == 4:
pairs_4.append([series_uid, 4])
def inner_split(pairs, train_ratio, val_ratio):
np.random.shuffle(pairs)
train_pos = int(len(pairs)*train_ratio)
val_pos = int(len(pairs)*(train_ratio+val_ratio))
train_pairs = pairs[:train_pos]
val_pairs = pairs[train_pos:val_pos]
test_pairs = pairs[val_pos:]
return train_pairs, val_pairs, test_pairs
train_pairs_0, val_pairs_0, test_pairs_0 = inner_split(pairs_0, train_ratio, val_ratio)
train_pairs_1, val_pairs_1, test_pairs_1 = inner_split(pairs_1, train_ratio, val_ratio)
train_pairs_2, val_pairs_2, test_pairs_2 = inner_split(pairs_2, train_ratio, val_ratio)
train_pairs_3, val_pairs_3, test_pairs_3 = inner_split(pairs_3, train_ratio, val_ratio)
train_pairs_4, val_pairs_4, test_pairs_4 = inner_split(pairs_4, train_ratio, val_ratio)
print('====> train pairs label 0 count is {}'.format(len(train_pairs_0)))
print('====> train pairs label 1 count is {}'.format(len(train_pairs_1)))
print('====> train pairs label 2 count is {}'.format(len(train_pairs_2)))
print('====> train pairs label 3 count is {}'.format(len(train_pairs_3)))
print('====> train pairs label 4 count is {}'.format(len(train_pairs_4)))
print('====> val pairs label 0 count is {}'.format(len(val_pairs_0)))
print('====> val pairs label 1 count is {}'.format(len(val_pairs_1)))
print('====> val pairs label 2 count is {}'.format(len(val_pairs_2)))
print('====> val pairs label 3 count is {}'.format(len(val_pairs_3)))
print('====> val pairs label 4 count is {}'.format(len(val_pairs_4)))
print('====> test pairs label 0 count is {}'.format(len(test_pairs_0)))
print('====> test pairs label 1 count is {}'.format(len(test_pairs_1)))
print('====> test pairs label 2 count is {}'.format(len(test_pairs_2)))
print('====> test pairs label 3 count is {}'.format(len(test_pairs_3)))
print('====> test pairs label 4 count is {}'.format(len(test_pairs_4)))
train_pairs = train_pairs_0 + train_pairs_1 + train_pairs_2 + train_pairs_3 + train_pairs_4
val_pairs = val_pairs_0 + val_pairs_1 + val_pairs_2 + val_pairs_3 + val_pairs_4
test_pairs = test_pairs_0 + test_pairs_1 + test_pairs_2 + test_pairs_3 + test_pairs_4
np.random.shuffle(train_pairs)
np.random.shuffle(val_pairs)
np.random.shuffle(test_pairs)
out_config_train_file = os.path.join(outdir, 'config_train.txt')
out_config_val_file = os.path.join(outdir, 'config_val.txt')
out_config_test_file = os.path.join(outdir, 'config_test.txt')
os.makedirs(outdir, exist_ok=True)
with open(out_config_train_file, 'w') as f:
for pair in train_pairs:
f.write('{}\t{}\n'.format(pair[0], pair[1]))
with open(out_config_val_file, 'w') as f:
for pair in val_pairs:
f.write('{}\t{}\n'.format(pair[0], pair[1]))
with open(out_config_test_file, 'w') as f:
for pair in test_pairs:
f.write('{}\t{}\n'.format(pair[0], pair[1]))
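# Hedged helper sketch (illustrative only; the path is hypothetical): read back one of
# the tab-separated split files written above as (series_uid, label) pairs, the same
# format that FattyLiverClsDatasets below expects.
def _example_read_split_file(config_file='../data/config/config_train.txt'):
    pairs = []
    with open(config_file) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            uid, label = line.split('\t')
            pairs.append((uid, int(label)))
    return pairs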
class FattyLiverClsDatasets(Dataset):
def __init__(self, data_root, config_file, crop_size, scale_size, phase='train'):
self.image_files = []
self.labels = []
with open(config_file) as f:
for line in f.readlines():
line = line.strip()
if line is None or len(line) == 0:
continue
ss = line.split('\t')
self.image_files.append(os.path.join(data_root, ss[0]))
self.labels.append(ss[1])
print('====> fatty liver count is:{}'.format(len(self.image_files)))
def __getitem__(self, index):
image_file = self.image_files[index]
label = self.labels[index]
image = read_dcm_file(image_file)
print(image_file, '\t', image.GetSize())
# arr = sitk.GetArrayFromImage(image)
        # # Take slices 5-14 for training
# arr_slice = arr[5:15, :, :]
def __len__(self):
return len(self.image_files)
class FattyLiverClsDatasetsDiff3D(Dataset):
'''
    The input data resolution is unified to (512, 384, 32), corresponding to (x, y, z), by the input of the train function
data_format: diff, phase1, phase2
condition: raw, mask, cut(after mask)
'''
def __init__(self, data_root, config_file, crop_size, phase='train'):
self.crop_size = crop_size
# self.data_format = data_format
# self.condition = condition
self.image_files = []
self.labels = []
self.masks = []
mask_root = '../data/seg_task/renamed_masks'
with open(config_file) as f:
for line in f.readlines():
line = line.strip()
if line is None or len(line) == 0:
continue
ss = line.split('\t')
image_file = os.path.join(data_root, ss[0])
mask_file = os.path.join(mask_root,ss[0])+'.mha'
if os.path.isdir(image_file) and os.path.exists(mask_file):
self.image_files.append(image_file)
self.labels.append(int(ss[1]))
# self.masks.append(mask_file)
print('====> fatty liver count is:{}'.format(len(self.image_files)))
def __getitem__(self, index):
# data_format = self.data_format
# condition = self.condition
image_path = self.image_files[index]
label = self.labels[index]
# mask = self.masks[index]
echo_1_file = os.path.join(image_path, 'echo_1.nii.gz')
echo_2_file = os.path.join(image_path, 'echo_2.nii.gz')
image_1 = sitk.ReadImage(echo_1_file)
image_2 = sitk.ReadImage(echo_2_file)
# mask_image = sitk.ReadImage(mask)
arr_1 = sitk.GetArrayFromImage(image_1)
arr_2 = sitk.GetArrayFromImage(image_2)
# mask_arr = sitk.GetArrayFromImage(mask_image)
arr_1 = np.array(arr_1, dtype=np.float32)
        arr_2 = np.array(arr_2, dtype=np.float32)
import astropy.io.fits as pf
import os
import numpy as np
from copy import deepcopy
from itertools import chain
import unittest
import healpy as hp
import warnings
# disable new order warnings in tests
warnings.filterwarnings('ignore')
class TestSphtFunc(unittest.TestCase):
def setUp(self):
self.lmax = 64
self.path = os.path.dirname( os.path.realpath( __file__ ) )
self.map1 = [hp.ma(m) for m in hp.read_map(os.path.join(self.path, 'data', 'wmap_band_iqumap_r9_7yr_W_v4_udgraded32.fits'), (0,1,2))]
self.map2 = [hp.ma(m) for m in hp.read_map(os.path.join(self.path, 'data', 'wmap_band_iqumap_r9_7yr_V_v4_udgraded32.fits'), (0,1,2))]
self.mask = hp.read_map(os.path.join(self.path, 'data', 'wmap_temperature_analysis_mask_r9_7yr_v4_udgraded32.fits')).astype(np.bool)
for m in chain(self.map1, self.map2):
m.mask = np.logical_not(self.mask)
self.cla = hp.read_cl(os.path.join(self.path, 'data', 'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter.fits'))
self.cl_fortran_nomask = hp.read_cl(os.path.join(self.path, 'data', 'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter_nomask.fits'))
cls_file = pf.open(os.path.join(self.path, 'data',
'cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_IQU_lmax64_rmmono_3iter.fits'))
# fix for pyfits to read the file with duplicate column names
for i in range(2, 6):
cls_file[1].header['TTYPE%d' % i] += '-%d' % i
cls = cls_file[1].data
# order of HEALPIX is TB, EB while in healpy is EB, TB
self.cliqu = [np.array(cls.field(i)) for i in (0,1,2,3,5,4)]
nside = 32
lmax = 64
fwhm_deg = 7.
seed = 12345
np.random.seed(seed)
self.mapiqu = hp.synfast(self.cliqu, nside, lmax=lmax, pixwin=False,
fwhm=np.radians(fwhm_deg), new=False)
def test_anafast(self):
cl = hp.anafast(hp.remove_monopole(self.map1[0].filled()), lmax = self.lmax)
self.assertEqual(len(cl), 65)
np.testing.assert_array_almost_equal(cl, self.cla, decimal=8)
def test_anafast_nomask(self):
cl = hp.anafast(hp.remove_monopole(self.map1[0].data), lmax = self.lmax)
self.assertEqual(len(cl), 65)
np.testing.assert_array_almost_equal(cl, self.cl_fortran_nomask, decimal=8)
def test_anafast_iqu(self):
self.map1[0] = hp.remove_monopole(self.map1[0])
cl = hp.anafast(self.map1, lmax = self.lmax)
self.assertEqual(len(cl[0]), 65)
self.assertEqual(len(cl), 6)
for i in range(6):
np.testing.assert_array_almost_equal(cl[i], self.cliqu[i], decimal=8)
def test_anafast_xspectra(self):
cl = hp.anafast(hp.remove_monopole(self.map1[0]), hp.remove_monopole(self.map2[0]), lmax = self.lmax)
self.assertEqual(len(cl), self.lmax+1)
clx = hp.read_cl(os.path.join(self.path, 'data', 'cl_wmap_band_iqumap_r9_7yr_WVxspec_v4_udgraded32_II_lmax64_rmmono_3iter.fits'))
np.testing.assert_array_almost_equal(cl, clx, decimal=8)
def test_synfast(self):
nside = 32
lmax = 64
fwhm_deg = 7.
seed = 12345
np.random.seed(seed)
map_pregen = hp.read_map(os.path.join(self.path, 'data',
'map_synfast_seed%d.fits' % seed),
(0,1,2))
sim_map = hp.synfast(self.cliqu, nside, lmax = lmax, pixwin=False,
                             fwhm=np.radians(fwhm_deg), new=False)
import numpy as np
from src.network_elements.network_element import NetworkElement
class LayersLinker(NetworkElement):
def __init__(self, previous_layer_dimension, next_layer_dimension) -> None:
self.previous_layer_dimension = previous_layer_dimension
self.next_layer_dimension = next_layer_dimension
self.W = self.init_random_uniform_matrix(size=(previous_layer_dimension, next_layer_dimension))
self.B = self.init_random_uniform_matrix(size=(1, next_layer_dimension))
self.previous_layer_activated_output = None
self.dLdW = None
self.dLdB = None
def init_random_uniform_matrix(self, size):
low = - np.sqrt(1 / np.sum(size))
high = np.sqrt(1 / np.sum(size))
return np.random.uniform(low=low, high=high, size=size)
def init_random_gaussian_matrix(self, size, mean=0.0, variance=1.0):
return np.random.normal(loc=mean, scale=np.sqrt(variance), size=size)
def forward_propagate(self, A):
self.previous_layer_activated_output = A
Z = (A @ self.W) + self.B
return Z
def backward_propagate(self, dLdZ):
if self.previous_layer_activated_output is None:
raise ValueError("Please forward propagate information before backward propagating.")
(batch_size, _) = dLdZ.shape
self.dLdW = self.previous_layer_activated_output.T @ dLdZ
        self.dLdB = np.ones(batch_size) @ dLdZ  # sum of dLdZ over the batch
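# Hedged usage sketch (the original source appears truncated above, so only the
# forward pass is exercised; the layer sizes are arbitrary):
def _example_layers_linker_forward():
    linker = LayersLinker(previous_layer_dimension=4, next_layer_dimension=3)
    A = np.random.randn(8, 4)          # batch of 8 activations from the previous layer
    Z = linker.forward_propagate(A)    # (8, 4) @ (4, 3) + (1, 3) -> (8, 3)
    return Z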
import numpy as np
import utils.utils_data as utils_data
import utils.utils_video as utils_video
import cv2
import time
class DataGenerator:
def __init__(self, model, config, sess, data_name, shuffle, augment):
self.config = config
# data_generator receives the model and session because it runs the first part of the model -
# mobile_net to return batch features
self.model = model
self.sess = sess
# read classes dict
self.label_dict = utils_data.read_classes(config.classInd)
self.label_dict_inv = {v: k for k, v in self.label_dict.items()}
self.augment = augment
# load data here (shuffle inside)
if data_name == 'train':
data_list = config.train_list
elif data_name == 'validate':
data_list = config.val_list
else:
data_list = config.test_list
self.lines, self.labels, self.len_lines = utils_data.read_data(data_list, self.label_dict, shuffle, config.data)
# feeder state
self._curr_line_num = 0
# resets the feeder to the first example
def reset_feeder(self):
self._curr_line_num = 0
# returns the batch fc and conv features and labels
def next_batch(self):
time1 = time.time()
eod = False
batch_frames = np.zeros((self.config.batch_size,
self.config.n_steps,
self.config.frame_size[0],
self.config.frame_size[1],
self.config.frame_size[2],), dtype=np.float32) # (8, 40, 224, 224, 3)
batch_labels = np.zeros((self.config.batch_size, self.config.n_classes), dtype=np.int8)
example_ind = 0
while example_ind < self.config.batch_size:
if self.end_of_data():
# if we finished the data, the rest of the batch array is zeros.
eod = True
break
else:
# get next path and class
curr_video_full_path, curr_video_class = self.get_next_example()
# create the label example
one_hot = np.zeros(self.config.n_classes, dtype=np.int8)
one_hot[self.label_dict[curr_video_class]] = 1
label = np.expand_dims(one_hot, axis=0)
# extract frames
bit_correct, frames = get_clip_frames(self.config, curr_video_full_path)
if bit_correct == 1:
print('ERROR: skipping clip...')
continue
# augment example if required
if self.augment:
frames = utils_video.augment_frames(frames)
# assign to the big array
batch_frames[example_ind] = frames
batch_labels[example_ind] = label
example_ind += 1
time2 = time.time()
print("batch_read_time:", '{0:.2f}'.format(time2 - time1), "s")
return batch_frames, batch_labels, eod
# did we reach the end of the line list?
def end_of_data(self):
return self._curr_line_num == self.len_lines
def get_next_example(self):
# returns the video full path, class, first frame to read from
line = self.lines[self._curr_line_num]
if self.config.data == "UCF":
curr_video_full_path, curr_video_class = utils_video.line_to_path(line, self.config.UCF_ARG_path)
elif self.config.data == "SDHA":
curr_video_full_path, curr_video_class = utils_video.line_to_path_SDHA(line, self.config.SDHA_2010_path)
elif self.config.data == "Combined":
curr_video_full_path, curr_video_class = utils_video.line_to_path_Combined(line, self.config.Combined_path)
elif self.config.data == "IR":
curr_video_full_path, curr_video_class = utils_video.line_to_path_IR(line, self.config.IR_path)
elif self.config.data == "HMDB":
curr_video_full_path, curr_video_class = utils_video.line_to_path_HMDB(line, self.config.HMDB_path, self.label_dict_inv)
self.update_state()
return curr_video_full_path, curr_video_class
def update_state(self):
self._curr_line_num += 1
# # use CNN to extract features - these features are then used as input for the LSTM networks
# def get_fc_conv_features(model, config, sess, video_path, is_training):
#
# num_conv_features = (config.conv_input_shape[0], config.conv_input_shape[1], config.channels)
#
# bit = 0
# capture = cv2.VideoCapture(video_path)
#
# # array to hold all frame fc features
# fc_features = np.zeros((config.n_steps, config.n_fc_inputs))
#
# # array to hold all frame conv features
# conv_features = np.zeros((config.n_steps,) + num_conv_features)
#
# frame_num = 0
# # extract features
# while (capture.isOpened()) & (frame_num < config.n_steps) & (bit == 0):
# flag, frame = capture.read()
# if flag == 0:
# bit = 1
# print("******ERROR: Could not read frame in " + video_path + " frame_num: " + str(frame_num))
# break
#
# #name = params['res_vids_path'] + str(frame_num) + 'frame.jpg'
# #cv2.imwrite(name, frame)
# #cv2.imshow("Vid", frame)
# #key_pressed = cv2.waitKey(10) # Escape to exit
#
# # process frame
# centered_image = utils_video.val_reprocess(config, frame)
#
# # forward vgg (including vgg pre-processing)
# #res2, res = self._my_sess.run([self._pool5_features,
# # self._fc6_features], {self._input_img: centered_image})
#
# res2, res = sess.run([model.mn_layer_15, model.mn_global_pool],
# {model.mn_input_img: centered_image, model.is_training: is_training})
#
# #pred = sess.run(model_specs['predictions'], {model_specs['input_img']: centered_image})
# #label_map = imagenet.create_readable_names_for_imagenet_labels()
# #print("Top 1 prediction: ", pred.argmax(), label_map[pred.argmax()], pred.max())
#
# # collect to all video features
# fc_features[frame_num, :] = res[0, :]
# #np.append(fc_features, res[0, :], axis=0)
# conv_features[frame_num, :, :, :] = res2[0, :, :, :]
# #np.append(conv_features, res2[0, :, :, :], axis=0)
#
# #print(np.shape(res))
# #print(np.shape(res2))
# #input_img = sess.run(vgg_extractor.return_input(), {image: img1})
#
# frame_num += 1
# #if key_pressed == 27:
# # break
#
# capture.release()
#
# return bit, fc_features, conv_features
# # soft max on output
# #res1 = np.exp(res)
# #res2 = res1 / np.sum(res1)
# #indices = np.argsort(res2)
# # print the top 10 predictions
# #print(indices[0][-10:])
def get_clip_frames(config, video_path):
clip_frames = []
bit = 0
capture = cv2.VideoCapture(video_path)
fps = capture.get(cv2.CAP_PROP_FPS)
length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
frame_gap = int(round(fps / config.target_fps))
if config.random_start:
start_frame = int(np.random.rand() * (length - int(config.n_steps * frame_gap) - 1))
# skip first frames
for j in range(start_frame):
flag, frame = capture.read()
frame_num = 0
# extract features
while (capture.isOpened()) & (int(round(frame_num / frame_gap)) < config.n_steps) & (bit == 0):
flag, frame = capture.read()
if flag == 0:
bit = 1
print("******ERROR: Could not read frame in " + video_path + " frame_num: " + str(frame_num))
break
#name = params['res_vids_path'] + str(frame_num) + 'frame.jpg'
#cv2.imwrite(name, frame)
#cv2.imshow("Vid", frame)
#key_pressed = cv2.waitKey(10) # Escape to exit
# process frame (according to the correct frame rate)
if frame_num % frame_gap == 0:
centered_image = utils_video.val_reprocess(config, frame)
clip_frames.append(centered_image)
#cv2.imshow("Vid", frame)
#key_pressed = cv2.waitKey(30) # Escape to exit
frame_num += 1
capture.release()
    frames = np.array(clip_frames)
    return bit, frames
import time
import numpy as np
import yaml
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, cohen_kappa_score
def plot_full_barchart(test_score, n_pilots, title="Test accuracy - BCIC IV 2a", fig=None, ax_idx=0):
if fig:
ax = fig.get_axes()[ax_idx]
else:
fig, ax = plt.subplots()
w = 0.40
average_kappa = np.mean(test_score['kappa'])
average_acc = np.mean(test_score['accuracy'])
# Accuracy score
ax.bar(x=np.arange(1 + 1.25*w, n_pilots + 1), height=test_score['accuracy'], width=-w,
tick_label=range(1, n_pilots+1), alpha=0.9, align='edge')
ax.plot([0,n_pilots+1], 2*[average_acc], '--b')
ax.set_xlim([1, n_pilots + 1])
ax.set_ylim([0,1])
ax.grid(True, axis='y')
ax.set_xlabel('Pilot')
ax.set_ylabel('Accuracy / Kappa score')
# Kappa score
ax.bar(x=np.arange(1 + 1.25*w, n_pilots + 1), height=test_score['kappa'], width=w,
color='orange', alpha=0.9, align='edge')
ax.plot([0, n_pilots + 1], 2*[average_kappa], '--r')
ax.set_title(title)
ax.legend(['Average accuracy', 'Average kappa', 'Accuracy score', 'Kappa score'])
fig.text(1.01, average_acc-0.01, "{:.2f}".format(average_acc), fontsize='large', color='b', transform = ax.transAxes)
fig.text(1.01, average_kappa-0.01, "{:.2f}".format(average_kappa), fontsize='large', color='r', transform = ax.transAxes)
return fig
def plot_accuracy_barchart(test_score, model_name, n_pilots):
plt.figure(figsize=(10,6))
plt.plot([0,n_pilots+1], 2*[np.mean(test_score['accuracy'])], '--r')
plt.legend(['Average accuracy'])
plt.bar(x=range(1, n_pilots+1), height=test_score['accuracy'], tick_label=range(1, n_pilots+1))
plt.title("Accuracy on test dataset for each pilot - {}".format(model_name))
plt.ylabel("Accuracy")
plt.ylim([0, 1])
plt.xlabel("Pilot")
plt.xlim([0, n_pilots+1])
plt.grid(True, axis='y')
return plt
def plot_loss(loss_history, pilot_idx):
plt.figure()
plt.plot(loss_history.history['loss'])
plt.plot(loss_history.history['val_loss'])
plt.ylabel("Loss")
plt.xlabel("Epochs")
plt.title("Training & validation loss - Pilot {}".format(pilot_idx))
plt.legend(['Training loss','Validation loss']);
return plt
def compute_cm(y_true, y_pred, classes, normalize=False, title=None, fig=None, ax_idx=0):
''' TODO '''
if fig:
ax = fig.get_axes()[ax_idx]
else:
fig, ax = plt.subplots()
# Compute confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
ax.set_xticks(ticks=np.arange(cm.shape[1]))
    ax.set_yticks(ticks=np.arange(cm.shape[0]))
from __future__ import division
import os
import re
import vtk
import sys
import csv
import math
import shutil
import random
import pickle
import numpy as np
import pandas as pd
from sympy import *
import nibabel as nib
from pathlib import Path
from numpy import linalg
from sklearn import metrics
import plotly.express as px
import matplotlib.pyplot as plt
from collections import namedtuple
from nipype.interfaces import fsl
import plotly.graph_objects as go
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from nipype.testing import example_data
from mpl_toolkits.mplot3d import Axes3D
# defining nii to stl conversion
def nii_2_mesh(filename_nii, filename_stl, label):
try:
reader = vtk.vtkNIFTIImageReader()
reader.SetFileName(filename_nii)
reader.Update()
surf = vtk.vtkDiscreteMarchingCubes()
surf.SetInputConnection(reader.GetOutputPort())
surf.SetValue(0, label)
surf.Update()
smoother= vtk.vtkWindowedSincPolyDataFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
smoother.SetInput(surf.GetOutput())
else:
smoother.SetInputConnection(surf.GetOutputPort())
smoother.SetNumberOfIterations(30)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.GenerateErrorScalarsOn()
smoother.Update()
writer = vtk.vtkSTLWriter()
writer.SetInputConnection(smoother.GetOutputPort())
writer.SetFileTypeToASCII()
writer.SetFileName(filename_stl)
writer.Write()
except:
pass
#Ellipsoid tool
class EllipsoidTool:
def __init__(self):
pass
def getMinVolEllipse(self, P=None, tolerance=0.01):
(N, d) = np.shape(P)
d = float(d)
Q = np.vstack([np.copy(P.T), np.ones(N)])
QT = Q.T
err = 1.0 + tolerance
u = (1.0 / N) * np.ones(N)
while err > tolerance:
V = np.dot(Q, np.dot(np.diag(u), QT))
M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q)))
j = np.argmax(M)
maximum = M[j]
step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))
new_u = (1.0 - step_size) * u
new_u[j] += step_size
err = np.linalg.norm(new_u - u)
u = new_u
center = np.dot(P.T, u)
A = linalg.inv(
np.dot(P.T, np.dot(np.diag(u), P)) -
np.array([[a * b for b in center] for a in center])
) / d
U, s, rotation = linalg.svd(A)
radii = 1.0/np.sqrt(s)
return (center, radii, rotation)
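# Hedged usage sketch (hypothetical data; defined here but never called by the script):
def _example_min_vol_ellipse():
    pts = np.random.randn(500, 3)  # random 3-D point cloud
    center, radii, rotation = EllipsoidTool().getMinVolEllipse(pts, tolerance=0.01)
    return center, radii, rotation  # ellipsoid centre, semi-axes and orientation matrix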
#Convexhull 2D tool
Point = namedtuple('Point', 'x y')
class ConvexHull(object):
_points = []
_hull_points = []
def __init__(self):
pass
def add(self, point):
self._points.append(point)
def _get_orientation(self, origin, p1, p2):
difference = (
((p2.x - origin.x) * (p1.y - origin.y))
- ((p1.x - origin.x) * (p2.y - origin.y))
)
return difference
def compute_hull(self):
points = self._points
start = points[0]
min_x = start.x
for p in points[1:]:
if p.x < min_x:
min_x = p.x
start = p
point = start
self._hull_points.append(start)
far_point = None
while far_point is not start:
p1 = None
for p in points:
if p is point:
continue
else:
p1 = p
break
far_point = p1
for p2 in points:
if p2 is point or p2 is p1:
continue
else:
direction = self._get_orientation(point, far_point, p2)
if direction > 0:
far_point = p2
self._hull_points.append(far_point)
point = far_point
def get_hull_points(self):
if self._points and not self._hull_points:
self.compute_hull()
return self._hull_points
def display(self):
x = [p.x for p in self._points]
y = [p.y for p in self._points]
plt.plot(x, y, marker='D', linestyle='None')
hx = [p.x for p in self._hull_points]
hy = [p.y for p in self._hull_points]
plt.plot(hx, hy)
plt.title('Convex Hull')
plt.show()
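# Hedged usage sketch (hypothetical points; the interior point (1, 1) is not part of
# the returned hull):
def _example_convex_hull():
    hull = ConvexHull()
    for x, y in [(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)]:
        hull.add(Point(x, y))
    return hull.get_hull_points()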
#plane equation for three given points
def equation_plane(p1, p2, p3):
x1 = p1[0]
y1 = p1[1]
z1 = p1[2]
x2 = p2[0]
y2 = p2[1]
z2 = p2[2]
x3 = p3[0]
y3 = p3[1]
z3 = p3[2]
global a,b,c,d
a = ((y2 - y1) * (z3 - z1) - (y3 - y1) * (z2 - z1))/((x2 - x1) * (y3 - y1) - (y2 - y1) * (x3 - x1))
b = ((x3 - x1) * (z2 - z1) - (x2 - x1) * (z3 - z1))/((x2 - x1) * (y3 - y1) - (y2 - y1) * (x3 - x1))
c = 1
d = - a * x1 - b * y1 - c * z1
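# Hedged sanity check (hypothetical points; relies on the module-level a, b, c, d set
# by equation_plane above, with the convention a*x + b*y + c*z + d = 0):
def _example_equation_plane():
    equation_plane([0, 0, 0], [1, 0, 0], [0, 1, 0])
    # the plane through these three points is z = 0, so a == b == d == 0 and c == 1
    return a, b, c, d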
if __name__ == "__main__":
# Before: HD-BET Installation and Identification
if not os.path.exists("HD-BET"):
os.system("git clone https://github.com/MIC-DKFZ/HD-BET")
        # each os.system call runs in its own shell, so chain cd and pip into one command
        os.system("cd HD-BET && pip3 install -e .")
introduction = input("\nYou need to give cluster classification number, pre-operative and post-operative SCALP files. MNI152 raw file is optional. Press enter to continue.\n\n")
cluster_label = input("\n(REQUIRED) Type in the number you would like to classify the segmented FLAP files with, from 000 to 999. Make sure the number is not repeated in other cluster folders. If repeated, the folder will be overwritten.\n\n")
if not os.path.exists('Clusters_' + str(cluster_label)):
os.mkdir('Clusters_' + str(cluster_label))
preop_name = input("\n(REQUIRED) Type in the name of the pre-operative SCALP file. Make sure you include nii.gz format.\n\n")
postop_name = input("\n(REQUIRED) Type in the name of the post-operative SCALP file. Make sure you include nii.gz format.\n\n")
MNI_registration = input("\n(OPTIONAL) Type Y if you would like to register pre-operative and post-operative SCALP files to MNI152 SCALP file. If not, press enter instead. We do not recommend MNI normalization because the predicted flap region size may be insufficient.\n\n")
# Optional: Task 0 : MNI Scalp Extraction and Input Normalization
    # Comment on Task 0 : From testing, Task 0 may not have a significant effect on scalp segmentation.
if (MNI_registration == 'Y'):
# Task 0.1 : MNI brain segmentation.
MNI_original = input("\nType in the name of the unprocessed MNI152 file.\n\n")
os.rename(MNI_original, "MNI-template.nii.gz")
os.system("hd-bet -i MNI-template.nii.gz -device cpu -mode fast -tta 0")
#remove "-device cpu -mode fast -tta 0" if GPU support is available.
#install HD-BET from https://github.com/MIC-DKFZ/HD-BET. HD-BET is the most up-to-date brain segmentation algorithm (6/17/21).
# Task 0.2 : Making MNI SCALP file.
os.remove("MNI-template_bet.nii.gz")
os.rename("MNI-template_bet_mask.nii.gz", "MNI-template_BRAINMASK.nii.gz")
brain_mask = nib.load('MNI-template_BRAINMASK.nii.gz')
MNI_ref = nib.load('MNI-template.nii.gz')
brain_mask_A = np.array(brain_mask.dataobj)
MNI_ref_A = np.array(MNI_ref.dataobj)
# Task 0.2.1 : Dimension check.
if(brain_mask_A.shape == MNI_ref_A.shape):
for x in range(0, brain_mask_A.shape[0]-1):
for y in range(0, brain_mask_A.shape[1]-1):
for z in range(0, brain_mask_A.shape[2]-1):
if(brain_mask_A[x][y][z] > 0):
MNI_ref_A[x][y][z] = 0
else:
print("Comparison not possible due to difference in dimensions.")
# Task 0.2.2 : Volume Restriction.
for x in range(0, MNI_ref_A.shape[0]-1):
for y in range(0, MNI_ref_A.shape[1]-1):
for z in range(0, MNI_ref_A.shape[2]-1):
if(x < ((MNI_ref_A.shape[0]-1)*0.03) or x > ((MNI_ref_A.shape[0]-1)*0.96) or y < ((MNI_ref_A.shape[1]-1)*0.01) or y > ((MNI_ref_A.shape[1]-1)*0.99) or z < ((-(MNI_ref_A.shape[2]-1)*y*0.000275)+85)):
MNI_ref_A[x][y][z] = 0
# Task 0.2.3 : Maximum value check.
def paraMAX():
M = 0
for x in range(int(0.05*(MNI_ref_A.shape[0]-1)),int(0.95*(MNI_ref_A.shape[0]-1))):
for y in range(int(0.05*(MNI_ref_A.shape[1]-1)),int(0.95*(MNI_ref_A.shape[1]-1))):
for z in range(int(0.05*(MNI_ref_A.shape[2]-1)),int(0.95*(MNI_ref_A.shape[2]-1))):
if(M < MNI_ref_A[x][y][z]):
M = MNI_ref_A[x][y][z]
return M
# Task 0.2.4 : Filtering by maximum threshold.
MAX = paraMAX()
MAX_thres = 0.225*MAX
for x in range(0, MNI_ref_A.shape[0]-1):
for y in range(0, MNI_ref_A.shape[1]-1):
for z in range(0, MNI_ref_A.shape[2]-1):
if(MNI_ref_A[x][y][z] < MAX_thres):
MNI_ref_A[x][y][z] = 0
# Task 0.2.5 : Removing non-scalp voxels by area inspection.
ns_thres = 0.34*MAX
for x in range(1, MNI_ref_A.shape[0]-1):
for y in range(1, MNI_ref_A.shape[1]-1):
for z in range(1, MNI_ref_A.shape[2]-1):
M = 0
for k in range(-1,2):
for m in range(-1,2):
for n in range(-1,2):
if MNI_ref_A[x+k][y+m][z+n] >= M:
M = MNI_ref_A[x+k][y+m][z+n]
if M < ns_thres:
MNI_ref_A[x][y][z] = 0
# Task 0.2.6 : Extraction
MNI_scalp_array = nib.Nifti1Image(MNI_ref_A, affine=np.eye(4))
nib.save(MNI_scalp_array, "MNI-template_SCALP.nii.gz")
# Task 0.3 : Aligning pre-operative and post-operative SCALP files onto MNI SCALP file.
flt1 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
flt1.inputs.in_file = preop_name
flt1.inputs.reference = 'MNI-template_SCALP.nii.gz'
flt1.inputs.output_type = "NIFTI_GZ"
flt1.cmdline
res = flt1.run()
os.remove(str(preop_name[:-7]) + '_flirt.mat')
preop_name = str(preop_name[:-7]) + '_flirt.nii.gz'
flt2 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
flt2.inputs.in_file = postop_name
flt2.inputs.reference = 'MNI-template_SCALP.nii.gz'
flt2.inputs.output_type = "NIFTI_GZ"
flt2.cmdline
res = flt2.run()
os.remove(str(postop_name[:-7]) + '_flirt.mat')
        postop_name = str(postop_name[:-7]) + '_flirt.nii.gz'
# Task 1 : Intensity Normalization, Difference NIFTI Generation, and Mesh
preop = nib.load(preop_name) #might need to change to normalized name
A = np.array(preop.dataobj) #might need to change to normalized name
postop = nib.load(postop_name)
B = np.array(postop.dataobj)
normal_diff = np.zeros((A.shape[0],A.shape[1],A.shape[2]))
AA = A/(np.max(A))
BB = B/(np.max(B))
MIN_thres = 0.34
# Aligning postoperative SCALP file to preoperative SCALP file lowers accuracy, so it is avoided in this script. Minimum threshold for absolute intensity difference is manually selected after testing with pre-operative and post-operative T1w files (Patient 3) in OPENNEURO Database: https://openneuro.org/datasets/ds001226/versions/1.0.0 and https://openneuro.org/datasets/ds002080/versions/1.0.1. This is subject to change, depending on whether T1w is registered with T2w before analysis or not. However, skull segmentation is better with T1w only (confirmed).
for x in range(0,AA.shape[0]-1):
for y in range(0,AA.shape[1]-1):
for z in range(0,AA.shape[2]-1):
                if abs(AA[x][y][z] - BB[x][y][z]) > MIN_thres and BB[x][y][z] == 0:
normal_diff[x][y][z] = 1
else:
normal_diff[x][y][z] = 0
normal_diff_nib = nib.Nifti1Image(normal_diff, affine=np.eye(4))
nib.save(normal_diff_nib, "normalized_difference_MNIenabled.nii.gz")
filename_nii = 'normalized_difference_MNIenabled.nii.gz'
filename_stl = filename_nii[:-7] + '.stl'
label = 1
nii_2_mesh(filename_nii, filename_stl, label)
shutil.move('normalized_difference_MNIenabled.stl','Clusters_' + str(cluster_label))
# Completion 1 notified.
confirmation1 = input("\nNormalized, unprocessed FLAP File generated and mesh generated (relocated). Press enter to continue.")
# Task 2 : DBSCAN Segmentation (C++ implementation)
print("\nFor DBSCAN segmentation, epsilon value is 8, and minpoints value is 40. For eps and minpts, edit main.cpp if needed. For size thresholds, edit this script.")
a = nib.load('normalized_difference_MNIenabled.nii.gz')
A = np.array(a.dataobj)
valid_coord = []
for x in range(0,A.shape[0]-1):
for y in range(0,A.shape[1]-1):
for z in range(0,A.shape[2]-1):
if (A[x][y][z] == 1):
valid_coord += [[x,y,z]]
np.savetxt("valid_coord.csv", valid_coord, delimiter=",")
with open('valid_coord.csv', newline='') as csvfile:
data = np.array(list(csv.reader(csvfile)))
data_float = data.astype(float)
add = np.array([[data_float.shape[0], ' ', ' ']])
DBSCAN_prep = np.concatenate((add, data_float))
np.savetxt("valid_coord.dat", DBSCAN_prep, fmt='%s', delimiter=',')
os.remove("valid_coord.csv")
df = pd.read_csv("valid_coord.dat", sep=";", header=0)
    df.rename(columns=lambda x: re.sub(r'\D', '', x), inplace=True)
df.to_csv("valid_coord_DBSCANprep.dat", sep = ' ', index = False)
os.remove("valid_coord.dat")
os.system("g++ main.cpp dbscan.cpp -o DBSCANcsv")
os.system("./DBSCANcsv")
os.remove("valid_coord_DBSCANprep.dat")
# Cluster organization (cluster_label, Ashape, and valid_coord given from input)
DBSCANraw = np.genfromtxt('DBSCAN_raw.csv')
rownum = int(DBSCANraw.shape[0]/4 - 1)
cluster_max = 0
for i in range(rownum + 1):
if DBSCANraw[4*i + 3] >= cluster_max:
cluster_max = int(DBSCANraw[4*i + 3])
cluster_lists = [[] for i in range(cluster_max + 1)]
for i in range(rownum + 1):
if DBSCANraw[4*i + 3] >= 1:
cluster_lists[int(DBSCANraw[4*i + 3])].append([valid_coord[i]])
for r in range(1,cluster_max + 1):
cluster_indi = np.array(cluster_lists[r])
cluster_coord = np.zeros((A.shape[0], A.shape[1], A.shape[2]))
for s in range(len(cluster_indi)):
cluster_coord[cluster_indi[s][0][0],cluster_indi[s][0][1],cluster_indi[s][0][2]] = 1
if len(cluster_indi) >= 8000:
cluster_nib = nib.Nifti1Image(cluster_coord, affine=np.eye(4))
nib.save(cluster_nib, "DBSCAN-cluster" + str(r) + ".nii.gz")
filename_nii = "DBSCAN-cluster" + str(r) + ".nii.gz"
filename_stl = filename_nii[:-7] + '.stl'
label = 1
nii_2_mesh(filename_nii, filename_stl, label)
shutil.move("DBSCAN-cluster" + str(r) + ".nii.gz", 'Clusters_' + str(cluster_label))
shutil.move("DBSCAN-cluster" + str(r) + ".stl", 'Clusters_' + str(cluster_label))
os.remove("DBSCAN_raw.csv")
shutil.move('normalized_difference_MNIenabled.nii.gz','Clusters_' + str(cluster_label))
# Task 3 : K-Means Segmentation
filename = input("\nType in the path to the NIFTI file you would like to apply kmeans to. Include nii.gz format for the NIFTI file. For example, if the file is in 'a' folder, type './a/file_name'.\n\n")
clusters = input("\nType in the number of clusters you would like to segment in the file. This depends on the file, but usually it is 2 or 3, given that DBSCAN analysis has been done.\n\n")
print("\nWorking...\n")
a = nib.load(str(filename))
A = np.array(a.dataobj)
points = []
for x in range(A.shape[0]):
for y in range(A.shape[1]):
for z in range(A.shape[2]):
if A[x][y][z] == 1:
points += [[x,y,z]]
points = np.array(points)
kmeans = KMeans(n_clusters=int(clusters), random_state=1).fit(points)
compiled = [[] for _ in range(int(clusters))]
for l in range(len(kmeans.labels_)):
compiled[int(kmeans.labels_[l])] += [points[l]]
for c in range(int(clusters)):
i = np.zeros((A.shape[0],A.shape[1],A.shape[2]))
for p in range(len(compiled[c])):
i[compiled[c][p][0],compiled[c][p][1],compiled[c][p][2]] = 1
I = nib.Nifti1Image(i, affine=np.eye(4))
nib.save(I, "kmeans_" + str(c+1) + ".nii.gz")
filename_nii = "kmeans_" + str(c+1) + ".nii.gz"
if not os.path.exists('kmeans_'+str(c+1)):
os.mkdir('kmeans_'+str(c+1))
shutil.move("kmeans_" + str(c+1) + ".nii.gz",'kmeans_'+str(c+1))
print("\nK-means segmentation completed.\n")
# Task 4 : MNI Polygonalization
scalp_input = input("Type in the path to scalp file you desire to polygonalize to MNI space from K-means cluster folder. Please include nii.gz format. If the file is in 'a' directory, type './a/file_name'.\n")
MNI_input = input("Type in the reference MNI scalp file. Please include nii.gz format.\n")
#loading MNI SCALP template
m = nib.load(str(MNI_input))
M = np.array(m.dataobj)
#finding ellipsoid center for MNI SCALP template (symbol: O)
P = []
for x in range(M.shape[0]):
for y in range(M.shape[1]):
for z in range(M.shape[2]):
if M[x][y][z] != 0:
P += [[x,y,z]]
P = np.array(P)
P_y = [v[1] for v in P]
#selecting 25000 points due to limits in RAM.
randomlist = random.sample(range(0, len(P)), 25000)
Q = []
for i in range(len(randomlist)):
Q += [P[randomlist[i]]]
Q = np.array(Q)
ET = EllipsoidTool()
(center, radii, rotation) = ET.getMinVolEllipse(Q, .01)
    # The MNI centerpoint keeps only the x and z ellipsoid coordinates and uses the mean y to respect the y-axis symmetry
O = [center[0], np.average(P_y), center[2]]
#loading kmeans
a = nib.load(str(scalp_input))
A = np.array(a.dataobj)
#saving kmeans coordinates
K_coord = []
for x in range(A.shape[0]):
for y in range(A.shape[1]):
for z in range(A.shape[2]):
if A[x][y][z] != 0:
K_coord += [[x,y,z]]
#finding different centers of cluster
C = []
C_x = [v[0] for v in K_coord]
C_y = [v[1] for v in K_coord]
C_z = [v[2] for v in K_coord]
xx = (max(C_x)+min(C_x))/2
yy = (max(C_y)+min(C_y))/2
zz = (max(C_z)+min(C_z))/2
C_middle = [xx,yy,zz]
C_median = [np.median(C_x),np.median(C_y),np.median(C_z)]
    C_centroid = [np.average(C_x), np.average(C_y), np.average(C_z)]
import numpy as np
# import cupy as np
# def softmax_cross_entropy(x, y):
#     ''' Apply softmax to the input first, then compute the loss with cross entropy '''
# # softmax forward
# x = x - np.max(x)
# out = np.exp(x) / np.reshape(np.sum(np.exp(x), 1), (x.shape[0], 1))
# loss, dout = cross_entropy(out, y)
# diag = np.zeros((dout.shape[0],dout.shape[1],dout.shape[1]))
# for i in range(diag.shape[0]):
# diag[i, :, :] = np.diag(out[i])
#     # Compute the gradient: dout reshaped to N x C x 1 * (diag - out reshaped to N x C x 1 @ out reshaped to N x 1 x C (for N=1 this is just <EMAIL>)) -> N x C x C (each row of this matrix holds the derivatives of yi w.r.t. every x, each column the derivatives of every y w.r.t. xi) -> sum -> N x 1 x C = N x C
# dx = np.sum(dout.reshape(dout.shape[0], -1, 1) * (diag - out.reshape((out.shape[0], -1, 1)) @ out.reshape((out.shape[0], 1, -1))), 1)
# return loss, dx
def cross_entropy(pred, y):
'''
    Cross entropy.
    Args:
        pred: the output of the softmax function (this function does not apply softmax itself)
        y: the correct labels (integer class indices, not one-hot)
    Return:
        loss: the loss
        dpred: derivative of the loss with respect to the input
'''
    # Take -log of the probability at the labelled class and sum them up
    # For backprop, the derivative at the labelled class becomes -1/pred and all others are 0
y = y.astype(np.int)
    # Clamp the minimum value so that extreme values like 1e-253 do not blow up
pred = np.clip(pred, 1e-10, 1)
log_pred = -np.log(pred)
loss = np.sum(log_pred[np.arange(0, pred.shape[0]), y]) / pred.shape[0]
dpred = np.zeros_like(pred)
dpred[np.arange(0, pred.shape[0]), y] = - pred[np.arange(0, pred.shape[0]), y] ** (-1)
dpred = dpred / pred.shape[0]
return loss, dpred
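# Hedged sanity check (hypothetical numbers, not part of the original module):
def _example_cross_entropy():
    pred = np.array([[0.7, 0.2, 0.1],
                     [0.1, 0.8, 0.1]])   # softmax outputs for a batch of two samples
    y = np.array([0, 1])                  # correct class indices
    loss, dpred = cross_entropy(pred, y)
    # loss == (-log(0.7) - log(0.8)) / 2, roughly 0.29
    return loss, dpred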
def hinge_loss(scores, y):
'''
    Hinge loss.
    Args:
        scores: the output of the final fully connected layer
        y: the correct labels (integer class indices, not one-hot)
    Return:
        loss: the loss
        dscores: derivative of the loss with respect to the input
'''
y = y.astype(np.int)
    # Select the score of the correct class, yi
score_y = scores[range(y.shape[0]), y]
# si - yi + 1
score_plus_1_minus_y = scores + 1 - score_y.reshape((score_y.shape[0], 1))
loss_array = np.maximum(0, score_plus_1_minus_y)
loss_array[range(y.shape[0]), y] = 0
    # The division mainly scales loss and dloss down a bit; it does not change the loss distribution
    loss = np.sum(loss_array) / y.shape[0]
"""
Inference module for PCM toolbox with main functionality for model fitting and evaluation.
@author: jdiedrichsen
"""
import numpy as np
from numpy.linalg import solve, eigh, cholesky
from numpy import sum, diag, log, eye, exp, trace, einsum
import pandas as pd
import PcmPy as pcm
from PcmPy.model import IndependentNoise, BlockPlusIndepNoise
from PcmPy.optimize import newton
def likelihood_individ(theta, M, YY, Z, X=None,
Noise = IndependentNoise(),
n_channel=1, fit_scale=False, scale_prior = 1e3, return_deriv=0):
"""Negative Log-Likelihood of the data and derivative in respect to the parameters
Parameters:
theta (np.array):
Vector of (log-)model parameters - these include model, signal, and noise parameters
M (PcmPy.model.Model):
Model object with predict function
YY (2d-np.array):
NxN Matrix of outer product of the activity data (Y*Y')
Z (2d-np.array):
NxQ Design matrix - relating the trials (N) to the random effects (Q)
X (np.array):
Fixed effects design matrix - will be accounted for by ReML
Noise (pcm.Noisemodel):
Pcm-noise mode to model block-effects (default: IndepenentNoise)
n_channel (int):
Number of channels
fit_scale (bool):
Fit a scaling parameter for the model (default is False)
scale_prior (float):
Prior variance for log-normal prior on scale parameter
return_deriv (int):
0: Only return negative loglikelihood
1: Return first derivative
2: Return first and second derivative (default)
Returns:
negloglike (double):
Negative log-likelihood of the data under a model
dLdtheta (1d-np.array):
First derivative of negloglike in respect to the parameters
ddLdtheta2 (2d-np.array):
Second derivative of negloglike in respect to the parameters
"""
N = YY.shape[0]
Q = Z.shape[1]
n_param = theta.shape[0]
# Get G-matrix and derivative of G-matrix in respect to parameters
model_params = theta[range(M.n_param)]
G,dGdtheta = M.predict(model_params)
# Get the scale parameter and scale G by it
if fit_scale:
scale_param = theta[M.n_param]
indx_scale = M.n_param # Index of scale parameter
else:
scale_param = 0
Gs = G * exp(scale_param)
# Get the noise model parameters
noise_params = theta[M.n_param+fit_scale:]
# Apply the matrix inversion lemma. The following statement is the same as
# V = (Z*Gs*Z' + S(noiseParam));
# iV = pinv(V);
Gs = (Gs + Gs.T) / 2 # Symmetrize
Glambda, GU = eigh(Gs)
idx = Glambda > (10e-10) # Find small eigenvalues
Zu = Z @ GU[:, idx]
iS = Noise.inverse(noise_params)
if type(iS) is np.float64:
matrixInv = (diag(1 / Glambda[idx]) / iS + Zu.T @ Zu)
iV = (eye(N) - Zu @ solve(matrixInv, Zu.T)) * iS
else:
matrixInv = (diag(1 / Glambda[idx]) + Zu.T @ iS @ Zu)
iV = iS - iS @ Zu @ solve(matrixInv,Zu.T) @ iS
# For ReML, compute the modified inverse iVr
if X is not None:
iVX = iV @ X
iVr = iV - iVX @ solve(X.T @ iVX, iVX.T)
else:
iVr = iV
# Computation of (restricted) likelihood
ldet = -2 * sum(log(diag(cholesky(iV)))) # Safe computation
llik = -n_channel / 2 * ldet - 0.5 * einsum('ij,ij->',iVr, YY)
if X is not None:
# P/2 log(det(X'V^-1*X))
llik -= n_channel * sum(log(diag(cholesky(X.T @ iV @X)))) #
if fit_scale:
llik -= scale_param**2 / (2 * scale_prior) # Add prior
# If no derivative - exit here
if return_deriv == 0:
return (-llik,) # Return as tuple for consistency
# Calculate the first derivative
A = iVr @ Z
B = YY @ iVr
iVdV = []
# Get the quantity iVdV = inv(V)dVdtheta for model parameters
for i in range(M.n_param):
iVdV.append(A @ dGdtheta[i,:,:] @ Z.T * exp(scale_param))
# Get iVdV for scaling parameter
if fit_scale:
iVdV.append(A @ G @ Z.T * exp(scale_param))
# Get iVdV for Noise parameters
for j in range(Noise.n_param):
dVdtheta = Noise.derivative(noise_params,j)
if type(dVdtheta) is np.float64:
iVdV.append(iVr * dVdtheta)
else:
iVdV.append(iVr @ dVdtheta)
# Based on iVdV we can get he first derivative
dLdtheta = np.zeros((n_param,))
for i in range(n_param):
        dLdtheta[i] = -n_channel / 2 * trace(iVdV[i]) + 0.5 * einsum('ij,ij->',iVdV[i], B) # Trace(iVdV[i] @ B)
if fit_scale:
dLdtheta[indx_scale] -= scale_param / scale_prior
# If only first derivative, exit here
if return_deriv == 1:
return (-llik, -dLdtheta)
# Calculate expected second derivative
d2L = np.zeros((n_param,n_param))
for i in range(n_param):
for j in range(i, n_param):
d2L[i, j] = -n_channel / 2 * einsum('ij,ji->',iVdV[i],iVdV[j]) # Trace(A@B)
d2L[j, i] = d2L[i, j]
if fit_scale:
d2L[indx_scale, indx_scale] -= 1 / scale_prior
if return_deriv == 2:
return (-llik, -dLdtheta, -d2L)
else:
raise NameError('return_deriv needs to be 0, 1 or 2')
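# Hedged sketch (all arguments are placeholders): a forward-difference check of the
# analytic gradient returned by likelihood_individ, useful when adding new models.
def _example_gradient_check(theta, M, YY, Z, eps=1e-5):
    f0, g0 = likelihood_individ(theta, M, YY, Z, return_deriv=1)
    num_g = np.zeros_like(theta)
    for i in range(theta.shape[0]):
        d = np.zeros_like(theta)
        d[i] = eps
        f_plus, = likelihood_individ(theta + d, M, YY, Z, return_deriv=0)
        num_g[i] = (f_plus - f0) / eps
    return np.max(np.abs(num_g - g0))  # should be small if the gradient is correct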
def likelihood_group(theta, M, YY, Z, X=None,
Noise=IndependentNoise(),
n_channel=1, fit_scale=True, scale_prior=1e3,
return_deriv=0,return_individ=False):
"""Negative Log-Likelihood of group data and derivative in respect to the parameters
Parameters:
theta (np.array):
Vector of (log-)model parameters consisting of common model parameters (M.n_param or sum of M.common_param),
participant-specific parameters (interated by subject), unique model parameters (not in common_param),
scale parameter,noise parameters
M (pcm.Model):
Model object
YY (List of np.arrays):
List of NxN Matrix of outer product of the activity data (Y*Y')
Z (List of 2d-np.array):
NxQ Design matrix - relating the trials (N) to the random effects (Q)
X (List of np.array):
Fixed effects design matrix - will be accounted for by ReML
Noise (List of pcm.Noisemodel):
Pcm-noise model (default: IndependentNoise)
n_channel (List of int):
Number of channels
fit_scale (bool):
Fit a scaling parameter for the model (default is False)
scale_prior (float):
Prior variance for log-normal prior on scale parameter
return_deriv (int):
0: Only return negative likelihood
1: Return first derivative
2: Return first and second derivative (default)
return_individ (bool):
return individual likelihoods instead of group likelihood
return_deriv (int):
0:None, 1:First, 2: second
Returns:
negloglike:
Negative log-likelihood of the data under a model
dLdtheta (1d-np.array)
First derivative of negloglike in respect to the parameters
ddLdtheta2 (2d-np.array)
Second derivative of negloglike in respect to the parameters
"""
n_subj = len(YY)
n_param = theta.shape[0]
# Determine the common parameters to the group
if hasattr(M,'common_param'):
common_param = M.common_param
else:
common_param = np.ones((M.n_param,),dtype=np.bool_)
# Get the number of parameters
n_common = np.sum(common_param) # Number of common params
n_modsu = M.n_param - n_common # Number of subject-specific model params
n_scale = int(fit_scale) # Number of scale parameters
n_noise = Noise[0].n_param # Number of noise params
n_per_subj = n_modsu + n_scale + n_noise # Number of parameters per subj
# Generate the indices into the theta vector
indx_common = np.array(range(n_common))
indx_subj = np.arange(n_common, n_common + n_subj * n_per_subj, n_per_subj, dtype = int)
indx_subj = indx_subj.reshape((1,-1))
indx_modsu = np.zeros((n_modsu,1),dtype = int) + indx_subj
indx_scale = np.zeros((n_scale,1),dtype = int) + n_modsu + indx_subj
indx_noise = np.array(range(n_noise),dtype = int).T + n_scale + n_modsu + indx_subj
# preallocate the arrays
nl = np.zeros((n_subj,))
dFdh = np.zeros((n_subj,n_param))
dFdhh = np.zeros((n_subj,n_param,n_param))
# Loop over subjects and get individual likelihoods
for s in range(n_subj):
# Pick out the correct places for the group parameter vector for each subj
indx_model = np.zeros((M.n_param,), dtype=int)
indx_model[common_param]=indx_common
indx_model[np.logical_not(common_param)]=indx_modsu[:,s]
indx = np.concatenate([indx_model, indx_scale[:,s], indx_noise[:,s]])
ths = theta[indx]
# Get individual likelihood
res = likelihood_individ(ths, M, YY[s], Z[s], X[s],
Noise[s], n_channel[s], fit_scale = fit_scale, scale_prior = scale_prior, return_deriv = return_deriv)
iS = indx_scale[0,s]
nl[s] = res[0]
if return_deriv>0:
dFdh[s, indx] = res[1]
if return_deriv==2:
ixgrid = np.ix_([s],indx,indx)
dFdhh[ixgrid] = res[2]
# Integrate over subjects
if return_individ:
ra = [nl]
else:
ra = [np.sum(nl, axis=0)]
if return_deriv > 0:
ra.append(np.sum(dFdh,axis=0)) # First derivative
if return_deriv > 1:
ra.append(np.sum(dFdhh,axis=0)) # Second derivative
return ra
def fit_model_individ(Data, M, fixed_effect='block', fit_scale=False,
scale_prior = 1e3, noise_cov=None, algorithm=None,
                       optim_param={}, theta0=None, verbose = True):
    """Fits models to a data set individually.
The model parameters are all individually fit.
Parameters:
Data (pcm.Dataset or list of pcm.Datasets):
List data set has partition and condition descriptors
M (pcm.Model or list of pcm.Models):
Models to be fitted on the data sets
fixed effect:
None, 'block', or nd-array. Default ('block') adds an intercept for each partition
fit_scale (bool):
Fit a additional scale parameter for each subject? Default is set to False.
scale_prior (float):
Prior variance for log-normal prior on scale parameter
algorithm (string):
Either 'newton' or 'minimize' - provides over-write for model specific algorithms
noise_cov:
None (i.i.d), 'block', or optional specific covariance structure of the noise
optim_param (dict):
            Additional parameters to be passed to the optimizer
theta0 (list of np.arrays):
List of starting values (same format as return argument theta)
verbose (bool):
Provide printout of progress? Default: True
Returns:
T (pandas.dataframe):
Dataframe with the fields:
SN: Subject number
likelihood: log-likelihood
scale: Scale parameter (if fitscale = 1)-exp(theta_s)
noise: Noise parameter- exp(theta_eps)
run: Run parameter (if run = 'random')
                iterations: Number of iterations for model fit
time: Elapsed time in sec
theta (list of np.arrays):
List of estimated model parameters, each a
#params x #numSubj np.array
G_pred (list of np.arrays):
List of estimated G-matrices under the model
"""
# Get the number of subjects
if type(Data) is list:
n_subj = len(Data)
else:
n_subj = 1
Data = [Data]
# Get the number of models
if type(M) in [list,pcm.model.ModelFamily]:
n_model = len(M)
else:
n_model = 1
M = [M]
# Get model names
m_names = []
for m in range(n_model):
m_names.append(M[m].name)
# Preallocate output structures
iterab = [['likelihood','noise','iterations'],m_names]
index = pd.MultiIndex.from_product(iterab, names=['variable', 'model'])
T = pd.DataFrame(np.zeros((n_subj, n_model * 3)), columns=index)
theta = [None] * n_model
# Determine optimal algorithm for each of the models
# M = pcm.optimize.best_algorithm(M,algorithm)
    # Loop over subjects and models and provide individual fits
for s in range(n_subj):
Z,X,YY,n_channel,Noise,G_hat = set_up_fit(Data[s],
fixed_effect = fixed_effect,
noise_cov = noise_cov)
for m in range(n_model):
if verbose:
print('Fitting Subj',s,'model',m)
            # Get starting guess if theta0 is not provided
if (theta0 is None) or (len(theta0) <= m) or (theta0[m].shape[1]<s):
M[m].set_theta0(G_hat)
th0 = M[m].theta0
if (fit_scale):
G_pred, _ = M[m].predict(M[m].theta0)
scale0 = get_scale0(G_pred, G_hat)
th0 = np.concatenate((th0,scale0))
th0 = np.concatenate((th0,Noise.theta0))
else:
th0 = theta0[m][:,s]
# Now do the fitting, using the preferred optimization routine
if (M[m].algorithm=='newton'):
fcn = lambda x: likelihood_individ(x, M[m], YY, Z, X=X,
Noise = Noise, fit_scale = fit_scale, scale_prior = scale_prior, return_deriv = 2,n_channel=n_channel)
th, l, INFO = newton(th0, fcn, **optim_param)
else:
raise(NameError('not implemented yet'))
if theta[m] is None:
theta[m] = np.zeros((th.shape[0],n_subj))
theta[m][:,s] = th
T.loc[s,('likelihood',m_names[m])] = l
T.loc[s,('iterations',m_names[m])] = INFO['iter']+1
T.loc[s,('noise',m_names[m])] = exp(th[-Noise.n_param])
if fit_scale:
T.loc[s,('scale',m_names[m])] = exp(th[M[m].n_param])
return [T,theta]
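# Hedged usage sketch (datasets and models are hypothetical placeholders, not defined
# in this module): fit every model to every subject and return the likelihood table.
def _example_fit_individ(datasets, models):
    T, theta = fit_model_individ(datasets, models,
                                 fixed_effect='block',
                                 fit_scale=False,
                                 verbose=False)
    return T['likelihood'], theta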
def fit_model_group(Data, M, fixed_effect='block', fit_scale=False,
scale_prior = 1e3, noise_cov=None, algorithm=None,
                    optim_param={}, theta0=None, verbose=True):
    """ Fits PCM model(s) to a group of subjects
The model parameters are (by default) shared across subjects.
Scale and noise parameters are individual for each subject.
Some model parameters can also be made individual by setting M.common_param
Parameters:
Data (list of pcm.Datasets):
List data set has partition and condition descriptors
M (pcm.Model or list of pcm.Models):
Models to be fitted on the data sets. Optional field M.common_param indicates which model parameters are common to the group (True) and which ones are fit individually (False)
fixed effect:
            None, 'block', or nd-array / list of nd-arrays. Default ('block') adds an intercept for each partition
fit_scale (bool):
            Fit an additional scale parameter for each subject? Default is set to False.
scale_prior (float):
Prior variance for log-normal prior on scale parameter
algorithm (string):
Either 'newton' or 'minimize' - provides over-write for model specific algorithms
noise_cov:
None (i.i.d), 'block', or optional specific covariance structure of the noise
optim_param (dict):
            Additional parameters to be passed to the optimizer
theta0 (list of np.arrays):
List of starting values (same format as return argument theta)
verbose (bool):
Provide printout of progress? Default: True
Returns:
T (pandas.dataframe):
Dataframe with the fields:
SN: Subject number
likelihood: log-likelihood
scale: Scale parameter (if fitscale = 1)-exp(theta_s)
noise: Noise parameter- exp(theta_eps)
                iterations: Number of iterations for model fit
time: Elapsed time in sec
theta (list of np.arrays):
List of estimated model parameters, each a
G_pred (list of np.arrays):
List of estimated G-matrices under the model
"""
# Get the number of subjects
if type(Data) is list:
n_subj = len(Data)
else:
n_subj = 1
Data = [Data]
# Get the number of models
if type(M) is list:
n_model = len(M)
else:
n_model = 1
M = [M]
# Get model names
m_names = []
for m in range(n_model):
m_names.append(M[m].name)
# Preallocate output structures
iterab = [['likelihood','noise','scale','iterations'],m_names]
index = pd.MultiIndex.from_product(iterab, names=['variable', 'model'])
T = pd.DataFrame(np.zeros((n_subj, n_model * 4)), columns=index)
theta = [None] * n_model
# Determine optimal algorithm for each of the models
# M = pcm.optimize.best_algorithm(M,algorithm)
# Prepare the data for all the subjects
Z, X, YY, n_channel, Noise, G_hat = set_up_fit_group(Data,
fixed_effect = fixed_effect, noise_cov = noise_cov)
# Average second moment
G_avrg = sum(G_hat, axis=0) / n_subj
# Initialize the different indices
indx_scale = [None] * n_subj
indx_noise = [None] * n_subj
for m in range(n_model):
if verbose:
print('Fitting model',m)
        # Get starting guess if theta0 is not provided
if hasattr(M[m],'common_param'):
common = M[m].common_param
else:
common = np.ones((M[m].n_param,), dtype=np.bool_)
M[m].set_theta0(G_avrg)
th0 = M[m].theta0[common]
for s in range(n_subj):
th0 = np.concatenate((th0,M[m].theta0[np.logical_not(common)]))
if (fit_scale):
indx_scale[s]=th0.shape[0]
G0,_ = M[m].predict(M[m].theta0)
scale0 = get_scale0(G0, G_hat[s])
th0 = np.concatenate((th0,scale0))
indx_noise[s]=th0.shape[0]
th0 = np.concatenate((th0,Noise[s].theta0))
if (theta0 is not None) and (len(theta0) >= m-1):
th0 = theta0[m]
# Now do the fitting, using the preferred optimization routine
if (M[m].algorithm=='newton'):
fcn = lambda x: likelihood_group(x, M[m], YY, Z, X=X,
Noise = Noise, fit_scale = fit_scale, scale_prior=scale_prior, return_deriv = 2,n_channel=n_channel)
theta[m], l, INFO = newton(th0, fcn, **optim_param)
else:
raise(NameError('not implemented yet'))
res = likelihood_group(theta[m], M[m], YY, Z, X=X,
Noise = Noise, fit_scale = fit_scale, scale_prior=scale_prior,return_deriv = 0,return_individ=True, n_channel=n_channel)
T['likelihood',m_names[m]] = -res[0]
T['iterations',m_names[m]] = INFO['iter']+1
T['noise',m_names[m]] = exp(theta[m][indx_noise])
if (fit_scale):
T['scale',m_names[m]] = exp(theta[m][indx_scale])
return [T,theta]
def fit_model_group_crossval(Data, M, fixed_effect='block', fit_scale=False,
scale_prior = 1e3, noise_cov=None, algorithm=None,
                    optim_param={}, theta0=None, verbose=True):
    """Fits PCM model(s) to N-1 subjects and evaluates the likelihood on the Nth subject.
Only the common model parameters are shared across subjects.The scale and noise parameters
are still fitted to each subject. Some model parameters can also be made individual by setting M.common_param
Parameters:
Data (list of pcm.Datasets):
List data set has partition and condition descriptors
M (pcm.Model or list of pcm.Models):
Models to be fitted on the data sets. Optional field M.common_param indicates which model parameters are common to the group (True) and which ones are fit individually (False)
fixed_effect:
None, 'block', or nd-array. Default ('block') adds an intercept for each partition
fit_scale (bool):
Fit an additional scale parameter for each subject? Default is set to False.
scale_prior (float):
Prior variance for log-normal prior on scale parameter
algorithm (string):
Either 'newton' or 'minimize' - provides an override for model-specific algorithms
noise_cov:
None (i.i.d), 'block', or optional specific covariance structure of the noise
optim_param (dict):
Additional parameters to be passed to the optimizer
theta0 (list of np.arrays):
List of starting values (same format as return argument theta)
verbose (bool):
Provide printout of progress? Default: True
Returns:
T (pandas.dataframe):
Dataframe with the fields:
SN: Subject number
likelihood: log-likelihood
scale: Scale parameter (if fit_scale=True), exp(theta_s)
noise: Noise parameter, exp(theta_eps)
iterations: Number of iterations for model fit
time: Elapsed time in sec
theta (list of np.arrays):
List of estimated model parameters - common group parameters come from the training data, scale and noise parameter from the testing data
G_pred (list of np.arrays):
List of estimated G-matrices under the model
"""
# Get the number of subjects
if type(Data) is list:
n_subj = len(Data)
else:
n_subj = 1
Data = [Data]
# Get the number of models
if type(M) is list:
n_model = len(M)
else:
n_model = 1
M = [M]
# Get model names
m_names = []
for m in range(n_model):
m_names.append(M[m].name)
# Preallocate output structures
iterab = [['likelihood','noise','scale'],m_names]
index = pd.MultiIndex.from_product(iterab, names=['variable', 'model'])
T = pd.DataFrame(np.zeros((n_subj, n_model * 3)), columns=index)
theta = [None] * n_model
# Determine optimal algorithm for each of the models
# M = pcm.optimize.best_algorithm(M,algorithm)
Z, X, YY, n_channel, Noise, G_hat = set_up_fit_group(Data, fixed_effect = fixed_effect, noise_cov = None)
# Get starting values as for a group fit
G_avrg = sum(G_hat, axis=0) / n_subj
for m in range(n_model):
if verbose:
print('Fitting model',m)
# Get starting guess for theta0 if not provided
if hasattr(M[m],'common_param'):
common = M[m].common_param
else:
common = np.ones((M[m].n_param,), dtype=np.bool_)
not_common = np.logical_not(common)
n_modsu = np.sum(not_common) # Number of subject-specific parameters
M[m].set_theta0(G_avrg)
th0 = M[m].theta0[common]
# Keep track of what subject the parameter belongs to
param_indx = np.ones((np.sum(common),)) * -1
for s in range(n_subj):
th0 = np.concatenate((th0,M[m].theta0[not_common]))
param_indx = np.concatenate((param_indx,s * np.ones((n_modsu,))))
if (fit_scale):
G0,_ = M[m].predict(M[m].theta0)
scale0 = get_scale0(G0, G_hat[s])
th0 = np.concatenate((th0,scale0))
param_indx = np.concatenate((param_indx,np.ones((1,)) * s))
th0 = np.concatenate((th0,Noise[s].theta0))
param_indx = np.concatenate((param_indx, s * np.ones((Noise[s].n_param,))))
if (theta0 is not None) and (len(theta0) > m):
th0 = theta0[m]
# Initialize parameter array for group
theta[m] = np.zeros((th0.shape[0],n_subj))
# Loop over subjects; fit the remaining subjects each time to get the group parameters
for s in range(n_subj):
notS = np.arange(n_subj) != s # Get indices of training group
pNotS = param_indx != s
# Set theta0 and model (for direct estimation)
G_avrg = sum(G_hat[notS], axis=0) / n_subj
M[m].set_theta0(G_avrg)
# Now do the fitting, using the preferred optimization routine
if (M[m].algorithm=='newton'):
fcn = lambda x: likelihood_group(x, M[m], YY[notS], Z[notS],
X=X[notS], Noise = Noise[notS], fit_scale = fit_scale, scale_prior = scale_prior, return_deriv = 2, n_channel=n_channel[notS])
theta[m][:,s], l, INFO = newton(th0, fcn, **optim_param)
else:
raise(NameError('not implemented yet'))
# Evaluate likelihood on the left-out subject
if hasattr(M[m],'common_param'):
raise(NameError('Group crossval with subject specific params not implemented yet'))
else:
thm = theta[m][param_indx==-1, s] # Common model parameter
G_group, _ = M[m].predict(thm) # Predicted second moment matrix
Mindiv = pcm.model.FixedModel('name',G_group) # Make a fixed model
p = param_indx == s # Parameters for this subject
fcn = lambda x: likelihood_individ(x, Mindiv, YY[s], Z[s], X=X[s], Noise = Noise[s], n_channel=n_channel[s], fit_scale = fit_scale, scale_prior = scale_prior, return_deriv = 2)
thi, l, INF2 = newton(th0[p], fcn, **optim_param)
# record results into the array
T['likelihood',m_names[m]][s] = l
if (fit_scale):
T['scale',m_names[m]][s] = exp(thi[0])
T['noise',m_names[m]][s] = exp(thi[int(fit_scale)])
return [T,theta]
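# Illustrative usage sketch (editorial; `data_list` and `models` are placeholder names
# for a list of pcm.Dataset objects and a list of pcm.Model objects built elsewhere):
#   T_cv, theta_cv = fit_model_group_crossval(data_list, models, fit_scale=True)
#   T_cv['likelihood'].mean()   # average leave-one-subject-out likelihood per model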
def set_up_fit(Data, fixed_effect = 'block', noise_cov = None):
"""Utility routine pre-calculates and sets design matrices, etc for the PCM fit
Parameters:
Data (pcm.dataset):
Contains activity data (measurement), and obs_descriptors partition and condition
fixed_effect:
Can be None, 'block', or a design matrix. 'block' includes an intercept for each partition.
noise_cov:
Can be None (i.i.d noise), 'block' (a common noise parameter), or a list of noise covariances for the different partitions
Returns:
Z (np.array):
Design matrix for random effects
X (np.array):
Design matrix for fixed effects
YY (np.array):
Quadratic form of the data (Y Y')
n_channel (int):
Number of channels (columns) in the data
Noise (pcm.model.NoiseModel):
Noise model
G_hat (np.array):
Crossvalidated estimate of second moment of U
"""
# Make design matrix
cV = Data.obs_descriptors['cond_vec']
if cV.ndim == 1:
Z = pcm.matrix.indicator(cV)
elif cV.ndim == 2:
Z = cV
n_reg = Z.shape[1]
# Get data
Y = Data.measurements
N, n_channel = Y.shape
YY = Y @ Y.T
# Initialize fixed effects
part_vec = Data.obs_descriptors['part_vec']
if fixed_effect is None:
X = None
elif fixed_effect=='block':
X = pcm.matrix.indicator(part_vec)
else:
X = fixed_effect
# Now choose the noise model
if noise_cov is None:
Noise = IndependentNoise()
elif noise_cov == 'block':
Noise = BlockPlusIndepNoise(part_vec)
else:
raise(NameError('Arbitrary covariance matrices are not yet implemented'))
# Get a cross-validated estimate of G
G_hat, _ = pcm.util.est_G_crossval(Y, Z, part_vec, X = X)
# Estimate noise parameters starting values
Noise.set_theta0(Y,Z,X)
return [Z, X, YY, n_channel, Noise, G_hat]
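# Illustrative usage sketch (editorial; `dataset` is a placeholder for a pcm.Dataset
# constructed elsewhere):
#   Z, X, YY, n_channel, Noise, G_hat = set_up_fit(dataset, fixed_effect='block')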
def set_up_fit_group(Data, fixed_effect = 'block', noise_cov = None):
"""Pre-calculates and sets design matrices, etc for the PCM fit for a full group
Parameters:
Data (list of pcm.dataset):
Contains activity data (measurement), and obs_descriptors partition and condition
fixed_effect:
Can be None, 'block', or a design matrix. 'block' includes an intercept for each partition.
noise_cov:
Can be None (i.i.d noise), 'block' (a common noise parameter), or a list of noise covariances for the different partitions
Returns:
Z (np.array): Design matrix for random effects
X (np.array): Design matrix for fixed effects
YY (np.array): Quadratic form of the data (Y Y')
n_channel (np.array): Number of channels for each subject
Noise (NoiseModel): Noise model
G_hat (np.array): Crossvalidated estimate of second moment of U
"""
n_subj = len(Data)
Z = np.empty((n_subj,),dtype=object)
X = np.empty((n_subj,),dtype=object)
YY = np.empty((n_subj,),dtype=object)
n_channel = | np.zeros((n_subj,),dtype=int) | numpy.zeros |
import numpy as np
import tensorflow as tf
import dirt
import skimage.io
import skimage
import skimage.transform
import skimage.color
import time
import os
import scipy
import scipy.optimize
import skimage.measure
from sklearn import linear_model, datasets
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cv2
from sklearn.utils import check_random_state, check_array, check_consistent_length
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import has_fit_parameter
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
import warnings
import sklearn.linear_model
_dynamic_max_trials = sklearn.linear_model.ransac._dynamic_max_trials
canvas_width, canvas_height = 960, 640
centre_x, centre_y = 32, 64
square_size = 16
def ransac_fit_with_weights(self, X, y, sample_weight=None, residual_threshold=None):
"""
Modified sklearn.linear_model.RANSACRegressor.fit()
sample_weight is used in sampling base points, fitting the regressor, and calculating score for candidate model
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples: n_samples = %d." % (X.shape[0]))
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if residual_threshold is None:
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 1
score_best = -np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
weight_inlier_best = None
self.n_skips_no_inliers_ = 0
self.n_skips_invalid_data_ = 0
self.n_skips_invalid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
self.n_trials_ = 0
max_trials = self.max_trials
while self.n_trials_ < max_trials:
self.n_trials_ += 1
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
break
# choose random sample set
#subset_idxs = sample_without_replacement(n_samples, min_samples,
# random_state=random_state)
# use np.random.choice here since it allows sample with prob
subset_idxs = np.random.choice(n_samples, min_samples, False, sample_weight / np.sum(sample_weight))
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
self.n_skips_invalid_data_ += 1
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
self.n_skips_invalid_model_ += 1
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
if sample_weight is None:
weight_inlier_subset = None
else:
weight_inlier_subset = sample_weight[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset, y_inlier_subset, weight_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
weight_inlier_best = weight_inlier_subset
max_trials = min(
max_trials,
_dynamic_max_trials(n_inliers_best, n_samples,
min_samples, self.stop_probability))
# break if sufficient number of inliers or score is reached
if n_inliers_best >= self.stop_n_inliers or \
score_best >= self.stop_score:
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if ((self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips):
raise ValueError(
"RANSAC skipped more iterations than `max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*).")
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*).")
else:
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
warnings.warn("RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `max_skips`. See estimator attributes for"
" diagnostics (n_skips*).",
ConvergenceWarning)
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best, weight_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
linear_model.RANSACRegressor.ransac_fit_with_weights = ransac_fit_with_weights
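# Illustrative usage of the monkey-patched weighted RANSAC fit (editorial sketch;
# X, y and w are placeholder arrays):
#   ransac = linear_model.RANSACRegressor(stop_probability=0.995, max_trials=200)
#   ransac.ransac_fit_with_weights(X, y, sample_weight=w)
#   inlier_mask = ransac.inlier_mask_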
def get_dirt_pixels(width=canvas_width, height=canvas_height):
square_vertices = tf.constant([[-1, -1, 0, 1], [-1, 1, 0, 1], [1, 1, 0, 1], [1, -1, 0, 1]], dtype=tf.float32)
#background = skimage.io.imread('/n/fs/shaderml/datas_oceanic/test_img/test_middle_ground00000.png')
#background = tf.constant(skimage.img_as_float(background), dtype=tf.float32)
background = tf.zeros([height, width, 3], dtype=tf.float32)
camera_pos = tf.placeholder(tf.float32, 8)
return dirt.rasterise(
vertices=square_vertices,
faces=[[0, 1, 2], [0, 2, 3]],
vertex_colors=tf.ones([4, 3]),
background=background,
camera_pos = camera_pos,
height=height, width=width, channels=3
), camera_pos
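# Editorial note (illustrative sketch): get_dirt_pixels() returns the rasterisation
# tensor together with the camera_pos placeholder, so camera parameters can be fed at
# run time, e.g.
#   dirt_node, camera_pos = get_dirt_pixels()
#   pixels = session.run(dirt_node, feed_dict={camera_pos: feed_dict_arr})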
def main():
dir = '/n/fs/shaderml/deeplab-pytorch/result'
highlight_dir = '/n/fs/shaderml/drone_videos/drone_frames/ocean3_00/highlight'
orig_img_dir = '/n/fs/shaderml/drone_videos/drone_frames/ocean3_00'
out_dir = 'horizon_optimize'
#files = os.listdir(dir)
#files = sorted([os.path.join(dir, file) for file in files if 'coco_stuff' not in file])
files = [os.path.join(dir, '%05d.png' % ind) for ind in range(0, 1860, 11)]
#camera_pos_vals = np.load(os.path.join(dir, 'camera_pos_' + name + '.npy'))
#render_t = np.load(os.path.join(dir, 'render_t_' + name + '.npy'))
#nframes = camera_pos_vals.shape[0]
feed_dict_arr = np.zeros(8)
feed_dict_arr[1] = 200.0
feed_dict_arr[7] = 0.9
img = np.zeros([640, 960, 3])
nframes = len(files)
session = tf.Session()
ransac = linear_model.RANSACRegressor(stop_probability=0.995, max_trials=200)
line_X = np.arange(960)[:, np.newaxis]
with session.as_default():
dirt_node, camera_pos = get_dirt_pixels()
for idx in range(nframes):
filename = files[idx]
print(filename)
_, filename_short = os.path.split(filename)
filename_only, _ = os.path.splitext(filename_short)
orig_img_name = os.path.join(orig_img_dir, filename_short)
if not os.path.exists(orig_img_name):
raise FileNotFoundError(orig_img_name)
orig_img = skimage.transform.resize(skimage.io.imread(orig_img_name), (img.shape[0], img.shape[1]))
seg = skimage.transform.resize(skimage.io.imread(filename), (img.shape[0], img.shape[1]))[:, :, 0]
is_sea_col = np.argmin(seg, axis=0)
ransac.fit(line_X, is_sea_col)
line_y = ransac.predict(line_X)
fig = plt.figure()
plt.imshow(orig_img)
plt.plot(np.squeeze(line_X), line_y)
fig.savefig(os.path.join(out_dir, filename_only + '_ransac_img_comp.png'))
plt.close(fig)
fig = plt.figure()
plt.imshow(seg)
plt.plot( | np.squeeze(line_X) | numpy.squeeze |
import pandas as pd
import numpy as np
from sklearn import preprocessing
#from sklearn.mixture import GMM
from sklearn.mixture import GaussianMixture as GMM
from scipy.stats import norm
from scipy.spatial.distance import pdist
import scipy.cluster.hierarchy as sch
from collections import Counter
def get_label(table):
return table.axes[0], table.axes[1]
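# Illustrative usage sketch (editorial; `df` is a placeholder for a pandas DataFrame
# or pivot table):
#   row_labels, col_labels = get_label(df)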
def sort_feature(data):
pds = | np.asarray(data) | numpy.asarray |
import csv
from re import T
import cv2
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import layers
import sklearn
from sklearn.model_selection import train_test_split
import json
import random
center_im_list = []
left_im_list = []
right_im_list = []
label_list = []
all_images = []
all_labels = []
#path_prefix = ['./data/new_data', './data/new_data_recovery', './data/new_data_smooth', './data/new_data_curve']
#path_prefix = ['./data/new_data', './data/new_data_recovery', './data/new_data_smooth']
path_prefix = ['./data/data/']
correction = 0.2
# get data from all the three cameras
for pp in path_prefix:
lines = []
with open(os.path.join(pp, 'driving_log.csv')) as f:
reader = csv.reader(f)
for l in reader:
lines.append(l)
for line in lines[1:]:
cp = os.path.join(pp, line[0].strip())
lp = os.path.join(pp, line[1].strip())
rp = os.path.join(pp, line[2].strip())
l = float(line[3])
all_images.append(cp)
all_labels.append(l)
all_images.append(lp)
all_labels.append(l + correction)
all_images.append(rp)
all_labels.append(l - correction)
#center_im_list.append(cp)
#left_im_list.append(lp)
#right_im_list.append(rp)
#label_list.append(float(line[3]))
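# Editorial sanity-check sketch: every CSV row contributes three (image, label) pairs
# (centre, left + correction, right - correction), so the two lists stay aligned:
#   assert len(all_images) == len(all_labels)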
def data_generator(samples, batch_size=32):
"""
assume samples: (img paths, labels)
"""
X, y = samples
num_samples = len(X)
while True:
X, y = sklearn.utils.shuffle(X, y)
for offset in range(0, num_samples, batch_size):
X_path_batch = X[offset:offset+batch_size]
X_batch = []
y_batch_bef = y[offset:offset+batch_size]
y_batch = []
for b_im, b_label in zip(X_path_batch, y_batch_bef):
image = cv2.imread(b_im)
# random flipping
if random.choice((True, False)):
image = image[:,::-1,:]
b_label = - b_label
X_batch.append(image)
y_batch.append(b_label)
X_batch = np.array(X_batch, dtype=np.float32)
y_batch = | np.array(y_batch) | numpy.array |
###############################################################################################################
# Here we define a dummy DockNet for testing purposes only. #
# We illustrate what is the expected behavior of a neural network with: #
# - input vectors (also called examples) of size 2 #
# - an input batch of 2 vectors (a matrix with an input example per column) #
# - a single hidden dense layer with 3 neurons (1 neuron per row) and ReLU as activation function #
# - an output layer with a single neuron and sigmoid as activation function (for binary classification) #
# Note we define all input values and parameters as small integers to ease the manual check of each #
# calculation. Each input, output and operation is written out at each step in this file. The diagram below summarizes the #
# network structure and notation: #
# #
# #
# Input layer L0 Hidden layer L1 Output layer L2 #
# #
# example 0 example 1 linear part | activation linear part | activation #
# #
# -- -- #
# | a000 a001 | /-[Z10=X*W10+b10|A10=relu(Z10)]-\ #
# | | / \ -- -- #
# X = A0 =| | ---[Z11=X*W11+b11|A11=relu(Z11)]---[Z20=A1*W20+b20|A1=sigmoid(Z20)]--| a20 a21 | = Ŷ #
# | | \ / -- -- #
# | a010 a011 | \-[Z12=X*W12+b12|A12=relu(Z12)]-/ #
# -- -- #
# #
###############################################################################################################
import numpy as np
from docknet.function.activation_function import relu, sigmoid, sigmoid_prime, relu_prime
###############################################################################################################
# Forward propagation #
# #
# Given a batch of input vectors, feed them to the network to compute the corresponding outputs #
###############################################################################################################
# Dummy input values alij, where l is the layer index (the input layer is layer 0), i is the index of the scalar within
# the input vector, and j is the input example index
from docknet.function.cost_function import cross_entropy, dcross_entropy_dYcirc
a000 = 1
a001 = 2
a010 = 3
a011 = 4
# Dummy layer 0: all input vectors in a single matrix. Each column contains one input vector.
X = A0 = np.array([
[a000, a001],
[a010, a011]
])
# m is the amount of input vectors
m = X.shape[1]
# Dummy dataset labels
Y = np.array([[0, 1]])
# Dummy parameter values wlij for layer 1, 3 neurons X 2 input values = 6 parameters; l is the layer index, i is the
# neuron index, and j is the parameter index within neuron i (one parameter per input number received).
w100 = 0.01
w101 = 0.02
w110 = 0.03
w111 = 0.04
w120 = 0.05
w121 = 0.06
# All layer 1 w parameters in a single matrix. Each row contains all the parameters of a single neuron, one parameter
# per layer input (since the input vectors contain 2 scalars, layer 1 has 2 inputs).
W1 = np.array([
[w100, w101],
[w110, w111],
[w120, w121]
])
# Dummy bias values bli for layer 1; each neuron has a bias constant that is added, so 3 bias values for 3 neurons; l is
# the layer index and i the neuron index.
b10 = 0.01
b11 = 0.02
b12 = 0.03
# All layer 1 biases in one single matrix, one per neuron.
b1 = np.array([
[b10],
[b11],
[b12]
])
# Linear computation of layer 1: dot product of W1 and A0 plus bias b1; each column corresponds to the linear
# computation of the entire layer for one single input example:
Z1 = np.array([
[w100 * a000 + w101 * a010 + b10, w100 * a001 + w101 * a011 + b10],
[w110 * a000 + w111 * a010 + b11, w110 * a001 + w111 * a011 + b11],
[w120 * a000 + w121 * a010 + b12, w120 * a001 + w121 * a011 + b12]
])
# Activation of layer 1: application of the activation function to each element in Z1; each column corresponds to the
# output of the entire layer for one input example:
A1 = relu(Z1)
a100, a101 = A1[0][0], A1[0][1]
a110, a111 = A1[1][0], A1[1][1]
a120, a121 = A1[2][0], A1[2][1]
# Dummy parameter values for layer 2 with a single neuron for binary classification: 1 neuron x 3 input values = 3 parameters:
w200 = 0.01
w201 = 0.02
w202 = 0.03
# All layer 2 w parameters in a single matrix. Each row contains all the parameters of a single neuron, one parameter
# per layer input (since layer 1 has 3 neurons, this layer has 3 inputs).
W2 = np.array([
[w200, w201, w202]
])
# Dummy bias value for layer 2
b20 = 0.01
# All layer 2 biases in one single matrix, one per neuron.
b2 = np.array([
[b20]
])
# Linear computation of layer 2: dot product of W2 and A1 plus bias b2; each column corresponds to the linear
# computation of the entire layer for one single input example:
Z2 = np.array([
[w200 * a100 + w201 * a110 + w202 * a120 + b20, w200 * a101 + w201 * a111 + w202 * a121 + b20]
])
# Activation of layer 2 and final network output Ŷ: application of the activation function to each element in Z2:
Y_circ = A2 = sigmoid(Z2)
# The network output is an horizontal vector with as many scalars as input examples in the input batch:
y_circ0, y_circ1 = Y_circ[0][0], Y_circ[0][1]
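# Editorial sanity-check sketch: the element-wise computations above should agree with
# the usual matrix formulation of forward propagation, e.g.
#   assert np.allclose(Z1, np.dot(W1, A0) + b1)
#   assert np.allclose(Y_circ, sigmoid(np.dot(W2, A1) + b2))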
###############################################################################################################
# Backward propagation #
# #
# After a forward propagation, compute the gradients of the cost function wrt each parameter in the network. #
# These gradients are the direction in which the parameters are to be subtracted some amount so that in the #
# next iteration the cost is reduced. Forward and backward propagation steps are repeated until finding a set #
# of parameter values close enough to a local minimum #
# Backward propagation is heavily based on the chain rule: #
# #
# (f(g(x))' = f'(g(x)) * g'(x) #
# #
# We start computing the derivative of the cost function J wrt the network output Ŷ, then keep computing the #
# derivatives of the cost function wrt each activation, linear part and layer parameters from the previously #
# computed derivatives #
###############################################################################################################
# The cost function:
J = cross_entropy
# Gradient of J wrt the network output:
dJdY_circ = dcross_entropy_dYcirc(Y_circ, Y)
# The network output Ŷ is A2, the activation of the second and last layer
dJdA2 = dJdY_circ
# Note we compute different derivatives for each input example (1 neuron X 2 examples = 2 derivatives)
dJda200, dJda201 = dJdA2[0][0], dJdA2[0][1]
# Backward activation of layer 2 computes dJdZ2 from dJdA2 and Z2
dJdZ2 = dJdA2 * sigmoid_prime(Z2)
# Derivatives of J wrt the linear part of the neuron; again 1 neuron X 2 examples = 2 derivatives
dJdz200, dJdz201 = dJdZ2[0][0], dJdZ2[0][1]
# Average derivatives of J wrt the parameters dJdW2 and dJdb2 inside the linear part of the neuron
# Note we compute the averages for all input examples in order to minimize the average cost for all input examples
dJdW2 = np.dot(dJdZ2, A1.T) / m
# Same amount of derivatives as parameters W in the second layer, that is, 1 neuron X 3 inputs = 3 parameters
dJdw200, dJdw201, dJdw202 = dJdW2[0][0], dJdW2[0][1], dJdW2[0][2]
# Same amount of derivatives as parameters b in the second layer, that is, 1 bias for 1 neuron = 1 derivative
dJdb2 = np.sum(dJdZ2, axis=1, keepdims=True) / m
# Derivatives of the cost function wrt the activations of layer 1 (backpropagated through layer 2)
dJdA1 = np.dot(W2.T, dJdZ2)
# Backward activation of layer 1 computes dJdZ1 from dJdA1 and Z1
dJdZ1 = dJdA1 * relu_prime(Z1)
# Backward linear computes dJdW1, dJdb1 and dJdA0 from dJdZ1 and A0
dJdW1 = | np.dot(dJdZ1, A0.T) | numpy.dot |
from csv import writer
from PIL import Image
from skimage.metrics import adapted_rand_error as are
from skimage.metrics import mean_squared_error as mse
from skimage.metrics import structural_similarity as mssim
from skimage.metrics import peak_signal_noise_ratio as psnr
import numpy as np
def write(csv, row):
with open(csv, 'a') as f_object:
writer_object = writer(f_object)
writer_object.writerow(row)
f_object.close()
def GIF(image_name, csv):
path = 'Images/Lossy/'
bmp = Image.open('Images/Standard Test Images .BMP/' + image_name + '.bmp')
reference = np.array(bmp)
# GIF
bmp.save(path + image_name + '.gif', save_all=True, append_images=[bmp])
gif = Image.open(path + image_name + '.gif')
gif = gif.convert('RGB')
test = np.array(gif)
if csv == 'Tables/Lossy/ARE.csv':
error, precision, recall = are(reference, test)
return format(error, '.3f')
else:
if csv == 'Tables/Lossy/MSE.csv':
return format(mse(reference, test), '.3f')
else:
if csv == 'Tables/Lossy/MSSIM.csv':
return format(mssim(reference, test, data_range=reference.max() - reference.min(), multichannel=True),
'.3f')
else:
return format(psnr(reference, test), '.3f')
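# Illustrative usage sketch (editorial; the image name is a placeholder, the CSV path
# follows the conventions checked inside GIF above):
#   value = GIF('cameraman', 'Tables/Lossy/MSE.csv')
#   write('Tables/Lossy/MSE.csv', ['cameraman', value])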
def JPEG(image_name, csv):
path = 'Images/Lossy/'
# JPEG
bmp = Image.open('Images/Standard Test Images .BMP/' + image_name + '.bmp')
reference = | np.array(bmp) | numpy.array |
from enum import Enum
import numpy as np
from cvxopt import matrix, solvers
from processing import vector_of_quants
import math
class KnowledgePatternManager:
@staticmethod
def checkConsistency(knowledgePattern):
return KnowledgePatternManager.__getConsistencyChecker(knowledgePattern.type) \
.isConsistent(knowledgePattern)
@staticmethod
def __getConsistencyChecker(type):
if type == KnowledgePatternType.QUANTS:
return QuantConsistencyChecker()
elif type == KnowledgePatternType.DISJUNCTS:
return DisjunctConsistencyChecker()
elif type == KnowledgePatternType.CONJUNCTS:
return ConjunctConsistencyChecker()
else:
raise TypeError("Correct type of knowledge pattern")
@staticmethod
def getProbabilityFormula(knowledgePattern, formulaPattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = FormulaManager.getQuantsVector(formulaPattern, int(math.log(size, 2)))
return LinearProgrammingProblemSolver.findOptimalFormulaValues(matrix, intervals, size, vector)
@staticmethod
def __getEvidenceCorrector(type):
if type == EvidencePatternType.DETERMINISTIC:
return DeterministicEvidenceCorrector()
elif type == EvidencePatternType.STOCHASTIC:
return StochasticEvidenceCorrector()
elif type == EvidencePatternType.INACCURATE:
return InaccurateEvidenceCorrector()
@staticmethod
def correctEvidenceData(knowledgePattern, evidencePattern):
return KnowledgePatternManager.__getEvidenceCorrector(evidencePattern.type).getCorrectData(knowledgePattern, evidencePattern)
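# Illustrative usage sketch (editorial; `kp` and `ep` are placeholders for a
# KnowledgePattern and an EvidencePattern instance constructed elsewhere):
#   consistency = KnowledgePatternManager.checkConsistency(kp)
#   corrected = KnowledgePatternManager.correctEvidenceData(kp, ep)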
class FormulaManager:
@staticmethod
def getQuantsVector(formulaPattern, size):
return vector_of_quants(formulaPattern.string, size)
@staticmethod
def getFormulaForOptimise(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2**(evidencePattern.size)
result_formula = np.zeros(size)
vector = EvidenceManager.getSubIdealProbability(evidencePattern)
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
for i in range(0, 2**evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
formula = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
formula = np.dot(formula, np.dot(I, vector)[i])
result_formula += formula
return result_formula
@staticmethod
def getConjunctstoQuantsVector(vector):
return np.dot(MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(len(vector), 2))), vector)
@staticmethod
def getFormulaForOptimiseIn(knowledgePattern, evidencePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
matrix_for_opt = FormulaManager.getSubIdealtoIdealMatrix(evidencePattern, knowledgePattern)
size_evidence = 2 ** (evidencePattern.size)
result_formula_min = np.zeros(2 **evidencePattern.size)
result_formula_max = np.zeros(2 **evidencePattern.size)
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
for i in range(0, 2**evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
formula = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
prob = LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, formula).array
result_formula_min += I[i]*prob[0]
result_formula_max += I[i]*prob[1]
result = np.vstack([result_formula_min, result_formula_max])
return result
@staticmethod
def getSubIdealtoIdealMatrix(evidencePattern, knowledgePattern):
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
Matrix = np.zeros((2 ** evidencePattern.size, knowledgePattern.size), dtype = np.double)
for i in range(0, 2 ** evidencePattern.size):
for j in range(0, 2 **evidencePattern.size):
Matrix[i][int(ideal[j])] = I[i][j]
return Matrix
class EvidenceManager:
@staticmethod
def getConjunctsVector(evidencePattern):
arr_conj = []
num_conj = 0
p_arr = evidencePattern.p_array
for i in range(len(p_arr)):
if p_arr[i] == 0: continue #?
num_conj += pow(2, p_arr[i] - 1)
arr_conj.append(num_conj)
num_conj = 0
m_arr = evidencePattern.m_array
for i in range(len(m_arr)):
num_conj += pow(2, m_arr[i] - 1)
arr_conj.append(num_conj)
return np.array(arr_conj)
@staticmethod
def getProbabilityOfDeterministicEvidence(knowledgePattern, mas):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = MatrixProducer.getTMatrix(mas, int(math.log(size, 2)))[0].tolist()
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, vector)
@staticmethod
def getProbabilityofStochasticEvidence(knowledgePattern, evidencePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = FormulaManager.getFormulaForOptimise(knowledgePattern, evidencePattern)
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, vector)
@staticmethod
def getProbabilityofInaccurateEvidence(knowledgePattern, evidencePattern):
size = evidencePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
vectors = FormulaManager.getFormulaForOptimiseIn(knowledgePattern, evidencePattern)
intervals = EvidenceManager.getSubIdealIntervalProbability(evidencePattern)
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValuesIn(matrix, intervals, size, vectors)
@staticmethod
def getSubIdealProbability(evidencePattern):
vector = np.ones(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector[i+1] = array[i][1]
return vector
@staticmethod
def getSubIdealIntervalProbability(evidencePattern):
vector_min = np.ones(2 ** evidencePattern.size)
vector_max = np.ones(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector_min[i+1] = array[i][1]
vector_max[i+1] = array[i][2]
vector = []
vector.append(vector_min)
vector.append(vector_max)
return vector
@staticmethod
def getSubIdeal(evidencePattern):
vector = np.zeros(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector[i+1] = array[i][0]
return vector
class EvidencePatternType(Enum):
DETERMINISTIC = 'deterministic',
STOCHASTIC = 'stochastic',
INACCURATE = 'inaccurate'
class KnowledgePatternType(Enum):
QUANTS = 'quants',
DISJUNCTS = 'disjuncts',
CONJUNCTS = 'conjuncts'
class ConsistencyChecker:
@staticmethod
def isConsistent(knowledgePattern):
raise NotImplementedError("It's a method of abstract class, use appropriate implementation")
class EvidenceCorrector:
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
raise NotImplementedError("It's a method of abstract class, use appropriate implementation")
class DeterministicEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
# TODO: sort out the handling of 1 and 0
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
return LinearProgrammingProblemSolver.findOptimalEvidenceValues(matrix, intervals, size, MatrixProducer.getEvidencevector(evidencePattern.arr, int(math.log(size, 2))), intervals, MatrixProducer.getTMatrix(evidencePattern.arr, int(math.log(size, 2))))
class StochasticEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2 ** (evidencePattern.size)
result = [[0, 0] for i in range(knowledgePattern.size)]
vector = EvidenceManager.getSubIdealProbability(evidencePattern) #p_ca
I = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(knowledgePattern.size, 2)))
I_1 = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
vector_quants = np.dot(I_1, vector)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
intervals = np.array(knowledgePattern.array, dtype=np.double)
for i in range(0, 2 ** evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
divider = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
numerator = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))
ideal_ = LinearProgrammingProblemSolver.findOptimalStochasticEvidenceValues(I, intervals, size, numerator, divider)
if len(ideal_) == 0:
return EvidenceCorrectorResult(False, [])
for j in range(size):
result[j][0] += round(vector_quants[i] * ideal_[j][0], 3)
result[j][1] += round(vector_quants[i] * ideal_[j][1], 3)
if result[0][0] == 0: return EvidenceCorrectorResult(False, [])
return EvidenceCorrectorResult(True, result)
class InaccurateEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2 ** (evidencePattern.size)
result_formula_min = np.zeros((size, size_evidence))
result_formula_max = | np.zeros((size, size_evidence)) | numpy.zeros |
"""
Author: <NAME>
The notation from "SoS-RSC: A Sum-of-Squares Polynomial Approach to Robustifying Subspace Clustering Algorithms", section 2.
x = [x1, x2, ... xd]
s_nd = binomial(n+d, d), i.e. the binomial coefficient "n+d choose d"
Mn: Moment matrix (s_nd, s_nd)
v_n(x): veronese map of x: all possible monomials of order n in d variables in lexicographical order
"""
import numpy as np
import torch
from scipy.special import comb
import matplotlib.pyplot as plt
import scipy.stats as ss
def generateMoments(hist, ord, d):
"""
"""
# d is the dimension of our data, d is 1 for a scalar
s_nd = int(comb(ord//2 + d , d))
z = np.linspace(0.0,1.0,len(hist))
a = np.zeros(ord+1)
for i in range(0,ord +1):
a[i] = np.sum((z**i)*hist)
M = np.zeros((s_nd, s_nd))
for i in range(0, s_nd):
for j in range(0, s_nd):
M[i,j] = a[i+j]
return M
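# Illustrative usage sketch (editorial; `hist` is assumed to be a normalized histogram
# over [0, 1]):
#   M = generateMoments(hist, ord=4, d=1)   # -> 3x3 moment matrix when d == 1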
def Q(M, z):
z = z.reshape(len(z),1)
M_inv = np.linalg.inv(M)
veronese = np.zeros((len(z), M.shape[0]))
for i in range(0, M.shape[0]):
veronese[:,i] = (z**i).reshape(len(z))
veronese_T = veronese.T
q_eval = np.matmul(veronese,np.matmul(M_inv, veronese_T))
# This was wrong, we just have to keep the i,i value of q_eval
q_final = q_eval.diagonal()
return q_final
if __name__ == "__main__":
print('Main')
# Code is this main section is intended to test the functions defined above
x = np.random.normal(0.5,0.1,20000)
hist, x_axis, _ = plt.hist(x, bins = 200)
x_axis = x_axis[:-1]
hist = hist/ | np.sum(hist) | numpy.sum |
from ast import Not
from dataclasses import dataclass
import numpy as np
import os
import pdb
class FrozenClass(object):
__is_frozen = False
def __setattr__(self, key, value):
if self.__is_frozen and not hasattr(self, key):
raise TypeError( "Invalid attribute specified for the %r class." % self )
object.__setattr__(self, key, value)
def _freeze(self):
self.__is_frozen = True
@dataclass()
class Settings(FrozenClass):
"""
pyNA settings class
"""
def __init__(self, case_name,
language = 'python',
pyNA_directory = '.',
engine_file_name = 'Engine_to.csv',
trajectory_file_name = 'Trajectory_to.csv',
output_directory_name = '',
output_file_name = 'Trajectory_stca.sql',
ac_name = 'stca',
ac_version = '',
save_results = False,
fan_inlet = False,
fan_discharge = False,
core = False,
jet_mixing = False,
jet_shock = False,
airframe = False,
all_sources = True,
fan_igv = False,
fan_id = False,
observer_lst = ('lateral', 'flyover'),
method_core_turb ='GE',
fan_BB_method ='geae',
fan_RS_method = 'allied_signal',
ge_flight_cleanup = 'takeoff',
levels_int_metric = 'epnl',
engine_mounting = 'underwing',
direct_propagation = True,
absorption = True,
groundeffects = True,
lateral_attenuation = False,
suppression = True,
fan_liner_suppression = True,
shielding = True,
hsr_calibration = True,
validation = True,
bandshare = False,
TCF800 = True,
combination_tones = False,
N_shock = 8,
dT = 10.0169,
sigma = 291.0 * 515.379,
a_coh = 0.01,
N_f = 24,
N_b = 5,
n_altitude_absorption = 5,
A_e = 10.334 * (0.3048 ** 2),
dt_epnl = 0.5,
n_harmonics = 10,
r_0 = 0.3048,
p_ref= 2e-5,
x_observer_array = np.array([[12325.*0.3048, 450., 4*0.3048], [21325.*0.3048, 0., 4*0.3048]]),
noise_optimization = False,
noise_constraint_lateral = 200.,
PTCB = False,
PHLD = False,
PKROT = False,
TS_to = 1.0,
TS_vnrs = 1.0,
TS_cutback = None,
z_cutback = 500.,
theta_flaps = 10.,
theta_slats = -6.,
n_order = 3,
max_iter = 200,
tol = 1e-4):
"""
Initialize pyNA settings class
:param case_name: Case name [-]
:type case_name: str
:param pyNA_directory: Directory where pyNA is installed
:type pyNA_directory: str
:param engine_file_name: File name of the take-off engine inputs [-]
:type engine_file_name: str
:param trajectory_file_name: File name of the take-off trajectory [-]
:type trajectory_file_name: str
:param output_directory_name: Name of the directory of output .sql file [-]
:type output_directory_name: str
:param output_file_name: Name of the output .sql file [-]
:type output_file_name: str
:param ac_name: Name of the aircraft [-]
:type ac_name: str
:param save_results: Flag to save results [-]
:type save_results: bool
:param fan_inlet: Enable fan inlet noise source [-]
:type fan_inlet: bool
:param fan_discharge: Enable fan discharge noise source [-]
:type fan_discharge: bool
:param core: Enable core noise source [-]
:type core: bool
:param jet_mixing: Enable jet mixing noise source [-]
:type jet_mixing: bool
:param jet_shock: Enable jet shock noise source [-]
:type jet_shock: bool
:param airframe: Enable airframe noise source [-]
:type airframe: bool
:param all_sources: Enable all noise sources [-]
:type all_sources: bool
:param trajectory_mode: mode for trajectory calculations [-] ('cutback', 'optimization')
:type trajectory_mode: str
:param observer_lst: List of observers to analyze [-] ('flyover','lateral','approach', 'contour')
:type observer_lst: lst
:param method_core_turb: Method to account for turbine transmission in the combustor ('GE', 'PW') [-]
:type method_core_turb: str
:param fan_BB_method: Method BB (original / allied_signal / geae / kresja) [-]
:type fan_BB_method: str
:param fan_RS_method: Method RS (original / allied_signal / geae / kresja) [-]
:type fan_RS_method: str
:param fan_igv: Enable fan inlet guide vanes
:type fan_igv: bool
:param fan_id: Enable fan inlet distortions
:type fan_id: bool
:param ge_flight_cleanup: GE flight cleanup switch (none/takeoff/approach) [-]
:type ge_flight_cleanup: str
:param levels_int_metric: Integrated noise metric [-]
:type levels_int_metric: str
:param engine_mounting: Engine mounting ('fuselage'/'underwing'/'none') [-]
:type engine_mounting: str
:param direct_propagation: Flag for direct propagation (including distance and characteristic impedance effects) [-]
:type direct_propagation: bool
:param absorption: Flag for atmospheric absorption [-]
:type absorption: bool
:param groundeffects: Flag for ground effects [-]
:type groundeffects: bool
:param lateral_attenuation: Flag for empirical lateral attenuation effects [-]
:type lateral_attenuation: bool
:param suppression: Flag for suppression of engine modules [-]
:type suppression: bool
:param fan_liner_suppression: Flag for fan liner suppression [-]
:type fan_liner_suppression: bool
:param shielding: Flag for shielding effects (not implemented yet) [-]
:type shielding: bool
:param hsr_calibration: Flag for HSR-era airframe calibration [-]
:type hsr_calibration: bool
:param validation: Flag for validation with NASA STCA noise model [-]
:type validation: bool
:param bandshare: Flag to plot PNLT [-]
:type bandshare: bool
:param TCF800: Flag for tone penalty addition to PNLT metric; allows any tone below 800Hz to be ignored [-]
:type TCF800: bool
:param combination_tones: Flag for combination tones in the fan noise model [-]
:type combination_tones: bool
:param N_shock: Number of shocks in supersonic jet [-]
:type N_shock: int
:param dT: dT standard atmosphere [K]
:type dT: float
:param sigma: Specific flow resistance of ground [kg/s m3]
:type sigma: float
:param a_coh: Incoherence constant [-]
:type a_coh: float
:param N_f: Number of discrete 1/3 octave frequency bands [-]
:type N_f: int
:param N_b: Number of bands (propagation) [-]
:type N_b: int
:param n_altitude_absorption: Number of integration steps in atmospheric propagation [-]
:type n_altitude_absorption: int
:param A_e: Engine reference area [m2]
:type A_e: float
:param dt_epnl: Time step of to calculate EPNL from interpolated PNLT data [s]
:type dt_epnl: float
:param n_harmonics: Number of harmonics to be considered in tones [-]
:type n_harmonics: int
:param r_0: Distance source observer in source mode [m]
:type r_0: float
:param p_ref: Reference pressure [Pa]
:type p_ref: float
:param noise_optimization: Flag to noise-optimize the trajectory [-]
:type noise_optimization: bool
:param noise_constraint_lateral: Constraint on the lateral noise [EPNdB]
:type noise_constraint_lateral: float
:param PTCB: Enable PTCB [-]
:type PTCB: bool
:param PHLD: Enable PHLD [-]
:type PHLD: bool
:param PKROT: Enable PKROT [-]
:type PKROT: bool
:param TS_to: Engine TO thrust-setting (values < 1 denote APR) [-]
:type TS_to: float
:param TS_vnrs: Engine VNRS thrust-setting [-]
:type TS_vnrs: float
:param TS_cutback: Engine cutback thrust-setting [-]
:type TS_cutback: float
:param z_cutback: z-location of cutback [m]
:type z_cutback: float
:param theta_flaps: Flap deflection angles [deg]
:type theta_flaps: float
:param theta_slats: Slat deflection angles [deg]
:type theta_slats: float
:param max_iter: Maximum number of iterations for trajectory computations [-]
:type max_iter: int
:param tol: Tolerance for trajectory computations [-]
:type tol: float
"""
self.case_name = case_name
self.language = language
self.pyNA_directory = pyNA_directory
self.engine_file_name = engine_file_name
self.trajectory_file_name = trajectory_file_name
self.output_directory_name = output_directory_name
self.output_file_name = output_file_name
self.ac_name = ac_name
self.ac_version = ac_version
self.save_results = save_results
self.fan_inlet = fan_inlet
self.fan_discharge = fan_discharge
self.core = core
self.jet_mixing = jet_mixing
self.jet_shock = jet_shock
self.airframe = airframe
self.all_sources = all_sources
self.observer_lst = observer_lst
self.x_observer_array = x_observer_array
self.method_core_turb = method_core_turb
self.fan_BB_method = fan_BB_method
self.fan_RS_method = fan_RS_method
self.fan_igv = fan_igv
self.fan_id = fan_id
self.ge_flight_cleanup = ge_flight_cleanup
self.levels_int_metric = levels_int_metric
self.engine_mounting = engine_mounting
self.direct_propagation = direct_propagation
self.absorption = absorption
self.groundeffects = groundeffects
self.lateral_attenuation = lateral_attenuation
self.suppression = suppression
self.fan_liner_suppression = fan_liner_suppression
self.shielding = shielding
self.hsr_calibration = hsr_calibration
self.validation = validation
self.bandshare = bandshare
self.TCF800 = TCF800
self.combination_tones = combination_tones
self.N_shock = N_shock
self.dT = dT
self.sigma = sigma
self.a_coh = a_coh
self.N_f = N_f
self.N_b = N_b
self.n_altitude_absorption = n_altitude_absorption
self.A_e = A_e
self.dt_epnl = dt_epnl
self.n_harmonics = n_harmonics
self.r_0 = r_0
self.p_ref = p_ref
self.noise_optimization = noise_optimization
self.noise_constraint_lateral = noise_constraint_lateral
self.PTCB = PTCB
self.PHLD = PHLD
self.PKROT = PKROT
self.TS_to = TS_to
self.TS_vnrs = TS_vnrs
self.TS_cutback = TS_cutback
self.z_cutback = z_cutback
self.theta_flaps = theta_flaps
self.theta_slats = theta_slats
self.n_order = n_order
self.max_iter = max_iter
self.tol = tol
# Freeze self.settings
self._freeze()
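# Illustrative instantiation sketch (editorial; only the required case_name argument
# is shown, all other arguments fall back to the defaults defined above):
#   settings = Settings(case_name='nasa_stca_standard')
#   settings.check()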
def check(self) -> None:
"""
Check the pyNA settings before a run.
:return: None
"""
# pyNA directory
if type(self.pyNA_directory) != str:
raise TypeError(self.pyNA_directory, "is not a valid directory location. Specify the name (string).")
# Folder and file names
if type(self.case_name) != str:
raise TypeError(self.case_name, "does not have the correct type for the case name. type(settings.case_name) must be str.")
if type(self.engine_file_name) != str:
raise TypeError(self.engine_file_name, "does not have the correct type for the engine file name. type(settings.engine_file_name) must be str.")
if type(self.trajectory_file_name) != str:
raise TypeError(self.trajectory_file_name, "does not have the correct type for the trajectory file name. type(settings.trajectory_file_name) must be str.")
if type(self.output_directory_name) != str:
raise TypeError(self.output_directory_name, "does not have the correct type for the output directory name. type(settings.output_directory_name) must be str.")
if type(self.output_file_name) != str:
raise TypeError(self.output_file_name, "does not have the correct type for the output file name. type(settings.output_file_name) must be str.")
if self.ac_name not in ['stca', 'a10']:
raise TypeError(self.ac_name, "is not a valid aircraft name. Specify: 'stca', 'a10'.")
if type(self.ac_version) != str:
raise TypeError(self.ac_version, "does not have the correct type for the aircraft version. type (self.ac_version) must be str.")
# Flags
if type(self.save_results) != bool:
raise TypeError(self.save_results, "does not have the correct type. type(settings.save_results) must be bool.")
if type(self.fan_inlet) != bool:
raise TypeError(self.fan_inlet, "does not have the correct type. type(settings.fan_inlet) must be bool.")
if type(self.fan_discharge) != bool:
raise TypeError(self.fan_discharge, "does not have the correct type. type(settings.fan_discharge) must be bool.")
if type(self.core) != bool:
raise TypeError(self.core, "does not have the correct type. type(settings.core) must be bool.")
if type(self.jet_mixing) != bool:
raise TypeError(self.jet_mixing, "does not have the correct type. type(settings.jet_mixing) must be bool.")
if type(self.jet_shock) != bool:
raise TypeError(self.jet_shock, "does not have the correct type. type(settings.jet_shock) must be bool.")
if type(self.airframe) != bool:
raise TypeError(self.airframe, "does not have the correct type. type(settings.airframe) must be bool.")
if type(self.all_sources) != bool:
raise TypeError(self.all_sources, "does not have the correct type. type(settings.all_sources) must be bool.")
if type(self.fan_igv) != bool:
raise TypeError(self.fan_igv, "does not have the correct type. type(settings.fan_igv) must be bool.")
if type(self.fan_id) != bool:
raise TypeError(self.fan_id, "does not have the correct type. type(settings.fan_id) must be bool.")
if type(self.direct_propagation) != bool:
raise TypeError(self.direct_propagation, "does not have the correct type. type(settings.direct_propagation) must be bool.")
if type(self.absorption) != bool:
raise TypeError(self.absorption, "does not have the correct type. type(settings.absorption) must be bool.")
if type(self.groundeffects) != bool:
raise TypeError(self.groundeffects, "does not have the correct type. type(settings.groundeffects) must be bool.")
if type(self.lateral_attenuation) != bool:
raise TypeError(self.lateral_attenuation, "does not have the correct type. type(settings.lateral_attenuation) must be bool.")
if type(self.suppression) != bool:
raise TypeError(self.suppression, "does not have the correct type. type(settings.suppression) must be bool.")
if type(self.fan_liner_suppression) != bool:
raise TypeError(self.fan_liner_suppression, "does not have the correct type. type(settings.fan_liner_suppression) must be bool.")
if type(self.shielding) != bool:
raise TypeError(self.shielding, "does not have the correct type. type(settings.shielding) must be bool.")
if type(self.hsr_calibration) != bool:
raise TypeError(self.hsr_calibration, "does not have the correct type. type(settings.hsr_calibration) must be bool.")
if type(self.validation) != bool:
raise TypeError(self.validation, "does not have the correct type. type(settings.validation) must be bool.")
if type(self.bandshare) != bool:
raise TypeError(self.bandshare, "does not have the correct type. type(settings.bandshare) must be bool.")
if type(self.TCF800) != bool:
raise TypeError(self.TCF800, "does not have the correct type. type(settings.TCF800) must be bool.")
if type(self.combination_tones) != bool:
raise TypeError(self.combination_tones, "does not have the correct type. type(settings.combination_tones) must be bool.")
if type(self.noise_optimization) != bool:
raise TypeError(self.noise_optimization, "does not have the correct type. type(settings.noise_optimization) must be bool.")
if type(self.noise_constraint_lateral) not in [float, np.float32, np.float64]:
raise TypeError(self.noise_constraint_lateral, "does not have the correct type. type(settings.noise_constraint_lateral) must be [float, np.float32, np.float64]")
if type(self.PTCB) != bool:
raise TypeError(self.PTCB, "does not have the correct type. type(settings.PTCB) must be bool.")
if type(self.PHLD) != bool:
raise TypeError(self.PHLD, "does not have the correct type. type(settings.PHLD) must be bool.")
if type(self.PKROT) != bool:
raise TypeError(self.PKROT, "does not have the correct type. type(settings.PKROT) must be bool.")
# Methods
if self.method_core_turb not in ['GE', 'PW']:
raise ValueError(self.method_core_turb, "is not a valid option for the core turbine attenuation method. Specify: 'GE'/'PW'.")
if self.fan_BB_method not in ['original', 'allied_signal', 'geae', 'kresja']:
raise ValueError(self.fan_BB_method, "is not a valid option for the fan broadband method. Specify: 'original'/'allied_signal'/'geae'/'kresja'.")
if self.fan_RS_method not in ['original', 'allied_signal', 'geae', 'kresja']:
raise ValueError(self.fan_RS_method, "is not a valid option for the fan rotor-stator interaction method. Specify: 'original'/'allied_signal'/'geae'/'kresja'.")
if self.ge_flight_cleanup not in ['none', 'takeoff', 'approach']:
raise ValueError(self.ge_flight_cleanup, "is not a valid option for the GE flight clean-up effects method. Specify: 'none'/'takeoff'/'approach'.")
if self.levels_int_metric not in ['epnl', 'ipnlt', 'ioaspl', 'sel']:
raise ValueError(self.levels_int_metric, "is not a valid option for the integrated noise levels metric. Specify: 'epnl'/'ipnlt'/'ioaspl'/'sel'.")
if self.engine_mounting not in ['fuselage', 'underwing', 'none']:
raise ValueError(self.engine_mounting, "is not a valid option for the engine mounting description. Specify: 'fuselage', 'underwing', 'none'.")
# Values
if type(self.N_shock) not in [int, np.int32, np.int64]:
raise TypeError(self.N_shock, "does not have the correct type. type(settings.N_shock) must be [int, np.int32, np.int64]")
if type(self.dT) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.dT, "does not have the correct type. type(settings.dT) must be [float, np.float32, np.float64]")
if type(self.sigma) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.sigma, "does not have the correct type. type(settings.sigma) must be [float, np.float32, np.float64]")
if type(self.a_coh) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.a_coh, "does not have the correct type. type(settings.a_coh) must be [float, np.float32, np.float64]")
if type(self.N_f) not in [int, np.int32, np.int64]:
raise TypeError(self.N_f, "does not have the correct type. type(settings.N_f) must be [int, np.int32, np.int64]")
if type(self.N_b) not in [int, np.int32, np.int64]:
raise TypeError(self.N_b, "does not have the correct type. type(settings.N_b) must be [int, np.int32, np.int64]")
if np.remainder(self.N_b, 2) != 1:
raise ValueError("The number of 1/3rd octave frequency sub-bands needs to be odd.")
if type(self.n_altitude_absorption) not in [int, np.int32, np.int64]:
raise TypeError(self.n_altitude_absorption, "does not have the correct type. type(settings.n_altitude_absorption) must be [int, np.int32, np.int64]")
if type(self.n_harmonics) not in [int, np.int32, np.int64]:
raise TypeError(self.n_harmonics, "does not have the correct type. type(settings.n_harmonics) must be [int, np.int32, np.int64]")
if type(self.A_e) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.A_e, "does not have the correct type. type(settings.A_e) must be [float, np.float32, np.float64]")
if type(self.dt_epnl) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.dt_epnl, "does not have the correct type. type(settings.dt_epnl) must be [float, np.float32, np.float64]")
if type(self.r_0) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.r_0, "does not have the correct type. type(settings.r_0) must be [float, np.float32, np.float64]")
if type(self.p_ref) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.p_ref, "does not have the correct type. type(settings.p_ref) must be [float, np.float32, np.float64]")
if type(self.x_observer_array) != np.ndarray:
raise TypeError(self.x_observer_array, "does not have the correct type. type(settings.x_observer_array) must be np.ndarray")
if self.observer_lst in [('lateral', ), ('flyover', )] and np.shape(self.x_observer_array) != (1,3):
raise ValueError("Shape of the x_observer_array must be (1, 3); instead shape is ", np.shape(self.x_observer_array))
elif self.observer_lst in [('lateral', 'flyover', ), ('flyover', 'lateral')] and np.shape(self.x_observer_array) != (2,3):
raise ValueError("Shape of the x_observer_array must be (2, 3); instead shape is ", np.shape(self.x_observer_array))
# Trajectory options
if type(self.TS_to) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.TS_to, "does not have the correct type. type(settings.TS_to) must be in [float, np.float32, np.float64]")
if type(self.TS_vnrs) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.TS_vnrs, "does not have the correct type. type(settings.TS_vnrs) must be in [float, np.float32, np.float64]")
NoneType = type(None)
if type(self.TS_cutback) not in [NoneType, float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.TS_cutback, "does not have the correct type. type(settings.TS_cutback) must be in [float, np.float32, np.float64]")
if type(self.z_cutback) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
raise TypeError(self.z_cutback, "does not have the correct type. type(settings.z_cutback) must be in [float, np.float32, np.float64]")
        if type(self.theta_flaps) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
            raise TypeError(self.theta_flaps, "does not have the correct type. type(settings.theta_flaps) must be in [float, np.float32, np.float64]")
        if type(self.theta_slats) not in [float, np.float32, np.float64, int, np.int32, np.int64]:
            raise TypeError(self.theta_slats, "does not have the correct type. type(settings.theta_slats) must be in [float, np.float32, np.float64]")
if type(self.n_order) not in [int, np.int32, np.int64]:
raise TypeError(self.n_order, "does not have the correct type. type(settings.n_order) must be in [int, np.int32, np.int64]")
if type(self.max_iter) not in [int, np.int32, np.int64]:
raise TypeError(self.max_iter, "does not have the correct type. type(settings.max_iter) must be in [int, np.int32, np.int64]")
if type(self.tol) not in [float, np.float32, np.float64]:
raise TypeError(self.tol, "does not have the correct type. type(settings.tol) must be in [float, np.float32, np.float64]")
# Observer list
for observer in self.observer_lst:
if observer not in ['lateral','flyover', 'approach', 'contours', 'optimization']:
raise ValueError(observer, "is not a valid option for the observer list. Specify any from 'lateral'/'flyover'/'approach'/'contours'/'optimization'")
# Language to use to solve components (julia/python)
if self.language not in ['python', 'julia']:
raise ValueError("Invalid environment variable pyna_language. Specify 'python'/'julia'.")
# Set all noise components equal to True if settings.all_sources == True
if self.all_sources:
self.fan_inlet = True
self.fan_discharge = True
self.core = True
self.jet_mixing = True
self.jet_shock = True
self.airframe = True
# Set lateral and flyover observer locations for nasa_stca_standard trajectory
if self.case_name in ['nasa_stca_standard', 'stca_enginedesign_standard']:
if self.observer_lst == 'lateral':
self.x_observer_array = | np.array([[3756.66, 450., 1.2192]]) | numpy.array |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
import warnings
from math import ceil
from math import cos
from math import sin
from math import tan
from math import pi
from warnings import warn
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
"""
Created on March 25, 2013
@author: geoffroy
"""
class HighSymmKpath:
"""
This class looks for path along high symmetry lines in
the Brillouin Zone.
It is based on <NAME>., & <NAME>. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
It should be used with primitive structures that
comply with the definition from the paper.
The symmetry is determined by spglib through the
SpacegroupAnalyzer class. The analyzer can be used to
produce the correct primitive structure (method
get_primitive_standard_structure(international_monoclinic=False)).
A warning will signal possible compatibility problems
with the given structure.
Args:
structure (Structure): Structure object
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
atol (float): Absolute tolerance used to compare the input
structure with the one expected as primitive standard.
A warning will be issued if the lattices don't match.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5, atol=1e-8):
self._structure = structure
self._sym = SpacegroupAnalyzer(structure, symprec=symprec,
angle_tolerance=angle_tolerance)
self._prim = self._sym \
.get_primitive_standard_structure(international_monoclinic=False)
self._conv = self._sym.get_conventional_standard_structure(international_monoclinic=False)
self._prim_rec = self._prim.lattice.reciprocal_lattice
self._kpath = None
# Note: this warning will be issued for space groups 38-41, since the primitive cell must be
# reformatted to match Setyawan/Curtarolo convention in order to work with the current k-path
# generation scheme.
if not np.allclose(self._structure.lattice.matrix, self._prim.lattice.matrix, atol=atol):
warnings.warn("The input structure does not match the expected standard primitive! "
"The path can be incorrect. Use at your own risk.")
lattice_type = self._sym.get_lattice_type()
spg_symbol = self._sym.get_space_group_symbol()
if lattice_type == "cubic":
if "P" in spg_symbol:
self._kpath = self.cubic()
elif "F" in spg_symbol:
self._kpath = self.fcc()
elif "I" in spg_symbol:
self._kpath = self.bcc()
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "tetragonal":
if "P" in spg_symbol:
self._kpath = self.tet()
elif "I" in spg_symbol:
a = self._conv.lattice.abc[0]
c = self._conv.lattice.abc[2]
if c < a:
self._kpath = self.bctet1(c, a)
else:
self._kpath = self.bctet2(c, a)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "orthorhombic":
a = self._conv.lattice.abc[0]
b = self._conv.lattice.abc[1]
c = self._conv.lattice.abc[2]
if "P" in spg_symbol:
self._kpath = self.orc()
elif "F" in spg_symbol:
if 1 / a ** 2 > 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf1(a, b, c)
elif 1 / a ** 2 < 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf2(a, b, c)
else:
self._kpath = self.orcf3(a, b, c)
elif "I" in spg_symbol:
self._kpath = self.orci(a, b, c)
elif "C" in spg_symbol or "A" in spg_symbol:
self._kpath = self.orcc(a, b, c)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "hexagonal":
self._kpath = self.hex()
elif lattice_type == "rhombohedral":
alpha = self._prim.lattice.lengths_and_angles[1][0]
if alpha < 90:
self._kpath = self.rhl1(alpha * pi / 180)
else:
self._kpath = self.rhl2(alpha * pi / 180)
elif lattice_type == "monoclinic":
a, b, c = self._conv.lattice.abc
alpha = self._conv.lattice.lengths_and_angles[1][0]
# beta = self._conv.lattice.lengths_and_angles[1][1]
if "P" in spg_symbol:
self._kpath = self.mcl(b, c, alpha * pi / 180)
elif "C" in spg_symbol:
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kgamma > 90:
self._kpath = self.mclc1(a, b, c, alpha * pi / 180)
if kgamma == 90:
self._kpath = self.mclc2(a, b, c, alpha * pi / 180)
if kgamma < 90:
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha * pi / 180) ** 2 / a ** 2 < 1:
self._kpath = self.mclc3(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha * pi / 180) ** 2 / a ** 2 == 1:
self._kpath = self.mclc4(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha * pi / 180) ** 2 / a ** 2 > 1:
self._kpath = self.mclc5(a, b, c, alpha * pi / 180)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "triclinic":
kalpha = self._prim_rec.lengths_and_angles[1][0]
kbeta = self._prim_rec.lengths_and_angles[1][1]
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kalpha > 90 and kbeta > 90 and kgamma > 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma < 90:
self._kpath = self.trib()
if kalpha > 90 and kbeta > 90 and kgamma == 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma == 90:
self._kpath = self.trib()
else:
warn("Unknown lattice type %s" % lattice_type)
@property
def structure(self):
"""
Returns:
The standardized primitive structure
"""
return self._prim
@property
def conventional(self):
"""
Returns:
The conventional cell structure
"""
return self._conv
@property
def prim(self):
"""
Returns:
The primitive cell structure
"""
return self._prim
@property
def prim_rec(self):
"""
Returns:
The primitive reciprocal cell structure
"""
return self._prim_rec
@property
def kpath(self):
"""
Returns:
The symmetry line path in reciprocal space
"""
return self._kpath
def get_kpoints(self, line_density=20, coords_are_cartesian=True):
"""
Returns:
the kpoints along the paths in cartesian coordinates
together with the labels for symmetry points -Wei
"""
list_k_points = []
sym_point_labels = []
for b in self.kpath['path']:
for i in range(1, len(b)):
start = np.array(self.kpath['kpoints'][b[i - 1]])
end = np.array(self.kpath['kpoints'][b[i]])
distance = np.linalg.norm(
self._prim_rec.get_cartesian_coords(start) -
self._prim_rec.get_cartesian_coords(end))
nb = int(ceil(distance * line_density))
sym_point_labels.extend([b[i - 1]] + [''] * (nb - 1) + [b[i]])
list_k_points.extend(
[self._prim_rec.get_cartesian_coords(start)
+ float(i) / float(nb) *
(self._prim_rec.get_cartesian_coords(end)
- self._prim_rec.get_cartesian_coords(start))
for i in range(0, nb + 1)])
if coords_are_cartesian:
return list_k_points, sym_point_labels
else:
frac_k_points = [self._prim_rec.get_fractional_coords(k)
for k in list_k_points]
return frac_k_points, sym_point_labels
def cubic(self):
self.name = "CUB"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'X': np.array([0.0, 0.5, 0.0]),
'R': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.5, 0.0])}
path = [["\\Gamma", "X", "M", "\\Gamma", "R", "X"], ["M", "R"]]
return {'kpoints': kpoints, 'path': path}
def fcc(self):
self.name = "FCC"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'K': np.array([3.0 / 8.0, 3.0 / 8.0, 3.0 / 4.0]),
'L': np.array([0.5, 0.5, 0.5]),
'U': np.array([5.0 / 8.0, 1.0 / 4.0, 5.0 / 8.0]),
'W': np.array([0.5, 1.0 / 4.0, 3.0 / 4.0]),
'X': np.array([0.5, 0.0, 0.5])}
path = [["\\Gamma", "X", "W", "K",
"\\Gamma", "L", "U", "W", "L", "K"], ["U", "X"]]
return {'kpoints': kpoints, 'path': path}
def bcc(self):
self.name = "BCC"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'H': np.array([0.5, -0.5, 0.5]),
'P': np.array([0.25, 0.25, 0.25]),
'N': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "H", "N", "\\Gamma", "P", "H"], ["P", "N"]]
return {'kpoints': kpoints, 'path': path}
def tet(self):
self.name = "TET"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.5, 0.0]),
'R': np.array([0.0, 0.5, 0.5]),
'X': np.array([0.0, 0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "X", "M", "\\Gamma", "Z", "R", "A", "Z"], ["X", "R"],
["M", "A"]]
return {'kpoints': kpoints, 'path': path}
def bctet1(self, c, a):
self.name = "BCT1"
eta = (1 + c ** 2 / a ** 2) / 4.0
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'M': np.array([-0.5, 0.5, 0.5]),
'N': np.array([0.0, 0.5, 0.0]),
'P': np.array([0.25, 0.25, 0.25]),
'X': np.array([0.0, 0.0, 0.5]),
'Z': np.array([eta, eta, -eta]),
'Z_1': np.array([-eta, 1 - eta, eta])}
path = [["\\Gamma", "X", "M", "\\Gamma", "Z", "P", "N", "Z_1", "M"],
["X", "P"]]
return {'kpoints': kpoints, 'path': path}
def bctet2(self, c, a):
self.name = "BCT2"
eta = (1 + a ** 2 / c ** 2) / 4.0
zeta = a ** 2 / (2 * c ** 2)
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'N': np.array([0.0, 0.5, 0.0]),
'P': np.array([0.25, 0.25, 0.25]),
'\\Sigma': np.array([-eta, eta, eta]),
'\\Sigma_1': np.array([eta, 1 - eta, -eta]),
'X': np.array([0.0, 0.0, 0.5]),
'Y': np.array([-zeta, zeta, 0.5]),
'Y_1': np.array([0.5, 0.5, -zeta]),
'Z': np.array([0.5, 0.5, -0.5])}
path = [["\\Gamma", "X", "Y", "\\Sigma", "\\Gamma", "Z",
"\\Sigma_1", "N", "P", "Y_1", "Z"], ["X", "P"]]
return {'kpoints': kpoints, 'path': path}
def orc(self):
self.name = "ORC"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'R': np.array([0.5, 0.5, 0.5]),
'S': np.array([0.5, 0.5, 0.0]),
'T': np.array([0.0, 0.5, 0.5]),
'U': np.array([0.5, 0.0, 0.5]),
'X': np.array([0.5, 0.0, 0.0]),
'Y': np.array([0.0, 0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "X", "S", "Y", "\\Gamma",
"Z", "U", "R", "T", "Z"], ["Y", "T"], ["U", "X"], ["S", "R"]]
return {'kpoints': kpoints, 'path': path}
def orcf1(self, a, b, c):
self.name = "ORCF1"
zeta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
eta = (1 + a ** 2 / b ** 2 + a ** 2 / c ** 2) / 4
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5 + zeta, zeta]),
'A_1': np.array([0.5, 0.5 - zeta, 1 - zeta]),
'L': np.array([0.5, 0.5, 0.5]),
'T': np.array([1, 0.5, 0.5]),
'X': | np.array([0.0, eta, eta]) | numpy.array |
# implementing policy gradient using keras
import numpy as np
import matplotlib.pyplot as plt
import gym
from tqdm import trange
import keras.layers as layers
from keras.models import Model
from keras.optimizers import Adam
import keras.backend as K
from keras.initializers import glorot_uniform
def get_policy_model(env, hidden_layer_neurons, lr):
num_actions = env.action_space.n
inp = layers.Input(shape=[1], name='input_x')
adv = layers.Input(shape=[1], name="advantages")
x = layers.Dense(hidden_layer_neurons,
activation='relu',
use_bias=False,
kernel_initializer=glorot_uniform(seed=42),
name="dense_1")(inp)
out = layers.Dense(num_actions,
activation='softmax',
kernel_initializer=glorot_uniform(seed=42),
use_bias=False,
name="out")(x)
def custom_loss(y_true, y_pred):
log_lik = K.log(y_true * (y_true - y_pred) + (1 - y_true) * (y_true + y_pred))
return K.mean(log_lik * adv, keepdims=True)
model_train = Model(inputs=[inp, adv], outputs=out)
model_train.compile(loss=custom_loss, optimizer=Adam(lr))
model_predict = Model(inputs=[inp], outputs=out)
return model_train, model_predict
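# Usage sketch (assumes a Gym environment such as CartPole-v1; hyperparameters are illustrative):
#   env = gym.make('CartPole-v1')
#   model_train, model_predict = get_policy_model(env, hidden_layer_neurons=9, lr=0.01)
# model_train is fit on [states, advantages] with the custom loss; model_predict maps a state
# to action probabilities for sampling.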
def discount_rewards(r, gamma=0.99):
prior = 0
out = []
for val in r:
new_val = val + prior * gamma
out.append(new_val)
prior = new_val
return np.array(out[::-1])
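# Worked example of what this implementation computes:
#   discount_rewards([1.0, 1.0, 1.0], gamma=0.99) -> array([2.9701, 1.99, 1.0])
# (the running discounted sum is accumulated in the order given, then the list is reversed).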
def score_model(model, num_tests, render=False):
scores = []
for _ in range(num_tests):
observation = env.reset()
reward_sum = 0
while True:
if render:
env.render()
state = np.reshape(observation, [1, dimen])
predict = model.predict([state])[0]
action = np.argmax(predict)
observation, reward, done, _ = env.step(action)
reward_sum += reward
if done:
break
scores.append(reward_sum)
env.close()
return np.mean(scores)
def policy_gradient_nn(env,num_games=100):
model_train, model_predict = get_policy_model(env, hidden_layer_neurons, lr)
reward = 0
reward_sum = 0
num_actions = env.action_space.n
# Placeholders for our observations, outputs and rewards
states = np.empty(0).reshape(0,dimen)
actions = | np.empty(0) | numpy.empty |
'''
desisim.quickcat
================
Code for quickly generating an output zcatalog given fiber assignment tiles,
a truth catalog, and optionally a previous zcatalog.
'''
from __future__ import absolute_import, division, print_function
import os
import yaml
from collections import Counter
from pkg_resources import resource_filename
from time import asctime
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column, vstack
import sys
import scipy.special as sp
import desisim
from desisim.targets import get_simtype
import astropy.constants
c = astropy.constants.c.to('km/s').value
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask
from desiutil.log import get_logger
log = get_logger()
#- redshift errors, zwarn, cata fail rate fractions from
#- /project/projectdirs/desi/datachallenge/redwood/spectro/redux/redwood/
#- sigmav = c sigmaz / (1+z)
_sigma_v = {
# 'ELG': 38.03,
# 'LRG': 67.38,
'BGS': 37.70,
# 'QSO': 182.16,
'STAR': 51.51,
'WD':54.35,
'SKY': 9999, #- meaningless
'UNKNOWN': 9999, #- meaningless
}
_zwarn_fraction = {
# 'ELG': 0.087,
# 'LRG': 0.007,
# 'QSO': 0.020,
'BGS': 0.024,
'STAR': 0.345,
'WD':0.094,
'SKY': 1.0,
'UNKNOWN': 1.0,
}
_cata_fail_fraction = {
# 'ELG': 0.020,
# 'LRG': 0.002,
# 'QSO': 0.012,
'BGS': 0.003,
'STAR': 0.050,
'WD':0.0,
'SKY': 0.,
'UNKNOWN': 0.,
}
def get_zeff_obs(simtype, obsconditions):
'''
'''
if(simtype=='LRG'):
p_v = [1.0, 0.15, -0.5]
p_w = [1.0, 0.4, 0.0]
p_x = [1.0, 0.06, 0.05]
p_y = [1.0, 0.0, 0.08]
p_z = [1.0, 0.0, 0.0]
sigma_r = 0.02
elif(simtype=='QSO'):
p_v = [1.0, -0.2, 0.3]
p_w = [1.0, -0.5, 0.6]
p_x = [1.0, -0.1, -0.075]
p_y = [1.0, -0.08, -0.04]
p_z = [1.0, 0.0, 0.0]
sigma_r = 0.05
elif(simtype=='ELG'):
p_v = [1.0, -0.1, -0.2]
p_w = [1.0, 0.25, -0.75]
p_x = [1.0, 0.0, 0.05]
p_y = [1.0, 0.2, 0.1]
p_z = [1.0, -10.0, 300.0]
sigma_r = 0.075
else:
log.warning('No model for how observing conditions impact {} redshift efficiency'.format(simtype))
return np.ones(len(obsconditions))
ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
# airmass
v = obsconditions['AIRMASS'] - np.mean(obsconditions['AIRMASS'])
pv = p_v[0] + p_v[1] * v + p_v[2] * (v**2. - np.mean(v**2))
# ebmv
if 'EBMV' in obsconditions :
w = obsconditions['EBMV'] - np.mean(obsconditions['EBMV'])
pw = p_w[0] + p_w[1] * w + p_w[2] * (w**2 - np.mean(w**2))
else :
pw = np.ones(ncond)
# seeing
x = obsconditions['SEEING'] - np.mean(obsconditions['SEEING'])
px = p_x[0] + p_x[1]*x + p_x[2] * (x**2 - np.mean(x**2))
# transparency
if 'LINTRANS' in obsconditions :
y = obsconditions['LINTRANS'] - np.mean(obsconditions['LINTRANS'])
py = p_y[0] + p_y[1]*y + p_y[2] * (y**2 - np.mean(y**2))
else :
py = np.ones(ncond)
# moon illumination fraction
z = obsconditions['MOONFRAC'] - np.mean(obsconditions['MOONFRAC'])
pz = p_z[0] + p_z[1]*z + p_z[2] * (z**2 - np.mean(z**2))
#- if moon is down phase doesn't matter
pz = np.ones(ncond)
pz[obsconditions['MOONALT'] < 0] = 1.0
pr = 1.0 + np.random.normal(size=ncond, scale=sigma_r)
#- this correction factor can be greater than 1, but not less than 0
pobs = (pv * pw * px * py * pz * pr).clip(min=0.0)
return pobs
def get_redshift_efficiency(simtype, targets, truth, targets_in_tile, obsconditions, params, ignore_obscondition=False):
"""
    Simple model to get the redshift efficiency from the observational conditions or observed magnitudes + redshift
Args:
simtype: ELG, LRG, QSO, MWS, BGS
targets: target catalog table; currently used only for TARGETID
truth: truth table with OIIFLUX, TRUEZ
targets_in_tile: dictionary. Keys correspond to tileids, its values are the
arrays of targetids observed in that tile.
obsconditions: table observing conditions with columns
'TILEID': array of tile IDs
'AIRMASS': array of airmass values on a tile
'EBMV': array of E(B-V) values on a tile
'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
'MOONFRAC': array of moonfraction values on a tile.
'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
parameter_filename: yaml file with quickcat parameters
ignore_obscondition: if True, no variation of efficiency with obs. conditions (adjustment of exposure time should correct for mean change of S/N)
Returns:
tuple of arrays (observed, p) both with same length as targets
observed: boolean array of whether the target was observed in these tiles
p: probability to get this redshift right
"""
targetid = targets['TARGETID']
n = len(targetid)
try:
if 'DECAM_FLUX' in targets.dtype.names :
true_gflux = targets['DECAM_FLUX'][:, 1]
true_rflux = targets['DECAM_FLUX'][:, 2]
else:
true_gflux = targets['FLUX_G']
true_rflux = targets['FLUX_R']
except:
raise Exception('Missing photometry needed to estimate redshift efficiency!')
a_small_flux=1e-40
true_gflux[true_gflux<a_small_flux]=a_small_flux
true_rflux[true_rflux<a_small_flux]=a_small_flux
if (obsconditions is None) or ('OIIFLUX' not in truth.dtype.names):
raise Exception('Missing obsconditions and flux information to estimate redshift efficiency')
if (simtype == 'ELG'):
# Read the model OII flux threshold (FDR fig 7.12 modified to fit redmonster efficiency on OAK)
# filename = resource_filename('desisim', 'data/quickcat_elg_oii_flux_threshold.txt')
# Read the model OII flux threshold (FDR fig 7.12)
filename = resource_filename('desisim', 'data/elg_oii_flux_threshold_fdr.txt')
fdr_z, modified_fdr_oii_flux_threshold = np.loadtxt(filename, unpack=True)
# Compute OII flux thresholds for truez
oii_flux_limit = np.interp(truth['TRUEZ'],fdr_z,modified_fdr_oii_flux_threshold)
oii_flux_limit[oii_flux_limit<1e-20]=1e-20
# efficiency is modeled as a function of flux_OII/f_OII_threshold(z) and an arbitrary sigma_fudge
snr_in_lines = params["ELG"]["EFFICIENCY"]["SNR_LINES_SCALE"]*7*truth['OIIFLUX']/oii_flux_limit
snr_in_continuum = params["ELG"]["EFFICIENCY"]["SNR_CONTINUUM_SCALE"]*true_rflux
snr_tot = np.sqrt(snr_in_lines**2+snr_in_continuum**2)
sigma_fudge = params["ELG"]["EFFICIENCY"]["SIGMA_FUDGE"]
nsigma = 3.
simulated_eff = eff_model(snr_tot,nsigma,sigma_fudge)
elif(simtype == 'LRG'):
r_mag = 22.5 - 2.5*np.log10(true_rflux)
sigmoid_cutoff = params["LRG"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
sigmoid_fudge = params["LRG"]["EFFICIENCY"]["SIGMOID_FUDGE"]
simulated_eff = 1./(1.+np.exp((r_mag-sigmoid_cutoff)/sigmoid_fudge))
log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format(simtype,sigmoid_cutoff,sigmoid_fudge))
elif(simtype == 'QSO'):
zsplit = params['QSO_ZSPLIT']
r_mag = 22.5 - 2.5*np.log10(true_rflux)
simulated_eff = np.ones(r_mag.shape)
# lowz tracer qsos
sigmoid_cutoff = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
sigmoid_fudge = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
ii=(truth['TRUEZ']<=zsplit)
simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))
log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LOWZ QSO",sigmoid_cutoff,sigmoid_fudge))
# highz lya qsos
sigmoid_cutoff = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
sigmoid_fudge = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
ii=(truth['TRUEZ']>zsplit)
simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))
log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LYA QSO",sigmoid_cutoff,sigmoid_fudge))
elif simtype == 'BGS':
simulated_eff = 0.98 * np.ones(n)
elif simtype == 'MWS':
simulated_eff = 0.98 * np.ones(n)
else:
default_zeff = 0.98
log.warning('using default redshift efficiency of {} for {}'.format(default_zeff, simtype))
simulated_eff = default_zeff * np.ones(n)
#- Get the corrections for observing conditions per tile, then
#- correct targets on those tiles. Parameterize in terms of failure
#- rate instead of success rate to handle bookkeeping of targets that
#- are observed on more than one tile.
#- NOTE: this still isn't quite right since multiple observations will
#- be simultaneously fit instead of just taking whichever individual one
#- succeeds.
if ignore_obscondition :
ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
zeff_obs = np.ones(ncond)
else :
zeff_obs = get_zeff_obs(simtype, obsconditions)
pfail = np.ones(n)
observed = np.zeros(n, dtype=bool)
# More efficient alternative for large numbers of tiles + large target
# list, but requires pre-computing the sort order of targetids.
# Assume targets['TARGETID'] is unique, so not checking this.
sort_targetid = np.argsort(targetid)
# Extract the targets-per-tile lists into one huge list.
concat_targets_in_tile = np.concatenate([targets_in_tile[tileid] for tileid in obsconditions['TILEID']])
ntargets_per_tile = np.array([len(targets_in_tile[tileid]) for tileid in obsconditions['TILEID']])
# Match entries in each tile list against sorted target list.
target_idx = targetid[sort_targetid].searchsorted(concat_targets_in_tile,side='left')
target_idx_r = targetid[sort_targetid].searchsorted(concat_targets_in_tile,side='right')
del(concat_targets_in_tile)
# Flag targets in tiles that do not appear in the target list (sky,
# standards).
not_matched = target_idx_r - target_idx == 0
target_idx[not_matched] = -1
del(target_idx_r,not_matched)
# Not every tile has 5000 targets, so use individual counts to
# construct offset of each tile in target_idx.
offset = np.concatenate([[0],np.cumsum(ntargets_per_tile[:-1])])
# For each tile, process targets.
for i, tileid in enumerate(obsconditions['TILEID']):
if ntargets_per_tile[i] > 0:
# Quickly get all the matched targets on this tile.
targets_this_tile = target_idx[offset[i]:offset[i]+ntargets_per_tile[i]]
targets_this_tile = targets_this_tile[targets_this_tile > 0]
# List of indices into sorted target list for each observed
# source.
ii = sort_targetid[targets_this_tile]
tmp = (simulated_eff[ii]*zeff_obs[i]).clip(0, 1)
pfail[ii] *= (1-tmp)
observed[ii] = True
simulated_eff = (1-pfail)
return observed, simulated_eff
# Efficiency model
def eff_model(x, nsigma, sigma, max_efficiency=1):
return 0.5*max_efficiency*(1.+sp.erf((x-nsigma)/(np.sqrt(2.)*sigma)))
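# Worked example: eff_model(x=3., nsigma=3., sigma=1.) == 0.5 * max_efficiency, and the efficiency
# rises towards max_efficiency as x grows several sigma above nsigma.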
def reverse_dictionary(a):
"""Inverts a dictionary mapping.
Args:
a: input dictionary.
Returns:
b: output reversed dictionary.
"""
b = {}
for i in a.items():
try:
for k in i[1]:
if k not in b.keys():
b[k] = [i[0]]
else:
b[k].append(i[0])
except:
k = i[1]
if k not in b.keys():
b[k] = [i[0]]
else:
b[k].append(i[0])
return b
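# Worked example:
#   reverse_dictionary({1: ['a', 'b'], 2: ['b']}) -> {'a': [1], 'b': [1, 2]}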
def get_observed_redshifts(targets, truth, targets_in_tile, obsconditions, parameter_filename=None, ignore_obscondition=False):
"""
Returns observed z, zerr, zwarn arrays given true object types and redshifts
Args:
targets: target catalog table; currently used only for target mask bits
truth: truth table with OIIFLUX, TRUEZ
targets_in_tile: dictionary. Keys correspond to tileids, its values are the
arrays of targetids observed in that tile.
obsconditions: table observing conditions with columns
'TILEID': array of tile IDs
'AIRMASS': array of airmass values on a tile
'EBMV': array of E(B-V) values on a tile
'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
'MOONFRAC': array of moonfraction values on a tile.
'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
parameter_filename: yaml file with quickcat parameters
ignore_obscondition: if True, no variation of efficiency with obs. conditions (adjustment of exposure time should correct for mean change of S/N)
Returns:
tuple of (zout, zerr, zwarn)
"""
if parameter_filename is None :
# Load efficiency parameters yaml file
parameter_filename = resource_filename('desisim', 'data/quickcat.yaml')
params=None
with open(parameter_filename,"r") as file :
params = yaml.safe_load(file)
simtype = get_simtype(np.char.strip(truth['TRUESPECTYPE']), targets['DESI_TARGET'], targets['BGS_TARGET'], targets['MWS_TARGET'])
#simtype = get_simtype(np.char.strip(truth['TEMPLATETYPE']), targets['DESI_TARGET'], targets['BGS_TARGET'], targets['MWS_TARGET'])
truez = truth['TRUEZ']
targetid = truth['TARGETID']
try:
if 'DECAM_FLUX' in targets.dtype.names :
true_gflux = targets['DECAM_FLUX'][:, 1]
true_rflux = targets['DECAM_FLUX'][:, 2]
else:
true_gflux = targets['FLUX_G']
true_rflux = targets['FLUX_R']
except:
raise Exception('Missing photometry needed to estimate redshift efficiency!')
a_small_flux=1e-40
true_gflux[true_gflux<a_small_flux]=a_small_flux
true_rflux[true_rflux<a_small_flux]=a_small_flux
zout = truez.copy()
zerr = np.zeros(len(truez), dtype=np.float32)
zwarn = np.zeros(len(truez), dtype=np.int32)
objtypes = list(set(simtype))
n_tiles = len(np.unique(obsconditions['TILEID']))
if(n_tiles!=len(targets_in_tile)):
raise ValueError('Number of obsconditions {} != len(targets_in_tile) {}'.format(n_tiles, len(targets_in_tile)))
for objtype in objtypes:
ii=(simtype==objtype)
###################################
# redshift errors
###################################
if objtype =='ELG' :
sigma = params["ELG"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["ELG"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
oiiflux = truth['OIIFLUX'][ii]*1e17
zerr[ii] = sigma/(1.e-9+oiiflux**powerlawindex)*(1.+truez[ii])
zout[ii] += np.random.normal(scale=zerr[ii])
log.info("ELG sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[ii])))
elif objtype == 'LRG' :
sigma = params["LRG"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["LRG"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
zerr[ii] = sigma/(1.e-9+true_rflux[ii]**powerlawindex)*(1.+truez[ii])
zout[ii] += np.random.normal(scale=zerr[ii])
log.info("LRG sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[ii])))
elif objtype == 'QSO' :
zsplit = params['QSO_ZSPLIT']
sigma = params["LOWZ_QSO"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["LOWZ_QSO"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
jj=ii&(truth['TRUEZ']<=zsplit)
zerr[jj] = sigma/(1.e-9+(true_rflux[jj])**powerlawindex)*(1.+truez[jj])
log.info("LOWZ QSO sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[jj])))
sigma = params["LYA_QSO"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["LYA_QSO"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
jj=ii&(truth['TRUEZ']>zsplit)
zerr[jj] = sigma/(1.e-9+(true_rflux[jj])**powerlawindex)*(1.+truez[jj])
log.info("LYA QSO sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[jj])))
zout[ii] += np.random.normal(scale=zerr[ii])
elif objtype in _sigma_v.keys() :
log.info("{} use constant sigmav = {} km/s".format(objtype,_sigma_v[objtype]))
ii = (simtype == objtype)
zerr[ii] = _sigma_v[objtype] * (1+truez[ii]) / c
zout[ii] += np.random.normal(scale=zerr[ii])
else :
log.info("{} no redshift error model, will use truth")
###################################
# redshift efficiencies
###################################
# Set ZWARN flags for some targets
# the redshift efficiency only sets warning, but does not impact
# the redshift value and its error.
was_observed, goodz_prob = get_redshift_efficiency(
objtype, targets[ii], truth[ii], targets_in_tile,
obsconditions=obsconditions,params=params,
ignore_obscondition=ignore_obscondition)
n=np.sum(ii)
assert len(was_observed) == n
assert len(goodz_prob) == n
r = np.random.random(len(was_observed))
zwarn[ii] = 4 * (r > goodz_prob) * was_observed
###################################
# catastrophic failures
###################################
zlim=[0.,3.5]
cata_fail_fraction = | np.zeros(n) | numpy.zeros |
"""Action selector implementations.
Action selectors are objects that when called return a desired
action. These actions may be stochastically chosen (e.g. randomly chosen
from a list of candidates) depending on the choice of `ActionSelector`
implementation, and how it is configured.
Examples include the following
* `DeterministicActionSelector`: always returns the same (specified) action
* `UniformDiscreteActionSelector`: selections an action uniformly at random
from a specified discrete action space
* `NoisyActionSelector`: uses either a "preferred" action selector (with
probability `1 - epsilon`) or a "noise" action selector (with probability
`epsilon`) to determine the action. Useful, for example, to implement an
epsilon-greedy agent.
"""
from abc import ABC, abstractmethod
from numpy.typing import ArrayLike
import numpy as np
class ActionSelector(ABC):
@abstractmethod
def __call__(self):
"""Returns selected action."""
pass
class DeterministicActionSelector(ActionSelector):
"""Deterministic action selector.
Always returns the specified action when called.
Args:
chosen_action: action to return when called.
"""
def __init__(self, chosen_action):
self.chosen_action = chosen_action
def __call__(self):
return self.chosen_action
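# Usage sketch: an agent that always picks action 0 in a two-action environment
#   always_first = DeterministicActionSelector(0)
#   action = always_first()  # -> 0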
class UniformDiscreteActionSelector(ActionSelector):
"""Uniform discrete action selector.
Picks an action from a discrete action space (zero-indexed) of
size `n_actions` uniformly at random.
Args:
n_actions: number of actions to choose from
random_state: `None`, `int`, `np.random.Generator`, etc for initialising
the random number generator.
"""
def __init__(self, n_actions: int, *, random_state=None):
self.n_actions = n_actions
self._rng = | np.random.default_rng(random_state) | numpy.random.default_rng |
#! /usr/bin/env python3
"""
creates a (d2, w1, w2) "movie" scan
"""
# --- import --------------------------------------------------------------------------------------
import os
import sys
import time
import numpy as np
from matplotlib import pyplot as plt
import WrightTools as wt
import WrightSim as ws
# --- define --------------------------------------------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
dt = 50. # pulse duration (fs)
slitwidth = 120. # mono resolution (wn)
nw = 256 # number of frequency points (w1 and w2)
nt = 256 # number of delay points (d2)
# --- workspace -----------------------------------------------------------------------------------
# create experiment
exp = ws.experiment.builtin('trive')
exp.w1.points = | np.linspace(-2.5, 2.5, nw) | numpy.linspace |
# -*- coding: utf-8 -*-
from numpy.random import random
from numpy.linalg import norm
from numpy import zeros
from numpy import reshape
from numpy import column_stack
from numpy import logical_not
from numpy import logical_and
from numpy import array
from numpy import pi as PI
from numpy import cos
from numpy import sin
TWOPI = 2.0*PI
def unit_vec(num, scale):
from numpy.random import normal
rnd = normal(size=(num, 3))
d = norm(rnd, axis=1)
rnd[:] /= reshape(d, (num, 1))
return rnd*scale
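# Usage sketch: unit_vec(100, 0.5) draws 100 random 3D directions and scales each to length 0.5.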
def in_circle(n, xx, yy, rr):
"""
get n random points in a circle.
"""
rnd = random(size=(n, 3))
t = TWOPI * rnd[:, 0]
u = rnd[:, 1:].sum(axis=1)
r = zeros(n, 'float')
mask = u > 1.
xmask = logical_not(mask)
r[mask] = 2.-u[mask]
r[xmask] = u[xmask]
xyp = | reshape(rr * r, (n, 1)) | numpy.reshape |
import numpy as np
import os
import argparse
import sys
"""
This set of functions allows reading a binary file containing SPAD measurements
using only the file name. The parameters are extracted from the matrix using
the tags. The assumption is that the parameters are constant and that all the
frames are complete.
Author: <NAME>
"""
def file_to_count(fname, datatype=np.uint16):
"""
Read a bin file and returns an array with the decoded count for each measurement
Args:
fname: name of the file containing the data
Returns:
A numpy array of unsigned int16 os size N x 25 where N is the number of measurements
"""
try:
raw = np.fromfile(fname, dtype=">u8")
except:
print("Error reading binary file")
return None
elements = raw.shape[0]
print(f"Elements: {elements}")
positions = int(elements/2)
print(f"Positions: {positions}")
raw_pos = np.reshape(raw, (positions, 2))
print(f"data table: {raw_pos.shape}")
time_per_pixel_tag = np.bitwise_and(raw_pos[:,1], 0b1)
idx = np.argmax(time_per_pixel_tag != time_per_pixel_tag[0]) # positions per time
time_per_pixel = int(idx)
print(f"time per pixel: {time_per_pixel}")
frame_tag = np.bitwise_and(np.right_shift(raw_pos[:,1], 2), 0b1)
idx = np.argmax(frame_tag != frame_tag[0]) # positions per frame
if idx == 0:
print("Unique frame")
frames = 1
else:
frames = positions/idx # TODO: check condition with larger dataset
line_tag = np.bitwise_and(np.right_shift(raw_pos[:,1], 1), 0b1)
idx = int(np.argmax(line_tag != line_tag[0])/time_per_pixel) # positions per line
print(f"Positions per lines: {idx}")
x = int(idx)
y = int(positions/x/time_per_pixel)
print(f"Dimensions: Y:{y}, X:{x}")
out = np.zeros((positions , 25), dtype = datatype)
matrix_to_count(raw_pos, out)
return out, frames, y, x, time_per_pixel
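# Usage sketch (hypothetical file name, assumes the binary layout described above):
#   counts, frames, y, x, time_per_pixel = file_to_count('measurement.bin')
# counts has shape (positions, 25): one row of 25 SPAD channel counts per measurement position.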
def file_to_FCScount(fname, datatype=np.uint16, Npoints=-1, Noffset=0):
"""
Read a bin file and returns an array with the decoded count for each measurement
Args:
fname: name of the file containing the data
Returns:
A numpy array of unsigned int16 os size N x 25 where N is the number of measurements
"""
try:
Npoints = Npoints * 2
NbytesOffset = 16 * Noffset
raw = np.fromfile(fname, dtype=">u8", count=Npoints, offset=NbytesOffset)
except:
print("Error reading binary file")
return None
elements = raw.shape[0]
print(f"Elements: {elements}")
positions = int(elements/2)
print(f"Positions: {positions}")
print("Freeing memory")
out = np.zeros((positions , 25), dtype = datatype)
print("Done.")
raw_pos = np.reshape(raw, (positions, 2))
print(f"data table: {raw_pos.shape}")
print("Converting data to counts")
matrix_to_count(raw_pos, out)
print("Done.")
return out
def matrix_to_count(values, out):
"""
Read an array of N measurements and write the count values in the out
array
Args:
values: N x 2 unsigned int array with measurements
out: N x 25 unsigned int array for storing results
Returns:
The matrix out filled with the count
"""
out[:,0] = np.bitwise_and(np.right_shift(values[:,0], 64 - 59), 0b1111) # 4 bits
out[:,1] = np.bitwise_and(np.right_shift(values[:,0], 64 - 55), 0b1111) # 4 bits
out[:,2] = np.bitwise_and(np.right_shift(values[:,0], 64 - 51), 0b1111) # 4 bits
out[:,3] = np.bitwise_and(np.right_shift(values[:,0], 64 - 47), 0b1111) # 4 bits
out[:,4] = np.bitwise_and(np.right_shift(values[:,0], 64 - 43), 0b1111) # 4 bits
out[:,5] = np.bitwise_and(np.right_shift(values[:,0], 64 - 39), 0b1111) # 4 bits
out[:,6] = np.bitwise_and(np.right_shift(values[:,1], 64 - 59), 0b11111) # 5 bits
out[:,7] = np.bitwise_and(np.right_shift(values[:,1], 64 - 54), 0b111111) # 6 bits
out[:,8] = np.bitwise_and(np.right_shift(values[:,1], 64 - 48), 0b11111) # 5 bits
out[:,9] = np.bitwise_and(np.right_shift(values[:,1], 64 - 43), 0b1111) # 4 bits
out[:,10] = np.bitwise_and(np.right_shift(values[:,1], 64 - 39), 0b1111) # 4 bits
out[:,11] = np.bitwise_and(np.right_shift(values[:,1], 64 - 35), 0b111111) # 6 bits
out[:,12] = np.bitwise_and(np.right_shift(values[:,1], 64 - 29), 0b1111111111) # 10 bits
out[:,13] = np.bitwise_and(np.right_shift(values[:,1], 64 - 19), 0b111111) # 6 bits
out[:,14] = np.bitwise_and(np.right_shift(values[:,1], 64 - 13), 0b1111) # 4 bits
out[:,15] = np.bitwise_and(np.right_shift(values[:,1], 64 - 9), 0b1111) # 4 bits
out[:,16] = np.right_shift(values[:,1], 64 - 5) # 5 bits
out[:,17] = np.bitwise_and(np.right_shift(values[:,0], 64 - 35), 0b111111) # 6 bits
out[:,18] = np.bitwise_and(np.right_shift(values[:,0], 64 - 29), 0b11111) # 5 bits
out[:,19] = np.bitwise_and( | np.right_shift(values[:,0], 64 - 24) | numpy.right_shift |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 08:58, 16/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import numpy as np
from math import gamma
from copy import deepcopy
class Problem:
ID_MIN_PROB = 0 # min problem
ID_MAX_PROB = -1 # max problem
ID_TAR = 0 # Index of target (the final fitness) in fitness
ID_OBJ = 1 # Index of objective list in fitness
DEFAULT_BATCH_IDEA = False
DEFAULT_BATCH_SIZE = 10
DEFAULT_LB = -1
DEFAULT_UB = 1
def __init__(self, problem: dict):
"""
Args:
problem (dict): Dict properties of your problem
Examples:
problem = {
"obj_func": your objective function,
"lb": list of value
"ub": list of value
"minmax": "min" or "max"
"verbose": True or False
"problem_size": int (Optional)
"batch_idea": True or False (Optional)
"batch_size": int (Optional, smaller than population size)
"obj_weight": list weights for all your objectives (Optional, default = [1, 1, ...1])
}
"""
self.minmax = "min"
self.verbose = True
self.batch_size = 10
self.batch_idea = False
self.n_objs = 1
self.obj_weight = None
self.multi_objs = False
self.obj_is_list = False
self.problem_size, self.lb, self.ub = None, None, None
self.__set_parameters__(problem)
self.__check_parameters__(problem)
self.__check_optional_parameters__(problem)
self.__check_objective_function__(problem)
def __set_parameters__(self, kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __check_parameters__(self, kwargs):
if "lb" in kwargs and "ub" in kwargs:
lb, ub = kwargs["lb"], kwargs["ub"]
if (lb is None) or (ub is None):
if "problem_size" in kwargs:
print(f"Default lb={self.DEFAULT_LB}, ub={self.DEFAULT_UB}.")
self.problem_size = self.__check_problem_size__(kwargs["problem_size"])
self.lb = self.DEFAULT_LB * np.ones(self.problem_size)
self.ub = self.DEFAULT_UB * np.ones(self.problem_size)
else:
print("If lb, ub are undefined, then you must set problem size to be an integer.")
exit(0)
else:
if isinstance(lb, list) and isinstance(ub, list):
if len(lb) == len(ub):
if len(lb) == 0:
if "problem_size" in kwargs:
print(f"Default lb={self.DEFAULT_LB}, ub={self.DEFAULT_UB}.")
self.problem_size = self.__check_problem_size__(kwargs["problem_size"])
self.lb = self.DEFAULT_LB * np.ones(self.problem_size)
self.ub = self.DEFAULT_UB * np.ones(self.problem_size)
else:
print("Wrong lower bound and upper bound parameters.")
exit(0)
elif len(lb) == 1:
if "problem_size" in kwargs:
self.problem_size = self.__check_problem_size__(kwargs["problem_size"])
self.lb = lb[0] * np.ones(self.problem_size)
self.ub = ub[0] * | np.ones(self.problem_size) | numpy.ones |
"""
The ``image_loader`` module
======================
Contains methods for loading images, cropping, exporting, generating splicing, etc.
"""
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
import os, os.path
import sys
import random
class Database_loader :
"""
Class Database_loader
======================
Defines a loading scheme for database
"""
def __init__(self, directory, size, proportion = 1.0, seed=42, only_green=True, rand_crop = True) :
# data init
self.dir = directory # directory with the train / test / validation sudirectories
self.size = size # size of the sub image that should be croped
self.nb_channels = 3 # return only the green channel of the images
self.proportion = proportion
if(only_green == True) :
self.nb_channels = 1
self.file_train = [] # list of the train images : tuple (image name / class)
self.file_test = [] # list of the test images : tuple (image name / class)
self.file_validation = [] # list of the validation images : tuple (image name / class)
self.image_class = ['Real', 'CGG'] # list of the class (label) used in the process
self.nb_class = 0
self.train_iterator = 0 # iterator over the train images
self.test_iterator = 0 # iterator over the test images
self.validation_iterator = 0 # iterator over the validation images
self.rand_crop = rand_crop
self.load_images(seed) # load the data base
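    # Usage sketch (hypothetical paths; <dir>/train, <dir>/test and <dir>/validation are each expected
    # to contain 'Real' and 'CGG' subfolders):
    #   loader = Database_loader('/path/to/db', size=64, proportion=0.5, only_green=True)
    #   batch_images, batch_labels = loader.get_next_train_batch(batch_size=32)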
def extract_channel(self, rgb_image, channel=1) :
if channel > 2 :
channel = 2
return rgb_image[:,:,channel]
def get_immediate_subdirectories(self,a_dir) :
# return the list of sub directories of a directory
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def load_images_in_dir(self, dir_name, image_class) :
# file extension accepted as image data
proportion = self.proportion
valid_image_extension = [".jpg",".gif",".png",".tga",".tif", ".JPG", ".jpeg"]
file_list = []
for c in image_class :
nb_image_per_class = 0
file_list_by_class = []
for filename in os.listdir(dir_name+'/'+c):
# check if the file is an image
extension = os.path.splitext(filename)[1]
if extension.lower() in valid_image_extension:
file_list_by_class.append(filename)
for i in range(int(len(file_list_by_class)*proportion)):
file_list.append((file_list_by_class[i],c))
nb_image_per_class += 1
print(' ',c,nb_image_per_class,'images loaded')
return file_list
def load_images(self, seed) :
# check if train / test / validation directories exists
train_dir_name = self.dir + '/train'
if not os.path.exists(train_dir_name):
print("error: train directory does not exist")
sys.exit(0)
return
validation_dir_name = self.dir + '/validation'
if not os.path.exists(validation_dir_name):
print("error: validation directory does not exist")
sys.exit(0)
return
test_dir_name = self.dir + '/test'
if not os.path.exists(test_dir_name):
print("error: test directory does not exist")
return []
sys.exit(0)
# count number of classes
# self.image_class = self.get_immediate_subdirectories(train_dir_name)
self.nb_class = len(self.image_class)
print(' number of classes :', self.nb_class, ' ', self.image_class)
# load image file name and class
print("\n train data")
self.file_train = self.load_images_in_dir(train_dir_name,self.image_class)
print("\n test data")
self.file_test = self.load_images_in_dir(test_dir_name,self.image_class)
print("\n validation data")
self.file_validation = self.load_images_in_dir(validation_dir_name,self.image_class)
# shuffle the lists
print("\n shuffle lists ...")
random.seed(seed)
random.shuffle(self.file_train)
random.shuffle(self.file_test)
random.shuffle(self.file_validation)
#print(self.file_train)
#print("\n loading done.")
def get_next_train(self, crop = True, rand_crop = True, random_flip_flop = False, random_rotate = False, verbose = False) :
# load next image (size should be big enough)
image = []
while True:
# pop file name and class
data = self.file_train[self.train_iterator]
self.train_iterator += 1
if self.train_iterator >= len(self.file_train) :
self.train_iterator = 0
# load image
file_name = self.dir + '/train/' + data[1] + '/' + data[0]
image = Image.open(file_name)
if(verbose) :
print(" ", file_name)
print( ' index :', self.train_iterator -1)
print( ' width :', image.size[0] )
print( ' height :', image.size[1] )
print( ' mode :', image.mode )
print( ' format :', image.format )
# image size test
if crop and (image.size[0] <= self.size or image.size[1] <= self.size) :
if(verbose) :
print('image too small for cropping (train) : ', data[1] + '/' + data[0])
else :
break
# crop image
if crop:
if rand_crop:
crop_width = random.randint(0, image.size[0]-self.size-1)
crop_height = random.randint(0, image.size[1]-self.size-1)
else:
crop_width = 0
crop_height = 0
box = (crop_width, crop_height, crop_width+self.size, crop_height+self.size)
# print('crop ', box)
image = image.crop(box)
# image transform
#image.save(self.dir+'/'+str(self.train_iterator -1)+'.jpg')
orientation = [ Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270]
flip = [Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM]
if (random_flip_flop == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(flip))
if (random_rotate == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(orientation))
#image.save(self.dir+'/tranpose_'+str(self.train_iterator -1)+'.jpg')
        # convert the image into an array
image = np.asarray(image)
# extract green
if( self.nb_channels == 1 and len(image.shape) > 2) :
image = self.extract_channel(image,1)
else:
image = np.asarray(image)
if( self.nb_channels == 1 and len(image.shape) > 2 ) :
image = self.extract_channel(image,1)
# convert to float image
image = image.astype(np.float32) / 255.
#image = image.reshape(1, self.size, self.size, 3)
if self.size == None:
image = image.reshape(image.shape[0], image.shape[1], self.nb_channels)
else:
image = image.reshape(self.size, self.size, self.nb_channels)
# build class label
label = np.zeros(len(self.image_class))
pos = self.image_class.index(data[1])
label[pos] = 1.0
# return image and label
return (image, label)
def get_next_train_batch(self, batch_size = 50, crop = True, random_flip_flop = False, random_rotate = False) :
batch_image = np.empty([batch_size, self.size, self.size, self.nb_channels])
batch_label = np.empty([batch_size, self.nb_class])
for i in range(0,batch_size) :
data = self.get_next_train(crop, self.rand_crop, random_flip_flop,random_rotate, verbose=False)
batch_image[i] = data[0]
batch_label[i] = data[1]
return (batch_image.astype(np.float32),batch_label)
def get_next_test(self, crop = True, rand_crop = True, random_flip_flop = False, random_rotate = False, verbose = False) :
# load next image (size should be big enough)
image = []
while True:
# pop file name and class
data = self.file_test[self.test_iterator]
self.test_iterator += 1
if self.test_iterator >= len(self.file_test) :
self.test_iterator = 0
# load image
file_name = self.dir + '/test/' + data[1] + '/' + data[0]
image = Image.open(file_name)
if(verbose) :
print(" ", file_name)
                print( ' index :', self.test_iterator -1)
print( ' width :', image.size[0] )
print( ' height :', image.size[1] )
print( ' mode :', image.mode )
print( ' format :', image.format )
# image size test
if crop and (image.size[0] <= self.size or image.size[1] <= self.size) :
if(verbose) :
print('image too small for cropping (test) : ', data[1] + '/' + data[0])
else :
break
# crop image
if crop:
if rand_crop:
crop_width = random.randint(0, image.size[0]-self.size-1)
crop_height = random.randint(0, image.size[1]-self.size-1)
else:
crop_width = 0
crop_height = 0
box = (crop_width, crop_height, crop_width+self.size, crop_height+self.size)
image = image.crop(box)
# image transform
#image.save(self.dir+'/'+str(self.train_iterator -1)+'.jpg')
orientation = [ Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270]
flip = [Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM]
if (random_flip_flop == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(flip))
if (random_rotate == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(orientation))
        # convert the image into an array
image = np.asarray(image)
# extract green
if( self.nb_channels == 1 and len(image.shape) > 2) :
image = self.extract_channel(image,1)
else:
image = np.asarray(image)
if( self.nb_channels == 1 and len(image.shape) > 2 ) :
image = self.extract_channel(image,1)
# convert to float image
image = image.astype(np.float32) / 255.
#image = image.reshape(1, self.size, self.size, 3)
if self.size == None:
image = image.reshape(image.shape[0], image.shape[1], self.nb_channels)
else:
image = image.reshape(self.size, self.size, self.nb_channels)
        # build class label
label = np.zeros(len(self.image_class))
pos = self.image_class.index(data[1])
label[pos] = 1.0
# return image and label
return (image, label)
def get_batch_test(self, batch_size = 50, crop = True, random_flip_flop = False, random_rotate = False) :
batch_image = np.empty([batch_size, self.size, self.size, self.nb_channels])
batch_label = np.empty([batch_size, self.nb_class])
for i in range(0,batch_size) :
data = self.get_next_test(crop, self.rand_crop, random_flip_flop,random_rotate)
batch_image[i] = data[0]
batch_label[i] = data[1]
return (batch_image.astype(np.float32),batch_label)
def get_next_validation(self, crop = True, rand_crop = True, random_flip_flop = False, random_rotate = False, verbose = False) :
# load next image (size should be big enough)
image = []
while True:
# pop file name and class
data = self.file_validation[self.validation_iterator]
self.validation_iterator += 1
if self.validation_iterator >= len(self.file_validation) :
self.validation_iterator = 0
# load image
file_name = self.dir + '/validation/' + data[1] + '/' + data[0]
image = Image.open(file_name)
if(verbose) :
print(" ", file_name)
                print( ' index :', self.validation_iterator -1)
print( ' width :', image.size[0] )
print( ' height :', image.size[1] )
print( ' mode :', image.mode )
print( ' format :', image.format )
# image size test
if crop and (image.size[0] <= self.size or image.size[1] <= self.size) :
if(verbose) :
print('image too small for cropping (validation) : ', data[1] + '/' + data[0])
else :
break
if crop:
# crop image
if rand_crop:
crop_width = random.randint(0, image.size[0]-self.size-1)
crop_height = random.randint(0, image.size[1]-self.size-1)
else:
crop_width = 0
crop_height = 0
box = (crop_width, crop_height, crop_width+self.size, crop_height+self.size)
image = image.crop(box)
# image transform
#image.save(self.dir+'/'+str(self.train_iterator -1)+'.jpg')
orientation = [ Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270]
flip = [Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM]
if (random_flip_flop == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(flip))
if (random_rotate == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(orientation))
        # convert the image into an array
image = np.asarray(image)
# extract green
if( self.nb_channels == 1 and len(image.shape) > 2) :
image = self.extract_channel(image,1)
else:
image = | np.asarray(image) | numpy.asarray |
__author__ = 'wangyi'
__emails__ = ['<EMAIL>',
'<EMAIL>', #staff email address
'<EMAIL>',
'<EMAIL>']
# Created on 1st Nov 2017
# Updated on 5th July 2019:
# Modified:
# costimpl.ConvNet.layers.Layer
# costimpl.common.Node
# costimpl.common.Graph
# costimpl.common.Tensor
# costimpl.common.Device
# costimpl.common.Protocol
# costimpl.common.Randomness.Distribution
# costimpl.common.Randomness.IncrDistribution
# costimpl.common.Randomness.SparseDistribution
# costimpl.common.Randomness.IncrSparseDistribution
# tests.test_conv_net
# tests.test_kdtree
# tests.test_kmean_plus_plus
# tests.test_svm
# Added:
# costimpl.ConvNet.layers.ReluActivation
# costimpl.ConvNet.layers.BatchNorm
# costimpl.ConvNet.layers.MaxPooling
# costimpl.ConvNet.layers.UpSampling
# costimpl.ConvNet.layers.Dropout
# costimpl.KMenas++.model
# costimpl.KdTree.model
# costimpl.SVM.model
from ..common.node import Node
from ..common.device import CPUDevice
from .vol import Vol
import math
import numpy as np
class ImproperType(Exception): pass
class NotImplemented(Exception): pass
class UnSupportedDevice(Exception): pass
class UnSupportedAlgorithm(Exception): pass
# @todo: TODO Device instance factory
def Device(device_type):
pass
# @todo: TODO Device plural instances factory
def Device_plura(device_list):
return []
_c = {
'device_types': 'cpu',
'device_list': ['cpu://all'], # ${device_protocol}://, see Protocol parser implementation
}
class Layer(Node):
def __init__(self, inp, Theta, in_num, out_num, name='Layer', device_types=_c['device_types'], device_list=_c['device_list']):
        # input data might be batch_size * col_size, where a col_size*1 vector is isomorphic to the input
self.in_nm = in_num
self.out_nm = out_num
# dynamic binding
self._forward_call = self.forward1
# check devices
self._devices = Device_plura(device_list) if self.isSupported(device_types) else None
if self._devices is None:
raise UnSupportedDevice("%s not supported yet!" % device_types)
super(Layer, self).__init__(name, (inp, Theta, (self.out_nm, self.in_nm+1)))
def forward(self, inp):
if inp.batch_size == 1:
inp.w = inp.w[np.newaxis, ...]
inp.grad = inp.grad[np.newaxis, ...]
return self._forward_call(inp)
def forward1(self, inp):
"""
Vallina implementation, without further optimization and mainly running in CPU mode
:param inp: Vol
:return: Vol
"""
raise NotImplementedError("Not Implemented. Should be implemented in subclass of Layer")
def forward2(self, inp):
"""
Used for a real assigned device, may be called underlying libraries of implementation to achieve that goal
:param inp:
:return:
"""
raise NotImplementedError("Not Implemented. Should be implemented in the subclass of Layer")
def fn_call_forward_all(self, inp):
if isinstance(inp, Layer):
inp.add_child(self)
return self
elif isinstance(inp, Vol):
out_vol = self.forward(inp)
if len(self.children) == 0:
# if no children, return numeric value directly
return out_vol
rets = []
for childOp in self.children:
# bottom-up implementation
ret = childOp.fn_call_forward_all(out_vol)
rets.append(ret)
if len(rets) == 1:
# reduce the dimension
return rets[0]
return rets
else:
raise ImproperType("Improper type to pass into the layer. Only accept Vol and Layer types!")
__call__ = fn_call_forward_all
def bp(self, top_layer):
"""
:param top_layer: subclasses of Layer
:return: (nparray, nparray | None, nparray | None)
"""
raise NotImplementedError("Not Implemented. Should be implemented in subclass of Layer!")
def bp_all(self, top_layer):
inp_grad, dW, db = self.bp(top_layer)
if self.father is not None:
return self.father.bp_all(self)
else:
return inp_grad, dW, db
# This method is not efficient and need to be optimized in the future
def img2col(self, vol):
# img to col: (n, channel, in_nm_h, in_nm_w) => (n, channel * kernel_h * kernel_w, out_nm_h * out_nm_w)
X = vol.w
convs = self.filters
_, kernel_depth, kernel_h, kernel_w = convs.shape
l = kernel_depth*kernel_h*kernel_w
size1 = (vol.batch_size, self.channel * kernel_h * kernel_w, self.out_nm_h * self.out_nm_w)
ret = np.zeros(size1)
pad_default_val = 0
try:
pad_default_val = self.padding_default_val
        except AttributeError:
pass
# loop through samples
for i in range(vol.batch_size):
for w0 in range(0, self.out_nm_w): # col first
pad_left, pad_right = 0, 0
w1 = -self.pad + w0 * self.strip
w2 = w1 + kernel_w
if w1 < 0: w1, pad_left= 0, -w1
if w1 >= self.in_nm_w:
continue
if w2 < 0:
continue
if w2 > self.in_nm_w:
pad_right = w2 - self.in_nm_w
for h0 in range(0, self.out_nm_h):
pad_top, pad_bottom = 0, 0
h1 = -self.pad + h0 * self.strip
h2 = h1 + kernel_h
if h1 < 0: h1, pad_top = 0, -h1
if h1 >= self.in_nm_h:
continue
if h2 < 0:
continue
if h2 > self.in_nm_h:
pad_bottom = h2 - self.in_nm_h
kernel_conlv = X[i,:, h1:h2, w1:w2]
col = np.pad(kernel_conlv, [(0,0), (pad_top, pad_bottom), (pad_left, pad_right)],
mode='constant',
constant_values=pad_default_val).flatten() # row-major (C-style) order
if len(col) != l:
raise Exception("Wrong Padding")
ret[i,:,h0 * self.out_nm_w + w0] = col[:]
return ret
@property
def grad(self):
return self.filters.grad
# @todo : TODO
def isSupported(self, device_types):
return True
class FackedConvLayer(Layer):
def __init__(self, inp, Theta):
self.inp = inp
self.Theta = Theta
# Convolution is used intensively for image-like input data, hence we might have multi-dimensional data.
# For most use cases, three-dimensional filters of size P*P*K are applied to a multi-dimensional input source.
# For the input layer, the dimensionality might range from 2 to 5 (LiDAR depth info added).
# Operations required: ((img2col -> conv -> col2img) repeated 2~3 times -> pooling -> dropout) repeated many
# times -> (upsampling mirror)
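# Hedged sketch (not part of the original module): the output-size arithmetic used by
# ConvLayer below, out = floor((in + 2*pad - kernel)/stride + 1); note that the code
# calls the stride `strip`. The helper name `_conv_output_size` is introduced purely
# for illustration.
def _conv_output_size(in_size, kernel, pad=0, strip=2):
    return int(math.floor((in_size + 2 * pad - kernel) / strip + 1))
# Example: a 28x28 input with a 5x5 kernel, pad=0, strip=2 gives a 12x12 output.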
class ConvLayer(Layer):
def __init__(self, spatial_shape, convs, pad=0, strip=2,
lr=1.0, bias=None, **kw):
# input Vol instance
self.inp = None
self.channel, self.in_nm_h, self.in_nm_w = spatial_shape
        # Please refer to this post for details of the conv filter parameter definition
# Conv optimization: http://mp.weixin.qq.com/s/GKZeTrfTgRXmj29c_XouOQ
self.filters = convs # Vol
self.pad = pad
self.strip = strip
self.LAYER_TYPE = 'conv'
self.lr = lr
K, kernel_depth, kernel_h, kernel_w = convs.shape
self.bias = bias # Vol
self.out_nm_d = K
self.out_nm_w = int(math.floor((self.in_nm_w + self.pad*2 - kernel_w) / self.strip + 1))
self.out_nm_h = int(math.floor((self.in_nm_h + self.pad*2 - kernel_h) / self.strip + 1))
# padding defaults to 0
self.padding_default_val = 0
# output Vol instance
self.out = None
super(ConvLayer, self).__init__(None, convs, self.channel*self.in_nm_h*self.in_nm_w,
K*(kernel_h*kernel_w + 1),
name='ConvLayer')
# using nested For-Loop to implement raw ConvNet forward.
# This is not efficient and left to be improved in the near future
# See: https://github.com/karpathy/convnetjs/blob/master/src/convnet_layers_dotproducts.js
# https://github.com/costapt/cs231n/blob/master/assignment2/cs231n/im2col.py
def forward1(self, inp):
"""
        By applying `img2col` (borrowed from Caffe), we implement shared-parameter convolution over the input.
        The parameters contained in the layer number K * (kernel_depth * kernel_h * kernel_w + 1). To reduce parameters, we could
        apply a (kernel_h * kernel_w) shifting window to the original input, repeat the computation 3 times, and sum the results into
        an `aggregated` super-pixel value. We will use this concept in our `SpatialDetectron` algorithm for convolution over
        arbitrary 3D points.
:param inp: Vol
:return: Vol
"""
self.inp = inp # Vol
n = inp.batch_size
# might need to check here
convs = self.filters
K, kernel_depth, kernel_h, kernel_w = convs.shape
size1 = (n, K, self.out_nm_h, self.out_nm_w)
ret = np.zeros(size1)
# filters to row
W_row = convs.w.reshape(K, kernel_depth * kernel_h * kernel_w) # -1
# img to col: (n, channel, in_nm_h, in_nm_w) => (n, kernel_depth * kernel_h * kernel_w, out_nm_h * out_nm_w)
X_col = self.img2col(inp)
self.X_col = X_col
# loop through samples
for i in range(n):
# (K, kd*kh*kw) mul (kd*kh*hw, oh*ow)
out = np.matmul(W_row, X_col[i,:]) + self.bias.w
# col2img
out.resize(K, self.out_nm_h, self.out_nm_w)
ret[i,:] = out[:]
self.out = Vol(n, (K, self.out_nm_h, self.out_nm_w), init_gen=ret)
return self.out
def bp(self, top_layer):
"""
        The algorithm implemented here is based on my mathematical derivation of the partial derivatives with respect to both the input data and the parameters.
        Taking partial derivatives of the filters, each parameter at (i, kd, kh, kw) is determined by:
         - X[p, kd, kh + q*strip_h, kw + s*strip_w] : input convolution window box
         - top_layer.inp.grad[:, i, q, s], where (p, q, s) loop through (K0, out_nm_h, out_nm_w), and i loops through K1.
        Simply put, as (q,s) loop through (out_nm_h, out_nm_w), we effectively apply `an atrous conv` to the last layer's grad to get the
        gradients of this conv layer's parameters.
        Taking partial derivatives with respect to the input data -- `self.inp.w` -- bp resets `self.inp.grad`. To compute the derivative of
        self.inp.grad[:, channel, in_nm_h, in_nm_w], denoted grad[:,k,i,j], first apply an algebraic substitution to the forward convolution index:
         - i <- kh + q*strip_h
         - j <- kw + s*strip_w
        With `filters = self.convs` denoted as W,
        we derive that the perception field `W(kh,kw)` equals `W(i-q*strip_h, j-s*strip_w)`; we actually get a flipped version of the original convs.
        In our implementation, we also check whether the padded index is out of the filter W boundaries.
        Simply put, as (i,j) loop through (in_nm_h, in_nm_w), we effectively apply `a flipped conv` to the last layer's grad to get the
        gradient of the input data, and use transposed convolution to update the input gradient.
:param top_layer: Layer
:return: (nparray, nparray, nparray)
"""
# bias_grad, partial derivatives of biases
# dW, partial derivatives of filters
# convol top_layer.grad with inp
top_grad = top_layer.inp.grad # work as filters
W = self.filters.w
K0, kernel_depth, kernel_h, kernel_w = self.filters.shape
n, K1, out_nm_h, out_nm_w = top_grad.shape
# Partial derivatives of filters:
# A naive explanation:
# for i in range(K1):
# f = top_grad[:,i]
# # convolute with inp:
# # (n, oh, ow) conv (n, kd*kh*kw, oh, ow) => (K, kd, kh, kw):
# for kw in range(kernel_w):
# for kh in range(kernel_h):
# for kd in range(kernel_depth):
# # self.filters_grad[i, kd, kh, kw] += 1/n *
# sum_(p,q,s){ X[p, kd, kh + q*strip_h, kw + s*strip_w] * f[q,s] } # pay attention to indice
#
# Parameters:
# i: output data channel index, equal to self.inp.shape[0], denoted as K1
# p: original input batch sample index, equal to top, denoted as K0
# q: out_nm_h index
# s: out_nm_w index
# Rearrange above loop:
self.filters.grad[:] = 0.0
for k in range(K1):
f = top_grad[:, k]
for kd in range(kernel_depth):
for kh in range(kernel_h):
for kw in range(kernel_w):
uid = (kd*kernel_h+kh)*kernel_w+kw
self.filters.grad[k, kd, kh, kw] += np.sum(self.X_col[:,uid] * f.reshape(-1, out_nm_h * out_nm_w))
# partial derivatives of inp
# opposite to forward , inp computed in flipped direction
# (n, channel, in_nm_h, in_nm_w) <= (n, K , oh, ow) conv flipped(filter)
self.inp.grad[:] = 0.0
for k in range(self.channel):
for i in range(self.in_nm_h):
for j in range(self.in_nm_w):
# grad (n, K, oh, ow) conlv flipped(f) (K, kernel_depth, kernel_h, kernel_w)
self.conlv(self.inp.grad, top_grad, W, (k,i,j))
# partial derivatives of bias
for d in range(K1):
f = top_grad[:, d]
self.bias.grad[d] = np.sum(f)
return self.inp.grad, self.filters.grad, self.bias.grad
# Transposed convolution
def conlv(self, target, grad, convs, index):
'''
Transposed Convolution
:param target: np.array, destination
:param grad: np.array, top_diff
:param convs: np.array, original convolution
:param index: tuple, destination index
:return:
'''
K0, kernel_depth, kernel_h, kernel_w = convs.shape
k,i,j = index
for h in range(self.out_nm_h):
for w in range(self.out_nm_w):
if i-h*self.strip+self.pad < 0 or i-h*self.strip+self.pad >= kernel_h or \
j-w*self.strip+self.pad < 0 or j-w*self.strip+self.pad >= kernel_w:
continue
try:
target[:,k,i,j] += np.matmul(grad[:,:,h,w], convs[:, k, i-h*self.strip+self.pad, j-w*self.strip+self.pad])
except Exception as e:
raise(e)
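# Hedged sketch (not part of the original module): a minimal NumPy check that the
# im2col + matmul strategy used in ConvLayer.forward1 matches a naive nested-loop
# convolution. It deliberately avoids the Vol/ConvLayer classes; `_demo_im2col_conv`
# and its shapes are assumptions introduced purely for illustration.
def _demo_im2col_conv():
    rng = np.random.RandomState(0)
    x = rng.rand(1, 2, 4, 4)             # (n, channel, in_h, in_w), no padding, stride 1
    w = rng.rand(3, 2, 2, 2)             # (K, kernel_depth, kernel_h, kernel_w)
    oh = ow = 4 - 2 + 1                  # floor((in + 2*0 - kernel)/1 + 1)
    # naive convolution as the reference
    ref = np.zeros((1, 3, oh, ow))
    for k in range(3):
        for i in range(oh):
            for j in range(ow):
                ref[0, k, i, j] = np.sum(x[0, :, i:i+2, j:j+2] * w[k])
    # img to col: (n, channel, in_h, in_w) => (n, channel*kh*kw, oh*ow), then one matmul
    cols = np.zeros((1, 2 * 2 * 2, oh * ow))
    for i in range(oh):
        for j in range(ow):
            cols[0, :, i * ow + j] = x[0, :, i:i+2, j:j+2].ravel()
    out = np.matmul(w.reshape(3, -1), cols[0]).reshape(3, oh, ow)
    assert np.allclose(out, ref[0])
    return out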
class FullyCnnLayer(Layer):
"""
    See a naive implementation inspired by my solutions submitted to <NAME>'s Coursera deep learning course in 2014 and 2015, where I passed
    the server tests with 100% scores!
logistic implementation with L1, L2 normalization experiments solution provided:
fully connected neural network implementation solution provided:
"""
pass
class AtrousConvLayer(Layer):
"""
    AtrousConv (also called dilated convolution) corresponds to the backpropagation algorithm with respect to the filters, i.e.
    their gradients being applied to the input in a forward process.
"""
pass
class BatchNorm(Layer):
def __init__(self, frazed=False, gamma=1, beta=0, bn_params={}, mode='trainning'):
# input vol instance
self.inp = None
self.LAYER_TYPE = 'batch_norm'
# Not used for the moment TODO
self.frazed = frazed # Mask RCNN implementation for details
# parameters
self.spatial_size = None
self.gamma = gamma
self.beta = beta
        # the default values are borrowed from cs231n; I didn't find out why they are good choices, maybe they are not.
self.epilon = bn_params.get('epilon', 1e-5)
self.stat_momentum = bn_params.get('stat_momentum', 0.9)
# parameters used inside forward operation, needed to be persistent
self.running_mean = bn_params.get('running_mean', None)
self.running_var = bn_params.get('running_var' , None)
# learnable parameters
        # to make a reasonable difference, W and bias should have the same spatial shape as the gradient,
        # where partial gamma_0 (scalar) = sum_(i) { partial J over partial y_i0 * partial y_i0 over partial gamma_0 },
        # which means y_i0 can only be computed from gamma_i0 values. Hence if gamma is a scalar equal to gamma_0, that does
        # not make sense
self._param_initialized = False
        self.W = Vol(1, (1,), init_gen=np.array([self.gamma,])) if np.isscalar(self.gamma) else None
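# Hedged sketch (not part of the original module): the standard batch-norm forward pass
# that the truncated BatchNorm class above appears to be building, using the parameter
# names it declares (epsilon ~ `epilon`, `stat_momentum`, `running_mean`, `running_var`).
# `_batchnorm_forward_sketch` is an assumption introduced for illustration, not the
# author's implementation.
def _batchnorm_forward_sketch(x, gamma, beta, running_mean, running_var,
                              eps=1e-5, momentum=0.9, training=True):
    if training:
        mean = x.mean(axis=0)
        var = x.var(axis=0)
        running_mean = momentum * running_mean + (1 - momentum) * mean
        running_var = momentum * running_var + (1 - momentum) * var
    else:
        mean, var = running_mean, running_var
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma * x_hat + beta, running_mean, running_var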
import csv
import os
import timeit
import logging
import numpy as np
from scipy import fftpack as scipyfftpack
from scipy import interpolate, optimize
import matplotlib.pyplot as plt
import matplotlib
log = logging.getLogger(__name__)
np.seterr(all='raise')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log.addHandler(ch)
SFRFILENAME = 'edge_sfr_values.txt'
CURRENT_JITTER_CODE_VERSION = 2
MULTIPROCESSING = 8 # Number of processes to use (1 to disable multiprocessing)
SAGITTAL = "SAGITTAL"
MERIDIONAL = "MERIDIONAL"
MEDIAL = "MEDIAL"
BOTH_AXES = "BOTH"
ALL_THREE_AXES = "ALL THREE AXES"
SAGITTAL_COMPLEX = "SAGITTAL_COMPLEX"
MERIDIONAL_COMPLEX = "MERIDIONAL_COMPLEX"
SAGITTAL_REAL = "SAGITTAL_REAL"
MERIDIONAL_REAL = "MERIDIONAL_REAL"
SAGITTAL_IMAG = "SAGITTAL_IMAJ"
MERIDIONAL_IMAG = "MERIDIONAL_IMAJ"
SAGITTAL_ANGLE = "SAGGITAL_ANGLE"
MERIDIONAL_ANGLE = "MERIDIONAL_ANGLE"
COMPLEX_AXES = [SAGITTAL_COMPLEX, MERIDIONAL_COMPLEX, SAGITTAL_REAL, MERIDIONAL_REAL, SAGITTAL_IMAG, MERIDIONAL_IMAG, SAGITTAL_ANGLE, MERIDIONAL_ANGLE]
REAL_AXES = [SAGITTAL_REAL, MERIDIONAL_REAL]
IMAG_AXES = [SAGITTAL_IMAG, MERIDIONAL_IMAG, MERIDIONAL_ANGLE, SAGITTAL_ANGLE]
SAGITTAL_AXES = [SAGITTAL, SAGITTAL_REAL, SAGITTAL_IMAG, SAGITTAL_COMPLEX, SAGITTAL_ANGLE]
MERIDIONAL_AXES = [MERIDIONAL, MERIDIONAL_REAL, MERIDIONAL_IMAG, MERIDIONAL_COMPLEX, MERIDIONAL_ANGLE]
POLAR_AXES = [SAGITTAL_ANGLE, MERIDIONAL_ANGLE]
COMPLEX_CARTESIAN = 1
COMPLEX_POLAR_TUPLE = 2
COMPLEX_CARTESIAN_REAL_TUPLE = 3
PLOT_ON_FIT_ERROR = True
PLOT_MTF50_ERROR = True
TRUNCATE_MTF_LOBES = False
SFR_HEADER = [
'blockid',
'edgex',
'edgey',
'edgeangle',
'radialangle'
]
FIELD_SMOOTHING_MIN_POINTS = 16
FIELD_SMOOTHING_MAX_RATIO = 0.3
FIELD_SMOOTHING_ORDER = 3
LOW_BENCHMARK_FSTOP = 14
HIGH_BENCHBARK_FSTOP = 2.8
# LOW_BENCHMARK_FSTOP = 32
# HIGH_BENCHBARK_FSTOP = 13
# IMAGE_WIDTH = 8256
IMAGE_WIDTH = 6000
# SENSOR_WIDTH = 0.0357
SENSOR_WIDTH = 0.0236
IMAGE_HEIGHT = IMAGE_WIDTH * 2 / 3
IMAGE_DIAGONAL = (IMAGE_WIDTH**2 + IMAGE_HEIGHT**2)**0.5
DEFAULT_PIXEL_SIZE = SENSOR_WIDTH / IMAGE_WIDTH
THETA_BOTTOM_RIGHT = np.arctan(IMAGE_HEIGHT / IMAGE_WIDTH)
THETA_TOP_RIGHT = np.pi * 2.0 - THETA_BOTTOM_RIGHT
CHART_WIDTH = 18 * 0.0254
# CHART_WIDTH = SENSOR_WIDTH * 33
CHART_DIAGONAL = (CHART_WIDTH ** 2 + (CHART_WIDTH * IMAGE_HEIGHT / IMAGE_WIDTH)**2) ** 0.5
DEFAULT_SENSOR_DIAGONAL = IMAGE_DIAGONAL * DEFAULT_PIXEL_SIZE
# LOWAVG_NOMBINS = np.arange(2, 6)
LOWAVG_NOMBINS = np.arange(3, 12)
ACUTANCE_PRINT_HEIGHT = 0.6
ACUTANCE_VIEWING_DISTANCE = 0.74
CONTOUR2D = 0
PROJECTION3D = 1
SMOOTH2D = 3
DEFAULT_FREQ = -2
MTF50 = -1
AUC = -2
ACUTANCE = -3
LOWAVG = -4
DIFFRACTION_WAVELENGTH = 575e-9
FOCUS_SCALE_COC = "Defocus blur circle diameter (µm)"
FOCUS_SCALE_COC_PIXELS = "Defocus blur circle diameter (pixels)"
FOCUS_SCALE_FOCUS_SHIFT = "Image-side long. focus shift (µm)"
FOCUS_SCALE_SUB_FOCUS_SHIFT = "Subject-side focus shift (mm)"
FOCUS_SCALE_RMS_WFE = "RMS Defocus wavefront error (λ)"
def CENTRE_WEIGHTED(height):
return (1.0 - height) ** 1
def EDGE_WEIGHTED(height):
return np.clip(1.1 - np.abs(0.6 - height)*1.4, 0.0001, 1.0) ** 2
def CORNER_WEIGHTED(height):
return height ** 1
def EVEN_WEIGHTED(height):
return 1.0
def plot_weighting(weightingfn):
x = np.linspace(0, 1, 100)
plt.plot(x, weightingfn(x))
plt.show()
# plot_weighting(EDGE_WEIGHTED)
# exit()
def diffraction_mtf(freq, fstop=8, calibration=None):
if type(freq) is int and freq == AUC:
return diffraction_mtf(np.linspace(0, 0.5-1.0/32, 32), fstop, calibration).mean()
if type(freq) is int and freq == ACUTANCE:
# print(22, calibration)
return calc_acutance(diffraction_mtf(RAW_SFR_FREQUENCIES, fstop, calibration))
mulfreq = np.clip(freq / DEFAULT_PIXEL_SIZE * DIFFRACTION_WAVELENGTH * fstop, 0, 1)
if calibration is None:
calibration_mul = 1.0
else:
interpfn = interpolate.InterpolatedUnivariateSpline(RAW_SFR_FREQUENCIES[:],
np.pad(calibration, (0,64-len(calibration)),
'constant',
constant_values=0), k=1)
calibration_mul = np.clip(interpfn(freq), 1e-6, np.inf)
diff = 2.0 / np.pi * (np.arccos(mulfreq) - mulfreq * (1 - mulfreq ** 2) ** 0.5) * calibration_mul
return diff * 0.98 + 0.02
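# Hedged usage sketch (not part of the original module): evaluating the diffraction MTF
# model above at a few spatial frequencies and as an area-under-curve figure of merit.
# Wrapped in a function so nothing runs at import time.
def _demo_diffraction_mtf():
    freqs = np.array([0.0, 0.1, 0.25, 0.5])      # cycles/pixel
    per_freq = diffraction_mtf(freqs, fstop=8)   # MTF at each frequency
    auc = diffraction_mtf(AUC, fstop=8)          # mean MTF over the low-frequency band
    return per_freq, auc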
def calc_acutance(sfr, print_height=ACUTANCE_PRINT_HEIGHT, viewing_distance=ACUTANCE_VIEWING_DISTANCE):
if viewing_distance is None:
viewing_distance = max(0.15, print_height ** 0.5)
def csf(af): # Contrast sensitivity function
return 75 * af ** 0.8 * np.exp(-0.2 * af)
if len(sfr) < 64:
sfr = np.pad(sfr, (0, 64 - len(sfr)), 'constant', constant_values=0.0)
print_cy_per_m = RAW_SFR_FREQUENCIES * 4000 / print_height
cy_per_rad = print_cy_per_m * viewing_distance # atan Small angle approximation
cy_per_degree = cy_per_rad / 180 * np.pi
specific_csf = csf(cy_per_degree)
total = (specific_csf * sfr).sum() / specific_csf.sum()
return total
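# Hedged usage sketch (not part of the original module): CSF-weighted acutance of the
# sample `GOOD` MTF curve defined further below, at the default print height and
# viewing distance. Wrapped in a function so nothing runs at import time.
def _demo_acutance():
    return calc_acutance(np.array(GOOD))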
def gaussian_fourier(c):
f = RAW_SFR_FREQUENCIES
gauss = np.exp(-f ** 2 * c ** 2 * 0.5)
# plt.plot(f, gauss);plt.show()
return gauss
def pixel_aperture_mtf(freq):
freq = np.clip(freq, 0.0001, 1.0)
return np.sin(np.pi*freq) / np.pi / freq
def tukey(x, alpha):
tukey_window = np.cos(np.clip((abs(x) - 1.0 + alpha) * np.pi / alpha, 0, np.pi)) + 1
return tukey_window
def calc_image_height(x, y):
"""
Calculate image height (distance from centre) ranging from 0.0 to 1.0
:param x: x loc(s)
:param y: y loc(s)
:return: height(s)
"""
img_height = (((IMAGE_WIDTH / 2) - x) ** 2 + ((IMAGE_HEIGHT / 2) - y) ** 2) ** 0.5 / IMAGE_DIAGONAL * 2
return img_height
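# Hedged usage sketch (not part of the original module): image height is 0.0 at the
# sensor centre and 1.0 at the corners, per calc_image_height above.
def _demo_image_height():
    centre = calc_image_height(IMAGE_WIDTH / 2, IMAGE_HEIGHT / 2)   # -> 0.0
    corner = calc_image_height(0, 0)                                # -> 1.0
    return centre, corner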
RAW_SFR_FREQUENCIES = np.array([x / 64 for x in range(64)]) # List of sfr frequencies in cycles/pixel
GOOD = [1., 0.98582051, 0.95216779, 0.91605742, 0.88585631, 0.86172936,
0.84093781, 0.82116408, 0.80170952, 0.78201686, 0.76154796, 0.73985244,
0.7166293, 0.69158089, 0.66423885, 0.63510484, 0.60407738, 0.57122645,
0.53737249, 0.50266147, 0.46764089, 0.43269842, 0.39822897, 0.36466347,
0.33236667, 0.30161039, 0.27266122, 0.24569197, 0.2208242, 0.19810618,
0.17752172, 0.15900566, 0.14245044, 0.1277121, 0.11462787, 0.10302666,
0.09274069, 0.08361389, 0.07550579, 0.06829461, 0.06187432, 0.05615253,
0.05104666, 0.04648352, 0.04239983, 0.03874731, 0.03549705, 0.03264138,
0.03019484, 0.0281874, 0.0266599, 0.02565582, 0.02520846, 0.02533362,
0.02601429, 0.02719823, 0.02879615, 0.03068963, 0.03274225, 0.03481336,
0.0367723, 0.03850572, 0.03992789, 0.04098472]
def fastgauss(gaussx, a,b,c):
return a * np.exp(-(gaussx - b) ** 2 / (2 * c ** 2))
def twogauss(gaussx, a, b, c, peaky):
peaky = peaky * np.clip((c - 0.7) / 2.0, 0.0, 1.0) # No peaky at low sigma
a1 = 1 / (1 + peaky)
a2 = peaky / (1 + peaky)
c1 = c * 1.8
c2 = c / 1.4
wide = a1 * np.exp(-(gaussx - b) ** 2 / (2 * c1 ** 2))
narrow = a2 * np.exp(-(gaussx - b) ** 2 / (2 * c2 ** 2))
both = (wide + narrow) * a
return both
count=0
def cauchy(xin, max, x0, gamma):
global count
count += 1
# print(count)
# print(xin)
return max / (1.0 + ((xin - x0) / gamma) ** 2)
def c_init(x, y, inc):
return y, x, 3.0 * inc
def c_bounds(x, y, inc):
return ((y * 0.98, x - inc * 2, 0.4 * inc,),
(y * 1.15, x + inc * 2, 100.0 * inc,))
cauchy.initial = c_init
cauchy.bounds = c_bounds
def psysmfit(defocus, defocus_offset, aberr):
pupil = NollZernike(Z4=defocus + defocus_offset, dia=10, norm=True, **{zedstr: add}, wavelength=wl,
opd_unit="um")
m = MTF.from_pupil(pupil, efl=fl)
if 0:
plt.plot(freqs, m.exact_xy(freqs))
# cauchy.bounds = lambda x, y, inc: (highest_data_y * 0.98, mean_peak_x - x_inc * 2, 0.4 * x_inc,), \
# (highest_data_y * 1.15, mean_peak_x + x_inc * 2, 100.0 * x_inc,)
class EXIF:
def __init__(self, sfr_pathname=None, exif_pathname=None):
self.exif = {"NOTHING HERE FOR SPACE PURPOSES": True}
value = ""
self.aperture = 1.0
self.focal_length_str = value
self.lens_model = value
self.max_aperture = value
self.distortionexif = value
self.ca_exif = value
if exif_pathname is None and sfr_pathname is not None:
pathsplit = os.path.split(sfr_pathname)
fnamesplit = pathsplit[1].split(".")
exiffilename = ".".join(fnamesplit[:2]) + ".exif.csv"
exif_pathname = os.path.join(pathsplit[0], exiffilename)
print(exif_pathname)
if exif_pathname is not None:
try:
                print("Trying to open {}".format(exif_pathname))
print(pathsplit)
with open(exif_pathname, 'r') as file:
print("Found EXIF file")
reader = csv.reader(file, delimiter=',', quotechar='|')
for row in reader:
# if row[0] in self.exif:
# self.exif[row[0]+"_dup"] = row[1]
# else:
# self.exif[row[0]] = row[1]
tag, value = row[:2]
# print(tag, value)
if tag == "Aperture":
fl = float(value[:])
self.aperture = 1.25 if fl == 1.2 else fl
elif tag == "Focal Length" and "equivalent" not in value:
self.focal_length_str = value
elif tag == "Lens Model":
self.lens_model = value
elif tag == "Max Aperture Value":
self.max_aperture = value
elif tag == "Geometric Distortion Params":
self.distortionexif = value
elif tag == "Chromatic Aberration Params":
self.ca_exif = value
except FileNotFoundError:
log.warning("No EXIF found")
print("Aperture is {}".format(self.aperture))
@property
def summary(self):
        if len(self.exif) == 0:
return "No EXIF available"
return "{} at {}, f/{}".format(self.lens_model, self.focal_length, self.aperture)
@property
def angle_of_view(self):
sensor_diagonal_m = IMAGE_DIAGONAL * DEFAULT_PIXEL_SIZE
focal_length_m = self.focal_length * 1e-3
lens_angle_of_view = 2 * np.arctan(sensor_diagonal_m / focal_length_m / 2)
return lens_angle_of_view
@property
def focal_length(self):
return float(self.focal_length_str.split(" ")[0])
    @focal_length.setter
    def focal_length(self, fl):
        self.focal_length_str = "{:.1f} mm".format(fl)
def truncate_at_zero(in_sfr):
# in_sfr = np.array(in_sfr) + 0.0
# plt.plot(RAW_SFR_FREQUENCIES[:len(in_sfr)], in_sfr)
sfr = np.concatenate(([1.0], in_sfr, [0.0]))
l = len(sfr)
derivative = sfr[1:l] - sfr[:l-1]
# plt.plot(RAW_SFR_FREQUENCIES[:l-3], derivative[1:l-2], '--')
# plt.plot(RAW_SFR_FREQUENCIES[:l-3], sfr[1:l-2], '--')
# plt.hlines([0], 0, 1, linestyles='dotted')
# derivative_shift = derivative[:32]
# second_der = derivative_shift - derivative[:32]
# plt.plot(RAW_SFR_FREQUENCIES[:l-3], derivative[:l-3])
cuts = np.all((derivative[1:l-1] > 0.002, derivative[:l-2] < 0.002, sfr[1:l-1] < 0.13), axis=0)
cumsum = np.cumsum(cuts)
# plt.plot(RAW_SFR_FREQUENCIES[:l-2], cumsum)
out_sfr = in_sfr * (cumsum == 0) + 1e-6
# print(sfr[1:l-1] < 0.08)
# print(cuts)
# plt.plot(RAW_SFR_FREQUENCIES[:len(in_sfr)], out_sfr-0.01)
# plt.show()
return out_sfr
def fallback_results_path(basepath, number):
for n in range(number, 2, -1):
path = os.path.join(basepath, "mtfm{}".format(n))
if os.path.exists(path):
for entry in os.scandir(path):
# if entry.is_file:
return path
if os.path.exists(basepath):
return basepath
raise FileNotFoundError("Can't find results at path {}".format(basepath))
COLOURS = ['red',
'orangered',
'darkorange',
'green',
'blue',
'darkviolet',
'deeppink',
'black']
NICECOLOURS = ['red',
'green',
'blue',
'darkviolet']
class Calibrator:
def __init__(self):
self.calibrations = []
self.averaged = None
self.used_calibration = False
def add_focusset(self, focusset):
self.calibrations.append((focusset.exif, focusset.build_calibration(fstop=None, opt_freq=AUC, plot=False, writetofile=False,use_centre=False)))
if focusset.use_calibration:
self.used_calibration = True
def average_calibrations(self, absolute=False, plot=True, trim=None):
if len(self.calibrations) == 0:
raise ValueError("No Calibrations!")
exifs, tups = zip(*self.calibrations)
datas, diffs, cals = zip(*tups)
data_stack = np.vstack(datas)
diff_stack = np.vstack(diffs)
if absolute:
stack = diff_stack - data_stack
invert = False
else:
stack = diff_stack / data_stack
invert = self.used_calibration
if trim is None:
trim = not self.used_calibration
if invert:
if absolute:
stack = - stack
else:
stack = 1 / stack
if trim:
length = int(len(self.calibrations) * 0.7)
else:
length = len(self.calibrations)
aucs = stack[:, :30].mean(axis=1)
sortorder = np.argsort(aucs)
use_order = sortorder[:length]
sortedstack = stack[use_order, :]
weights = np.linspace(1.0, 0, len(sortedstack))
averaged = np.average(sortedstack, axis=0, weights=weights)
sortedcallist = []
sortedexif = []
for arg in use_order:
sortedcallist.append(self.calibrations[arg])
sortedexif.append(exifs[arg])
print("Averaged {} calibrations".format(len(sortedstack)))
order = 0
colour = 0
plt.plot(RAW_SFR_FREQUENCIES[:len(averaged)], averaged, '-', label="Average", color='black')
for exif, line in zip(sortedexif, sortedstack):
# if exif.aperture != 11.0:
# continue
color = 'grey'
print("Line", exif.summary)
print(line)
if exif.aperture > 5.5:
color = 'red'
if exif.aperture > 7.9:
color = 'green'
if exif.aperture > 10.9:
color = 'blue'
if exif.aperture > 15.0:
color = 'magenta'
print(exif.aperture, color)
color = (COLOURS*2)[colour]
if 1 or order:
plt.plot(RAW_SFR_FREQUENCIES[:len(line)], line, '-', label=exif.summary, alpha=0.6, color=color)
colour += 1
else:
plt.plot(RAW_SFR_FREQUENCIES[:len(line)], line, '-', label=exif.summary, alpha=0.8, color=color)
order = (order + 1) % 2
plt.legend()
if absolute:
plt.ylim(-0.15, 0.15)
else:
plt.ylim(0, 1.3)
plt.xlabel("Spatial Frequency (cy/px)")
plt.xlim(0, 0.5)
if invert:
plt.title("Lens MTF vs Diffraction MTF for EXIF F/ number")
if absolute:
plt.ylabel("MTF Error (Inverted)")
else:
plt.ylabel("Relative MTF")
else:
plt.title("Gain required for Lens MTF to match expected diffraction MTF from EXIF")
if absolute:
plt.ylabel("MTF Error")
else:
plt.ylabel("Gain")
plt.hlines([1.0], 0, 0.5, linestyles='--', alpha=0.5)
plt.grid()
plt.show()
self.averaged = averaged
def write_calibration(self):
if self.used_calibration:
raise ValueError("Existing calibration was used in at least one FocusSet, run without calibration")
with open("calibration.csv", 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|')
csvwriter.writerow(list(RAW_SFR_FREQUENCIES[:len(self.averaged)]))
csvwriter.writerow(list(self.averaged))
print("Calibration written!")
with open("photopic.csv", 'r') as photopic_file:
reader = csv.reader(photopic_file, delimiter=',', quotechar='|')
waves, mags = zip(*reader)
photopic_fn = interpolate.InterpolatedUnivariateSpline([float(_) for _ in waves], [float(_) for _ in mags], k=1)
# plotfreqs = np.linspace(400, 700, 50)
# plt.plot(plotfreqs, photopic_fn(plotfreqs))
# plt.show()
def convert_complex(tup, type):
if type == COMPLEX_CARTESIAN_REAL_TUPLE:
return tup
if type == COMPLEX_CARTESIAN:
return tup[0] + 1j * tup[1]
if type == COMPLEX_POLAR_TUPLE:
r, i = tup
return (r**2 + i**2)**0.5, np.angle(r + 1j * i)
def convert_complex_from_polar(tup, type):
if type == COMPLEX_CARTESIAN_REAL_TUPLE:
return tup[0] * np.cos(tup[1]), tup[0] * np.sin(tup[1])
if type == COMPLEX_CARTESIAN:
return tup[0] * np.exp(1j * tup[1])
if type == COMPLEX_POLAR_TUPLE:
return tup
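# Hedged sketch (not part of the original module): round-trip check of the two
# complex-representation converters above. `_demo_complex_roundtrip` is introduced
# purely for illustration.
def _demo_complex_roundtrip():
    r, i = 0.6, -0.8
    mag, phase = convert_complex((r, i), COMPLEX_POLAR_TUPLE)        # cartesian -> polar
    r2, i2 = convert_complex_from_polar((mag, phase), COMPLEX_CARTESIAN_REAL_TUPLE)
    assert np.allclose((r, i), (r2, i2))
    return mag, phase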
def tryfloat(inp):
try:
return float(inp)
except ValueError:
return inp
class FocusSetData:
def __init__(self):
self.merged_mtf_values = None
self.sag_mtf_values = None
self.mer_mtf_values = None
self.mtf_means = None
# self.focus_values = None
self.max_pos = None
self.weights = None
self.exif = None
self.cauchy_peak_x = None
self.x_loc = None
self.y_loc = None
self.hints = {}
self.wavefront_data = [("", {})]
def get_wavefront_data_path(self, seed="less"):
try:
return "wavefront_results/Seed{}/f{:.2f}/".format(seed, self.exif.aperture)
except AttributeError:
return "wavefront_results/Seed{}/f{:.2f}/".format(seed, 0)
D50 = {
380: 24.875289,
385: 27.563481,
390: 30.251674,
395: 40.040332,
400: 49.828991,
405: 53.442452,
410: 57.055912,
415: 58.804446,
420: 60.552981,
425: 59.410306,
430: 58.267630,
435: 66.782105,
440: 75.296579,
445: 81.505921,
450: 87.715262,
455: 89.377806,
460: 91.040350,
465: 91.389339,
470: 91.738329,
475: 93.587777,
480: 95.437226,
485: 93.832642,
490: 92.228058,
495: 94.083274,
500: 95.938491,
505: 96.364129,
510: 96.789768,
515: 97.020168,
520: 97.250568,
525: 99.719339,
530: 102.188110,
535: 101.500286,
540: 100.812463,
545: 101.578486,
550: 102.344510,
555: 101.172255,
560: 100.000000,
565: 98.856409,
570: 97.712817,
575: 98.290562,
580: 98.868307,
585: 96.143758,
590: 93.419210,
595: 95.490174,
600: 97.561139,
605: 98.335311,
610: 99.109482,
615: 98.982006,
620: 98.854530,
625: 97.185755,
630: 95.516980,
635: 97.061662,
640: 98.606343,
645: 97.006890,
650: 95.407437,
655: 96.649097,
660: 97.890758,
665: 100.274818,
670: 102.658878,
675: 100.722246,
680: 98.785615,
685: 92.936539,
690: 87.087464,
695: 89.179124,
700: 91.270785,
705: 91.925918,
710: 92.581051,
715: 84.591223,
720: 76.601396,
725: 81.418425,
730: 86.235455,
735: 89.262560,
740: 92.289664,
745: 85.138388,
750: 77.987113,
755: 67.745912,
760: 57.504710,
765: 70.080157,
770: 82.655604,
775: 80.341321,
780: 78.027038}
nms, spds = zip(*D50.items())
d50_interpolator = interpolate.InterpolatedUnivariateSpline(np.array(nms) * 1e-3, spds, k=1)
# import cupy
# from cupyx.scipy import fftpack
def get_good_fft_sizes():
_all = []
_upto = 2048
_factors = [2, 3, 5]
_power_lst = []
for factor in _factors:
powers = np.arange(-1, int(np.log(_upto) / np.log(factor) + 1.1))
_power_lst.append(powers)
_power_lst = [np.arange(-1, 14), [-1,0,1,2,3,4,5,6], [-1,0,1,2,3]]
print(_power_lst)
mesh = np.meshgrid(*_power_lst)
for powers in zip(*[_.flatten() for _ in mesh]):
sum = 1
for power, factor in zip(powers, _factors):
# print(factor, power)
if power != -1:
sum *= factor ** power
# print(sum)
_all.append(sum)
# print()
unique = np.unique(_all)
unique = unique[unique <= _upto]
uniquebig = unique[unique >= 64]
for _ in range(2):
unique_worth_it = []
best_time = np.inf
for size in np.flip(uniquebig):
if size % 2 == 1:
continue
# arr = cupy.ones((size, size)) * 0.2 + 0.1j
if size == _upto:
runtimes = 3
else:
runtimes = 2
for _ in range(runtimes):
reps = int(100 * (2048 + 256)**2 / (size+256)**2)
# time = timeit.timeit("ndimage.affine_transform(cupy.abs(fftpack.fft(arr))**2, transform, offset, order=1)", number=reps,
# setup="from cupyx.scipy import fftpack, ndimage; import cupy;"
# "transform = cupy.array([[1.01,0.01],[0.99, -0.01]]);offset=0.01;"
# "arr = cupy.ones(({},{}), dtype='complex128') * 0.2 + 0.1j".format(size, size)) / reps * 1000
reps = int(2 * (2048 + 256)**2 / (size+256)**2)
# time = timeit.timeit("ndimage.affine_transform(numpy.abs(fftpack.fft(arr))**2, transform, offset, order=1)", number=reps,
# setup="from scipy import fftpack, ndimage; import numpy;"
# "transform = numpy.array([[1.01,0.01],[0.99, -0.01]]);offset=0.01;"
# "arr = numpy.ones(({},{}), dtype='complex128') * 0.2 + 0.1j".format(size, size)) / reps * 1000
time = timeit.timeit("cupy.abs(fftpack.fft(arr))**2", number=reps,
setup="from cupyx.scipy import fftpack, ndimage; import cupy;"
"transform = cupy.array([[1.01,0.01],[0.99, -0.01]]);offset=0.01;"
"arr = cupy.ones(({},{}), dtype='complex128') * 0.2 + 0.1j".format(size, size)) / reps * 1000
# time = timeit.timeit("numpy.abs(fftpack.fft(arr))**2", number=reps,
# setup="from scipy import fftpack, ndimage; import numpy;"
# "transform = numpy.array([[1.01,0.01],[0.99, -0.01]]);offset=0.01;"
# "arr = numpy.ones(({},{}), dtype='complex128') * 0.2 + 0.1j".format(size, size)) / reps * 1000
print("FFT {}, {}s".format(size, time))
if time < best_time:
print("Worth it!")
best_time = time
unique_worth_it.append(size)
print(repr(np.array(unique_worth_it)))
return uniquebig
# CUDA_GOOD_FFT_SIZES = get_good_fft_sizes()
# exit()
CUDA_GOOD_FFT_SIZES = np.flip(np.array([2048, 2000, 1944, 1920, 1800, 1728, 1620, 1600, 1536, 1500, 1458,
1440, 1350, 1296, 1280, 1200, 1152, 1080, 1024, 1000, 972, 960,
900, 864, 810, 800, 768, 750, 720, 648, 640, 600, 576,
540, 512, 486, 480, 450, 432, 400, 384, 324, 320, 300,
288, 270, 256, 216, 160, 144, 128, 96, 64]))
CPU_GOOD_FFT_SIZES = np.flip(np.array([2048, 2000, 1944, 1800, 1728, 1620, 1536, 1500, 1458, 1440, 1350,
1296, 1200, 1152, 1080, 1024, 1000, 972, 900, 864, 810, 768,
750, 720, 648, 640, 600, 576, 540, 512, 500, 486, 480,
450, 432, 400, 384, 360, 324, 320, 300, 288, 270, 256,
250, 240, 216, 200, 192, 180, 162, 160, 150, 144, 128,
120, 108, 100, 96, 90, 80, 72, 64]))
# CUDA_GOOD_FFT_SIZES = np.array((768,))
# CPU_GOOD_FFT_SIZES = np.array((256,))
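# Hedged sketch (not part of the original module): picking the smallest "good" FFT size
# that is at least as large as a requested length, using the tables above (they are
# stored largest-to-smallest via np.flip). `_next_good_fft_size` is introduced purely
# for illustration.
def _next_good_fft_size(n, sizes=CPU_GOOD_FFT_SIZES):
    candidates = sizes[sizes >= n]
    if len(candidates) == 0:
        raise ValueError("Requested length {} exceeds the largest tabulated FFT size".format(n))
    return int(candidates.min())
# Example: _next_good_fft_size(1001) returns 1024 from the CPU table.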
class NoPhaseData(Exception):
pass
class InvalidFrequency(Exception):
pass
def _norm_phase_and_magnitude(r, i, x, inc_neg_freqs=False, return_type=COMPLEX_CARTESIAN, plot=False):
"""
Normalises complex phase in array
Zero frequency is assumed to be at index 0 (ie. unshifted)
:param r: Real component
:param i: Imaginary component (as real float)
:param x: Frequencies
:param inc_neg_freqs: Includes second half of FFT with neg. frequencies
:param return_type: default COMPLEX_CARTESIAN (ie. real + imag * 1j)
:return: Normalised result
"""
# def custom_unwrap(pha):
if not inc_neg_freqs:
meanlen = len(x)
else:
meanlen = int(len(x) / 2)
mag = (r ** 2 + i ** 2) ** 0.5
phase = np.unwrap(np.angle(r + i*1j))
weights = mag[:meanlen] * np.linspace(1,0,meanlen)
weights = np.zeros(meanlen)
# weights[1] = 1
weights = mag[:meanlen] ** 2
meanphase = np.average(phase[:meanlen], weights=weights)
mean_x = np.average(x[:meanlen], weights=weights)
phase_shift = - (meanphase / mean_x) * x
if plot:
oldphase = phase.copy()
phase += phase_shift
if inc_neg_freqs and 1:
phase[meanlen:] = -np.flip(phase[:meanlen])
# new_meanphase = np.average(phase[:meanlen], weights=weights)
# if new_meanphase < 0:
# phase *= -1
if x[0] == 0:
mag /= mag[0]
if plot:
plotx = x[:meanlen]
if inc_neg_freqs and 0:
s = np.fft.fftshift
else:
s = lambda _: _
fig, (a1, a2) = plt.subplots(1,2)
a1.plot(x, s(r), label='real')
a1.plot(x, s(i), label='imag')
# a1.plot(plotx, weights, label='weights')
a1.plot(x, s(mag), label='mag')
a2.plot(x, s(oldphase), label='oldphase')
a2.plot(x, s(phase), label='newphase')
a2.plot(x, s(phase_shift), label="phaseshift")
nr, ni = convert_complex_from_polar((mag, phase), COMPLEX_CARTESIAN_REAL_TUPLE)
a1.plot(x, s(nr), label="new real")
a1.plot(x, s(ni), label="new imag")
a1.legend()
a2.legend()
plt.show()
return convert_complex_from_polar((mag, phase), return_type)
def ___test_phase_normalisation():
a = fastgauss(np.arange(64)**2, 1.0, 32**2, 14**2)
b = np.flip(fastgauss(np.arange(64)**2, 1.0, 32**2, 14**2))
a /= a.max()
b /= b.max()
a = np.roll(a, -3)
b = np.roll(b, 7)
ft = np.fft.fft(np.fft.fftshift(a))
ft_b = np.fft.fft(np.fft.fftshift(b))
ftr, fti = normalised_centreing_fft(ft.real, ft.imag, np.arange(64), return_type=COMPLEX_CARTESIAN_REAL_TUPLE, inc_neg_freqs=True, plot=True)
ftr_b, fti_b = normalised_centreing_fft(ft_b.real, ft_b.imag, np.arange(64), return_type=COMPLEX_CARTESIAN_REAL_TUPLE, inc_neg_freqs=True, plot=True)
plt.plot(a, '--', color="green", alpha=0.5)
plt.plot(b, '--', color="gray", alpha=0.5)
plt.plot(ftr[:16], color="red", alpha=0.5)
plt.plot(fti[:16], color="purple", alpha=0.5)
plt.plot(ftr_b[:16], color="orange", alpha=0.5)
plt.plot(fti_b[:16], color="blue", alpha=0.5)
# plt.plot(ft.real[:16], '--', color="red", alpha=0.5)
# plt.plot(ft.imag[:16], '--', color="purple", alpha=0.5)
newgauss = np.fft.fftshift(np.fft.ifft(ftr + 1j * fti))
newgauss_b = np.fft.fftshift(np.fft.ifft(ftr_b + 1j * fti_b))
plt.plot(newgauss.real / newgauss.real.max(), color="green", alpha=0.5)
plt.plot(newgauss_b.real / newgauss_b.real.max(), color="black", alpha=0.5)
plt.show()
def normalised_centreing_fft(y, x=None, return_type=COMPLEX_CARTESIAN, engine=np, fftpack=None, plot=False):
"""
    Normalises the complex phase of an array's FFT
    Zero frequency is assumed to be at index 0 (ie. unshifted)
    :param y: input to the fft
    :param x: x-axis (defaults to arange(len(y)))
    :param return_type: default COMPLEX_CARTESIAN (ie. real + imag * 1j)
    :return: Normalised result
"""
if x is None:
x = engine.arange(len(y))
if fftpack is None:
fftpack = scipyfftpack
yzero = (y == 0).sum() == len(y)
if yzero:
return convert_complex((np.zeros_like(x), np.zeros_like(x)), type=return_type)
if y.sum() == 0:
mid = x.mean()
else:
mid = (x * y).sum() / y.sum()
ftr = fftpack.fft(engine.fft.fftshift(y))
ftr /= abs(ftr[0])
meanlen = int(len(x) / 2)
mag = abs(ftr)
phase = engine.angle(ftr)
phase_shift = (mid - meanlen) * x
if plot:
oldphase = phase.copy()
phase += phase_shift * engine.pi * 2 / len(x)
phase[meanlen:] = -engine.flip(phase[:meanlen], axis=0)
if plot:
plotx = x[:meanlen]
if 0:
s = engine.fft.fftshift
else:
s = lambda _: _
fig, (a1, a2) = plt.subplots(1,2)
a1.plot(x, s(ftr.real), label='real')
a1.plot(x, s(ftr.imag), label='imag')
# a1.plot(plotx, weights, label='weights')
a1.plot(x, s(mag), label='mag')
a2.plot(x, s(oldphase), label='oldphase')
a2.plot(x, s(phase), label='oldwrappedphase')
# a2.plot(x, s(wrapped_phase), label='newphase')
a2.plot(x, s(phase_shift), label="phaseshift")
nr, ni = convert_complex_from_polar((mag, phase), COMPLEX_CARTESIAN_REAL_TUPLE)
a1.plot(x, s(nr), label="new real")
a1.plot(x, s(ni), label="new imag")
a1.legend()
a2.legend()
plt.show()
return convert_complex_from_polar((mag, phase), return_type)
def _test_phase_normalisation():
a = fastgauss(np.arange(64)**2, 1.0, 32**2, 14**2)
# a = fastgauss(np.arange(64), 1.0, 32, 5)
b = fastgauss(np.arange(64)**2, 1.0, 32**2, 14**2)
# b = np.flip(fastgauss(np.arange(64), 1.0, 32, 5))
a /= a.max()
b /= b.max()
a = np.roll(a, -7)
b = np.roll(b, 4)
ftr, fti = normalised_centreing_fft(np.arange(64), a, return_type=COMPLEX_CARTESIAN_REAL_TUPLE, plot=True)
ftr_b, fti_b = normalised_centreing_fft(np.arange(64), b, return_type=COMPLEX_CARTESIAN_REAL_TUPLE, plot=True)
plt.plot(a, '--', color="green", alpha=0.5)
plt.plot(b, '--', color="gray", alpha=0.5)
plt.plot(ftr[:16], color="red", alpha=0.5)
plt.plot(fti[:16], color="purple", alpha=0.5)
plt.plot(ftr_b[:16], color="orange", alpha=0.5)
plt.plot(fti_b[:16], color="blue", alpha=0.5)
# plt.plot(ft.real[:16], '--', color="red", alpha=0.5)
# plt.plot(ft.imag[:16], '--', color="purple", alpha=0.5)
newgauss = np.fft.fftshift(np.fft.ifft(ftr + 1j * fti))
newgauss_b = np.fft.fftshift(np.fft.ifft(ftr_b + 1j * fti_b))
plt.plot(newgauss.real / newgauss.real.max(), color="green", alpha=0.5)
plt.plot(newgauss_b.real / newgauss_b.real.max(), color="black", alpha=0.5)
plt.show()
def __test_normalisation2():
x = np.arange(64)
# ga = np.roll(fastgauss(np.arange(64)**2, 1.0, 32**2, 14**2), -2)
# gb = np.roll(fastgauss(np.arange(64)**2, 1.0, 32**2, 14**2), 3)
ga = np.roll(fastgauss(np.arange(64), 1.0, 32, 3.5), -18)
gb = np.roll(fastgauss(np.arange(64), 1.0, 32, 3.5), 2)
print((x*ga).sum() / ga.sum())
print((x*gb).sum() / gb.sum())
f, (ax1, ax2) = plt.subplots(1,2)
ax1.plot(x, ga, label="ga")
ax1.plot(x, gb, label="gb")
    ffta = np.fft.fft(np.fft.fftshift(ga))
"""
Model predictive control sample code without modeling tool (cvxpy)
author: <NAME>
"""
import cvxpy
import numpy as np
import matplotlib.pyplot as plt
import cvxopt
from cvxopt import matrix
import scipy.linalg
DEBUG_ = False
def use_modeling_tool(A, B, N, Q, R, P, x0, umax=None, umin=None, xmin=None, xmax=None):
"""
solve MPC with modeling tool for test
"""
(nx, nu) = B.shape
# mpc calculation
x = cvxpy.Variable(nx, N + 1)
u = cvxpy.Variable(nu, N)
costlist = 0.0
constrlist = []
for t in range(N):
costlist += 0.5 * cvxpy.quad_form(x[:, t], Q)
costlist += 0.5 * cvxpy.quad_form(u[:, t], R)
constrlist += [x[:, t + 1] == A * x[:, t] + B * u[:, t]]
if xmin is not None:
constrlist += [x[:, t] >= xmin]
if xmax is not None:
constrlist += [x[:, t] <= xmax]
costlist += 0.5 * cvxpy.quad_form(x[:, N], P) # terminal cost
if xmin is not None:
constrlist += [x[:, N] >= xmin]
if xmax is not None:
constrlist += [x[:, N] <= xmax]
prob = cvxpy.Problem(cvxpy.Minimize(costlist), constrlist)
    prob.constraints += [x[:, 0] == x0]  # initial state constraints
if umax is not None:
prob.constraints += [u <= umax] # input constraints
if umin is not None:
prob.constraints += [u >= umin] # input constraints
prob.solve(verbose=True)
return x.value, u.value
def opt_mpc_with_input_const(A, B, N, Q, R, P, x0, umax=None, umin=None):
"""
optimize model predictive control only with input constraints
    (if you want to solve a problem with state constraints, you can use opt_mpc_with_state_constr())
return
x: state
u: input
"""
(nx, nu) = B.shape
# calc AA
Ai = A
AA = Ai
for i in range(2, N + 1):
Ai = A * Ai
AA = np.vstack((AA, Ai))
# print(AA)
# calc BB
AiB = B
BB = np.kron(np.eye(N), AiB)
for i in range(1, N):
AiB = A * AiB
BB += np.kron(np.diag(np.ones(N - i), -i), AiB)
# print(BB)
RR = np.kron(np.eye(N), R)
QQ = scipy.linalg.block_diag(np.kron(np.eye(N - 1), Q), P)
H = (BB.T * QQ * BB + RR)
# print(H)
gx0 = BB.T * QQ * AA * x0
# print(gx0)
P = matrix(H)
q = matrix(gx0)
if umax is None and umin is None:
sol = cvxopt.solvers.qp(P, q)
# print(sol)
else:
G = np.zeros((0, nu * N))
h = np.zeros((0, 1))
if umax is not None:
tG = np.eye(N * nu)
th = np.kron(np.ones((N * nu, 1)), umax)
G = np.vstack([G, tG])
h = np.vstack([h, th])
if umin is not None:
tG = np.eye(N * nu) * -1.0
th = np.kron(np.ones((N * nu, 1)), umin * -1.0)
G = np.vstack([G, tG])
h = np.vstack([h, th])
G = matrix(G)
h = matrix(h)
sol = cvxopt.solvers.qp(P, q, G, h)
u = np.matrix(sol["x"])
# recover x
xx = AA * x0 + BB * u
x = np.vstack((x0.T, xx.reshape(N, nx)))
return x, u
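# Hedged sketch (not part of the original module): a minimal check of the condensed
# prediction form x_k = A^k x0 + sum_j A^(k-1-j) B u_j that opt_mpc_with_input_const
# stacks into its AA and BB matrices. `_demo_prediction_matrices` is introduced purely
# for illustration.
def _demo_prediction_matrices():
    A = np.matrix([[0.8, 1.0], [0.0, 0.9]])
    B = np.matrix([[-1.0], [2.0]])
    x0 = np.matrix([[1.0], [2.0]])
    u0 = np.matrix([[0.5]])
    u1 = np.matrix([[-0.3]])
    # step-by-step rollout of the dynamics
    x1 = A * x0 + B * u0
    x2 = A * x1 + B * u1
    # condensed (stacked) form for k = 2
    x2_condensed = A * A * x0 + A * B * u0 + B * u1
    assert np.allclose(x2, x2_condensed)
    return x2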
def generate_inequalities_constraints_mat(N, nx, nu, xmin, xmax, umin, umax):
"""
    generate matrices of inequality constraints
return G, h
"""
G = np.zeros((0, (nx + nu) * N))
h = np.zeros((0, 1))
if umax is not None:
tG = np.hstack([np.eye(N * nu), np.zeros((N * nu, nx * N))])
th = np.kron(np.ones((N * nu, 1)), umax)
G = np.vstack([G, tG])
h = np.vstack([h, th])
if umin is not None:
tG = np.hstack([np.eye(N * nu) * -1.0, np.zeros((N * nu, nx * N))])
th = np.kron(np.ones((N, 1)), umin * -1.0)
G = np.vstack([G, tG])
h = np.vstack([h, th])
if xmax is not None:
tG = np.hstack([np.zeros((N * nx, nu * N)), np.eye(N * nx)])
th = np.kron(np.ones((N, 1)), xmax)
G = np.vstack([G, tG])
h = np.vstack([h, th])
if xmin is not None:
tG = np.hstack([np.zeros((N * nx, nu * N)), np.eye(N * nx) * -1.0])
th = np.kron(np.ones((N, 1)), xmin * -1.0)
G = np.vstack([G, tG])
h = np.vstack([h, th])
return G, h
def opt_mpc_with_state_constr(A, B, N, Q, R, P, x0, xmin=None, xmax=None, umax=None, umin=None):
"""
optimize MPC problem with state and (or) input constraints
return
x: state
u: input
"""
(nx, nu) = B.shape
H = scipy.linalg.block_diag(np.kron(np.eye(N), R), np.kron(
np.eye(N - 1), Q), np.eye(P.shape[0]))
# print(H)
# calc Ae
Aeu = np.kron(np.eye(N), -B)
# print(Aeu)
# print(Aeu.shape)
Aex = scipy.linalg.block_diag(np.eye((N - 1) * nx), P)
Aex -= np.kron(np.diag([1.0] * (N - 1), k=-1), A)
# print(Aex)
# print(Aex.shape)
Ae = np.hstack((Aeu, Aex))
# print(Ae.shape)
# calc be
be = np.vstack((A, np.zeros(((N - 1) * nx, nx)))) * x0
# print(be)
np.set_printoptions(precision=3)
# print(H.shape)
# print(H)
# print(np.zeros((N * nx + N * nu, 1)))
# print(Ae)
# print(be)
# === optimization ===
P = matrix(H)
q = matrix(np.zeros((N * nx + N * nu, 1)))
A = matrix(Ae)
b = matrix(be)
if umax is None and umin is None:
sol = cvxopt.solvers.qp(P, q, A=A, b=b)
else:
G, h = generate_inequalities_constraints_mat(
N, nx, nu, xmin, xmax, umin, umax)
# print(G)
# print(h)
G = matrix(G)
h = matrix(h)
sol = cvxopt.solvers.qp(P, q, G, h, A=A, b=b)
# print(sol)
fx = np.matrix(sol["x"])
# print(fx)
u = fx[0:N * nu].reshape(N, nu).T
x = fx[-N * nx:].reshape(N, nx).T
x = np.hstack((x0, x))
# print(x)
# print(u)
return x, u
def test1():
print("start!!")
A = np.matrix([[0.8, 1.0], [0, 0.9]])
# print(A)
B = np.matrix([[-1.0], [2.0]])
# print(B)
(nx, nu) = B.shape
# print(nx, nu)
N = 10 # number of horizon
Q = np.eye(nx)
# print(Q)
R = np.eye(nu)
# print(R)
P = np.eye(nx)
# print(P)
# umax = 0.7
x0 = np.matrix([[1.0], [2.0]]) # init state
x, u = use_modeling_tool(A, B, N, Q, R, P, x0)
rx1 = np.array(x[0, :]).flatten()
rx2 = np.array(x[1, :]).flatten()
ru = np.array(u[0, :]).flatten()
if DEBUG_:
flg, ax = plt.subplots(1)
plt.plot(rx1, label="x1")
plt.plot(rx2, label="x2")
plt.plot(ru, label="u")
plt.legend()
plt.grid(True)
x, u = opt_mpc_with_input_const(A, B, N, Q, R, P, x0)
x1 = np.array(x[:, 0]).flatten()
x2 = np.array(x[:, 1]).flatten()
u = np.array(u).flatten()
if DEBUG_:
# flg, ax = plt.subplots(1)
plt.plot(x1, '*r', label="x1")
plt.plot(x2, '*b', label="x2")
plt.plot(u, '*k', label="u")
plt.legend()
plt.grid(True)
plt.show()
test_output_check(rx1, rx2, ru, x1, x2, u)
def test2():
print("start!!")
A = np.matrix([[0.8, 1.0], [0, 0.9]])
B = np.matrix([[-1.0], [2.0]])
(nx, nu) = B.shape
N = 10 # number of horizon
Q = np.eye(nx)
R = np.eye(nu)
P = np.eye(nx)
umax = 0.7
umin = -0.7
x0 = np.matrix([[1.0], [2.0]]) # init state
x, u = use_modeling_tool(A, B, N, Q, R, P, x0, umax=umax, umin=umin)
# x, u = use_modeling_tool(A, B, N, Q, R, P, x0, umin=umin)
rx1 = np.array(x[0, :]).flatten()
rx2 = np.array(x[1, :]).flatten()
ru = np.array(u[0, :]).flatten()
if DEBUG_:
flg, ax = plt.subplots(1)
plt.plot(rx1, label="x1")
plt.plot(rx2, label="x2")
plt.plot(ru, label="u")
plt.legend()
plt.grid(True)
x, u = opt_mpc_with_input_const(A, B, N, Q, R, P, x0, umax=umax, umin=umin)
x1 = np.array(x[:, 0]).flatten()
x2 = np.array(x[:, 1]).flatten()
u = np.array(u).flatten()
if DEBUG_:
# flg, ax = plt.subplots(1)
plt.plot(x1, '*r', label="x1")
plt.plot(x2, '*b', label="x2")
plt.plot(u, '*k', label="u")
plt.legend()
plt.grid(True)
plt.show()
test_output_check(rx1, rx2, ru, x1, x2, u)
def test3():
print("start!!")
A = np.matrix([[0.8, 1.0], [0, 0.9]])
B = np.matrix([[-1.0], [2.0]])
(nx, nu) = B.shape
N = 10 # number of horizon
Q = np.eye(nx)
R = np.eye(nu)
P = np.eye(nx)
umax = 0.7
umin = -0.7
x0 = np.matrix([[1.0], [2.0]]) # init state
x, u = use_modeling_tool(A, B, N, Q, R, P, x0, umax=umax, umin=umin)
rx1 = np.array(x[0, :]).flatten()
rx2 = np.array(x[1, :]).flatten()
ru = np.array(u[0, :]).flatten()
if DEBUG_:
flg, ax = plt.subplots(1)
plt.plot(rx1, label="x1")
plt.plot(rx2, label="x2")
plt.plot(ru, label="u")
plt.legend()
plt.grid(True)
x, u = opt_mpc_with_state_constr(
A, B, N, Q, R, P, x0, umax=umax, umin=umin)
x1 = np.array(x[0, :]).flatten()
x2 = np.array(x[1, :]).flatten()
u = np.array(u).flatten()
if DEBUG_:
# flg, ax = plt.subplots(1)
plt.plot(x1, '*r', label="x1")
plt.plot(x2, '*b', label="x2")
plt.plot(u, '*k', label="u")
plt.legend()
plt.grid(True)
plt.show()
test_output_check(rx1, rx2, ru, x1, x2, u)
def test4():
print("start!!")
A = np.matrix([[0.8, 1.0], [0, 0.9]])
B = np.matrix([[-1.0], [2.0]])
(nx, nu) = B.shape
N = 10 # number of horizon
Q = np.eye(nx)
R = np.eye(nu)
P = np.eye(nx)
x0 = np.matrix([[1.0], [2.0]]) # init state
x, u = use_modeling_tool(A, B, N, Q, R, P, x0)
rx1 = np.array(x[0, :]).flatten()
rx2 = np.array(x[1, :]).flatten()
ru = np.array(u[0, :]).flatten()
if DEBUG_:
flg, ax = plt.subplots(1)
plt.plot(rx1, label="x1")
plt.plot(rx2, label="x2")
plt.plot(ru, label="u")
plt.legend()
plt.grid(True)
x, u = opt_mpc_with_state_constr(A, B, N, Q, R, P, x0)
x1 = np.array(x[0, :]).flatten()
x2 = np.array(x[1, :]).flatten()
u = np.array(u).flatten()
if DEBUG_:
# flg, ax = plt.subplots(1)
plt.plot(x1, '*r', label="x1")
plt.plot(x2, '*b', label="x2")
plt.plot(u, '*k', label="u")
plt.legend()
plt.grid(True)
test_output_check(rx1, rx2, ru, x1, x2, u)
if DEBUG_:
plt.show()
def test5():
print("start!!")
A = np.matrix([[0.8, 1.0], [0, 0.9]])
B = np.matrix([[-1.0], [2.0]])
(nx, nu) = B.shape
N = 10 # number of horizon
Q = np.eye(nx)
R = np.eye(nu)
P = np.eye(nx)
x0 = np.matrix([[1.0], [2.0]]) # init state
umax = 0.7
x, u = use_modeling_tool(A, B, N, Q, R, P, x0, umax=umax)
rx1 = np.array(x[0, :]).flatten()
rx2 = np.array(x[1, :]).flatten()
ru = np.array(u[0, :]).flatten()
if DEBUG_:
flg, ax = plt.subplots(1)
plt.plot(rx1, label="x1")
plt.plot(rx2, label="x2")
plt.plot(ru, label="u")
plt.legend()
plt.grid(True)
x, u = opt_mpc_with_state_constr(A, B, N, Q, R, P, x0, umax=umax)
x1 = np.array(x[0, :]).flatten()
x2 = np.array(x[1, :]).flatten()
u = np.array(u).flatten()
if DEBUG_:
# flg, ax = plt.subplots(1)
plt.plot(x1, '*r', label="x1")
plt.plot(x2, '*b', label="x2")
plt.plot(u, '*k', label="u")
plt.legend()
plt.grid(True)
plt.show()
test_output_check(rx1, rx2, ru, x1, x2, u)
def test6():
print("start!!")
A = np.matrix([[0.8, 1.0], [0, 0.9]])
B = np.matrix([[-1.0], [2.0]])
(nx, nu) = B.shape
N = 10 # number of horizon
Q = np.eye(nx)
R = np.eye(nu)
P = np.eye(nx)
x0 = np.matrix([[1.0], [2.0]]) # init state
umax = 0.7
umin = -0.7
x0 = np.matrix([[1.0], [2.0]]) # init state
xmin = np.matrix([[-3.5], [-0.5]]) # state constraints
xmax = np.matrix([[3.5], [2.0]]) # state constraints
x, u = use_modeling_tool(A, B, N, Q, R, P, x0,
umax=umax, umin=umin, xmin=xmin, xmax=xmax)
rx1 = np.array(x[0, :]).flatten()
rx2 = np.array(x[1, :]).flatten()
ru = np.array(u[0, :]).flatten()
if DEBUG_:
flg, ax = plt.subplots(1)
plt.plot(rx1, label="x1")
plt.plot(rx2, label="x2")
plt.plot(ru, label="u")
plt.legend()
plt.grid(True)
x, u = opt_mpc_with_state_constr(
A, B, N, Q, R, P, x0, umax=umax, umin=umin, xmin=xmin, xmax=xmax)
x1 = np.array(x[0, :]).flatten()
x2 = np.array(x[1, :]).flatten()
u = np.array(u).flatten()
if DEBUG_:
# flg, ax = plt.subplots(1)
plt.plot(x1, '*r', label="x1")
plt.plot(x2, '*b', label="x2")
plt.plot(u, '*k', label="u")
plt.legend()
plt.grid(True)
plt.show()
test_output_check(rx1, rx2, ru, x1, x2, u)
def test7():
print("start!!")
A = np.matrix([[0.8, 1.0], [0, 0.9]])
B = np.matrix([[-1.0], [2.0]])
(nx, nu) = B.shape
N = 3 # number of horizon
Q = np.eye(nx)
R = np.eye(nu)
P = np.eye(nx)
x0 = np.matrix([[1.0], [2.0]]) # init state
umax = 0.7
umin = -0.7
x0 = np.matrix([[1.0], [2.0]]) # init state
# xmin = np.matrix([[-3.5], [-0.5]]) # state constraints
# xmax = np.matrix([[3.5], [2.0]]) # state constraints
x, u = use_modeling_tool(A, B, N, Q, R, P, x0, umax=umax, umin=umin)
# x, u = use_modeling_tool(A, B, N, Q, R, P, x0, umax=umax, umin=umin, xmin=xmin, xmax=xmax)
# x, u = use_modeling_tool(A, B, N, Q, R, P, x0)
rx1 = np.array(x[0, :]).flatten()
rx2 = np.array(x[1, :]).flatten()
    ru = np.array(u[0, :]).flatten()
from __future__ import division, print_function
import vtk
import numpy as np
from vtkplotter import settings
from vtk.util.numpy_support import numpy_to_vtk
import vtkplotter.utils as utils
import vtkplotter.colors as colors
from vtkplotter.actors import Actor, Assembly
import vtkplotter.docs as docs
__doc__ = (
"""
Submodule to generate basic geometric shapes.
"""
+ docs._defs
)
__all__ = [
"Point",
"Points",
"Line",
"Tube",
"Lines",
"Ribbon",
"Arrow",
"Arrows",
"FlatArrow",
"Polygon",
"Rectangle",
"Disc",
"Sphere",
"Spheres",
"Earth",
"Ellipsoid",
"Grid",
"Plane",
"Box",
"Cube",
"Spring",
"Cylinder",
"Cone",
"Pyramid",
"Torus",
"Paraboloid",
"Hyperboloid",
"Text",
"Latex",
"Glyph",
"Tensors",
]
########################################################################
def Point(pos=(0, 0, 0), r=12, c="red", alpha=1):
"""Create a simple point actor."""
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
actor = Points([pos], r, c, alpha)
return actor
def Points(plist, r=5, c="gray", alpha=1):
"""
Build a point ``Actor`` for a list of 2D/3D points.
Both shapes (N, 3) or (3, N) are accepted as input - if N>3.
For very large point clouds a list of colors and alpha can be assigned to each
point in the form `c=[(R,G,B,A), ... ]` where `0 <= R < 256, ... 0 <= A < 256`.
:param float r: point radius.
:param c: color name, number, or list of [R,G,B] colors of same length as plist.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
|manypoints.py|_ |lorenz.py|_
|lorenz|
"""
################ interpret the input format:
n = len(plist)
if n == 0:
return None
elif n == 3: # assume plist is in the format [all_x, all_y, all_z]
if utils.isSequence(plist[0]) and len(plist[0]) > 3:
plist = tuple(zip(plist[0], plist[1], plist[2]))
elif n == 2: # assume plist is in the format [all_x, all_y, 0]
if utils.isSequence(plist[0]) and len(plist[0]) > 3:
plist = tuple(zip(plist[0], plist[1], [0] * len(plist[0])))
if len(plist[0]) == 2: #make it 3d
plist = np.c_[np.array(plist), np.zeros(len(plist))]
################
if ( (utils.isSequence(c) and (len(c) > 3 or len(c[0]) == 4))
or utils.isSequence(alpha)
):
actor = _PointsColors(plist, r, c, alpha)
else:
n = len(plist) # refresh
sourcePoints = vtk.vtkPoints()
sourceVertices = vtk.vtkCellArray()
is3d = len(plist[0]) > 2
if is3d: # its faster
for pt in plist:
aid = sourcePoints.InsertNextPoint(pt)
sourceVertices.InsertNextCell(1)
sourceVertices.InsertCellPoint(aid)
else:
for pt in plist:
aid = sourcePoints.InsertNextPoint(pt[0], pt[1], 0)
sourceVertices.InsertNextCell(1)
sourceVertices.InsertCellPoint(aid)
pd = vtk.vtkPolyData()
pd.SetPoints(sourcePoints)
pd.SetVerts(sourceVertices)
if n == 1: # passing just one point
pd.GetPoints().SetPoint(0, [0, 0, 0])
else:
pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True))
actor = Actor(pd, c, alpha)
actor.GetProperty().SetPointSize(r)
if n == 1:
actor.SetPosition(plist[0])
settings.collectable_actors.append(actor)
return actor
def _PointsColors(plist, r, cols, alpha):
n = len(plist)
if n != len(cols):
colors.printc("~times mismatch in Points() colors", n, len(cols), c=1)
raise RuntimeError()
src = vtk.vtkPointSource()
src.SetNumberOfPoints(n)
src.Update()
vgf = vtk.vtkVertexGlyphFilter()
vgf.SetInputData(src.GetOutput())
vgf.Update()
pd = vgf.GetOutput()
pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True))
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(4)
ucols.SetName("pointsRGBA")
if utils.isSequence(alpha):
if len(alpha) != n:
colors.printc("~times mismatch in Points() alphas", n, len(alpha), c=1)
raise RuntimeError()
alphas = alpha
alpha = 1
else:
alphas = (alpha,) * n
if utils.isSequence(cols):
c = None
if len(cols[0]) == 4:
for i in range(n): # FAST
rc,gc,bc,ac = cols[i]
ucols.InsertNextTuple4(rc, gc, bc, ac)
else:
for i in range(n): # SLOW
rc,gc,bc = colors.getColor(cols[i])
ucols.InsertNextTuple4(rc*255, gc*255, bc*255, alphas[i]*255)
else:
c = cols
pd.GetPointData().SetScalars(ucols)
actor = Actor(pd, c, alpha)
actor.mapper.ScalarVisibilityOn()
actor.GetProperty().SetInterpolationToFlat()
actor.GetProperty().SetPointSize(r)
return actor
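# Hedged usage sketch (not part of the original module): a point cloud with one RGBA
# colour per point, which routes through _PointsColors above. Wrapped in a function so
# nothing is rendered at import time; `_demo_coloured_points` is introduced purely for
# illustration.
def _demo_coloured_points():
    pts = np.random.rand(100, 3).tolist()                      # 100 random 3D points
    rgba = np.random.randint(0, 256, size=(100, 4)).tolist()   # one (R,G,B,A) per point
    return Points(pts, r=6, c=rgba, alpha=1)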
def Glyph(actor, glyphObj, orientationArray=None,
scaleByVectorSize=False, tol=0, c=None, alpha=1):
"""
At each vertex of a mesh, another mesh - a `'glyph'` - is shown with
various orientation options and coloring.
    Color can be specified as a colormap which maps the size of the orientation
vectors in `orientationArray`.
:param orientationArray: list of vectors, ``vtkAbstractArray``
or the name of an already existing points array.
:type orientationArray: list, str, vtkAbstractArray
:param bool scaleByVectorSize: glyph mesh is scaled by the size of the vectors.
:param float tol: set a minimum separation between two close glyphs
(not compatible with `orientationArray` being a list).
|glyphs.py|_ |glyphs_arrows.py|_
|glyphs| |glyphs_arrows|
"""
cmap = None
# user passing a color map to map orientationArray sizes
if c in list(colors._mapscales.cmap_d.keys()):
cmap = c
c = None
if tol:
actor = actor.clone().clean(tol)
poly = actor.polydata()
# user is passing an array of point colors
if utils.isSequence(c) and len(c) > 3:
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(3)
ucols.SetName("glyphRGB")
for col in c:
cl = colors.getColor(col)
ucols.InsertNextTuple3(cl[0]*255, cl[1]*255, cl[2]*255)
poly.GetPointData().SetScalars(ucols)
c = None
if isinstance(glyphObj, Actor):
glyphObj = glyphObj.clean().polydata()
gly = vtk.vtkGlyph3D()
gly.SetInputData(poly)
gly.SetSourceData(glyphObj)
gly.SetColorModeToColorByScalar()
if orientationArray is not None:
gly.OrientOn()
gly.SetScaleFactor(1)
if scaleByVectorSize:
gly.SetScaleModeToScaleByVector()
else:
gly.SetScaleModeToDataScalingOff()
if isinstance(orientationArray, str):
if orientationArray.lower() == "normals":
gly.SetVectorModeToUseNormal()
else: # passing a name
gly.SetInputArrayToProcess(0, 0, 0, 0, orientationArray)
gly.SetVectorModeToUseVector()
elif isinstance(orientationArray, vtk.vtkAbstractArray):
poly.GetPointData().AddArray(orientationArray)
poly.GetPointData().SetActiveVectors("glyph_vectors")
gly.SetInputArrayToProcess(0, 0, 0, 0, "glyph_vectors")
gly.SetVectorModeToUseVector()
elif utils.isSequence(orientationArray) and not tol: # passing a list
actor.addPointVectors(orientationArray, "glyph_vectors")
gly.SetInputArrayToProcess(0, 0, 0, 0, "glyph_vectors")
if cmap:
gly.SetColorModeToColorByVector()
else:
gly.SetColorModeToColorByScalar()
gly.Update()
pd = gly.GetOutput()
gactor = Actor(pd, c, alpha)
if cmap:
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(512)
lut.Build()
for i in range(512):
r, g, b = colors.colorMap(i, cmap, 0, 512)
lut.SetTableValue(i, r, g, b, 1)
gactor.mapper.SetLookupTable(lut)
gactor.mapper.ScalarVisibilityOn()
gactor.mapper.SetScalarModeToUsePointData()
rng = pd.GetPointData().GetScalars().GetRange()
gactor.mapper.SetScalarRange(rng[0], rng[1])
gactor.GetProperty().SetInterpolationToFlat()
settings.collectable_actors.append(gactor)
return gactor
def Tensors(domain, source='ellipsoid', useEigenValues=True, isSymmetric=True,
threeAxes=False, scale=1, maxScale=None, length=None,
c=None, alpha=1):
"""Geometric representation of tensors defined on a domain or set of points.
    Tensors can be scaled and/or rotated according to the source at each input point.
Scaling and rotation is controlled by the eigenvalues/eigenvectors of the symmetrical part
of the tensor as follows:
For each tensor, the eigenvalues (and associated eigenvectors) are sorted
to determine the major, medium, and minor eigenvalues/eigenvectors.
The eigenvalue decomposition only makes sense for symmetric tensors,
hence the need to only consider the symmetric part of the tensor,
which is 1/2*(T+T.transposed()).
:param str source: preset type of source shape
['ellipsoid', 'cylinder', 'cube' or any specified ``Actor``]
:param bool useEigenValues: color source glyph using the eigenvalues or by scalars.
:param bool threeAxes: if `False` scale the source in the x-direction,
the medium in the y-direction, and the minor in the z-direction.
Then, the source is rotated so that the glyph's local x-axis lies
along the major eigenvector, y-axis along the medium eigenvector, and z-axis along the minor.
If `True` three sources are produced, each of them oriented along an eigenvector
and scaled according to the corresponding eigenvector.
:param bool isSymmetric: If `True` each source glyph is mirrored (2 or 6 glyphs will be produced).
The x-axis of the source glyph will correspond to the eigenvector on output.
:param float length: distance from the origin to the tip of the source glyph along the x-axis
:param float scale: scaling factor of the source glyph.
:param float maxScale: clamp scaling at this factor.
|tensors| |tensors.py|_
"""
if 'ellip' in source:
src = vtk.vtkSphereSource()
src.SetPhiResolution(24)
src.SetThetaResolution(12)
elif 'cyl' in source:
src = vtk.vtkCylinderSource()
src.SetResolution(48)
src.CappingOn()
elif source == 'cube':
src = vtk.vtkCubeSource()
else:
src = source.normalize().polydata(False)
src.Update()
tg = vtk.vtkTensorGlyph()
tg.SetInputData(domain.GetMapper().GetInput())
tg.SetSourceData(src.GetOutput())
if c is None:
tg.ColorGlyphsOn()
else:
tg.ColorGlyphsOff()
tg.SetSymmetric(int(isSymmetric))
if length is not None:
tg.SetLength(length)
if useEigenValues:
tg.ExtractEigenvaluesOn()
tg.SetColorModeToEigenvalues()
else:
tg.SetColorModeToScalars()
tg.SetThreeGlyphs(threeAxes)
tg.ScalingOn()
tg.SetScaleFactor(scale)
if maxScale is None:
tg.ClampScalingOn()
maxScale = scale*10
tg.SetMaxScaleFactor(maxScale)
tg.Update()
tgn = vtk.vtkPolyDataNormals()
tgn.SetInputData(tg.GetOutput())
tgn.Update()
return Actor(tgn.GetOutput(), c, alpha)
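# Illustrative sketch (assumes `domain` is an Actor whose point data carries 9-component
# tensors, e.g. loaded from a VTK file with point tensors attached):
# >>> ellipsoids = Tensors(domain, source='ellipsoid', scale=0.02)
# >>> show(domain.alpha(0.1), ellipsoids)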
def Line(p0, p1=None, c="r", alpha=1, lw=1, dotted=False, res=None):
"""
Build the line segment between points `p0` and `p1`.
If `p0` is a list of points returns the line connecting them.
A 2D set of coords can also be passed as p0=[x..], p1=[y..].
:param c: color name, number, or list of [R,G,B] colors.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
:param lw: line width.
:param bool dotted: draw a dotted line
:param int res: number of intermediate points in the segment
"""
# detect if user is passing a 2D list of points as p0=xlist, p1=ylist:
if len(p0) > 3:
if not utils.isSequence(p0[0]) and not utils.isSequence(p1[0]) and len(p0)==len(p1):
# assume input is 2D xlist, ylist
p0 = list(zip(p0, p1))
p1 = None
# detect if user is passing a list of points:
if utils.isSequence(p0[0]):
ppoints = vtk.vtkPoints() # Generate the polyline
dim = len((p0[0]))
if dim == 2:
for i, p in enumerate(p0):
ppoints.InsertPoint(i, p[0], p[1], 0)
else:
ppoints.SetData(numpy_to_vtk(p0, deep=True))
lines = vtk.vtkCellArray() # Create the polyline.
lines.InsertNextCell(len(p0))
for i in range(len(p0)):
lines.InsertCellPoint(i)
poly = vtk.vtkPolyData()
poly.SetPoints(ppoints)
poly.SetLines(lines)
else: # or just 2 points to link
lineSource = vtk.vtkLineSource()
lineSource.SetPoint1(p0)
lineSource.SetPoint2(p1)
if res:
lineSource.SetResolution(res)
lineSource.Update()
poly = lineSource.GetOutput()
actor = Actor(poly, c, alpha)
actor.GetProperty().SetLineWidth(lw)
if dotted:
actor.GetProperty().SetLineStipplePattern(0xF0F0)
actor.GetProperty().SetLineStippleRepeatFactor(1)
actor.base = np.array(p0)
actor.top = np.array(p1)
settings.collectable_actors.append(actor)
return actor
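# Illustrative sketches of the three accepted calling conventions:
# >>> l1 = Line([0, 0, 0], [1, 1, 1], lw=3)                     # a single segment
# >>> l2 = Line([(0, 0), (1, 1), (2, 0), (3, 1)], c="blue")     # polyline through 2D points
# >>> l3 = Line([0, 1, 2, 3, 4], [1, 0, 1, 0, 1], dotted=True)  # p0=xlist, p1=ylist form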
def Lines(startPoints, endPoints=None, c=None, alpha=1, lw=1, dotted=False, scale=1):
"""
Build the line segments between two lists of points `startPoints` and `endPoints`.
`startPoints` can be also passed in the form ``[[point1, point2], ...]``.
:param float scale: apply a rescaling factor to the lengths.
|lines|
.. hint:: |fitspheres2.py|_
"""
if endPoints is not None:
startPoints = list(zip(startPoints, endPoints))
polylns = vtk.vtkAppendPolyData()
for twopts in startPoints:
lineSource = vtk.vtkLineSource()
lineSource.SetPoint1(twopts[0])
if scale != 1:
vers = (np.array(twopts[1]) - twopts[0]) * scale
pt2 = np.array(twopts[0]) + vers
else:
pt2 = twopts[1]
lineSource.SetPoint2(pt2)
polylns.AddInputConnection(lineSource.GetOutputPort())
polylns.Update()
actor = Actor(polylns.GetOutput(), c, alpha)
actor.GetProperty().SetLineWidth(lw)
if dotted:
actor.GetProperty().SetLineStipplePattern(0xF0F0)
actor.GetProperty().SetLineStippleRepeatFactor(1)
settings.collectable_actors.append(actor)
return actor
def Tube(points, r=1, c="r", alpha=1, res=12):
"""Build a tube along the line defined by a set of points.
:param r: constant radius or list of radii.
:type r: float, list
:param c: constant color or list of colors for each point.
:type c: float, list
|ribbon.py|_ |tube.py|_
|ribbon| |tube|
"""
ppoints = vtk.vtkPoints() # Generate the polyline
ppoints.SetData(numpy_to_vtk(points, deep=True))
lines = vtk.vtkCellArray()
lines.InsertNextCell(len(points))
for i in range(len(points)):
lines.InsertCellPoint(i)
polyln = vtk.vtkPolyData()
polyln.SetPoints(ppoints)
polyln.SetLines(lines)
tuf = vtk.vtkTubeFilter()
tuf.CappingOn()
tuf.SetNumberOfSides(res)
tuf.SetInputData(polyln)
if utils.isSequence(r):
arr = numpy_to_vtk(np.ascontiguousarray(r), deep=True)
arr.SetName("TubeRadius")
polyln.GetPointData().AddArray(arr)
polyln.GetPointData().SetActiveScalars("TubeRadius")
tuf.SetVaryRadiusToVaryRadiusByAbsoluteScalar()
else:
tuf.SetRadius(r)
usingColScals = False
if utils.isSequence(c) and len(c) != 3:
usingColScals = True
cc = vtk.vtkUnsignedCharArray()
cc.SetName("TubeColors")
cc.SetNumberOfComponents(3)
cc.SetNumberOfTuples(len(c))
for i, ic in enumerate(c):
r, g, b = colors.getColor(ic)
cc.InsertTuple3(i, int(255 * r), int(255 * g), int(255 * b))
polyln.GetPointData().AddArray(cc)
c = None
tuf.Update()
polytu = tuf.GetOutput()
actor = Actor(polytu, c, alpha, computeNormals=0)
actor.phong()
if usingColScals:
actor.mapper.SetScalarModeToUsePointFieldData()
actor.mapper.ScalarVisibilityOn()
actor.mapper.SelectColorArray("TubeColors")
actor.mapper.Modified()
actor.base = np.array(points[0])
actor.top = np.array(points[-1])
settings.collectable_actors.append(actor)
return actor
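# Illustrative sketch: a tube with per-point radii and per-point colors (both lists must
# match the number of points; the values here are arbitrary):
# >>> pts = [(np.sin(x), np.cos(x), x / 3.0) for x in np.arange(0, 6, 0.3)]
# >>> radii = np.linspace(0.05, 0.3, len(pts))
# >>> cols = [(i / len(pts), 0.2, 1.0 - i / len(pts)) for i in range(len(pts))]
# >>> t = Tube(pts, r=radii, c=cols, res=24)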
def Ribbon(line1, line2, c="m", alpha=1, res=(200, 5)):
"""Connect two lines to generate the surface inbetween.
|ribbon| |ribbon.py|_
"""
if isinstance(line1, Actor):
line1 = line1.coordinates()
if isinstance(line2, Actor):
line2 = line2.coordinates()
ppoints1 = vtk.vtkPoints() # Generate the polyline1
ppoints1.SetData(numpy_to_vtk(line1, deep=True))
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(len(line1))
for i in range(len(line1)):
lines1.InsertCellPoint(i)
poly1 = vtk.vtkPolyData()
poly1.SetPoints(ppoints1)
poly1.SetLines(lines1)
ppoints2 = vtk.vtkPoints() # Generate the polyline2
ppoints2.SetData(numpy_to_vtk(line2, deep=True))
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(len(line2))
for i in range(len(line2)):
lines2.InsertCellPoint(i)
poly2 = vtk.vtkPolyData()
poly2.SetPoints(ppoints2)
poly2.SetLines(lines2)
# build the lines
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(poly1.GetNumberOfPoints())
for i in range(poly1.GetNumberOfPoints()):
lines1.InsertCellPoint(i)
polygon1 = vtk.vtkPolyData()
polygon1.SetPoints(ppoints1)
polygon1.SetLines(lines1)
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(poly2.GetNumberOfPoints())
for i in range(poly2.GetNumberOfPoints()):
lines2.InsertCellPoint(i)
polygon2 = vtk.vtkPolyData()
polygon2.SetPoints(ppoints2)
polygon2.SetLines(lines2)
mergedPolyData = vtk.vtkAppendPolyData()
mergedPolyData.AddInputData(polygon1)
mergedPolyData.AddInputData(polygon2)
mergedPolyData.Update()
rsf = vtk.vtkRuledSurfaceFilter()
rsf.CloseSurfaceOff()
rsf.SetRuledModeToResample()
rsf.SetResolution(res[0], res[1])
rsf.SetInputData(mergedPolyData.GetOutput())
rsf.Update()
actor = Actor(rsf.GetOutput(), c=c, alpha=alpha)
settings.collectable_actors.append(actor)
return actor
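# Illustrative sketch: a ruled surface spanned between two nearby polylines:
# >>> l1 = [(x, np.sin(x), 0.0) for x in np.linspace(0, 10, 50)]
# >>> l2 = [(x, np.sin(x) + 1.0, 0.2) for x in np.linspace(0, 10, 50)]
# >>> rib = Ribbon(l1, l2, alpha=0.5)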
def FlatArrow(line1, line2, c="m", alpha=1, tipSize=1, tipWidth=1):
"""Build a 2D arrow in 3D space by joining two close lines.
|flatarrow| |flatarrow.py|_
"""
if isinstance(line1, Actor):
line1 = line1.coordinates()
if isinstance(line2, Actor):
line2 = line2.coordinates()
sm1, sm2 = np.array(line1[-1]), np.array(line2[-1])
v = (sm1-sm2)/3*tipWidth
p1 = sm1+v
p2 = sm2-v
pm1 = (sm1+sm2)/2
pm2 = (np.array(line1[-2])+np.array(line2[-2]))/2
pm12 = pm1-pm2
tip = pm12/np.linalg.norm(pm12)*np.linalg.norm(v)
import numpy
from cryspy import file_to_globaln
from cryspy.A_functions_base.function_1_matrices import calc_chi_sq
f_name = "main.rcif"
mem_obj = file_to_globaln(f_name)
mem_obj.calc_fr()
density_point = mem_obj.density_point
diffrn = mem_obj.diffrn_exp_mnbite
diffrn_refln = diffrn.diffrn_refln
# Analytical Derivatives
fr_e = numpy.array(diffrn_refln.fr, dtype=float)
fr_s = numpy.array(diffrn_refln.fr_sigma, dtype=float)
crystal = mem_obj.crystals()[0]
l_diffrn = mem_obj.experiments()
mem_parameters = mem_obj.mem_parameters
density_point.volume_unit_cell = crystal.cell.volume
density_point.number_unit_cell = \
mem_parameters.points_a * mem_parameters.points_b * \
mem_parameters.points_c
chi_iso_ferro = mem_parameters.chi_ferro
chi_iso_antiferro = mem_parameters.chi_antiferro
points_a = mem_parameters.points_a
points_b = mem_parameters.points_b
points_c = mem_parameters.points_c
prior_density = mem_parameters.prior_density
flag_two_channel = mem_parameters.method == "2channel"
gof_desired = mem_parameters.gof_desired
cell = crystal.cell
space_group = crystal.space_group
atom_site = crystal.atom_site
space_group_symop = space_group.full_space_group_symop
atom_site_susceptibility = crystal.atom_site_susceptibility
l_magnetic_labes = atom_site_susceptibility.label
l_f_nucl, l_v_2d_i, l_fr_e, l_fr_s = [], [], [], []
total_peaks = 0
for diffrn in l_diffrn:
diffrn_orient_matrix = diffrn.diffrn_orient_matrix
e_up = diffrn_orient_matrix.calc_e_up()
setup = diffrn.setup
field = float(setup.field)
h_loc = (field*e_up[0], field*e_up[1], field*e_up[2])
diffrn_refln = diffrn.diffrn_refln
ind_h = numpy.array(diffrn_refln.index_h, dtype=int)
ind_k = numpy.array(diffrn_refln.index_k, dtype=int)
ind_l = numpy.array(diffrn_refln.index_l, dtype=int)
total_peaks += ind_h.size
hkl = (ind_h, ind_k, ind_l)
fr_e = numpy.array(diffrn_refln.fr, dtype=float)
fr_s = numpy.array(diffrn_refln.fr_sigma, dtype=float)
v_hkl_perp_2d_i, v_b_ferro, v_b_antiferro = \
density_point.calc_factor_in_front_of_density_for_fm_perp(
hkl, space_group_symop, cell, atom_site_susceptibility, h_loc,
chi_iso_ferro=chi_iso_ferro,
chi_iso_antiferro=chi_iso_antiferro,
flag_two_channel=flag_two_channel)
f_nucl = crystal.calc_f_nucl(*hkl)
l_f_nucl.append(f_nucl)
l_v_2d_i.append((v_hkl_perp_2d_i, v_b_ferro, v_b_antiferro))
l_fr_e.append(fr_e)
l_fr_s.append(fr_s)
def temp_func(numpy_den=None):
l_chi_sq, l_der_chi_sq = [], []
l_der_chi_sq_f, l_der_chi_sq_a = [], []
for diffrn, f_nucl, v_2d_i, fr_e, fr_s in \
zip(l_diffrn, l_f_nucl, l_v_2d_i, l_fr_e, l_fr_s):
f_m_perp, delta_f_m_perp, delta_f_m_perp_f, delta_f_m_perp_a = \
density_point.calc_fm(*v_2d_i)
fr_m, delta_fr_m = diffrn.calc_fr(cell, f_nucl, f_m_perp,
delta_f_nucl=None,
delta_f_m_perp=delta_f_m_perp)
delta_fr_m_f = diffrn.calc_fr(cell, f_nucl, f_m_perp,
delta_f_nucl=None,
delta_f_m_perp=delta_f_m_perp_f)[1]
delta_fr_m_a = diffrn.calc_fr(cell, f_nucl, f_m_perp,
delta_f_nucl=None,
delta_f_m_perp=delta_f_m_perp_a)[1]
diffrn.diffrn_refln.numpy_fr_calc = fr_m
chi_sq, der_chi_sq = calc_chi_sq(fr_e, fr_s, fr_m, delta_fr_m)
der_chi_sq_f = calc_chi_sq(fr_e, fr_s, fr_m, delta_fr_m_f)[1]
der_chi_sq_a = calc_chi_sq(fr_e, fr_s, fr_m, delta_fr_m_a)[1]
l_chi_sq.append(chi_sq)
l_der_chi_sq.append(der_chi_sq)
l_der_chi_sq_f.append(der_chi_sq_f)
l_der_chi_sq_a.append(der_chi_sq_a)
# print(" ".join([f" {val:10.2f}" for val in l_chi_sq]))
return sum(l_chi_sq), sum(l_der_chi_sq), sum(l_der_chi_sq_f), \
sum(l_der_chi_sq_a)
chi_sq, delta_chi_sq, delta_chi_sq_f, delta_chi_sq_a = temp_func()
print("lengths: ", len(delta_chi_sq), len(density_point.items))
# fr_m = numpy.array(diffrn_refln.fr_calc, dtype=float)
#
# delta_fr_m =
#
# chi_sq_0, der_chi_sq = calc_chi_sq(fr_e, fr_s, fr_m, delta_fr_m)
# Numerical Derivatives
delta_den = 1e-5
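# The loop below perturbs one density value at a time by +/- delta_den and compares the
# central finite difference of chi^2 with the analytical derivative from temp_func().
# Minimal self-contained sketch of the same central-difference idea (toy function only):
# >>> f = lambda x: (x ** 2).sum()                    # toy "chi^2"
# >>> x, h = numpy.array([1.0, 2.0]), 1e-5
# >>> (f(x + [h, 0]) - f(x - [h, 0])) / (2 * h)       # ~2.0, the analytic value 2*x[0]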
atom_label = "Mn1"
print("indexes_xyz numerical_deriv analytical_deriv")
for item, der_anal, der_anal_f, der_anal_a in zip(density_point.items, delta_chi_sq, delta_chi_sq_f, delta_chi_sq_a):
if item.basin_atom_label == atom_label:
# Minus delta
den_orig = float(item.density)
item.density = den_orig - delta_den
den_minus = item.density
mem_obj.calc_fr()
fr_m = numpy.array(diffrn_refln.fr_calc, dtype=float)
chi_sq_minus = (numpy.square((fr_e-fr_m)/fr_s)).sum()
# Plus delta
item.density = den_orig + delta_den
den_plus = item.density
mem_obj.calc_fr()
fr_m = numpy.array(diffrn_refln.fr_calc, dtype=float)
chi_sq_plus = (numpy.square((fr_e-fr_m)/fr_s)).sum()
delta_chi_sq = (chi_sq_plus-chi_sq_minus)/(den_plus-den_minus)
print(f"{item.index_x:3}{item.index_y:3}{item.index_z:3} {delta_chi_sq:12.3f} {der_anal:12.3f}")
if abs(der_anal-delta_chi_sq) > 0.002:
print("ALARM "+50*"*")
item.density = den_orig
# Minus delta
den_orig = float(item.density_ferro)
item.density_ferro = den_orig - delta_den
den_minus = item.density_ferro
mem_obj.calc_fr()
fr_m = numpy.array(diffrn_refln.fr_calc, dtype=float)
chi_sq_minus = (numpy.square((fr_e-fr_m)/fr_s)).sum()
# Plus delta
item.density_ferro = den_orig + delta_den
den_plus = item.density_ferro
mem_obj.calc_fr()
fr_m = numpy.array(diffrn_refln.fr_calc, dtype=float)
chi_sq_plus = (numpy.square((fr_e-fr_m)/fr_s)).sum()
delta_chi_sq = (chi_sq_plus-chi_sq_minus)/(den_plus-den_minus)
print(f" {delta_chi_sq:12.3f} {der_anal_f:12.3f} - ferro")
if abs(der_anal_f-delta_chi_sq) > 0.002:
print("ALARM "+50*"*")
item.density_ferro = den_orig
# Minus delta
den_orig = float(item.density_antiferro)
item.density_antiferro = den_orig - delta_den
den_minus = item.density_antiferro
mem_obj.calc_fr()
fr_m = numpy.array(diffrn_refln.fr_calc, dtype=float)
chi_sq_minus = (numpy.square((fr_e-fr_m)/fr_s)).sum()
# Plus delta
item.density_antiferro = den_orig + delta_den
den_plus = item.density_antiferro
mem_obj.calc_fr()
fr_m = numpy.array(diffrn_refln.fr_calc, dtype=float)
"""
Volatility processes for ARCH model estimation. All volatility processes must
inherit from :class:`VolatilityProcess` and provide the same methods with the
same inputs.
"""
from __future__ import annotations
from abc import ABCMeta, abstractmethod
import itertools
import operator
from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, Union, cast
from warnings import warn
import numpy as np
from numpy.random import RandomState
from scipy.special import gammaln
from arch.typing import (
ArrayLike1D,
Float64Array,
ForecastingMethod,
Int32Array,
RNGType,
)
from arch.univariate.distribution import Normal
from arch.utility.array import AbstractDocStringInheritor, ensure1d
from arch.utility.exceptions import InitialValueWarning, initial_value_warning
if TYPE_CHECKING:
from arch.univariate import recursions_python as rec
else:
try:
from arch.univariate import recursions as rec
except ImportError:
from arch.univariate import recursions_python as rec
__all__ = [
"GARCH",
"ARCH",
"HARCH",
"ConstantVariance",
"EWMAVariance",
"RiskMetrics2006",
"EGARCH",
"FIGARCH",
"FixedVariance",
"BootstrapRng",
"MIDASHyperbolic",
"VolatilityProcess",
]
def _common_names(p: int, o: int, q: int) -> List[str]:
names = ["omega"]
names.extend(["alpha[" + str(i + 1) + "]" for i in range(p)])
names.extend(["gamma[" + str(i + 1) + "]" for i in range(o)])
names.extend(["beta[" + str(i + 1) + "]" for i in range(q)])
return names
class BootstrapRng(object):
"""
Simple fake RNG used to transform bootstrap-based forecasting into a standard
simulation forecasting problem
Parameters
----------
std_resid : ndarray
Array containing standardized residuals
start : int
Location of first forecast
random_state : RandomState, optional
NumPy RandomState instance
"""
def __init__(
self,
std_resid: Float64Array,
start: int,
random_state: Optional[RandomState] = None,
) -> None:
if start <= 0 or start > std_resid.shape[0]:
raise ValueError("start must be > 0 and <= len(std_resid).")
self.std_resid: Float64Array = std_resid
self.start: int = start
self._index = start
if random_state is None:
self._random_state = RandomState()
elif isinstance(random_state, RandomState):
self._random_state = random_state
else:
raise TypeError("random_state must be a NumPy RandomState instance.")
@property
def random_state(self) -> RandomState:
return self._random_state
def rng(self) -> RNGType:
def _rng(size: Union[int, Tuple[int, ...]]) -> Float64Array:
if self._index >= self.std_resid.shape[0]:
raise IndexError("not enough data points.")
index = self._random_state.random_sample(size)
int_index = np.floor((self._index + 1) * index)
int_index = int_index.astype(np.int64)
self._index += 1
return self.std_resid[int_index]
return _rng
def ewma_recursion(
lam: float, resids: Float64Array, sigma2: Float64Array, nobs: int, backcast: float
) -> Float64Array:
"""
Compute variance recursion for EWMA/RiskMetrics Variance
Parameters
----------
lam : float
Smoothing parameter
resids : ndarray
Residuals to use in the recursion
sigma2 : ndarray
Conditional variances with same shape as resids
nobs : int
Length of resids
backcast : float
Value to use when initializing the recursion
"""
# Throw away bounds
var_bounds = np.ones((nobs, 2)) * np.array([-1.0, 1.7e308])
rec.garch_recursion(
np.array([0.0, 1.0 - lam, lam]),
resids ** 2.0,
resids,
sigma2,
1,
0,
1,
nobs,
backcast,
var_bounds,
)
return sigma2
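# Plain-NumPy sketch of what the call above computes (an equivalent recursion, with the
# variance bounds effectively disabled; the data here are arbitrary):
# >>> lam = 0.94
# >>> resids = np.random.default_rng(0).standard_normal(250)
# >>> sigma2 = np.empty_like(resids)
# >>> sigma2[0] = resids.var()                        # backcast value
# >>> for t in range(1, resids.shape[0]):
# ...     sigma2[t] = (1 - lam) * resids[t - 1] ** 2 + lam * sigma2[t - 1]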
class VarianceForecast(object):
_forecasts = None
_forecast_paths = None
def __init__(
self,
forecasts: Float64Array,
forecast_paths: Optional[Float64Array] = None,
shocks: Optional[Float64Array] = None,
) -> None:
self._forecasts = forecasts
self._forecast_paths = forecast_paths
self._shocks = shocks
@property
def forecasts(self) -> Optional[Float64Array]:
return self._forecasts
@property
def forecast_paths(self) -> Optional[Float64Array]:
return self._forecast_paths
@property
def shocks(self) -> Optional[Float64Array]:
return self._shocks
class VolatilityProcess(metaclass=ABCMeta):
"""
Abstract base class for ARCH models. Allows the conditional mean model to be specified
separately from the conditional variance, even though parameters are estimated jointly.
"""
_updatable: bool = True
def __init__(self) -> None:
self._num_params = 0
self._name = ""
self.closed_form: bool = False
self._normal = Normal()
self._min_bootstrap_obs = 100
self._start = 0
self._stop = -1
self._volatility_updater: Optional[rec.VolatilityUpdater] = None
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return self.__str__() + ", id: " + hex(id(self))
@property
def name(self) -> str:
"""The name of the volatilty process"""
return self._name
@property
def start(self) -> int:
"""Index to use to start variance subarray selection"""
return self._start
@start.setter
def start(self, value: int) -> None:
self._start = value
@property
def stop(self) -> int:
"""Index to use to stop variance subarray selection"""
return self._stop
@stop.setter
def stop(self, value: int) -> None:
self._stop = value
@property
def num_params(self) -> int:
"""The number of parameters in the model"""
return self._num_params
@property
def updateable(self) -> bool:
"""Flag indicating that the volatility process supports update"""
return self._updatable
@property
def volatility_updater(self) -> rec.VolatilityUpdater:
"""
Get the volatility updater associated with the volatility process
Returns
-------
VolatilityUpdater
The updater class
Raises
------
NotImplementedError
If the process is not updateable
"""
if not self._updatable or self._volatility_updater is None:
raise NotImplementedError("Subclasses may optionally implement")
assert self._volatility_updater is not None
return self._volatility_updater
def update(
self,
index: int,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> float:
"""
Compute the variance for a single observation
Parameters
----------
index : int
The numerical index of the variance to compute
parameters : ndarray
The variance model parameters
resids : ndarray
The residual array. Only uses ``resids[:index]`` when computing
``sigma2[index]``
sigma2 : ndarray
The array containing the variances. Only uses ``sigma2[:index]``
when computing ``sigma2[index]``. The computed value is stored
in ``sigma2[index]``.
backcast : {float, ndarray}
Value to use when initializing the recursion
var_bounds : ndarray
Array containing columns of lower and upper bounds
Returns
-------
float
The variance computed for location ``index``
"""
raise NotImplementedError("Subclasses may optionally implement")
@abstractmethod
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
"""
Verify the requested forecasting method as valid for the specification
Parameters
----------
method : str
Forecasting method
horizon : int
Forecast horizon
Raises
------
NotImplementedError
* If method is not known or not supported
"""
def _one_step_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
horizon: int,
start_index: int,
) -> Tuple[Float64Array, Float64Array]:
"""
One-step ahead forecast
Parameters
----------
parameters : ndarray
Parameters required to forecast the volatility model
resids : ndarray
Residuals to use in the recursion
backcast : float
Value to use when initializing the recursion
var_bounds : ndarray
Array containing columns of lower and upper bounds
horizon : int
Forecast horizon. Must be 1 or larger. Forecasts are produced
for horizons in [1, horizon].
Returns
-------
sigma2 : ndarray
t-element array containing the in-sample conditional variances
forecasts : ndarray
(t - start_index) by horizon array with the one-step ahead forecasts in the first column
"""
t = resids.shape[0]
_resids = np.concatenate((resids, [0]))
_var_bounds = np.concatenate((var_bounds, [[0, np.inf]]))
sigma2 = np.zeros(t + 1)
self.compute_variance(parameters, _resids, sigma2, backcast, _var_bounds)
forecasts = np.zeros((t - start_index, horizon))
forecasts[:, 0] = sigma2[start_index + 1 :]
sigma2 = sigma2[:-1]
return sigma2, forecasts
@abstractmethod
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
"""
Analytic multi-step volatility forecasts from the model
Parameters
----------
parameters : ndarray
Parameters required to forecast the volatility model
resids : ndarray
Residuals to use in the recursion
backcast : float
Value to use when initializing the recursion
var_bounds : ndarray
Array containing columns of lower and upper bounds
start : int
Index of the first observation to use as the starting point for
the forecast. Default is 0.
horizon : int
Forecast horizon. Must be 1 or larger. Forecasts are produced
for horizons in [1, horizon].
Returns
-------
forecasts : VarianceForecast
Class containing the variance forecasts, and, if using simulation
or bootstrap, the simulated paths.
"""
@abstractmethod
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
"""
Simulation-based volatility forecasts from the model
Parameters
----------
parameters : ndarray
Parameters required to forecast the volatility model
resids : ndarray
Residuals to use in the recursion
backcast : float
Value to use when initializing the recursion. The backcast is
assumed to be appropriately transformed so that it can be
used without modification, e.g., log of the variance backcast
in an EGARCH model.
var_bounds : ndarray
Array containing columns of lower and upper bounds
start : int
Index of the first observation to use as the starting point for
the forecast. Default is 0.
horizon : int
Forecast horizon. Must be 1 or larger. Forecasts are produced
for horizons in [1, horizon].
simulations : int
Number of simulations to run when computing the forecast using
either simulation or bootstrap.
rng : callable
Callable random number generator required if method is
'simulation'. Must take a single shape input and return random
samples numbers with that shape.
Returns
-------
forecasts : VarianceForecast
Class containing the variance forecasts, and, if using simulation
or bootstrap, the simulated paths.
"""
def _bootstrap_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
random_state: Optional[RandomState],
) -> VarianceForecast:
"""
Simulation-based volatility forecasts using model residuals
Parameters
----------
parameters : ndarray
Parameters required to forecast the volatility model
resids : ndarray
Residuals to use in the recursion
backcast : {float, ndarray}
Value to use when initializing the recursion
var_bounds : ndarray
Array containing columns of lower and upper bounds
start : int
Index of the first observation to use as the starting point for
the forecast. Default is 0.
horizon : int
Forecast horizon. Must be 1 or larger. Forecasts are produced
for horizons in [1, horizon].
simulations : int
Number of simulations to run when computing the forecast using
either simulation or bootstrap.
random_state : {RandomState, None}
NumPy RandomState instance to use in the BootstrapRng
Returns
-------
forecasts : VarianceForecast
Class containing the variance forecasts, and, if using simulation
or bootstrap, the simulated paths.
"""
sigma2 = np.empty_like(resids)
self.compute_variance(parameters, resids, sigma2, backcast, var_bounds)
std_resid = resids / np.sqrt(sigma2)
if start < self._min_bootstrap_obs:
raise ValueError(
"start must include more than {0} "
"observations".format(self._min_bootstrap_obs)
)
rng = BootstrapRng(std_resid, start, random_state=random_state).rng()
return self._simulation_forecast(
parameters, resids, backcast, var_bounds, start, horizon, simulations, rng
)
def variance_bounds(self, resids: Float64Array, power: float = 2.0) -> Float64Array:
"""
Construct loose bounds for conditional variances.
These bounds are used in parameter estimation to ensure
that the log-likelihood does not produce NaN values.
Parameters
----------
resids : ndarray
Approximate residuals to use to compute the lower and upper bounds
on the conditional variance
power : float, optional
Power used in the model. 2.0, the default corresponds to standard
ARCH models that evolve in squares.
Returns
-------
var_bounds : ndarray
Array containing columns of lower and upper bounds with the same
number of elements as resids
"""
nobs = resids.shape[0]
tau = min(75, nobs)
w = 0.94 ** np.arange(tau)
w = w / sum(w)
var_bound = np.zeros(nobs)
initial_value = w.dot(resids[:tau] ** 2.0)
ewma_recursion(0.94, resids, var_bound, resids.shape[0], initial_value)
var_bounds = np.vstack((var_bound / 1e6, var_bound * 1e6)).T
var = resids.var()
min_upper_bound = 1 + (resids ** 2.0).max()
lower_bound, upper_bound = var / 1e8, 1e7 * (1 + (resids ** 2.0).max())
var_bounds[var_bounds[:, 0] < lower_bound, 0] = lower_bound
var_bounds[var_bounds[:, 1] < min_upper_bound, 1] = min_upper_bound
var_bounds[var_bounds[:, 1] > upper_bound, 1] = upper_bound
if power != 2.0:
var_bounds **= power / 2.0
return np.ascontiguousarray(var_bounds)
@abstractmethod
def starting_values(self, resids: Float64Array) -> Float64Array:
"""
Returns starting values for the ARCH model
Parameters
----------
resids : ndarray
Array of (approximate) residuals to use when computing starting
values
Returns
-------
sv : ndarray
Array of starting values
"""
def backcast(self, resids: Float64Array) -> Union[float, Float64Array]:
"""
Construct values for backcasting to start the recursion
Parameters
----------
resids : ndarray
Vector of (approximate) residuals
Returns
-------
backcast : float
Value to use in backcasting in the volatility recursion
"""
tau = min(75, resids.shape[0])
w = 0.94 ** np.arange(tau)
w = w / sum(w)
return float(np.sum((resids[:tau] ** 2.0) * w))
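# Sketch of the weighting used above: an exponentially declining, normalized window over
# the first tau squared residuals (RiskMetrics-style smoothing with lambda = 0.94):
# >>> tau = 5
# >>> w = 0.94 ** np.arange(tau)
# >>> (w / w.sum()).round(3)     # approximately [0.225, 0.212, 0.199, 0.187, 0.176]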
def backcast_transform(
self, backcast: Union[float, Float64Array]
) -> Union[float, Float64Array]:
"""
Transformation to apply to user-provided backcast values
Parameters
----------
backcast : {float, ndarray}
User-provided ``backcast`` that approximates sigma2[0].
Returns
-------
backcast : {float, ndarray}
Backcast transformed to the model-appropriate scale
"""
if np.any(backcast < 0):
raise ValueError("User backcast value must be strictly positive.")
return backcast
@abstractmethod
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
"""
Returns bounds for parameters
Parameters
----------
resids : ndarray
Vector of (approximate) residuals
Returns
-------
bounds : list[tuple[float,float]]
List of bounds where each element is (lower, upper).
"""
@abstractmethod
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
"""
Compute the variance for the ARCH model
Parameters
----------
parameters : ndarray
Model parameters
resids : ndarray
Vector of mean zero residuals
sigma2 : ndarray
Array with same size as resids to store the conditional variance
backcast : {float, ndarray}
Value to use when initializing ARCH recursion. Can be an ndarray
when the model contains multiple components.
var_bounds : ndarray
Array containing columns of lower and upper bounds
"""
@abstractmethod
def constraints(self) -> Tuple[Float64Array, Float64Array]:
"""
Construct parameter constraints arrays for parameter estimation
Returns
-------
A : ndarray
Parameters loadings in constraint. Shape is number of constraints
by number of parameters
b : ndarray
Constraint values, one for each constraint
Notes
-----
Values returned are used in constructing linear inequality
constraints of the form A.dot(parameters) - b >= 0
"""
def forecast(
self,
parameters: ArrayLike1D,
resids: Float64Array,
backcast: Union[Float64Array, float],
var_bounds: Float64Array,
start: Optional[int] = None,
horizon: int = 1,
method: ForecastingMethod = "analytic",
simulations: int = 1000,
rng: Optional[RNGType] = None,
random_state: Optional[RandomState] = None,
) -> VarianceForecast:
"""
Forecast volatility from the model
Parameters
----------
parameters : {ndarray, Series}
Parameters required to forecast the volatility model
resids : ndarray
Residuals to use in the recursion
backcast : float
Value to use when initializing the recursion
var_bounds : ndarray, 2-d
Array containing columns of lower and upper bounds
start : {None, int}
Index of the first observation to use as the starting point for
the forecast. Default is len(resids) - 1, i.e. the final observation.
horizon : int
Forecast horizon. Must be 1 or larger. Forecasts are produced
for horizons in [1, horizon].
method : {'analytic', 'simulation', 'bootstrap'}
Method to use when producing the forecast. The default is analytic.
simulations : int
Number of simulations to run when computing the forecast using
either simulation or bootstrap.
rng : callable
Callable random number generator required if method is
'simulation'. Must take a single shape input and return random
samples numbers with that shape.
random_state : RandomState, optional
NumPy RandomState instance to use when method is 'bootstrap'
Returns
-------
forecasts : VarianceForecast
Class containing the variance forecasts, and, if using simulation
or bootstrap, the simulated paths.
Raises
------
NotImplementedError
* If method is not supported
ValueError
* If the method is not known
Notes
-----
The analytic ``method`` is not supported for all models. Attempting
to use this method when not available will raise a ValueError.
"""
parameters = np.asarray(parameters)
method_name = method.lower()
if method_name not in ("analytic", "simulation", "bootstrap"):
raise ValueError("{0} is not a known forecasting method".format(method))
if not isinstance(horizon, (int, np.integer)) or horizon < 1:
raise ValueError("horizon must be an integer >= 1.")
self._check_forecasting_method(cast(ForecastingMethod, method_name), horizon)
start = len(resids) - 1 if start is None else start
if method_name == "analytic":
return self._analytic_forecast(
parameters, resids, backcast, var_bounds, start, horizon
)
elif method == "simulation":
# TODO: This looks like a design flaw. It is optional above but then must
# be present. This happens because the caller of this function is
# expected to know when to provide the rng or not
assert rng is not None
return self._simulation_forecast(
parameters,
resids,
backcast,
var_bounds,
start,
horizon,
simulations,
rng,
)
else:
if start < 10 or (horizon / start) >= 0.2:
raise ValueError(
"Bootstrap forecasting requires at least 10 initial "
"observations, and the ratio of horizon-to-start < 20%."
)
return self._bootstrap_forecast(
parameters,
resids,
backcast,
var_bounds,
start,
horizon,
simulations,
random_state,
)
@abstractmethod
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
"""
Simulate data from the model
Parameters
----------
parameters : {ndarray, Series}
Parameters required to simulate the volatility model
nobs : int
Number of data points to simulate
rng : callable
Callable function that takes a single integer input and returns
a vector of random numbers
burn : int, optional
Number of additional observations to generate when initializing
the simulation
initial_value : {float, ndarray}, optional
Scalar or array of initial values to use when initializing the
simulation
Returns
-------
resids : ndarray
The simulated residuals
variance : ndarray
The simulated variance
"""
def _gaussian_loglikelihood(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> float:
"""
Private implementation of a Gaussian log-likelihood for use in constructing starting
values or other quantities that do not depend on the distribution used by the model.
"""
sigma2 = np.zeros_like(resids)
self.compute_variance(parameters, resids, sigma2, backcast, var_bounds)
return float(self._normal.loglikelihood([], resids, sigma2))
@abstractmethod
def parameter_names(self) -> List[str]:
"""
Names of model parameters
Returns
-------
names : list (str)
Variables names
"""
class ConstantVariance(VolatilityProcess, metaclass=AbstractDocStringInheritor):
r"""
Constant volatility process
Notes
-----
Model has the same variance in all periods
"""
def __init__(self) -> None:
super().__init__()
self._num_params = 1
self._name = "Constant Variance"
self.closed_form: bool = True
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
sigma2[:] = parameters[0]
return sigma2
def starting_values(self, resids: Float64Array) -> Float64Array:
return np.array([resids.var()])
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
parameters = ensure1d(parameters, "parameters", False)
errors = rng(nobs + burn)
sigma2 = np.ones(nobs + burn) * parameters[0]
data = np.sqrt(sigma2) * errors
return data[burn:], sigma2[burn:]
def constraints(self) -> Tuple[Float64Array, Float64Array]:
return np.ones((1, 1)), np.zeros(1)
def backcast_transform(
self, backcast: Union[float, Float64Array]
) -> Union[float, Float64Array]:
backcast = super().backcast_transform(backcast)
return backcast
def backcast(self, resids: Float64Array) -> Union[float, Float64Array]:
return float(resids.var())
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
v = float(resids.var())
return [(v / 100000.0, 10.0 * (v + float(resids.mean()) ** 2.0))]
def parameter_names(self) -> List[str]:
return ["sigma2"]
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
return
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
t = resids.shape[0]
forecasts = np.full((t - start, horizon), np.nan)
forecasts[:, :] = parameters[0]
forecast_paths = None
return VarianceForecast(forecasts, forecast_paths)
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
t = resids.shape[0]
forecasts = np.empty((t - start, horizon))
forecast_paths = np.empty((t - start, simulations, horizon))
shocks = np.empty((t - start, simulations, horizon))
for i in range(t - start):
shocks[i, :, :] = np.sqrt(parameters[0]) * rng((simulations, horizon))
forecasts[:, :] = parameters[0]
forecast_paths[:, :, :] = parameters[0]
return VarianceForecast(forecasts, forecast_paths, shocks)
class GARCH(VolatilityProcess, metaclass=AbstractDocStringInheritor):
r"""
GARCH and related model estimation
The following models can be specified using GARCH:
* ARCH(p)
* GARCH(p,q)
* GJR-GARCH(p,o,q)
* AVARCH(p)
* AVGARCH(p,q)
* TARCH(p,o,q)
* Models with arbitrary, pre-specified powers
Parameters
----------
p : int
Order of the symmetric innovation
o : int
Order of the asymmetric innovation
q : int
Order of the lagged (transformed) conditional variance
power : float, optional
Power to use with the innovations, abs(e) ** power. Default is 2.0, which produces ARCH
and related models. Using 1.0 produces AVARCH and related models. Other powers can be
specified, although these should be strictly positive, and usually larger than 0.25.
Examples
--------
>>> from arch.univariate import GARCH
Standard GARCH(1,1)
>>> garch = GARCH(p=1, q=1)
Asymmetric GJR-GARCH process
>>> gjr = GARCH(p=1, o=1, q=1)
Asymmetric TARCH process
>>> tarch = GARCH(p=1, o=1, q=1, power=1.0)
Notes
-----
In this class of processes, the variance dynamics are
.. math::
\sigma_{t}^{\lambda}=\omega
+ \sum_{i=1}^{p}\alpha_{i}\left|\epsilon_{t-i}\right|^{\lambda}
+\sum_{j=1}^{o}\gamma_{j}\left|\epsilon_{t-j}\right|^{\lambda}
I\left[\epsilon_{t-j}<0\right]+\sum_{k=1}^{q}\beta_{k}\sigma_{t-k}^{\lambda}
"""
def __init__(self, p: int = 1, o: int = 0, q: int = 1, power: float = 2.0) -> None:
super().__init__()
self.p: int = int(p)
self.o: int = int(o)
self.q: int = int(q)
self.power: float = power
self._num_params = 1 + p + o + q
if p < 0 or o < 0 or q < 0:
raise ValueError("All lags lengths must be non-negative")
if p == 0 and o == 0:
raise ValueError("One of p or o must be strictly positive")
if power <= 0.0:
raise ValueError(
"power must be strictly positive, usually larger than 0.25"
)
self._name = self._generate_name()
self._volatility_updater = rec.GARCHUpdater(self.p, self.o, self.q, self.power)
def __str__(self) -> str:
descr = self.name
if self.power != 1.0 and self.power != 2.0:
descr = descr[:-1] + ", "
else:
descr += "("
for k, v in (("p", self.p), ("o", self.o), ("q", self.q)):
if v > 0:
descr += k + ": " + str(v) + ", "
descr = descr[:-2] + ")"
return descr
def variance_bounds(self, resids: Float64Array, power: float = 2.0) -> Float64Array:
return super().variance_bounds(resids, self.power)
def _generate_name(self) -> str:
p, o, q, power = self.p, self.o, self.q, self.power # noqa: F841
if power == 2.0:
if o == 0 and q == 0:
return "ARCH"
elif o == 0:
return "GARCH"
else:
return "GJR-GARCH"
elif power == 1.0:
if o == 0 and q == 0:
return "AVARCH"
elif o == 0:
return "AVGARCH"
else:
return "TARCH/ZARCH"
else:
if o == 0 and q == 0:
return "Power ARCH (power: {0:0.1f})".format(self.power)
elif o == 0:
return "Power GARCH (power: {0:0.1f})".format(self.power)
else:
return "Asym. Power GARCH (power: {0:0.1f})".format(self.power)
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
v = float(np.mean(abs(resids) ** self.power))
bounds = [(1e-8 * v, 10.0 * float(v))]
bounds.extend([(0.0, 1.0)] * self.p)
for i in range(self.o):
if i < self.p:
bounds.append((-1.0, 2.0))
else:
bounds.append((0.0, 2.0))
bounds.extend([(0.0, 1.0)] * self.q)
return bounds
def constraints(self) -> Tuple[Float64Array, Float64Array]:
p, o, q = self.p, self.o, self.q
k_arch = p + o + q
# alpha[i] >0
# alpha[i] + gamma[i] > 0 for i<=p, otherwise gamma[i]>0
# beta[i] >0
# sum(alpha) + 0.5 sum(gamma) + sum(beta) < 1
a = np.zeros((k_arch + 2, k_arch + 1))
for i in range(k_arch + 1):
a[i, i] = 1.0
for i in range(o):
if i < p:
a[i + p + 1, i + 1] = 1.0
a[k_arch + 1, 1:] = -1.0
a[k_arch + 1, p + 1 : p + o + 1] = -0.5
b = np.zeros(k_arch + 2)
b[k_arch + 1] = -1.0
return a, b
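# Concrete instance (assuming a plain GARCH(1,1), i.e. p=1, o=0, q=1): with
# theta = (omega, alpha, beta) the rows of A @ theta - b >= 0 encode
# omega >= 0, alpha >= 0, beta >= 0 and alpha + beta <= 1:
# >>> A = np.array([[1.0, 0.0, 0.0],
# ...               [0.0, 1.0, 0.0],
# ...               [0.0, 0.0, 1.0],
# ...               [0.0, -1.0, -1.0]])
# >>> b = np.array([0.0, 0.0, 0.0, -1.0])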
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
# fresids is abs(resids) ** power
# sresids is I(resids<0)
power = self.power
fresids = np.abs(resids) ** power
sresids = np.sign(resids)
p, o, q = self.p, self.o, self.q
nobs = resids.shape[0]
rec.garch_recursion(
parameters, fresids, sresids, sigma2, p, o, q, nobs, backcast, var_bounds
)
inv_power = 2.0 / power
sigma2 **= inv_power
return sigma2
def backcast_transform(
self, backcast: Union[float, Float64Array]
) -> Union[float, Float64Array]:
backcast = super().backcast_transform(backcast)
return np.sqrt(backcast) ** self.power
def backcast(self, resids: Float64Array) -> Union[float, Float64Array]:
power = self.power
tau = min(75, resids.shape[0])
w = 0.94 ** np.arange(tau)
w = w / sum(w)
backcast = np.sum((abs(resids[:tau]) ** power) * w)
return float(backcast)
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
parameters = ensure1d(parameters, "parameters", False)
p, o, q, power = self.p, self.o, self.q, self.power
errors = rng(nobs + burn)
if initial_value is None:
scale = np.ones_like(parameters)
scale[p + 1 : p + o + 1] = 0.5
persistence = np.sum(parameters[1:] * scale[1:])
if (1.0 - persistence) > 0:
initial_value = parameters[0] / (1.0 - persistence)
else:
warn(initial_value_warning, InitialValueWarning)
initial_value = parameters[0]
sigma2 = np.zeros(nobs + burn)
data = np.zeros(nobs + burn)
fsigma = np.zeros(nobs + burn)
fdata = np.zeros(nobs + burn)
max_lag = np.max([p, o, q])
fsigma[:max_lag] = initial_value
sigma2[:max_lag] = initial_value ** (2.0 / power)
data[:max_lag] = np.sqrt(sigma2[:max_lag]) * errors[:max_lag]
fdata[:max_lag] = abs(data[:max_lag]) ** power
for t in range(max_lag, nobs + burn):
loc = 0
fsigma[t] = parameters[loc]
loc += 1
for j in range(p):
fsigma[t] += parameters[loc] * fdata[t - 1 - j]
loc += 1
for j in range(o):
fsigma[t] += parameters[loc] * fdata[t - 1 - j] * (data[t - 1 - j] < 0)
loc += 1
for j in range(q):
fsigma[t] += parameters[loc] * fsigma[t - 1 - j]
loc += 1
sigma2[t] = fsigma[t] ** (2.0 / power)
data[t] = errors[t] * np.sqrt(sigma2[t])
fdata[t] = abs(data[t]) ** power
return data[burn:], sigma2[burn:]
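# Illustrative sketch: simulate 1000 observations from a GJR-GARCH(1,1,1); the parameter
# order is (omega, alpha, gamma, beta) and rng is any callable mapping a shape to draws:
# >>> vol = GARCH(p=1, o=1, q=1)
# >>> rng = np.random.default_rng(0).standard_normal
# >>> data, sigma2 = vol.simulate([0.05, 0.05, 0.10, 0.85], nobs=1000, rng=rng)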
def starting_values(self, resids: Float64Array) -> Float64Array:
p, o, q = self.p, self.o, self.q
power = self.power
alphas = [0.01, 0.05, 0.1, 0.2]
gammas = alphas
abg = [0.5, 0.7, 0.9, 0.98]
abgs = list(itertools.product(*[alphas, gammas, abg]))
target = np.mean(abs(resids) ** power)
scale = np.mean(resids ** 2) / (target ** (2.0 / power))
target *= scale ** (power / 2)
svs = []
var_bounds = self.variance_bounds(resids)
backcast = self.backcast(resids)
llfs = np.zeros(len(abgs))
for i, values in enumerate(abgs):
alpha, gamma, agb = values
sv = (1.0 - agb) * target * np.ones(p + o + q + 1)
if p > 0:
sv[1 : 1 + p] = alpha / p
agb -= alpha
if o > 0:
sv[1 + p : 1 + p + o] = gamma / o
agb -= gamma / 2.0
if q > 0:
sv[1 + p + o : 1 + p + o + q] = agb / q
svs.append(sv)
llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
loc = np.argmax(llfs)
return svs[int(loc)]
def parameter_names(self) -> List[str]:
return _common_names(self.p, self.o, self.q)
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
if horizon == 1:
return
if method == "analytic" and self.power != 2.0:
raise ValueError(
"Analytic forecasts not available for horizon > 1 when power != 2"
)
return
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
sigma2, forecasts = self._one_step_forecast(
parameters, resids, backcast, var_bounds, horizon, start
)
if horizon == 1:
return VarianceForecast(forecasts)
t = resids.shape[0]
p, o, q = self.p, self.o, self.q
omega = parameters[0]
alpha = parameters[1 : p + 1]
gamma = parameters[p + 1 : p + o + 1]
beta = parameters[p + o + 1 :]
m = np.max([p, o, q])
_resids = np.zeros(m + horizon)
_asym_resids = np.zeros(m + horizon)
_sigma2 = np.zeros(m + horizon)
for i in range(start, t):
if i - m + 1 >= 0:
_resids[:m] = resids[i - m + 1 : i + 1]
_asym_resids[:m] = _resids[:m] * (_resids[:m] < 0)
_sigma2[:m] = sigma2[i - m + 1 : i + 1]
else: # Back-casting needed
_resids[: m - i - 1] = np.sqrt(backcast)
_resids[m - i - 1 : m] = resids[0 : i + 1]
_asym_resids = cast(np.ndarray, _resids * (_resids < 0))
_asym_resids[: m - i - 1] = np.sqrt(0.5 * backcast)
_sigma2[:m] = backcast
_sigma2[m - i - 1 : m] = sigma2[0 : i + 1]
for h in range(0, horizon):
fcast_loc = i - start
forecasts[fcast_loc, h] = omega
start_loc = h + m - 1
for j in range(p):
forecasts[fcast_loc, h] += alpha[j] * _resids[start_loc - j] ** 2
for j in range(o):
forecasts[fcast_loc, h] += (
gamma[j] * _asym_resids[start_loc - j] ** 2
)
for j in range(q):
forecasts[fcast_loc, h] += beta[j] * _sigma2[start_loc - j]
_resids[h + m] = np.sqrt(forecasts[fcast_loc, h])
_asym_resids[h + m] = np.sqrt(0.5 * forecasts[fcast_loc, h])
_sigma2[h + m] = forecasts[fcast_loc, h]
return VarianceForecast(forecasts)
def _simulate_paths(
self,
m: int,
parameters: Float64Array,
horizon: int,
std_shocks: Float64Array,
scaled_forecast_paths: Float64Array,
scaled_shock: Float64Array,
asym_scaled_shock: Float64Array,
) -> Tuple[Float64Array, Float64Array, Float64Array]:
power = self.power
p, o, q = self.p, self.o, self.q
omega = parameters[0]
alpha = parameters[1 : p + 1]
gamma = parameters[p + 1 : p + o + 1]
beta = parameters[p + o + 1 :]
shock = np.empty_like(scaled_forecast_paths)
for h in range(horizon):
loc = h + m - 1
scaled_forecast_paths[:, h + m] = omega
for j in range(p):
scaled_forecast_paths[:, h + m] += alpha[j] * scaled_shock[:, loc - j]
for j in range(o):
scaled_forecast_paths[:, h + m] += (
gamma[j] * asym_scaled_shock[:, loc - j]
)
for j in range(q):
scaled_forecast_paths[:, h + m] += (
beta[j] * scaled_forecast_paths[:, loc - j]
)
shock[:, h + m] = std_shocks[:, h] * scaled_forecast_paths[:, h + m] ** (
1.0 / power
)
lt_zero = shock[:, h + m] < 0
scaled_shock[:, h + m] = np.abs(shock[:, h + m]) ** power
asym_scaled_shock[:, h + m] = scaled_shock[:, h + m] * lt_zero
forecast_paths = scaled_forecast_paths[:, m:] ** (2.0 / power)
return np.asarray(np.mean(forecast_paths, 0)), forecast_paths, shock[:, m:]
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
sigma2, forecasts = self._one_step_forecast(
parameters, resids, backcast, var_bounds, horizon, start
)
t = resids.shape[0]
paths = np.empty((t - start, simulations, horizon))
shocks = np.empty((t - start, simulations, horizon))
power = self.power
m = np.max([self.p, self.o, self.q])
scaled_forecast_paths = np.zeros((simulations, m + horizon))
scaled_shock = np.zeros((simulations, m + horizon))
asym_scaled_shock = np.zeros((simulations, m + horizon))
for i in range(start, t):
std_shocks = rng((simulations, horizon))
if i - m < 0:
scaled_forecast_paths[:, :m] = backcast ** (power / 2.0)
scaled_shock[:, :m] = backcast ** (power / 2.0)
asym_scaled_shock[:, :m] = (0.5 * backcast) ** (power / 2.0)
# Use actual values where available
count = i + 1
scaled_forecast_paths[:, m - count : m] = sigma2[:count] ** (
power / 2.0
)
scaled_shock[:, m - count : m] = np.abs(resids[:count]) ** power
asym = np.abs(resids[:count]) ** power * (resids[:count] < 0)
asym_scaled_shock[:, m - count : m] = asym
else:
scaled_forecast_paths[:, :m] = sigma2[i - m + 1 : i + 1] ** (
power / 2.0
)
scaled_shock[:, :m] = np.abs(resids[i - m + 1 : i + 1]) ** power
asym_scaled_shock[:, :m] = scaled_shock[:, :m] * (
resids[i - m + 1 : i + 1] < 0
)
f, p, s = self._simulate_paths(
m,
parameters,
horizon,
std_shocks,
scaled_forecast_paths,
scaled_shock,
asym_scaled_shock,
)
loc = i - start
forecasts[loc, :], paths[loc], shocks[loc] = f, p, s
return VarianceForecast(forecasts, paths, shocks)
class HARCH(VolatilityProcess, metaclass=AbstractDocStringInheritor):
r"""
Heterogeneous ARCH process
Parameters
----------
lags : {list, array, int}
List of lags to include in the model or, if a scalar, include all lags up to and including the value
Examples
--------
>>> from arch.univariate import HARCH
Lag-1 HARCH, which is identical to an ARCH(1)
>>> harch = HARCH()
More useful and realistic lag lengths
>>> harch = HARCH(lags=[1, 5, 22])
Notes
-----
In a Heterogeneous ARCH process, variance dynamics are
.. math::
\sigma_{t}^{2}=\omega + \sum_{i=1}^{m}\alpha_{l_{i}}
\left(l_{i}^{-1}\sum_{j=1}^{l_{i}}\epsilon_{t-j}^{2}\right)
In the common case where lags=[1,5,22], the model is
.. math::
\sigma_{t}^{2}=\omega+\alpha_{1}\epsilon_{t-1}^{2}
+\alpha_{5} \left(\frac{1}{5}\sum_{j=1}^{5}\epsilon_{t-j}^{2}\right)
+\alpha_{22} \left(\frac{1}{22}\sum_{j=1}^{22}\epsilon_{t-j}^{2}\right)
A HARCH process is a special case of an ARCH process where parameters in the more general
ARCH process have been restricted.
"""
def __init__(self, lags: Union[int, Sequence[int]] = 1) -> None:
super().__init__()
if np.isscalar(lags):
lag_val = operator.index(lags)
lags = list(range(1, lag_val + 1))
lags_arr = ensure1d(lags, "lags")
self.lags: Int32Array = np.array(lags_arr, dtype=np.int32)
self._num_lags = lags_arr.shape[0]
self._num_params = self._num_lags + 1
self._name = "HARCH"
self._volatility_updater = rec.HARCHUpdater(self.lags)
def __str__(self) -> str:
descr = self.name + "(lags: "
descr += ", ".join([str(lag) for lag in self.lags])
descr += ")"
return descr
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
lags = self.lags
k_arch = lags.shape[0]
bounds = [(0.0, 10 * float(np.mean(resids ** 2.0)))]
bounds.extend([(0.0, 1.0)] * k_arch)
return bounds
def constraints(self) -> Tuple[Float64Array, Float64Array]:
k_arch = self._num_lags
a = np.zeros((k_arch + 2, k_arch + 1))
for i in range(k_arch + 1):
a[i, i] = 1.0
a[k_arch + 1, 1:] = -1.0
b = np.zeros(k_arch + 2)
b[k_arch + 1] = -1.0
return a, b
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
lags = self.lags
nobs = resids.shape[0]
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, var_bounds
)
return sigma2
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
parameters = ensure1d(parameters, "parameters", False)
lags = self.lags
errors = rng(nobs + burn)
if initial_value is None:
if (1.0 - np.sum(parameters[1:])) > 0:
initial_value = parameters[0] / (1.0 - np.sum(parameters[1:]))
else:
warn(initial_value_warning, InitialValueWarning)
initial_value = parameters[0]
sigma2 = np.empty(nobs + burn)
data = np.empty(nobs + burn)
max_lag = np.max(lags)
sigma2[:max_lag] = initial_value
data[:max_lag] = np.sqrt(initial_value)
for t in range(max_lag, nobs + burn):
sigma2[t] = parameters[0]
for i in range(lags.shape[0]):
param = parameters[1 + i] / lags[i]
for j in range(lags[i]):
sigma2[t] += param * data[t - 1 - j] ** 2.0
data[t] = errors[t] * np.sqrt(sigma2[t])
return data[burn:], sigma2[burn:]
def starting_values(self, resids: Float64Array) -> Float64Array:
k_arch = self._num_lags
alpha = 0.9
sv = (1.0 - alpha) * resids.var() * np.ones((k_arch + 1))
sv[1:] = alpha / k_arch
return sv
def parameter_names(self) -> List[str]:
names = ["omega"]
lags = self.lags
names.extend(["alpha[" + str(lags[i]) + "]" for i in range(self._num_lags)])
return names
def _harch_to_arch(self, params: Float64Array) -> Float64Array:
arch_params = np.zeros((1 + int(self.lags.max())))
arch_params[0] = params[0]
for param, lag in zip(params[1:], self.lags):
arch_params[1 : lag + 1] += param / lag
return arch_params
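# Worked example of the mapping (assuming lags=[1, 5, 22]): each HARCH coefficient is
# spread uniformly over its lag window, so with parameters (0.1, 0.2, 0.3, 0.4) the
# implied ARCH weights start at 0.2 + 0.3/5 + 0.4/22 ~ 0.278 for lag 1 and
# 0.3/5 + 0.4/22 ~ 0.078 for lags 2 through 5:
# >>> h = HARCH(lags=[1, 5, 22])
# >>> h._harch_to_arch(np.array([0.1, 0.2, 0.3, 0.4]))[:3]
# array([0.1       , 0.27818182, 0.07818182])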
def _common_forecast_components(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
horizon: int,
) -> Tuple[float, Float64Array, Float64Array]:
arch_params = self._harch_to_arch(parameters)
t = resids.shape[0]
m = int(self.lags.max())
resids2 = np.empty((t, m + horizon))
resids2[:m, :m] = backcast
sq_resids = resids ** 2.0
for i in range(m):
resids2[m - i - 1 :, i] = sq_resids[: (t - (m - i - 1))]
const = arch_params[0]
arch = arch_params[1:]
return const, arch, resids2
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
return
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
const, arch, resids2 = self._common_forecast_components(
parameters, resids, backcast, horizon
)
m = int(self.lags.max())
resids2 = resids2[start:]
arch_rev = arch[::-1]
for i in range(horizon):
resids2[:, m + i] = const + resids2[:, i : (m + i)].dot(arch_rev)
return VarianceForecast(resids2[:, m:].copy())
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
const, arch, resids2 = self._common_forecast_components(
parameters, resids, backcast, horizon
)
t, m = resids.shape[0], int(self.lags.max())
paths = np.empty((t - start, simulations, horizon))
shocks = np.empty((t - start, simulations, horizon))
temp_resids2 = np.empty((simulations, m + horizon))
arch_rev = arch[::-1]
for i in range(start, t):
std_shocks = rng((simulations, horizon))
temp_resids2[:, :] = resids2[i : (i + 1)]
path_loc = i - start
for j in range(horizon):
paths[path_loc, :, j] = const + temp_resids2[:, j : (m + j)].dot(
arch_rev
)
shocks[path_loc, :, j] = std_shocks[:, j] * np.sqrt(
paths[path_loc, :, j]
)
temp_resids2[:, m + j] = shocks[path_loc, :, j] ** 2.0
return VarianceForecast(np.asarray(paths.mean(1)), paths, shocks)
class MIDASHyperbolic(VolatilityProcess, metaclass=AbstractDocStringInheritor):
r"""
MIDAS Hyperbolic ARCH process
Parameters
----------
m : int
Length of maximum lag to include in the model
asym : bool
Flag indicating whether to include an asymmetric term
Examples
--------
>>> from arch.univariate import MIDASHyperbolic
22-lag MIDAS Hyperbolic process
>>> harch = MIDASHyperbolic()
Longer 66-period lag
>>> harch = MIDASHyperbolic(m=66)
Asymmetric MIDAS Hyperbolic process
>>> harch = MIDASHyperbolic(asym=True)
Notes
-----
In a MIDAS Hyperbolic process, the variance evolves according to
.. math::
\sigma_{t}^{2}=\omega+
\sum_{i=1}^{m}\left(\alpha+\gamma I\left[\epsilon_{t-j}<0\right]\right)
\phi_{i}(\theta)\epsilon_{t-i}^{2}
where
.. math::
\phi_{i}(\theta) \propto \Gamma(i+\theta)/(\Gamma(i+1)\Gamma(\theta))
where :math:`\Gamma` is the gamma function. :math:`\{\phi_i(\theta)\}` is
normalized so that :math:`\sum \phi_i(\theta)=1`
References
----------
.. [*] Foroni, Claudia, and <NAME>. "A survey of
Econometric Methods for Mixed-Frequency Data". Norges Bank. (2013).
.. [*] Sheppard, Kevin. "Direct volatility modeling". Manuscript. (2018).
"""
def __init__(self, m: int = 22, asym: bool = False) -> None:
super().__init__()
self.m: int = int(m)
self._asym = bool(asym)
self._num_params = 3 + self._asym
self._name = "MIDAS Hyperbolic"
self._volatility_updater = rec.MIDASUpdater(self.m, self._asym)
def __str__(self) -> str:
descr = self.name
descr += "(lags: {0}, asym: {1}".format(self.m, self._asym)
return descr
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
bounds = [(0.0, 10 * float(np.mean(resids ** 2.0)))] # omega
bounds.extend([(0.0, 1.0)]) # 0 <= alpha < 1
if self._asym:
bounds.extend([(-1.0, 2.0)]) # -1 <= gamma < 2
bounds.extend([(0.0, 1.0)]) # theta
return bounds
def constraints(self) -> Tuple[Float64Array, Float64Array]:
"""
Constraints
Notes
-----
Parameters are (omega, alpha, gamma, theta)
A.dot(parameters) - b >= 0
1. omega >0
2. alpha>0 or alpha + gamma > 0
3. alpha<1 or alpha+0.5*gamma<1
4. theta > 0
5. theta < 1
"""
symm = not self._asym
k = 3 + self._asym
a = np.zeros((5, k))
b = np.zeros(5)
# omega
a[0, 0] = 1.0
# alpha >0 or alpha+gamma>0
# alpha<1 or alpha+0.5*gamma<1
if symm:
a[1, 1] = 1.0
a[2, 1] = -1.0
else:
a[1, 1:3] = 1.0
a[2, 1:3] = [-1, -0.5]
b[2] = -1.0
# theta
a[3, k - 1] = 1.0
a[4, k - 1] = -1.0
b[4] = -1.0
return a, b
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
nobs = resids.shape[0]
weights = self._weights(parameters)
if not self._asym:
params = np.zeros(3)
params[:2] = parameters[:2]
else:
params = parameters[:3]
rec.midas_recursion(params, weights, resids, sigma2, nobs, backcast, var_bounds)
return sigma2
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
parameters = ensure1d(parameters, "parameters", False)
if self._asym:
omega, alpha, gamma = parameters[:3]
else:
omega, alpha = parameters[:2]
gamma = 0
weights = self._weights(parameters)
aw = weights * alpha
gw = weights * gamma
errors = rng(nobs + burn)
if initial_value is None:
if (1.0 - alpha - 0.5 * gamma) > 0:
initial_value = parameters[0] / (1.0 - alpha - 0.5 * gamma)
else:
warn(initial_value_warning, InitialValueWarning)
initial_value = parameters[0]
m = weights.shape[0]
burn = max(burn, m)
sigma2 = np.empty(nobs + burn)
data = np.empty(nobs + burn)
sigma2[:m] = initial_value
data[:m] = np.sqrt(initial_value)
for t in range(m, nobs + burn):
sigma2[t] = omega
for i in range(m):
if t - 1 - i < m:
coef = aw[i] + 0.5 * gw[i]
else:
coef = aw[i] + gw[i] * (data[t - 1 - i] < 0)
sigma2[t] += coef * data[t - 1 - i] ** 2.0
data[t] = errors[t] * np.sqrt(sigma2[t])
return data[burn:], sigma2[burn:]
def starting_values(self, resids: Float64Array) -> Float64Array:
theta = [0.1, 0.5, 0.8, 0.9]
alpha = [0.8, 0.9, 0.95, 0.98]
var = (resids ** 2).mean()
var_bounds = self.variance_bounds(resids)
backcast = self.backcast(resids)
llfs = []
svs = []
for a, t in itertools.product(alpha, theta):
gamma = [0.0]
if self._asym:
gamma.extend([0.5, 0.9])
for g in gamma:
total = a + g / 2
o = (1 - min(total, 0.99)) * var
if self._asym:
sv = np.array([o, a, g, t])
else:
sv = np.array([o, a, t])
svs.append(sv)
llf = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
llfs.append(llf)
loc = np.argmax(llfs)
return svs[int(loc)]
def parameter_names(self) -> List[str]:
names = ["omega", "alpha", "theta"]
if self._asym:
names.insert(2, "gamma")
return names
def _weights(self, params: Float64Array) -> Float64Array:
m = self.m
# Prevent 0
theta = max(params[-1], np.finfo(np.float64).eps)
j = np.arange(1.0, m + 1)
w = gammaln(theta + j) - gammaln(j + 1) - gammaln(theta)
w = np.exp(w)
return w / w.sum()
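    # Illustrative sketch (not part of the original implementation): the hyperbolic
    # weights returned by ``_weights`` always sum to one and decay more slowly as
    # theta approaches 1 (theta = 1 gives equal weights). For example:
    #
    #     mh = MIDASHyperbolic(m=5)
    #     w = mh._weights(np.array([0.0, 0.9, 0.4]))  # hypothetical values; only the last element (theta) is used
    #     assert abs(w.sum() - 1.0) < 1e-12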
def _common_forecast_components(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
horizon: int,
    ) -> Tuple[float, Float64Array, Float64Array, Float64Array, Float64Array]:
if self._asym:
omega, alpha, gamma = parameters[:3]
else:
omega, alpha = parameters[:2]
gamma = 0.0
weights = self._weights(parameters)
aw = weights * alpha
gw = weights * gamma
t = resids.shape[0]
m = self.m
resids2 = np.empty((t, m + horizon))
resids2[:m, :m] = backcast
indicator = np.empty((t, m + horizon))
indicator[:m, :m] = 0.5
sq_resids = resids ** 2.0
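        # Build a (t, m + horizon) matrix of lagged squared residuals: within the
        # first m columns of row i, column m - 1 holds eps_i**2 (the most recent
        # observation), column m - 2 holds eps_{i-1}**2, and so on, while entries
        # before the start of the sample keep the backcast value. Dotting a block
        # of m columns with the reversed weights therefore applies the first
        # (largest) weight to the most recent lag.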
for i in range(m):
resids2[m - i - 1 :, i] = sq_resids[: (t - (m - i - 1))]
indicator[m - i - 1 :, i] = resids[: (t - (m - i - 1))] < 0
return omega, aw, gw, resids2, indicator
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
return
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
omega, aw, gw, resids2, indicator = self._common_forecast_components(
parameters, resids, backcast, horizon
)
m = self.m
resids2 = resids2[start:].copy()
aw_rev = aw[::-1]
gw_rev = gw[::-1]
for i in range(horizon):
resids2[:, m + i] = omega + resids2[:, i : (m + i)].dot(aw_rev)
if self._asym:
resids2_ind = resids2[:, i : (m + i)] * indicator[:, i : (m + i)]
resids2[:, m + i] += resids2_ind.dot(gw_rev)
indicator[:, m + i] = 0.5
return VarianceForecast(resids2[:, m:].copy())
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
omega, aw, gw, resids2, indicator = self._common_forecast_components(
parameters, resids, backcast, horizon
)
t = resids.shape[0]
m = self.m
shocks = np.empty((t - start, simulations, horizon))
paths = np.empty((t - start, simulations, horizon))
temp_resids2 = np.empty((simulations, m + horizon))
temp_indicator = np.empty((simulations, m + horizon))
aw_rev = aw[::-1]
gw_rev = gw[::-1]
for i in range(start, t):
std_shocks = rng((simulations, horizon))
temp_resids2[:, :] = resids2[i : (i + 1)]
temp_indicator[:, :] = indicator[i : (i + 1)]
path_loc = i - start
for j in range(horizon):
paths[path_loc, :, j] = omega + temp_resids2[:, j : (m + j)].dot(aw_rev)
if self._asym:
temp_resids2_ind = (
temp_resids2[:, j : (m + j)] * temp_indicator[:, j : (m + j)]
)
paths[path_loc, :, j] += temp_resids2_ind.dot(gw_rev)
shocks[path_loc, :, j] = std_shocks[:, j] * np.sqrt(
paths[path_loc, :, j]
)
temp_resids2[:, m + j] = shocks[path_loc, :, j] ** 2.0
temp_indicator[:, m + j] = (shocks[path_loc, :, j] < 0).astype(
np.double
)
return VarianceForecast(np.asarray(paths.mean(1)), paths, shocks)
class ARCH(GARCH):
r"""
ARCH process
Parameters
----------
p : int
Order of the symmetric innovation
Examples
--------
ARCH(1) process
>>> from arch.univariate import ARCH
ARCH(5) process
>>> arch = ARCH(p=5)
Notes
-----
The variance dynamics of the model estimated
.. math::
\sigma_t^{2}=\omega+\sum_{i=1}^{p}\alpha_{i}\epsilon_{t-i}^{2}
"""
def __init__(self, p: int = 1) -> None:
super().__init__(p, 0, 0, 2.0)
self._num_params = p + 1
def starting_values(self, resids: Float64Array) -> Float64Array:
p = self.p
alphas = np.arange(0.1, 0.95, 0.05)
svs = []
backcast = self.backcast(resids)
llfs = alphas.copy()
var_bounds = self.variance_bounds(resids)
for i, alpha in enumerate(alphas):
sv = (1.0 - alpha) * resids.var() * np.ones((p + 1))
sv[1:] = alpha / p
svs.append(sv)
llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
loc = np.argmax(llfs)
return svs[int(loc)]
class EWMAVariance(VolatilityProcess, metaclass=AbstractDocStringInheritor):
r"""
Exponentially Weighted Moving-Average (RiskMetrics) Variance process
Parameters
----------
lam : {float, None}, optional
Smoothing parameter. Default is 0.94. Set to None to estimate lam
jointly with other model parameters
Examples
--------
Daily RiskMetrics EWMA process
>>> from arch.univariate import EWMAVariance
>>> rm = EWMAVariance(0.94)
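    Jointly estimating the smoothing parameter (see the ``lam`` description above)
    >>> rm_est = EWMAVariance(lam=None)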
Notes
-----
The variance dynamics of the model
.. math::
\sigma_t^{2}=\lambda\sigma_{t-1}^2 + (1-\lambda)\epsilon^2_{t-1}
When lam is provided, this model has no parameters since the smoothing
parameter is treated as fixed. Set lam to ``None`` to jointly estimate this
parameter when fitting the model.
"""
def __init__(self, lam: Optional[float] = 0.94) -> None:
super().__init__()
self.lam: Optional[float] = lam
self._estimate_lam = lam is None
self._num_params = 1 if self._estimate_lam else 0
if lam is not None and not 0.0 < lam < 1.0:
raise ValueError("lam must be strictly between 0 and 1")
self._name = "EWMA/RiskMetrics"
self._volatility_updater = rec.EWMAUpdater(self.lam)
def __str__(self) -> str:
if self._estimate_lam:
descr = self.name + "(lam: Estimated)"
else:
assert self.lam is not None
descr = self.name + "(lam: " + "{0:0.2f}".format(self.lam) + ")"
return descr
def starting_values(self, resids: Float64Array) -> Float64Array:
if self._estimate_lam:
return np.array([0.94])
return np.array([])
def parameter_names(self) -> List[str]:
if self._estimate_lam:
return ["lam"]
return []
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
if self._estimate_lam:
return [(0, 1)]
return []
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
lam = parameters[0] if self._estimate_lam else self.lam
return ewma_recursion(lam, resids, sigma2, resids.shape[0], float(backcast))
def constraints(self) -> Tuple[Float64Array, Float64Array]:
if self._estimate_lam:
a = np.ones((1, 1))
b = np.zeros((1,))
return a, b
return np.empty((0, 0)), np.empty((0,))
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
parameters = ensure1d(parameters, "parameters", False)
errors = rng(nobs + burn)
if initial_value is None:
initial_value = 1.0
sigma2 = np.zeros(nobs + burn)
data = np.zeros(nobs + burn)
sigma2[0] = initial_value
data[0] = np.sqrt(sigma2[0])
if self._estimate_lam:
lam = parameters[0]
else:
lam = self.lam
one_m_lam = 1.0 - lam
for t in range(1, nobs + burn):
sigma2[t] = lam * sigma2[t - 1] + one_m_lam * data[t - 1] ** 2.0
data[t] = np.sqrt(sigma2[t]) * errors[t]
return data[burn:], sigma2[burn:]
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
return
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
_, forecasts = self._one_step_forecast(
parameters, resids, backcast, var_bounds, horizon, start_index=start
)
for i in range(1, horizon):
forecasts[:, i] = forecasts[:, 0]
return VarianceForecast(forecasts)
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
one_step = self._analytic_forecast(
parameters, resids, backcast, var_bounds, start, 1
)
t = resids.shape[0]
paths = np.empty((t - start, simulations, horizon))
shocks = np.empty((t - start, simulations, horizon))
if self._estimate_lam:
lam = parameters[0]
else:
lam = self.lam
assert one_step.forecasts is not None
for i in range(start, t):
std_shocks = rng((simulations, horizon))
path_loc = i - start
paths[path_loc, :, 0] = one_step.forecasts[path_loc]
shocks[path_loc, :, 0] = (
np.sqrt(one_step.forecasts[path_loc]) * std_shocks[:, 0]
)
for h in range(1, horizon):
paths[path_loc, :, h] = (1 - lam) * shocks[
path_loc, :, h - 1
] ** 2.0 + lam * paths[path_loc, :, h - 1]
shocks[path_loc, :, h] = (
np.sqrt(paths[path_loc, :, h]) * std_shocks[:, h]
)
return VarianceForecast(np.asarray(paths.mean(1)), paths, shocks)
class RiskMetrics2006(VolatilityProcess, metaclass=AbstractDocStringInheritor):
"""
RiskMetrics 2006 Variance process
Parameters
----------
tau0 : {int, float}, optional
Length of long cycle. Default is 1560.
tau1 : {int, float}, optional
Length of short cycle. Default is 4.
kmax : int, optional
Number of components. Default is 14.
rho : float, optional
Relative scale of adjacent cycles. Default is sqrt(2)
Examples
--------
Daily RiskMetrics 2006 process
>>> from arch.univariate import RiskMetrics2006
>>> rm = RiskMetrics2006()
Notes
-----
    The variance dynamics of the model are given as a weighted average of kmax EWMA
    variance processes whose smoothing parameters and combination weights are
    determined by tau0, tau1, kmax and rho. The model therefore has no estimable
    parameters: all smoothing parameters and weights are fixed.
"""
def __init__(
self,
tau0: float = 1560,
tau1: float = 4,
kmax: int = 14,
rho: float = 1.4142135623730951,
) -> None:
super().__init__()
self.tau0: float = tau0
self.tau1: float = tau1
self.kmax: int = kmax
self.rho: float = rho
self._num_params = 0
if tau0 <= tau1 or tau1 <= 0:
raise ValueError("tau0 must be greater than tau1 and tau1 > 0")
if tau1 * rho ** (kmax - 1) > tau0:
raise ValueError("tau1 * rho ** (kmax-1) smaller than tau0")
if not kmax >= 1:
raise ValueError("kmax must be a positive integer")
if not rho > 1:
raise ValueError("rho must be a positive number larger than 1")
self._name = "RiskMetrics2006"
self._volatility_updater = rec.RiskMetrics2006Updater(
self.kmax,
self._ewma_combination_weights(),
self._ewma_smoothing_parameters(),
)
def __str__(self) -> str:
descr = self.name
descr += (
f"(tau0: {self.tau0:0.1f}, tau1: {self.tau1:0.1f}, "
f"kmax: {self.kmax:d}, rho: {self.rho:0.3f}"
)
descr += ")"
return descr
def _ewma_combination_weights(self) -> Float64Array:
"""
Returns
-------
weights : ndarray
Combination weights for EWMA components
"""
tau0, tau1, kmax, rho = self.tau0, self.tau1, self.kmax, self.rho
taus = tau1 * (rho ** np.arange(kmax))
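        # Component time constants tau_k = tau1 * rho**k for k = 0, ..., kmax - 1;
        # each component's combination weight declines logarithmically in its time
        # constant and the weights are then normalized to sum to one.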
w = 1 - np.log(taus) / np.log(tau0)
w = w / w.sum()
return w
def _ewma_smoothing_parameters(self) -> Float64Array:
tau1, kmax, rho = self.tau1, self.kmax, self.rho
taus = tau1 * (rho ** np.arange(kmax))
mus = cast(Float64Array, np.exp(-1.0 / taus))
return mus
def backcast(self, resids: Float64Array) -> Union[float, Float64Array]:
"""
Construct values for backcasting to start the recursion
Parameters
----------
resids : ndarray
Vector of (approximate) residuals
Returns
-------
backcast : ndarray
Backcast values for each EWMA component
"""
nobs = resids.shape[0]
mus = self._ewma_smoothing_parameters()
resids2 = resids ** 2.0
backcast = np.zeros(mus.shape[0])
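        # For each component, average the squared residuals over a window just long
        # enough for the EWMA weight mu**k to decay to roughly 1%
        # (log(0.01) / log(mu)), capped at the sample length.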
for k in range(int(self.kmax)):
mu = mus[k]
end_point = int(max(min(np.floor(np.log(0.01) / np.log(mu)), nobs), k))
weights = mu ** np.arange(end_point)
weights = weights / weights.sum()
backcast[k] = weights.dot(resids2[:end_point])
return backcast
def backcast_transform(
self, backcast: Union[float, Float64Array]
) -> Union[float, Float64Array]:
backcast = super().backcast_transform(backcast)
mus = self._ewma_smoothing_parameters()
backcast_arr = np.asarray(backcast)
if backcast_arr.ndim == 0:
backcast_arr = cast(np.ndarray, backcast * np.ones(mus.shape[0]))
if backcast_arr.shape[0] != mus.shape[0] and backcast_arr.ndim != 0:
raise ValueError(
"User backcast must be either a scalar or an vector containing the "
"number of\ncomponent EWMAs in the model."
)
return backcast_arr
def starting_values(self, resids: Float64Array) -> Float64Array:
return np.empty((0,))
def parameter_names(self) -> List[str]:
return []
def variance_bounds(self, resids: Float64Array, power: float = 2.0) -> Float64Array:
return np.ones((resids.shape[0], 1)) * np.array(
[-1.0, np.finfo(np.float64).max]
)
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
return []
def constraints(self) -> Tuple[Float64Array, Float64Array]:
return np.empty((0, 0)), np.empty((0,))
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
nobs = resids.shape[0]
kmax = self.kmax
w = self._ewma_combination_weights()
mus = self._ewma_smoothing_parameters()
sigma2_temp = np.zeros_like(sigma2)
backcast = cast(Float64Array, backcast)
for k in range(kmax):
mu = mus[k]
ewma_recursion(mu, resids, sigma2_temp, nobs, backcast[k])
if k == 0:
sigma2[:] = w[k] * sigma2_temp
else:
sigma2 += w[k] * sigma2_temp
return sigma2
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
errors = rng(nobs + burn)
kmax = self.kmax
w = self._ewma_combination_weights()
mus = self._ewma_smoothing_parameters()
if initial_value is None:
initial_value = 1.0
sigma2s = np.zeros((nobs + burn, kmax))
sigma2s[0, :] = initial_value
sigma2 = np.zeros(nobs + burn)
data = np.zeros(nobs + burn)
data[0] = np.sqrt(initial_value)
sigma2[0] = w.dot(sigma2s[0])
for t in range(1, nobs + burn):
sigma2s[t] = mus * sigma2s[t - 1] + (1 - mus) * data[t - 1] ** 2.0
sigma2[t] = w.dot(sigma2s[t])
data[t] = np.sqrt(sigma2[t]) * errors[t]
return data[burn:], sigma2[burn:]
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
return
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
_, forecasts = self._one_step_forecast(
parameters, resids, backcast, var_bounds, horizon, start_index=start
)
for i in range(1, horizon):
forecasts[:, i] = forecasts[:, 0]
return VarianceForecast(forecasts)
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
kmax = self.kmax
w = self._ewma_combination_weights()
mus = self._ewma_smoothing_parameters()
backcast = cast(Float64Array, np.asarray(backcast))
t = resids.shape[0]
paths = np.empty((t - start, simulations, horizon))
shocks = np.empty((t - start, simulations, horizon))
temp_paths = np.empty((kmax, simulations, horizon))
# We use the transpose here to get C-contiguous arrays
component_one_step = np.empty((kmax, t + 1))
_resids = np.ascontiguousarray(resids)
for k in range(kmax):
mu = mus[k]
ewma_recursion(mu, _resids, component_one_step[k, :], t + 1, backcast[k])
# Transpose to be (t+1, kmax)
component_one_step = component_one_step.T
for i in range(start, t):
std_shocks = rng((simulations, horizon))
for k in range(kmax):
temp_paths[k, :, 0] = component_one_step[i, k]
path_loc = i - start
paths[path_loc, :, 0] = w.dot(temp_paths[:, :, 0])
shocks[path_loc, :, 0] = std_shocks[:, 0] * np.sqrt(paths[path_loc, :, 0])
for j in range(1, horizon):
for k in range(kmax):
mu = mus[k]
temp_paths[k, :, j] = (
mu * temp_paths[k, :, j - 1]
+ (1 - mu) * shocks[path_loc, :, j - 1] ** 2.0
)
paths[path_loc, :, j] = w.dot(temp_paths[:, :, j])
shocks[path_loc, :, j] = std_shocks[:, j] * np.sqrt(
paths[path_loc, :, j]
)
return VarianceForecast(np.asarray(paths.mean(1)), paths, shocks)
class EGARCH(VolatilityProcess, metaclass=AbstractDocStringInheritor):
r"""
EGARCH model estimation
Parameters
----------
p : int
Order of the symmetric innovation
o : int
Order of the asymmetric innovation
q : int
Order of the lagged (transformed) conditional variance
Examples
--------
>>> from arch.univariate import EGARCH
Symmetric EGARCH(1,1)
>>> egarch = EGARCH(p=1, q=1)
Standard EGARCH process
>>> egarch = EGARCH(p=1, o=1, q=1)
Exponential ARCH process
>>> earch = EGARCH(p=5)
Notes
-----
In this class of processes, the variance dynamics are
.. math::
\ln\sigma_{t}^{2}=\omega
+\sum_{i=1}^{p}\alpha_{i}
\left(\left|e_{t-i}\right|-\sqrt{2/\pi}\right)
+\sum_{j=1}^{o}\gamma_{j} e_{t-j}
+\sum_{k=1}^{q}\beta_{k}\ln\sigma_{t-k}^{2}
where :math:`e_{t}=\epsilon_{t}/\sigma_{t}`.
"""
def __init__(self, p: int = 1, o: int = 0, q: int = 1) -> None:
super().__init__()
self.p: int = int(p)
self.o: int = int(o)
self.q: int = int(q)
self._num_params = 1 + p + o + q
if p < 0 or o < 0 or q < 0:
raise ValueError("All lags lengths must be non-negative")
if p == 0 and o == 0:
raise ValueError("One of p or o must be strictly positive")
self._name = "EGARCH" if q > 0 else "EARCH"
# Helpers for fitting variance
self._arrays: Optional[Tuple[Float64Array, Float64Array, Float64Array]] = None
self._volatility_updater = rec.EGARCHUpdater(self.p, self.o, self.q)
def __str__(self) -> str:
descr = self.name + "("
for k, v in (("p", self.p), ("o", self.o), ("q", self.q)):
if v > 0:
descr += k + ": " + str(v) + ", "
descr = descr[:-2] + ")"
return descr
def variance_bounds(self, resids: Float64Array, power: float = 2.0) -> Float64Array:
return super().variance_bounds(resids, 2.0)
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
v = np.mean(resids ** 2.0)
log_const = np.log(10000.0)
lnv = np.log(v)
bounds = [(lnv - log_const, lnv + log_const)]
bounds.extend([(-np.inf, np.inf)] * (self.p + self.o))
bounds.extend([(0.0, float(self.q))] * self.q)
return bounds
def constraints(self) -> Tuple[Float64Array, Float64Array]:
p, o, q = self.p, self.o, self.q
k_arch = p + o + q
a = np.zeros((1, k_arch + 1))
a[0, p + o + 1 :] = -1.0
b = np.zeros((1,))
b[0] = -1.0
return a, b
def compute_variance(
self,
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
) -> Float64Array:
p, o, q = self.p, self.o, self.q
nobs = resids.shape[0]
if (self._arrays is not None) and (self._arrays[0].shape[0] == nobs):
lnsigma2, std_resids, abs_std_resids = self._arrays
else:
lnsigma2 = np.empty(nobs)
abs_std_resids = np.empty(nobs)
std_resids = np.empty(nobs)
self._arrays = (lnsigma2, abs_std_resids, std_resids)
rec.egarch_recursion(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
return sigma2
def backcast_transform(
self, backcast: Union[float, Float64Array]
) -> Union[float, Float64Array]:
backcast = super().backcast_transform(backcast)
return float(np.log(backcast))
def backcast(self, resids: Float64Array) -> Union[float, Float64Array]:
return float(np.log(super().backcast(resids)))
def simulate(
self,
parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
nobs: int,
rng: RNGType,
burn: int = 500,
initial_value: Union[None, float, Float64Array] = None,
) -> Tuple[Float64Array, Float64Array]:
parameters = ensure1d(parameters, "parameters", False)
p, o, q = self.p, self.o, self.q
errors = rng(nobs + burn)
if initial_value is None:
if q > 0:
beta_sum = np.sum(parameters[p + o + 1 :])
else:
beta_sum = 0.0
if beta_sum < 1:
initial_value = parameters[0] / (1.0 - beta_sum)
else:
warn(initial_value_warning, InitialValueWarning)
initial_value = parameters[0]
sigma2 = np.zeros(nobs + burn)
data = np.zeros(nobs + burn)
lnsigma2 = np.zeros(nobs + burn)
abserrors = cast(Float64Array, np.abs(errors))
norm_const = np.sqrt(2 / np.pi)
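        # sqrt(2/pi) is E[|z|] for a standard normal shock, so subtracting it
        # centers the absolute-shock terms in the EGARCH recursion below.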
max_lag = np.max([p, o, q])
lnsigma2[:max_lag] = initial_value
sigma2[:max_lag] = np.exp(initial_value)
data[:max_lag] = errors[:max_lag] * np.sqrt(sigma2[:max_lag])
for t in range(max_lag, nobs + burn):
loc = 0
lnsigma2[t] = parameters[loc]
loc += 1
for j in range(p):
lnsigma2[t] += parameters[loc] * (abserrors[t - 1 - j] - norm_const)
loc += 1
for j in range(o):
lnsigma2[t] += parameters[loc] * errors[t - 1 - j]
loc += 1
for j in range(q):
lnsigma2[t] += parameters[loc] * lnsigma2[t - 1 - j]
loc += 1
sigma2 = cast(Float64Array, np.exp(lnsigma2))
data = errors * np.sqrt(sigma2)
return data[burn:], sigma2[burn:]
def starting_values(self, resids: Float64Array) -> Float64Array:
p, o, q = self.p, self.o, self.q
alphas = [0.01, 0.05, 0.1, 0.2]
gammas = [-0.1, 0.0, 0.1]
betas = [0.5, 0.7, 0.9, 0.98]
agbs = list(itertools.product(*[alphas, gammas, betas]))
target = np.log(np.mean(resids ** 2))
svs = []
var_bounds = self.variance_bounds(resids)
backcast = self.backcast(resids)
llfs = np.zeros(len(agbs))
for i, values in enumerate(agbs):
alpha, gamma, beta = values
sv = (1.0 - beta) * target * np.ones(p + o + q + 1)
if p > 0:
sv[1 : 1 + p] = alpha / p
if o > 0:
sv[1 + p : 1 + p + o] = gamma / o
if q > 0:
sv[1 + p + o : 1 + p + o + q] = beta / q
svs.append(sv)
llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
loc = np.argmax(llfs)
return svs[int(loc)]
def parameter_names(self) -> List[str]:
return _common_names(self.p, self.o, self.q)
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
if method == "analytic" and horizon > 1:
raise ValueError("Analytic forecasts not available for horizon > 1")
return
def _analytic_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
) -> VarianceForecast:
_, forecasts = self._one_step_forecast(
parameters, resids, backcast, var_bounds, horizon, start
)
return VarianceForecast(forecasts)
def _simulation_forecast(
self,
parameters: Float64Array,
resids: Float64Array,
backcast: Union[float, Float64Array],
var_bounds: Float64Array,
start: int,
horizon: int,
simulations: int,
rng: RNGType,
) -> VarianceForecast:
sigma2, forecasts = self._one_step_forecast(
parameters, resids, backcast, var_bounds, horizon, start
)
t = resids.shape[0]
p, o, q = self.p, self.o, self.q
m = np.max([p, o, q])
lnsigma2 = cast(Float64Array, np.log(sigma2))
e = resids / np.sqrt(sigma2)
lnsigma2_mat = np.full((t, m), backcast)
e_mat = np.zeros((t, m))
abs_e_mat = np.full((t, m), np.sqrt(2 / np.pi))
for i in range(m):
lnsigma2_mat[m - i - 1 :, i] = lnsigma2[: (t - (m - 1) + i)]
e_mat[m - i - 1 :, i] = e[: (t - (m - 1) + i)]
abs_e_mat[m - i - 1 :, i] = np.abs(e[: (t - (m - 1) + i)])
        paths = np.empty((t - start, simulations, horizon))
#!/usr/bin/env python
import pickle
import datetime
import numpy as np
import os
data_filename = "/Yep/data/exp_data_%s.pckl"
pay_methods = ["Compte Commun", "Cash", "Card Tony", "Card AC", "Cheques Repas"]
benefs = ["Both", "Tony", "AC"]
types = ["Groceries", "Car", "Holidays", "Restos", "Health", "Telecom", "Fast Food", "Epargne", "Insurance", "Gifts", "Books", "Entertainment", "Actis", "Work", "Drank & Drugs", "Cat", "Extra", "Clothing", "Appart"]
def get_year_month_day(date_str):
fields = date_str.split('-')
return [int(fields[0]), int(fields[1]), int(fields[2])]
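# For example, get_year_month_day("2021-07-04") returns [2021, 7, 4].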
class exp_handler:
def __init__(self, user):
self.user = user
try:
f = open(data_filename % self.user, 'rb')
self.dic = pickle.load(f)
f.close()
except IOError:
self.dic = {'Date' : [], 'Type' : [], 'Method': [], 'Benef': [], 'Amount' : np.array([])}
def update_dic(self, colname, data):
self.dic[colname].append(data)
def get_types(self):
return types
def get_benefs(self):
return benefs
def get_pay_methods(self):
return pay_methods
def get_dic(self):
return self.dic
def save_data(self, dic=None):
if dic is None:
dic = self.dic
f = open(data_filename % self.user, 'wb')
pickle.dump(dic, f)
f.close()
os.sync()
def add_new_exp(self, date_str, amount_float, exp_type, pay_method, benef):
self.today = datetime.date(get_year_month_day(date_str)[0], get_year_month_day(date_str)[1], get_year_month_day(date_str)[2])
self.update_dic('Date', self.today)
self.update_dic('Type', exp_type)
self.update_dic('Method', pay_method)
self.update_dic('Benef', benef)
        self.dic['Amount'] = np.append(self.dic['Amount'], amount_float)
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Applies online refinement while running inference.
Instructions: Run static inference first before calling this script. Make sure
to point output_dir to the same folder where static inference results were
saved previously.
For example use, please refer to README.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import model
import nets
import reader
import util
gfile = tf.gfile
SAVE_EVERY = 1 # Defines the interval that predictions should be saved at.
SAVE_PREVIEWS = True  # If set, will save image previews of depth predictions.
FIXED_SEED = 8964 # Fixed seed for repeatability.
flags.DEFINE_string('output_dir', None, 'Directory to store predictions. '
'Assumes that regular inference has been executed before '
'and results were stored in this folder.')
flags.DEFINE_string('data_dir', None, 'Folder pointing to preprocessed '
'triplets to fine-tune on.')
flags.DEFINE_string('triplet_list_file', None, 'Text file containing paths to '
'image files to process. Paths should be relative with '
'respect to the list file location. Every line should be '
'of the form [input_folder_name] [input_frame_num] '
'[output_path], where [output_path] is optional to specify '
'a different path to store the prediction.')
flags.DEFINE_string('triplet_list_file_remains', None, 'Optional text file '
'containing relative paths to image files which should not '
'be fine-tuned, e.g. because of missing adjacent frames. '
'For all files listed, the static prediction will be '
'copied instead. File can be empty. If not, every line '
'should be of the form [input_folder_name] '
'[input_frame_num] [output_path], where [output_path] is '
'optional to specify a different path to take and store '
'the unrefined prediction from/to.')
flags.DEFINE_string('model_ckpt', None, 'Model checkpoint to optimize.')
flags.DEFINE_string('ft_name', '', 'Optional prefix for temporary files.')
flags.DEFINE_string('file_extension', 'png', 'Image data file extension.')
flags.DEFINE_float('learning_rate', 0.0001, 'Adam learning rate.')
flags.DEFINE_float('beta1', 0.9, 'Adam momentum.')
flags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.')
flags.DEFINE_float('ssim_weight', 0.15, 'SSIM loss weight.')
flags.DEFINE_float('smooth_weight', 0.01, 'Smoothness loss weight.')
flags.DEFINE_float('icp_weight', 0.0, 'ICP loss weight.')
flags.DEFINE_float('size_constraint_weight', 0.0005, 'Weight of the object '
'size constraint loss. Use only with motion handling.')
flags.DEFINE_integer('batch_size', 1, 'The size of a sample batch')
flags.DEFINE_integer('img_height', 128, 'Input frame height.')
flags.DEFINE_integer('img_width', 416, 'Input frame width.')
flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.')
flags.DEFINE_enum('architecture', nets.RESNET, nets.ARCHITECTURES,
'Defines the architecture to use for the depth prediction '
'network. Defaults to ResNet-based encoder and accompanying '
'decoder.')
flags.DEFINE_boolean('imagenet_norm', True, 'Whether to normalize the input '
'images channel-wise so that they match the distribution '
'most ImageNet-models were trained on.')
flags.DEFINE_float('weight_reg', 0.05, 'The amount of weight regularization to '
'apply. This has no effect on the ResNet-based encoder '
'architecture.')
flags.DEFINE_boolean('exhaustive_mode', False, 'Whether to exhaustively warp '
'from any frame to any other instead of just considering '
'adjacent frames. Where necessary, multiple egomotion '
'estimates will be applied. Does not have an effect if '
'compute_minimum_loss is enabled.')
flags.DEFINE_boolean('random_scale_crop', False, 'Whether to apply random '
'image scaling and center cropping during training.')
flags.DEFINE_bool('depth_upsampling', True, 'Whether to apply depth '
'upsampling of lower-scale representations before warping to '
'compute reconstruction loss on full-resolution image.')
flags.DEFINE_bool('depth_normalization', True, 'Whether to apply depth '
'normalization, that is, normalizing inverse depth '
'prediction maps by their mean to avoid degeneration towards '
'small values.')
flags.DEFINE_bool('compute_minimum_loss', True, 'Whether to take the '
'element-wise minimum of the reconstruction/SSIM error in '
'order to avoid overly penalizing dis-occlusion effects.')
flags.DEFINE_bool('use_skip', True, 'Whether to use skip connections in the '
'encoder-decoder architecture.')
flags.DEFINE_bool('joint_encoder', False, 'Whether to share parameters '
'between the depth and egomotion networks by using a joint '
'encoder architecture. The egomotion network is then '
'operating only on the hidden representation provided by the '
'joint encoder.')
flags.DEFINE_float('egomotion_threshold', 0.01, 'Minimum egomotion magnitude '
'to apply finetuning. If lower, just forwards the ordinary '
'prediction.')
flags.DEFINE_integer('num_steps', 20, 'Number of optimization steps to run.')
flags.DEFINE_boolean('handle_motion', True, 'Whether the checkpoint was '
'trained with motion handling.')
flags.DEFINE_bool('flip', False, 'Whether images should be flipped as well as '
'resulting predictions (for test-time augmentation). This '
'currently applies to the depth network only.')
FLAGS = flags.FLAGS
flags.mark_flag_as_required('output_dir')
flags.mark_flag_as_required('data_dir')
flags.mark_flag_as_required('model_ckpt')
flags.mark_flag_as_required('triplet_list_file')
def main(_):
"""Runs fine-tuning and inference.
There are three categories of images.
1) Images where we have previous and next frame, and that are not filtered
out by the heuristic. For them, we will use the fine-tuned predictions.
2) Images where we have previous and next frame, but that were filtered out
by our heuristic. For them, we will use the ordinary prediction instead.
3) Images where we have at least one missing adjacent frame. For them, we will
use the ordinary prediction as indicated by triplet_list_file_remains (if
provided). They will also not be part of the generated inference list in
the first place.
Raises:
ValueError: Invalid parameters have been passed.
"""
if FLAGS.handle_motion and FLAGS.joint_encoder:
raise ValueError('Using a joint encoder is currently not supported when '
'modeling object motion.')
if FLAGS.handle_motion and FLAGS.seq_length != 3:
raise ValueError('The current motion model implementation only supports '
'using a sequence length of three.')
if FLAGS.handle_motion and not FLAGS.compute_minimum_loss:
raise ValueError('Computing the minimum photometric loss is required when '
'enabling object motion handling.')
if FLAGS.size_constraint_weight > 0 and not FLAGS.handle_motion:
raise ValueError('To enforce object size constraints, enable motion '
'handling.')
if FLAGS.icp_weight > 0.0:
raise ValueError('ICP is currently not supported.')
if FLAGS.compute_minimum_loss and FLAGS.seq_length % 2 != 1:
raise ValueError('Compute minimum loss requires using an odd number of '
'images in a sequence.')
if FLAGS.compute_minimum_loss and FLAGS.exhaustive_mode:
raise ValueError('Exhaustive mode has no effect when compute_minimum_loss '
'is enabled.')
if FLAGS.img_width % (2 ** 5) != 0 or FLAGS.img_height % (2 ** 5) != 0:
logging.warn('Image size is not divisible by 2^5. For the architecture '
'employed, this could cause artefacts caused by resizing in '
'lower dimensions.')
if FLAGS.output_dir.endswith('/'):
FLAGS.output_dir = FLAGS.output_dir[:-1]
# Create file lists to prepare fine-tuning, save it to unique_file.
unique_file_name = (str(datetime.datetime.now().date()) + '_' +
str(datetime.datetime.now().time()).replace(':', '_'))
unique_file = os.path.join(FLAGS.data_dir, unique_file_name + '.txt')
with gfile.FastGFile(FLAGS.triplet_list_file, 'r') as f:
files_to_process = f.readlines()
files_to_process = [line.rstrip() for line in files_to_process]
files_to_process = [line for line in files_to_process if len(line)]
logging.info('Creating unique file list %s with %s entries.', unique_file,
len(files_to_process))
with gfile.FastGFile(unique_file, 'w') as f_out:
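    # Each triplet is written many times because the input reader consumes one
    # line per fetch: once per optimization step, plus extra fetches whenever
    # predictions are saved (the factor of 3 presumably matches the three
    # frames of a triplet).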
fetches_network = FLAGS.num_steps * FLAGS.batch_size
fetches_saves = FLAGS.batch_size * int(np.floor(FLAGS.num_steps/SAVE_EVERY))
repetitions = fetches_network + 3 * fetches_saves
for i in range(len(files_to_process)):
for _ in range(repetitions):
f_out.write(files_to_process[i] + '\n')
# Read remaining files.
remaining = []
if gfile.Exists(FLAGS.triplet_list_file_remains):
with gfile.FastGFile(FLAGS.triplet_list_file_remains, 'r') as f:
remaining = f.readlines()
remaining = [line.rstrip() for line in remaining]
remaining = [line for line in remaining if len(line)]
logging.info('Running fine-tuning on %s files, %s files are remaining.',
len(files_to_process), len(remaining))
# Run fine-tuning process and save predictions in id-folders.
tf.set_random_seed(FIXED_SEED)
  np.random.seed(FIXED_SEED)
from random import random
import pybullet as p
import pybullet_data
import numpy as np
from scipy.spatial.transform import Rotation as R
import cv2 as cv
from tf_agents.environments import py_environment
from tf_agents.environments import tf_py_environment
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step as ts
from env.objs import plane, ohmni, obstacle
VELOCITY_COEFFICIENT = 10
class Env:
def __init__(self, gui=False, num_of_obstacles=20, dst_rad=3, image_shape=(96, 96)):
# Env constants
self.gui = gui
self.timestep = 0.1
self._left_wheel_id = 0
self._right_wheel_id = 1
# Env specs
self.image_shape = image_shape
self.num_of_obstacles = num_of_obstacles
self.dst_rad = dst_rad
self.destination = np.array([3, 0], dtype=np.float32)
# Init
self.client_id = self._init_ws()
def _init_ws(self):
"""
        Create and start the physics server. There are two modes:
        1. GUI: visualizes the environment and allows controlling
        Ohmni via sliders.
        2. Headless: runs everything in the background, which is suitable
        for AI/ML/RL development.
"""
# Init server
client_id = p.connect(p.GUI if self.gui else p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setTimeStep(self.timestep, physicsClientId=client_id)
p.configureDebugVisualizer(
p.COV_ENABLE_GUI, 0, physicsClientId=client_id)
# Return
return client_id
def _randomize_destination(self):
x = random() * self.dst_rad * (-1 if random() > 0.5 else 1)
y = random() * self.dst_rad * (-1 if random() > 0.5 else 1)
# vibe = random() * 4 * (-1 if random() > 0.5 else 1) # Level 1
# destination = np.array([5, vibe], dtype=np.float32) # Level 1
destination = np.array([x, y], dtype=np.float32) # Level 2
p.addUserDebugLine(
np.append(destination, 0.), # From
np.append(destination, 3.), # To
[1, 0, 0], # Red
physicsClientId=self.client_id
)
return destination
def _build(self):
""" Including plane, ohmni, obstacles into the environment """
# Add gravity
p.setGravity(0, 0, -10, physicsClientId=self.client_id)
# Add plane and ohmni
plane(self.client_id)
ohmni_id, _capture_image = ohmni(self.client_id)
# Add obstacles at random positions
# vibe = random() * 1.5 * (-1 if random() > 0.5 else 1) # Level 1
# obstacle(self.client_id, pos=[3+vibe, 0, 0.5]) # Level 1
for _ in range(self.num_of_obstacles): # Level 2
obstacle(self.client_id, avoids=[
[0, 0], self.destination]) # Level 2
# Return
return ohmni_id, _capture_image
def _reset(self):
""" Remove all objects, then rebuild them """
p.resetSimulation(physicsClientId=self.client_id)
self.destination = self._randomize_destination()
self.ohmni_id, self._capture_image = self._build()
def capture_image(self):
""" Get image from navigation camera """
if self._capture_image is None:
raise ValueError('_capture_image is undefined')
return self._capture_image(self.image_shape)
def getContactPoints(self):
""" Get Ohmni contacts """
return p.getContactPoints(self.ohmni_id, physicsClientId=self.client_id)
def getBasePositionAndOrientation(self):
""" Get Ohmni position and orientation """
return p.getBasePositionAndOrientation(self.ohmni_id, physicsClientId=self.client_id)
def reset(self):
""" Reset the environment """
self._reset()
def step(self, action):
""" Controllers for left/right wheels which are separate """
# Normalize velocities
[left_wheel, right_wheel] = action
left_wheel = left_wheel * VELOCITY_COEFFICIENT
right_wheel = right_wheel * VELOCITY_COEFFICIENT
# Step
p.setJointMotorControl2(self.ohmni_id, self._left_wheel_id,
p.VELOCITY_CONTROL,
targetVelocity=left_wheel,
physicsClientId=self.client_id)
p.setJointMotorControl2(self.ohmni_id, self._right_wheel_id,
p.VELOCITY_CONTROL,
targetVelocity=right_wheel,
physicsClientId=self.client_id)
p.stepSimulation(physicsClientId=self.client_id)
class PyEnv(py_environment.PyEnvironment):
def __init__(self, gui=False, image_shape=(96, 96)):
super(PyEnv, self).__init__()
# Parameters
self.image_shape = image_shape
self.input_shape = self.image_shape + (4,)
self.max_steps = 500
self._fix_vanish_hyperparam = 0.15
self._num_of_obstacles = 25
self._dst_rad = 6
# Actions
self._num_values = 5
self._values = np.linspace(-1, 1, self._num_values)
self._actions = np.transpose([
np.tile(self._values, self._num_values),
np.repeat(self._values, self._num_values)
])
self._num_actions = len(self._actions)
# PyEnvironment variables
self._action_spec = array_spec.BoundedArraySpec(
shape=(), dtype=np.int32,
minimum=0,
maximum=self._num_actions - 1,
name='action')
self._observation_spec = array_spec.BoundedArraySpec(
shape=self.input_shape, dtype=np.float32,
minimum=0,
maximum=1,
name='observation')
# Init bullet server
self._env = Env(
gui,
num_of_obstacles=self._num_of_obstacles,
dst_rad=self._dst_rad,
image_shape=self.image_shape
)
# Internal states
self._state = None
self._episode_ended = False
self._num_steps = 0
# Reset
self._reset()
def _get_image_state(self):
_, _, rgb_img, _, seg_img = self._env.capture_image()
img = np.array(rgb_img, dtype=np.float32) / 255
        # We add a small constant so that black (zero) pixels do not wipe out all downstream parameters
mask = np.minimum(
seg_img + self._fix_vanish_hyperparam,
1 - self._fix_vanish_hyperparam,
dtype=np.float32)
mask = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)
return img, mask
def _get_pose_state(self):
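        # Express the goal position in the robot's body frame (x pointing forward)
        # and also return the cosine of the angle between the forward axis and the
        # direction to the goal (1 means heading straight at it).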
position, orientation = self._env.getBasePositionAndOrientation()
position = np.array(position, dtype=np.float32)
        destination_position = np.append(self._env.destination, 0.)
rotation = R.from_quat(
[-orientation[0], -orientation[1], -orientation[2], orientation[3]])
        rel_position = rotation.apply(destination_position - position)
_pose = rel_position[0:2]
cosine_sim = np.dot([1, 0], _pose) / \
(np.linalg.norm([1, 0]) * np.linalg.norm(_pose))
return _pose.astype(dtype=np.float32), cosine_sim
def _is_finished(self):
""" Compute the distance from agent to destination """
position, _ = self._env.getBasePositionAndOrientation()
position = np.array(position[0:2], dtype=np.float32)
distance = np.linalg.norm(position - self._env.destination)
return distance < 0.5
def _is_fatal(self):
""" Predict a fall """
position, orientation = self._env.getBasePositionAndOrientation()
position = np.array(position, dtype=np.float32)
# Ohmni exceeds the number of steps
if self._num_steps > self.max_steps:
return True
        # Ohmni fell out of the environment
if abs(position[2]) >= 0.5:
return True
# Ohmni is falling down
if abs(orientation[0]) > 0.2 or abs(orientation[1]) > 0.2:
return True
return False
def _is_collided(self):
""" Predict collisions """
collision = self._env.getContactPoints()
for contact in collision:
# Contact with things different from floor
if contact[2] != 0:
return True
return False
def _compute_reward(self):
""" Compute reward and return (<stopped>, <reward>) """
# Reaching the destination
pose, cosine_sim = self._get_pose_state()
if self._is_finished():
return True, 10
# Dead
if self._is_fatal():
return True, -10
# Colliding
if self._is_collided():
return False, -0.1
# Ohmni on his way
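        # Dense shaping reward: positive for facing the goal, reduced as the
        # remaining distance grows, and scaled down so it stays far below the
        # terminal +/-10 rewards above.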
return False, (cosine_sim - min(1, np.linalg.norm(pose)/10))/20
def _reset(self):
""" Reset environment"""
self._env.reset()
self._state = None
self._episode_ended = False
self._num_steps = 0
self.set_state()
return ts.restart(self._state)
def action_spec(self):
""" Return action specs """
return self._action_spec
def observation_spec(self):
""" Return observation specs """
return self._observation_spec
def get_info(self):
return {}
def get_state(self):
return self._state
def set_state(self, state=None):
# Gamifying
(h, w) = self.image_shape
_, mask = self._get_image_state() # Image state
pose, _ = self._get_pose_state() # Pose state
cent = np.array([w / 2, h / 2], dtype=np.float32)
        dest = -pose * 32 + cent  # Transpose/Scale/Transform
color = min(10, np.linalg.norm(pose))/20 + 0.25 # [0.25, 0.75]
mask = cv.line(mask,
(int(cent[1]), int(cent[0])),
(int(dest[1]), int(dest[0])),
(color, color, color), thickness=3)
observation = cv.cvtColor(mask, cv.COLOR_RGB2GRAY)
observation = np.reshape(observation, self.image_shape + (1,))
# Set state
if self._state is None:
init_state = observation
(_, _, stack_channel) = self.input_shape
for _ in range(stack_channel - 1):
init_state = np.append(init_state, observation, axis=2)
            self._state = np.array(init_state, dtype=np.float32)
#!/usr/bin/python3
# number of output figures = 1
import matplotlib as mpl
import numpy as np
from helper.figure import Figure
import helper.grid
import helper.plot
def isParent1D(l1, i1, l2, i2):
if l1 + 1 == l2:
if l1 == 0:
if i2 == 1: return True
else:
if i2 in [2*i1-1, 2*i1+1]: return True
return False
def getParentDimension(l1, i1, l2, i2):
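  # Return the single dimension in which (l2, i2) is a hierarchical child of
  # (l1, i1); return None if the points differ in more than one dimension or
  # are not in a parent/child relation at all.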
t = l1.shape[0]
parentDimension = None
for t in range(d):
if isParent1D(l1[t], i1[t], l2[t], i2[t]):
if parentDimension is not None: return None
parentDimension = t
else:
if (l1[t] != l2[t]) or (i1[t] != i2[t]): return None
return parentDimension
def plotLine(ax, xx, yy, l, n, colormap="viridis", maxIntensity=1):
#ax.plot(xx, yy, "k-")
N = xx.shape[0]
segments = [np.array([[xx[k], yy[k]], [xx[k+1], yy[k+1]]])
for k in range(N - 1)]
lineCollection = mpl.collections.LineCollection(
segments, cmap=colormap, norm=mpl.colors.Normalize(0, 1))
t = maxIntensity * np.linspace(0, 1, n+1)
tt = np.linspace(t[l], t[l+1], N)
lineCollection.set_array(tt)
ax.add_collection(lineCollection)
n = 4
d = 2
b = 1
grid = helper.grid.RegularSparseBoundary(n, d, b)
X, L, I = grid.generate()
K = np.lexsort((*L.T, np.sum(L, axis=1)))
X, L, I = X[K,:], L[K,:], I[K,:]
N = X.shape[0]
fig = Figure.create(figsize=(3, 3), scale=1.5)
ax = fig.gca()
angle = 35 / 180 * np.pi
colormap = mpl.cm.get_cmap("Blues_r")
maxIntensity = 0.55
normalize = mpl.colors.Normalize(0, n / maxIntensity)
for k1 in range(N):
for k2 in range(N):
parentDimension = getParentDimension(L[k1,:], I[k1,:], L[k2,:], I[k2,:])
if parentDimension is not None:
l = L[k1,parentDimension]
nn = int(125 * np.linalg.norm(X[k2,:] - X[k1,:]))
tt = np.linspace(0, 1, nn)
XX = helper.plot.getQuadraticBezierCurveViaAngle(
X[k1,:], X[k2,:], angle, tt)
XX = XX[:-2,:]
curPlotLine = (lambda ax, xx, yy:
          plotLine(ax, xx, yy, np.sum(L[k1,:])
# -*- coding: utf-8 -*-
import numpy as np
import cv2
# landmarks line mask
def generate_mask(image, landmarks):
'''
generate face mask according to landmarks
Args:
image: numpy.ndarray
landmarks: 68x2 numpy.ndarray
Return:
        a 3-channel mask map stacking the line, region and wider-region masks
'''
# layer1: line
# layer2: region without expansion
# layer3: wider mask
linemask = generate_line_mask(image, landmarks)
regionmask = generate_region_mask(image, landmarks)
widermask = generate_wider_mask(image, landmarks)
mask = np.stack([linemask, regionmask, widermask]).transpose(1, 2, 0)
# return channel: BGR(linemask, regionmask, widermask)channel0:
return mask
def generate_line_mask(image, landmarks):
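    # The index ranges below follow the standard 68-point facial landmark layout:
    # 0-16 jaw line, 17-21 / 22-26 eyebrows, 27-35 nose, 36-41 / 42-47 eyes,
    # 48-67 mouth.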
linemask = image.copy() # np.zeros_like(image).astype(np.uint8)
# face
linemask = connect_line(linemask, landmarks[0:17])
# eyebow
linemask = connect_line(linemask, landmarks[17:22])
linemask = connect_line(linemask, landmarks[22:27])
# nose
linemask = connect_line(linemask, np.vstack([landmarks[27:31], landmarks[33]]))
linemask = connect_line(linemask, landmarks[31:36])
# eyes
linemask = connect_line(linemask, np.vstack([landmarks[36:42], landmarks[36]]))
linemask = connect_line(linemask, np.vstack([landmarks[42:48], landmarks[42]]))
# mouth
linemask = connect_line(linemask, np.vstack([landmarks[48:60], landmarks[48]]))
linemask = connect_line(linemask, np.vstack([landmarks[60:68], landmarks[60]]))
return linemask
def connect_line(input, landmarks):
img = input.copy()
size= len(landmarks)
for i in range(0, size-1):
img = cv2.line(img,
(landmarks[i, 0], landmarks[i, 1]),
(landmarks[i+1, 0], landmarks[i+1, 1]),
(255, 255, 255),
1,
cv2.LINE_AA)
return img
# face landmarks origin
def generate_region_mask(image, landmarks):
    regionmask = np.zeros_like(image[:, :, 0])
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
import keras
from keras.models import load_model
from keras import datasets
import math
from PIL import Image
import mnist_manifold
import mnist_model
import warnings
warnings.filterwarnings("ignore")
'''
Load MNIST model, manifold model, and transformed adversarial examples
Loop through adversarial examples, find prediction, adjust prediction
based on manifold and norms
Report statistics
'''
PATH_TO_MODEL = "models/mnist_model.h5"
PATH_TO_DATA = "datasets/test"
def l1(point, manifold):
l1_distances = [
abs(point[0] - man_point[0]) + abs(point[1] - man_point[1])
for man_point in manifold
]
return np.mean(l1_distances)
def l2(point, manifold):
l2_distances = [
np.sqrt(
(point[0] - man_point[0]) * (point[0] - man_point[0])
+ (point[1] - man_point[1]) * (point[1] - man_point[1])
)
for man_point in manifold
]
return np.mean(l2_distances)
def fractional_norm(point, manifold, denominator):
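    # Average "fractional" distance (p-norm with p = 1/denominator) from ``point``
    # to every manifold point. The differences are not taken in absolute value, so
    # negative differences raised to a fractional power produce NaN; np.nanmean
    # below simply skips those entries.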
frac_distances = [
(
(point[0] - man_point[0]) ** (1 / denominator)
+ (point[1] - man_point[1]) ** (1 / denominator)
)
** denominator
for man_point in manifold
]
return np.nanmean(frac_distances)
def similarity(point, manifold, method, denominator=1):
distance = 0.0
if method == "L1":
distance = l1(point, manifold)
elif method == "L2":
distance = l2(point, manifold)
elif method == "fraction":
distance = fractional_norm(point, manifold, denominator)
return distance
def mnist_preprocessing():
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
color_list = [
"red",
"orange",
"yellow",
"lime",
"green",
"cyan",
"blue",
"purple",
"fuchsia",
"peru",
]
colors = [color_list[y_train[j]] for j in range(len(y_train))]
x_train, y_train, x_test, y_test, input_shape = mnist_model.preprocessing(
x_train, y_train, x_test, y_test
)
x_test = x_test[:500]
y_test = y_test[:500]
return x_test, y_test, colors
def setup_model():
model = load_model(PATH_TO_MODEL)
model = mnist_manifold.convert_to_model(model)
model.trainable = True
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=["accuracy"],
)
return model
def pred_confidence(model, example, label):
orig_confs = model.predict(example)
pred = np.argmax(orig_confs)
pred_conf = orig_confs[0, pred]
print(label + " prediction: " + str(pred) + " with confidence: " + str(pred_conf))
return pred
def main():
# load data, manifold, and embedding
x_test, y_test, colors = mnist_preprocessing()
manifold = np.load(
os.path.join(os.path.dirname(__file__), "mnist-ptsne-20000-2d.npy")
)
embedding = pickle.load(
open(os.path.join(os.path.dirname(__file__), "mnist-ptsne-20000-2d.pkl"), "rb")
)
# for later statistical analysis
net_incorrect = 0
net_correct = 0
net_incorrect_sim_incorrect = 0
net_incorrect_sim_correct = 0
net_correct_sim_incorrect = 0
net_correct_sim_correct = 0
if os.path.exists(PATH_TO_MODEL):
model = setup_model()
# how good the model is without adversarial examples
score = model.evaluate(x_test, y_test, verbose=0)
print("Original test loss:", score[0])
print("Original test accuracy:", score[1])
adversarials = []
for i in range(0, 500):
adversarial = np.load(os.path.join(PATH_TO_DATA, str(i) + ".npy"))
# adversarial = np.random.randint(2, size=(1, 28, 28, 1))
adversarials.append(adversarial)
adv_pred = pred_confidence(model, adversarial, "Adversarial")
orig_pred = pred_confidence(model, np.array([x_test[i]]), "Original")
print("Ground truth: " + str(np.argmax(y_test[i])))
adv_mx = embedding.transform(
adversarial.reshape(1, adversarial.shape[1] * adversarial.shape[2])
)
            int_labels = np.argmax(y_test, axis=1)
from redback.constants import *
from redback.transient_models.magnetar_models import magnetar_only, basic_magnetar, _evolving_gw_and_em_magnetar
import numpy as np
from astropy.cosmology import Planck18 as cosmo # noqa
from scipy.interpolate import interp1d
from collections import namedtuple
import astropy.units as uu # noqa
import astropy.constants as cc # noqa
from redback.utils import calc_kcorrected_properties, interpolated_barnes_and_kasen_thermalisation_efficiency, \
electron_fraction_from_kappa, citation_wrapper
from redback.sed import blackbody_to_flux_density
def _ejecta_dynamics_and_interaction(time, mej, beta, ejecta_radius, kappa, n_ism,
magnetar_luminosity, pair_cascade_switch, use_gamma_ray_opacity, **kwargs):
"""
:param time: time in source frame
:param mej: ejecta mass in solar masses
:param beta: initial ejecta velocity in c
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param magnetar_luminosity: evaluated magnetar luminosity in source frame
:param pair_cascade_switch: whether to account for pair cascade losses
:param use_gamma_ray_opacity: whether to use gamma ray opacity to calculate thermalisation efficiency
:param kwargs: Additional parameters
:param kappa_gamma: Gamma-ray opacity for leakage efficiency, only used if use_gamma_ray_opacity = True
:param thermalisation_efficiency: magnetar thermalisation efficiency only used if use_gamma_ray_opacity = False
:param ejecta albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:return: named tuple with 'lorentz_factor', 'bolometric_luminosity', 'comoving_temperature',
'radius', 'doppler_factor', 'tau', 'time', 'kinetic_energy',
'erad_total', 'thermalisation_efficiency'
"""
mag_lum = magnetar_luminosity
ejecta_albedo = kwargs.get('ejecta_albedo', 0.5)
pair_cascade_fraction = kwargs.get('pair_cascade_fraction', 0.05)
mej = mej * solar_mass
lorentz_factor = []
radius = []
doppler_factor = []
lbol_ejecta = []
lbol_rest = []
comoving_temperature = []
tau = []
teff = []
internal_energy = 0.5 * beta ** 2 * mej * speed_of_light ** 2
comoving_volume = (4 / 3) * np.pi * ejecta_radius ** 3
gamma = 1 / np.sqrt(1 - beta ** 2)
t0_comoving = 1.3
tsigma_comoving = 0.11
for i in range(len(time)):
beta = np.sqrt(1 - 1 / gamma ** 2)
doppler_factor_temp = 1 / (gamma * (1 - beta))
if i > 0:
dt = time[i] - time[i - 1]
gamma = gamma + dgamma_dt * dt
ejecta_radius = ejecta_radius + drdt * dt
comoving_volume = comoving_volume + dcomoving_volume_dt * dt
internal_energy = internal_energy + dinternal_energy_dt * dt
swept_mass = (4 / 3) * np.pi * ejecta_radius ** 3 * n_ism * proton_mass
comoving_pressure = internal_energy / (3 * comoving_volume)
comoving_time = doppler_factor_temp * time[i]
comoving_dvdt = 4 * np.pi * ejecta_radius ** 2 * beta * speed_of_light
rad_denom = (1 / 2) - (1 / 3.141592654) * np.arctan((comoving_time - t0_comoving) / tsigma_comoving)
comoving_radiative_luminosity = (4 * 10 ** 49 * (mej / (2 * 10 ** 33) * 10 ** 2) * rad_denom ** 1.3)
tau_temp = kappa * (mej / comoving_volume) * (ejecta_radius / gamma)
if tau_temp <= 1:
comoving_emitted_luminosity = (internal_energy * speed_of_light) / (ejecta_radius / gamma)
comoving_temp_temperature = (internal_energy / (radiation_constant * comoving_volume)) ** (1./4.)
else:
comoving_emitted_luminosity = (internal_energy * speed_of_light) / (tau_temp * ejecta_radius / gamma)
comoving_temp_temperature = (internal_energy / (radiation_constant * comoving_volume * tau_temp)) ** (1./4.)
emitted_luminosity = comoving_emitted_luminosity * doppler_factor_temp ** 2
vej = ((1 / gamma) ** 2 + 1) ** 0.5 * speed_of_light
if use_gamma_ray_opacity:
kappa_gamma = kwargs["kappa_gamma"]
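            # Gamma-ray leakage: the optical depth scales as
            # 3 * kappa_gamma * mej / (4 * pi * vej**2 * t**2), so the
            # thermalisation efficiency approaches 1 while the ejecta are
            # optically thick and falls off as t**-2 once they become transparent.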
prefactor = 3 * kappa_gamma * mej / (4 * np.pi * vej**2)
thermalisation_efficiency = 1 - np.exp(-prefactor * time[i] ** -2)
else:
thermalisation_efficiency = kwargs["thermalisation_efficiency"]
drdt = (beta * speed_of_light) / (1 - beta)
dswept_mass_dt = 4 * np.pi * ejecta_radius ** 2 * n_ism * proton_mass * drdt
dedt = thermalisation_efficiency * mag_lum[
i] + doppler_factor_temp ** 2 * comoving_radiative_luminosity - doppler_factor_temp ** 2 * comoving_emitted_luminosity
comoving_dinternal_energydt = thermalisation_efficiency * doppler_factor_temp ** (-2) * mag_lum[
i] + comoving_radiative_luminosity - comoving_emitted_luminosity - comoving_pressure * comoving_dvdt
dcomoving_volume_dt = comoving_dvdt * doppler_factor_temp
dinternal_energy_dt = comoving_dinternal_energydt * doppler_factor_temp
dgamma_dt = (dedt - gamma * doppler_factor_temp * comoving_dinternal_energydt - (
gamma ** 2 - 1) * speed_of_light ** 2 * dswept_mass_dt) / (
mej * speed_of_light ** 2 + internal_energy + 2 * gamma * swept_mass * speed_of_light ** 2)
lorentz_factor.append(gamma)
lbol_ejecta.append(comoving_emitted_luminosity)
lbol_rest.append(emitted_luminosity)
comoving_temperature.append(comoving_temp_temperature)
radius.append(ejecta_radius)
tau.append(tau_temp)
doppler_factor.append(doppler_factor_temp)
teff.append(thermalisation_efficiency)
lorentz_factor = np.array(lorentz_factor)
v0 = ((1/lorentz_factor)**2 + 1)**0.5 * speed_of_light
bolometric_luminosity = np.array(lbol_rest)
radius = np.array(radius)
if pair_cascade_switch:
tlife_t = (0.6/(1 - ejecta_albedo))*(pair_cascade_fraction/0.1)**0.5 * (mag_lum/1.0e45)**0.5 \
* (v0/(0.3*speed_of_light))**(0.5) * (time/86400)**(-0.5)
bolometric_luminosity = bolometric_luminosity / (1.0 + tlife_t)
comoving_temperature = (bolometric_luminosity / (4.0 * np.pi * np.array(radius) ** (2.0) * sigma_sb)) ** (0.25)
dynamics_output = namedtuple('dynamics_output', ['lorentz_factor', 'bolometric_luminosity', 'comoving_temperature',
'radius', 'doppler_factor', 'tau', 'time', 'kinetic_energy',
'erad_total', 'thermalisation_efficiency'])
dynamics_output.lorentz_factor = lorentz_factor
dynamics_output.bolometric_luminosity = bolometric_luminosity
dynamics_output.comoving_temperature = np.array(comoving_temperature)
dynamics_output.radius = radius
dynamics_output.doppler_factor = np.array(doppler_factor)
dynamics_output.tau = tau
dynamics_output.time = time
dynamics_output.kinetic_energy = (lorentz_factor - 1)*mej*speed_of_light**2
dynamics_output.erad_total = np.trapz(bolometric_luminosity, x=time)
dynamics_output.thermalisation_efficiency = teff
return dynamics_output
def _comoving_blackbody_to_flux_density(dl, frequency, radius, temperature, doppler_factor):
"""
:param dl: luminosity distance in cm
:param frequency: frequency to calculate in Hz - Must be same length as time array or a single number
:param radius: ejecta radius in cm
:param temperature: comoving temperature in K
:param doppler_factor: doppler_factor
:return: flux_density
"""
## adding units back in to ensure dimensions are correct
frequency = frequency * uu.Hz
radius = radius * uu.cm
dl = dl * uu.cm
temperature = temperature * uu.K
planck = cc.h.cgs
speed_of_light = cc.c.cgs
boltzmann_constant = cc.k_B.cgs
num = 2 * np.pi * planck * frequency ** 3 * radius ** 2
denom = dl ** 2 * speed_of_light ** 2 * doppler_factor ** 2
frac = 1. / (np.exp((planck * frequency) / (boltzmann_constant * temperature * doppler_factor)) - 1)
flux_density = num / denom * frac
return flux_density
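# Illustrative usage sketch (not part of the original module): the distance,
# radius, temperature and Doppler factor below are arbitrary placeholder values
# chosen only to show how the helper above is called; the result is an astropy
# Quantity that can be converted to mJy.
def _example_comoving_flux_density():
    return _comoving_blackbody_to_flux_density(dl=3.1e26, frequency=5e14,
                                               radius=1e15, temperature=5e3,
                                               doppler_factor=1.2).to(uu.mJy)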
def _comoving_blackbody_to_luminosity(frequency, radius, temperature, doppler_factor):
"""
:param frequency: frequency to calculate in Hz - Must be same length as time array or a single number
:param radius: ejecta radius in cm
:param temperature: comoving temperature in K
:param doppler_factor: doppler_factor
:return: luminosity
"""
## adding units back in to ensure dimensions are correct
frequency = frequency * uu.Hz
radius = radius * uu.cm
temperature = temperature * uu.K
planck = cc.h.cgs
speed_of_light = cc.c.cgs
boltzmann_constant = cc.k_B.cgs
num = 8 * np.pi ** 2 * planck * frequency ** 4 * radius ** 2
denom = speed_of_light ** 2 * doppler_factor ** 2
frac = 1. / (np.exp((planck * frequency) / (boltzmann_constant * temperature * doppler_factor)) - 1)
luminosity = num / denom * frac
return luminosity
@citation_wrapper('https://ui.adsabs.harvard.edu/abs/2013ApJ...776L..40Y/abstract')
def basic_mergernova(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, p0, logbp,
mass_ns, theta_pb, thermalisation_efficiency, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param p0: initial spin period in seconds
:param logbp: log10 polar magnetic field strength in Gauss
:param mass_ns: mass of neutron star in solar masses
:param theta_pb: angle between spin and magnetic field axes
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is False
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
pair_cascade_switch = kwargs.get('pair_cascade_switch', False)
frequency = kwargs['frequency']
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
dl = cosmo.luminosity_distance(redshift).cgs.value
bp = 10**logbp
magnetar_luminosity = basic_magnetar(time=time_temp, p0=p0, bp=bp, mass_ns=mass_ns, theta_pb=theta_pb)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
thermalisation_efficiency=thermalisation_efficiency,
pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=False, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
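# Hedged usage sketch (not from the original authors): every numerical value
# below is an arbitrary placeholder chosen only to illustrate the calling
# convention; 'frequency' and 'output_format' are required keyword arguments.
def _example_basic_mergernova_usage():
    tdays = np.linspace(0.5, 30, 50)  # observer-frame time in days
    return basic_mergernova(tdays, redshift=0.1, mej=5e-2, beta=0.2,
                            ejecta_radius=1e11, kappa=1.0, n_ism=1e-3,
                            p0=1e-3, logbp=15.0, mass_ns=1.4, theta_pb=0.5,
                            thermalisation_efficiency=0.5,
                            frequency=5e14, output_format='flux_density')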
@citation_wrapper('Sarin et al. in prep.')
def general_mergernova(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn,
thermalisation_efficiency, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is True
:param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
frequency = kwargs['frequency']
pair_cascade_switch = kwargs.get('pair_cascade_switch', True)
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
dl = cosmo.luminosity_distance(redshift).cgs.value
magnetar_luminosity = magnetar_only(time=time_temp, l0=l0, tau=tau_sd, nn=nn)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
thermalisation_efficiency=thermalisation_efficiency,
pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=False, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
@citation_wrapper('Sarin et al. in prep.')
def general_mergernova_thermalisation(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn,
kappa_gamma, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param kappa_gamma: gamma-ray opacity used to calculate magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is True
:param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
frequency = kwargs['frequency']
pair_cascade_switch = kwargs.get('pair_cascade_switch', True)
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
dl = cosmo.luminosity_distance(redshift).cgs.value
magnetar_luminosity = magnetar_only(time=time_temp, l0=l0, tau=tau_sd, nn=nn)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
kappa_gamma=kappa_gamma, pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=True, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
@citation_wrapper('Sarin et al. in prep.')
def general_mergernova_evolution(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, logbint,
logbext, p0, chi0, radius, logmoi, kappa_gamma, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param logbint: log10 internal magnetic field in G
:param logbext: log10 external magnetic field in G
:param p0: spin period in s
:param chi0: initial inclination angle
:param radius: radius of NS in KM
:param logmoi: log10 moment of inertia of NS
:param kappa_gamma: gamma-ray opacity used to calculate magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is True
:param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
frequency = kwargs['frequency']
pair_cascade_switch = kwargs.get('pair_cascade_switch', True)
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
dl = cosmo.luminosity_distance(redshift).cgs.value
bint = 10 ** logbint
bext = 10 ** logbext
radius = radius * km_cgs
moi = 10 ** logmoi
output = _evolving_gw_and_em_magnetar(time=time_temp, bint=bint, bext=bext, p0=p0, chi0=chi0, radius=radius, moi=moi)
magnetar_luminosity = output.Edot_d
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
kappa_gamma=kappa_gamma, pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=True, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
def _trapped_magnetar_lum(time, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn, thermalisation_efficiency,
**kwargs):
"""
:param time: time in source frame
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: 'output_format' - whether to output flux density or AB magnitude
:param kwargs: 'frequency' in Hertz to evaluate the mergernova emission - use a typical X-ray frequency
:return: luminosity
"""
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
magnetar_luminosity = magnetar_only(time=time_temp, l0=l0, tau=tau_sd, nn=nn)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
thermalisation_efficiency=thermalisation_efficiency,
pair_cascade_switch=False, use_gamma_ray_opacity=False)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
tau_func = interp1d(time_temp, y=output.tau)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
optical_depth = tau_func(time)
frequency = kwargs['frequency']
trapped_ejecta_lum = _comoving_blackbody_to_luminosity(frequency=frequency, radius=rad,
temperature=temp, doppler_factor=df)
lsd = magnetar_only(time, l0=l0, tau=tau_sd, nn=nn)
lum = np.exp(-optical_depth) * lsd + trapped_ejecta_lum
return lum
def _trapped_magnetar_flux(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn,
thermalisation_efficiency, photon_index, **kwargs):
"""
:param time: time in observer frame in seconds
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: 'output_format' - whether to output flux density or AB magnitude
:param kwargs: 'frequency' in Hertz to evaluate the mergernova emission - use a typical X-ray frequency
:param kwargs: 'photon_index' used to calculate k correction and convert from luminosity to flux
:return: integrated flux
"""
frequency = kwargs['frequency']
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
kwargs['frequency'] = frequency
lum = _trapped_magnetar_lum(time, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn, thermalisation_efficiency,
**kwargs)
dl = cosmo.luminosity_distance(redshift).cgs.value
kcorr = (1. + redshift) ** (photon_index - 2)
flux = lum / (4 * np.pi * dl ** 2 * kcorr)
return flux
@citation_wrapper('https://ui.adsabs.harvard.edu/abs/2017ApJ...835....7S/abstract')
def trapped_magnetar(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn, thermalisation_efficiency,
**kwargs):
"""
:param time: time in source frame or observer frame depending on output format in seconds
:param redshift: redshift - not used if evaluating luminosity
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: 'output_format' - whether to output luminosity or flux
:param kwargs: 'frequency' in Hertz to evaluate the mergernova emission - use a typical X-ray frequency
:param kwargs: 'photon_index' only used if calculating the flux lightcurve
:return: luminosity or integrated flux
"""
if kwargs['output_format'] == 'luminosity':
return _trapped_magnetar_lum(time, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn,
thermalisation_efficiency, **kwargs)
elif kwargs['output_format'] == 'flux':
return _trapped_magnetar_flux(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn,
thermalisation_efficiency, **kwargs)
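# Hedged usage sketch (assumed parameter values, for illustration only): evaluate
# the trapped-magnetar model as a luminosity; 'frequency' should be a typical
# X-ray frequency in Hz.
def _example_trapped_magnetar_usage():
    tsec = np.geomspace(1e2, 1e6, 50)  # source-frame time in seconds
    return trapped_magnetar(tsec, redshift=0.1, mej=1e-2, beta=0.2,
                            ejecta_radius=1e11, kappa=1.0, n_ism=1e-3,
                            l0=1e47, tau_sd=1e4, nn=3.0,
                            thermalisation_efficiency=0.5,
                            frequency=2.4e17, output_format='luminosity')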
def _general_metzger_magnetar_driven_kilonova_model(time, mej, vej, beta, kappa, magnetar_luminosity,
use_gamma_ray_opacity, **kwargs):
"""
:param time: time array to evaluate model on in source frame in seconds
:param mej: ejecta mass in solar masses
:param vej: minimum initial velocity
:param beta: velocity power law slope (M=v^-beta)
:param kappa: opacity
:param magnetar_luminosity: evaluated magnetar luminosity in source frame
:param pair_cascade_switch: whether to account for pair cascade losses
:param use_gamma_ray_opacity: whether to use gamma ray opacity to calculate thermalisation efficiency
:param kwargs: Additional parameters
:param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.01
:param kappa_gamma: Gamma-ray opacity for leakage efficiency, only used if use_gamma_ray_opacity = True
:param thermalisation_efficiency: magnetar thermalisation efficiency only used if use_gamma_ray_opacity = False
:param neutron_precursor_switch: whether to have neutron precursor emission, default True
:param pair_cascade_switch: whether to account for pair cascade losses, default is True
:param magnetar_heating: whether magnetar heats all layers or just the bottom layer.
:param vmax: maximum initial velocity of mass layers, default is 0.7c
:return: named tuple with 'lorentz_factor', 'bolometric_luminosity', 'temperature',
'r_photosphere', 'kinetic_energy','erad_total', 'thermalisation_efficiency'
"""
pair_cascade_switch = kwargs.get('pair_cascade_switch', True)
ejecta_albedo = kwargs.get('ejecta_albedo', 0.5)
pair_cascade_fraction = kwargs.get('pair_cascade_fraction', 0.01)
neutron_precursor_switch = kwargs.get('neutron_precursor_switch', True)
magnetar_heating = kwargs.get('magnetar_heating', 'first_layer')
vmax = kwargs.get('vmax', 0.7)
tdays = time/day_to_s
time_len = len(time)
mass_len = 200
# set up kilonova physics
av, bv, dv = interpolated_barnes_and_kasen_thermalisation_efficiency(mej, vej)
# thermalisation from Barnes+16
e_th = 0.36 * (np.exp(-av * tdays) + np.log1p(2.0 * bv * tdays ** dv) / (2.0 * bv * tdays ** dv))
electron_fraction = electron_fraction_from_kappa(kappa)
t0 = 1.3 #seconds
sig = 0.11 #seconds
tau_neutron = 900 #seconds
# convert to astrophysical units
m0 = mej * solar_mass
v0 = vej * speed_of_light
ek_tot_0 = 0.5 * m0 * v0 ** 2
# set up mass and velocity layers
vmin = vej
vel = np.linspace(vmin, vmax, mass_len)
m_array = mej * (vel/vmin)**(-beta)
v_m = vel * speed_of_light
# set up arrays
time_array = np.tile(time, (mass_len, 1))
e_th_array = np.tile(e_th, (mass_len, 1))
edotr = np.zeros((mass_len, time_len))
time_mask = time > t0
time_1 = time_array[:, time_mask]
time_2 = time_array[:, ~time_mask]
edotr[:,time_mask] = 2.1e10 * e_th_array[:, time_mask] * ((time_1/ (3600. * 24.)) ** (-1.3))
edotr[:, ~time_mask] = 4.0e18 * (0.5 - (1. / np.pi) * np.arctan((time_2 - t0) / sig)) ** (1.3) * e_th_array[:,~time_mask]
lsd = magnetar_luminosity
# set up empty arrays
energy_v = | np.zeros((mass_len, time_len)) | numpy.zeros |
from ._contents import Contents
import copy
import numbers
import numpy as np
import more_itertools
class Arraytainer(Contents, np.lib.mixins.NDArrayOperatorsMixin):
_arrays = (np.ndarray,)
#
# Constructor Methods
#
def __init__(self, contents, convert_arrays=True, greedy=False, nested=True):
# All arraytainers must minimally comprise of a list:
if not isinstance(contents, (list, dict, tuple, Arraytainer)):
contents = [contents]
# To prevent array converter converting outer list to array:
greedy=True
if convert_arrays:
contents = self._convert_contents_to_arrays(contents, greedy)
super().__init__(contents, nested, greedy=greedy) #convert_arrays=convert_arrays
@classmethod
def from_array(cls, array, shapes, order='C', convert_arrays=True, greedy=False, nested=True):
# Concatenate shapes tuple into a single arraytainer:
if not isinstance(shapes, tuple):
shapes = tuple([shapes])
shapes = cls._concatenate_elements_to_array(shapes)
if not isinstance(shapes, Arraytainer):
raise ValueError('shapes must contain at least one arraytainer.')
# Ensure correct number of elements in array:
total_size = np.prod(shapes).sum_all()
if total_size != array.size:
raise ValueError(f'Array contains {array.size} elements, but shapes '
f'contains {total_size} elements.')
vector = array.flatten(order=order)
contents = cls._create_contents_from_vector(vector, shapes, order)
return cls(contents, convert_arrays, greedy, nested)
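# Illustrative sketch (hypothetical shapes and values, not part of the API docs):
#   shapes = Arraytainer({'a': np.array([2, 2]), 'b': np.array([3])})
#   arr = Arraytainer.from_array(np.arange(7), shapes)
# would rebuild arr['a'] with shape (2, 2) from the first four elements of the
# flat vector and arr['b'] with shape (3,) from the remaining three.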
@classmethod
def _concatenate_elements_to_array(cls, val_tuple):
val_list = list(val_tuple)
for idx, val_i in enumerate(val_list):
if not isinstance(val_i, (Arraytainer, *cls._arrays)):
val_i = cls._convert_to_array(val_i)
# Shape arrays must have at least one dimension for concatenate:
if val_i.ndim == 0:
val_i = val_i[None]
elif isinstance(val_i, Arraytainer):
# 0 dimensional arrays in arraytainer cause concatenate to throw errors:
val_i = np.atleast_1d(val_i)
val_list[idx] = val_i
return np.concatenate(val_list)
@classmethod
def _create_contents_from_vector(cls, vector, shapes, order, elem_idx=None, first_call=True):
if elem_idx is None:
elem_idx = 0
new_contents = {}
for key, shape in shapes.items():
if isinstance(shape, Arraytainer):
new_contents[key], elem_idx = cls._create_contents_from_vector(vector, shape, order, elem_idx, first_call=False)
else:
array_vals, elem_idx = cls._extract_array_vals(vector, elem_idx, shape)
new_contents[key] = array_vals.reshape(shape, order=order)
if shapes._type is list:
new_contents = list(new_contents.values())
if first_call:
output = new_contents
else:
output = (new_contents, elem_idx)
return output
@staticmethod
def _extract_array_vals(vector, elem_idx, shape):
# Jaxtainer version of this method uses jnp methods instead of np:
num_elem = np.prod(shape)
array_vals = vector[elem_idx:elem_idx+num_elem]
elem_idx += num_elem
return array_vals, elem_idx
#
# Array Conversion Methods
#
@staticmethod
def _convert_to_array(val):
# Note that Jaxtainer uses jnp.array:
return np.array(val)
def _convert_contents_to_arrays(self, contents, greedy):
contents = self._unpack_if_arraytainer(contents)
if isinstance(contents, tuple):
contents = list(contents)
contents_iter = contents.items() if isinstance(contents, dict) else enumerate(contents)
for key, val in contents_iter:
val = self._unpack_if_arraytainer(val)
if isinstance(val, dict):
contents[key] = self._convert_contents_to_arrays(val, greedy)
elif isinstance(val, (list, tuple)):
# Check before altering contents on val:
any_arrays_in_val = any(isinstance(val_i, self._arrays) for val_i in val)
# List of numbers should be directly converted to an array:
if all(isinstance(val_i, numbers.Number) for val_i in val):
converted_vals = self._convert_to_array(val)
else:
converted_vals = self._convert_contents_to_arrays(val, greedy)
if not greedy and self._can_combine_list_into_single_array(converted_vals, any_arrays_in_val):
converted_vals = self._convert_to_array(converted_vals)
contents[key] = converted_vals
else:
contents[key] = self._convert_to_array(val)
return contents
@staticmethod
def _unpack_if_arraytainer(val):
if isinstance(val, Arraytainer):
val = val.unpack()
return val
def _can_combine_list_into_single_array(self, converted_list, any_arrays_in_val):
can_convert = not any_arrays_in_val and \
self._all_elems_are_arrays(converted_list) and \
self._all_arrays_of_equal_shape(converted_list)
return can_convert
def _all_elems_are_arrays(self, converted_list):
return all(isinstance(val_i, self._arrays) for val_i in converted_list)
def _all_arrays_of_equal_shape(self, converted_list):
# Converted list could be empty:
if converted_list:
first_array_shape = converted_list[0].shape
all_equal = all(array_i.shape == first_array_shape for array_i in converted_list)
else:
all_equal = True
return all_equal
#
# Getter Methods
#
def __getitem__(self, key):
if isinstance(key, self._arrays) or self._is_slice(key):
item = self._get_with_array(key)
else:
item = super().__getitem__(key, greedy=True)
return item
def _get_with_array(self, array_key):
item = {key: self._contents[key][array_key] for key in self.keys()}
if self._type is list:
item = list(item.values())
return self.__class__(item, greedy=True)
#
# Setter Methods
#
def update(self, new_elem, *key_iterable):
new_elem = self._convert_to_array_or_arraytainer(new_elem)
super().update(new_elem, *key_iterable)
def assign(self, new_val, *key_iterable):
new_val = self._convert_to_array_or_arraytainer(new_val)
super().assign(new_val, *key_iterable)
def _convert_to_array_or_arraytainer(self, val):
if isinstance(val, numbers.Number):
val = self._convert_to_array(val)
if not isinstance(val, self._arrays):
val = self.__class__(val)
return val
def __setitem__(self, key, new_value):
if isinstance(key, self._arrays) or self._is_slice(key):
self._set_with_array(key, new_value)
elif isinstance(key, Arraytainer):
self._set_with_arraytainer(key, new_value)
else:
super().__setitem__(key, new_value)
@staticmethod
def _is_slice(val):
if isinstance(val, slice):
is_slice = True
# Slices accross multiple dimensions appear as tuples of ints/slices/Nones (e.g. my_array[3, 1:2, :])
elif isinstance(val, tuple) and all(isinstance(val_i, (type(None), slice, int)) for val_i in val):
is_slice = True
elif val is None:
is_slice = True
else:
is_slice = False
return is_slice
def _set_with_array(self, array_key, new_value):
for key in self.keys():
value_i = new_value[key] if isinstance(new_value, Arraytainer) else new_value
# Note that Jaxtainers use different _set_array_values method:
self._set_array_values(key, array_key, value_i)
def _set_array_values(self, key, idx, new_value):
self._contents[key][idx] = new_value
def _set_with_arraytainer(self, arraytainer_key, new_value):
for key, val in arraytainer_key.items():
new_value_i = new_value[key] if isinstance(new_value, Arraytainer) else new_value
if isinstance(val, self._arrays):
self._set_array_values(key, val, new_value_i)
else:
self._contents[key][val] = new_value_i
#
# Array Methods and Properties
#
@property
def T(self):
return np.transpose(self)
def all(self):
for key in self.keys():
# Numpy/Jax arrays also have an 'all' method:
if not self.contents[key].all():
return False
return True
def any(self):
for key in self.keys():
if self.contents[key].any():
return True
return False
def sum(self):
return sum(self.values())
def sum_arrays(self):
return sum(self.list_elements())
def sum_all(self):
arraytainer_of_scalars = np.sum(self)
return sum(arraytainer_of_scalars.list_elements())
def get_shape(self, return_tuples=False):
shapes = {}
for key, val in self.items():
shapes[key] = val.shape
if self._type is list:
shapes = list(shapes.values())
if not return_tuples:
shapes = self.__class__(shapes, greedy=True)
return shapes
#
# Array-Like Methods
#
@property
def shape(self):
return self.get_shape()
@property
def ndim(self):
return np.ndim(self)
@property
def sizes(self):
return np.prod(self.shape)
@property
def size(self):
size = self.sizes.sum_all()
if isinstance(size, self._arrays):
size = size.item()
return size
def reshape(self, *new_shapes, order='C'):
new_shapes = self._concatenate_elements_to_array(new_shapes)
return np.reshape(self, new_shapes, order=order)
def flatten(self, order='C', return_array=True):
output = np.squeeze(np.ravel(self, order=order))
if return_array:
# Zero-dimensional elements cause concatenate to throw error:
elem_list = output.list_elements()
for idx, elem in enumerate(elem_list):
if elem.ndim == 0:
elem_list[idx] = elem[None]
output = | np.concatenate(elem_list) | numpy.concatenate |
#!/usr/bin/env python
#
# Created by: <NAME>, April 2002
#
from __future__ import division, print_function, absolute_import
__usage__ = """
Build linalg:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
"""
import math
import numpy as np
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises, assert_)
from scipy.linalg import _fblas as fblas, get_blas_funcs
try:
from scipy.linalg import _cblas as cblas
except ImportError:
cblas = None
def test_get_blas_funcs():
# check that it returns Fortran code for arrays that are
# fortran-ordered
f1, f2, f3 = get_blas_funcs(
('axpy', 'axpy', 'axpy'),
(np.empty((2,2), dtype=np.complex64, order='F'),
np.empty((2,2), dtype=np.complex128, order='C'))
)
# get_blas_funcs will choose libraries depending on most generic
# array
assert_equal(f1.typecode, 'z')
assert_equal(f2.typecode, 'z')
if cblas is not None:
assert_equal(f1.module_name, 'cblas')
assert_equal(f2.module_name, 'cblas')
# check defaults.
f1 = get_blas_funcs('rotg')
assert_equal(f1.typecode, 'd')
# check also dtype interface
f1 = get_blas_funcs('gemm', dtype=np.complex64)
assert_equal(f1.typecode, 'c')
f1 = get_blas_funcs('gemm', dtype='F')
assert_equal(f1.typecode, 'c')
# extended precision complex
f1 = get_blas_funcs('gemm', dtype=np.longcomplex)
assert_equal(f1.typecode, 'z')
# check safe complex upcasting
f1 = get_blas_funcs('axpy',
(np.empty((2,2), dtype=np.float64),
np.empty((2,2), dtype=np.complex64))
)
assert_equal(f1.typecode, 'z')
def test_get_blas_funcs_alias():
# check alias for get_blas_funcs
f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64)
assert f.typecode == 'c'
assert g.typecode == 'c'
f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64)
assert f is g
assert f is h
class TestCBLAS1Simple(TestCase):
def test_axpy(self):
for p in 'sd':
f = getattr(cblas,p+'axpy',None)
if f is None:
continue
assert_array_almost_equal(f([1,2,3],[2,-1,3],a=5),[7,9,18])
for p in 'cz':
f = getattr(cblas,p+'axpy',None)
if f is None:
continue
assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18])
class TestFBLAS1Simple(TestCase):
def test_axpy(self):
for p in 'sd':
f = getattr(fblas,p+'axpy',None)
if f is None:
continue
assert_array_almost_equal(f([1,2,3],[2,-1,3],a=5),[7,9,18])
for p in 'cz':
f = getattr(fblas,p+'axpy',None)
if f is None:
continue
assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18])
def test_copy(self):
for p in 'sd':
f = getattr(fblas,p+'copy',None)
if f is None:
continue
assert_array_almost_equal(f([3,4,5],[8]*3),[3,4,5])
for p in 'cz':
f = getattr(fblas,p+'copy',None)
if f is None:
continue
assert_array_almost_equal(f([3,4j,5+3j],[8]*3),[3,4j,5+3j])
def test_asum(self):
for p in 'sd':
f = getattr(fblas,p+'asum',None)
if f is None:
continue
assert_almost_equal(f([3,-4,5]),12)
for p in ['sc','dz']:
f = getattr(fblas,p+'asum',None)
if f is None:
continue
assert_almost_equal(f([3j,-4,3-4j]),14)
def test_dot(self):
for p in 'sd':
f = getattr(fblas,p+'dot',None)
if f is None:
continue
assert_almost_equal(f([3,-4,5],[2,5,1]),-9)
def test_complex_dotu(self):
for p in 'cz':
f = getattr(fblas,p+'dotu',None)
if f is None:
continue
assert_almost_equal(f([3j,-4,3-4j],[2,3,1]),-9+2j)
def test_complex_dotc(self):
for p in 'cz':
f = getattr(fblas,p+'dotc',None)
if f is None:
continue
assert_almost_equal(f([3j,-4,3-4j],[2,3j,1]),3-14j)
def test_nrm2(self):
for p in 'sd':
f = getattr(fblas,p+'nrm2',None)
if f is None:
continue
assert_almost_equal(f([3,-4,5]),math.sqrt(50))
for p in ['c', 'z', 'sc','dz']:
f = getattr(fblas,p+'nrm2',None)
if f is None:
continue
assert_almost_equal(f([3j,-4,3-4j]),math.sqrt(50))
def test_scal(self):
for p in 'sd':
f = getattr(fblas,p+'scal',None)
if f is None:
continue
assert_array_almost_equal(f(2,[3,-4,5]),[6,-8,10])
for p in 'cz':
f = getattr(fblas,p+'scal',None)
if f is None:
continue
assert_array_almost_equal(f(3j,[3j,-4,3-4j]),[-9,-12j,12+9j])
for p in ['cs','zd']:
f = getattr(fblas,p+'scal',None)
if f is None:
continue
assert_array_almost_equal(f(3,[3j,-4,3-4j]),[9j,-12,9-12j])
def test_swap(self):
for p in 'sd':
f = getattr(fblas,p+'swap',None)
if f is None:
continue
x,y = [2,3,1],[-2,3,7]
x1,y1 = f(x,y)
assert_array_almost_equal(x1,y)
assert_array_almost_equal(y1,x)
for p in 'cz':
f = getattr(fblas,p+'swap',None)
if f is None:
continue
x,y = [2,3j,1],[-2,3,7-3j]
x1,y1 = f(x,y)
assert_array_almost_equal(x1,y)
assert_array_almost_equal(y1,x)
def test_amax(self):
for p in 'sd':
f = getattr(fblas,'i'+p+'amax')
assert_equal(f([-2,4,3]),1)
for p in 'cz':
f = getattr(fblas,'i'+p+'amax')
assert_equal(f([-5,4+3j,6]),1)
#XXX: need tests for rot,rotm,rotg,rotmg
class TestFBLAS2Simple(TestCase):
def test_gemv(self):
for p in 'sd':
f = getattr(fblas,p+'gemv',None)
if f is None:
continue
assert_array_almost_equal(f(3,[[3]],[-4]),[-36])
assert_array_almost_equal(f(3,[[3]],[-4],3,[5]),[-21])
for p in 'cz':
f = getattr(fblas,p+'gemv',None)
if f is None:
continue
assert_array_almost_equal(f(3j,[[3-4j]],[-4]),[-48-36j])
assert_array_almost_equal(f(3j,[[3-4j]],[-4],3,[5j]),[-48-21j])
def test_ger(self):
for p in 'sd':
f = getattr(fblas,p+'ger',None)
if f is None:
continue
assert_array_almost_equal(f(1,[1,
2],[3,4]),[[3,4],[6,8]])
assert_array_almost_equal(f(2,[1,
2,
3],[3,4]),[[6,8],[12,16],[18,24]])
assert_array_almost_equal(f(1,[1,
2],[3,4],
a=[[1,2],[3,4]]
),[[4,6],[9,12]])
for p in 'cz':
f = getattr(fblas,p+'geru',None)
if f is None:
continue
assert_array_almost_equal(f(1,[1j,
2],[3,4]),[[3j,4j],[6,8]])
assert_array_almost_equal(f(-2,[1j,
2j,
3j],[3j,4j]),[[6,8],[12,16],[18,24]])
for p in 'cz':
for name in ('ger', 'gerc'):
f = getattr(fblas,p+name,None)
if f is None:
continue
assert_array_almost_equal(f(1,[1j,
2],[3,4]),[[3j,4j],[6,8]])
assert_array_almost_equal(f(2,[1j,
2j,
3j],[3j,4j]),[[6,8],[12,16],[18,24]])
class TestFBLAS3Simple(TestCase):
def test_gemm(self):
for p in 'sd':
f = getattr(fblas,p+'gemm',None)
if f is None:
continue
assert_array_almost_equal(f(3,[3],[-4]),[[-36]])
assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21])
for p in 'cz':
f = getattr(fblas,p+'gemm',None)
if f is None:
continue
assert_array_almost_equal(f(3j,[3-4j],[-4]),[[-48-36j]])
assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j])
def _get_func(func, ps='sdzc'):
"""Just a helper: return a specified BLAS function w/typecode."""
for p in ps:
f = getattr(fblas, p+func, None)
if f is None:
continue
yield f
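# Illustrative note (not part of the original test suite): the helper above is
# used to iterate over whichever precisions this BLAS build provides, e.g.
#   for f in _get_func('gemm'):   # may yield sgemm, dgemm, zgemm, cgemm
#       f(1., [[2.]], [[3.]])
# silently skipping any prefix that is missing from _fblas.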
class TestBLAS3Symm(TestCase):
def setUp(self):
self.a = np.array([[1., 2.],
[0., 1.]])
self.b = np.array([[1., 0., 3.],
[0., -1., 2.]])
self.c = np.ones((2,3))
self.t = np.array([[2., -1., 8.],
[3., 0., 9.]])
def test_symm(self):
for f in _get_func('symm'):
res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
assert_array_almost_equal(res, self.t)
res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
assert_array_almost_equal(res, self.t)
res = f(a=self.a, b=self.b.T, side=1, c=self.c.T, alpha=1., beta=1.)
assert_array_almost_equal(res, self.t.T)
def test_symm_wrong_side(self):
f = getattr(fblas, 'dsymm', None)
if f is not None:
assert_raises(Exception, f, **{'a': self.a, 'b': self.b, 'alpha': 1,
'side': 1})
# `side=1` means C <- B*A, hence shapes of A and B are to be
# compatible. Otherwise, f2py exception is raised
def test_symm_wrong_uplo(self):
"""SYMM only considers the upper/lower part of A. Hence setting
wrong value for `lower` (default is lower=0, meaning upper triangle)
gives a wrong result.
"""
f = getattr(fblas,'dsymm',None)
if f is not None:
res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
assert np.allclose(res, self.t)
res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
assert not np.allclose(res, self.t)
class TestBLAS3Syrk(TestCase):
def setUp(self):
self.a = np.array([[1., 0.],
[0., -2.],
[2., 3.]])
self.t = np.array([[1., 0., 2.],
[0., 4., -6.],
[2., -6., 13.]])
self.tt = np.array([[5., 6.],
[6., 13.]])
def test_syrk(self):
for f in _get_func('syrk'):
c = f(a=self.a, alpha=1.)
assert_array_almost_equal(np.triu(c), np.triu(self.t))
c = f(a=self.a, alpha=1., lower=1)
assert_array_almost_equal(np.tril(c), np.tril(self.t))
c0 = np.ones(self.t.shape)
c = f(a=self.a, alpha=1., beta=1., c=c0)
assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))
c = f(a=self.a, alpha=1., trans=1)
assert_array_almost_equal(np.triu(c), np.triu(self.tt))
#prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress?
# FIXME: how to catch the _fblas.error?
def test_syrk_wrong_c(self):
f = getattr(fblas, 'dsyrk', None)
if f is not None:
assert_raises(Exception, f, **{'a': self.a, 'alpha': 1.,
'c': np.ones((5, 8))})
# if C is supplied, it must have compatible dimensions
class TestBLAS3Syr2k(TestCase):
def setUp(self):
self.a = np.array([[1., 0.],
[0., -2.],
[2., 3.]])
self.b = np.array([[0., 1.],
[1., 0.],
[0, 1.]])
self.t = np.array([[0., -1., 3.],
[-1., 0., 0.],
[3., 0., 6.]])
self.tt = np.array([[0., 1.],
[1., 6]])
def test_syr2k(self):
for f in _get_func('syr2k'):
c = f(a=self.a, b=self.b, alpha=1.)
assert_array_almost_equal(np.triu(c), np.triu(self.t))
c = f(a=self.a, b=self.b, alpha=1., lower=1)
assert_array_almost_equal(np.tril(c), np.tril(self.t))
c0 = np.ones(self.t.shape)
c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0)
assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))
c = f(a=self.a, b=self.b, alpha=1., trans=1)
assert_array_almost_equal(np.triu(c), np.triu(self.tt))
#prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress?
def test_syr2k_wrong_c(self):
f = getattr(fblas, 'dsyr2k', None)
if f is not None:
assert_raises(Exception, f, **{'a': self.a, 'b': self.b, 'alpha': 1.,
'c': np.zeros((15, 8))})
# if C is supplied, it must have compatible dimensions
class TestSyHe(TestCase):
"""Quick and simple tests for (zc)-symm, syrk, syr2k."""
def setUp(self):
self.sigma_y = np.array([[0., -1.j],
[1.j, 0.]])
def test_symm_zc(self):
for f in _get_func('symm', 'zc'):
# NB: a is symmetric w/upper diag of ONLY
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, -1]))
def test_hemm_zc(self):
for f in _get_func('hemm', 'zc'):
# NB: a is hermitian w/upper diag of ONLY
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
def test_syrk_zr(self):
for f in _get_func('syrk', 'zc'):
res = f(a=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([-1, -1]))
def test_herk_zr(self):
for f in _get_func('herk', 'zc'):
res = f(a=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
def test_syr2k_zr(self):
for f in _get_func('syr2k', 'zc'):
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), 2.* | np.diag([-1, -1]) | numpy.diag |
#!/usr/bin/env python
##############################################################################
#
# QUANTINUUM LLC CONFIDENTIAL & PROPRIETARY.
# This work and all information and expression are the property of
# Quantinuum LLC, are Quantinuum LLC Confidential & Proprietary,
# contain trade secrets and may not, in whole or in part, be licensed,
# used, duplicated, disclosed, or reproduced for any purpose without prior
# written permission of Quantinuum LLC.
#
# In the event of publication, the following notice shall apply:
# (c) 2022 Quantinuum LLC. All Rights Reserved.
#
##############################################################################
''' Preset optimization passes. '''
from typing import Union
import numpy as np
# transpile options
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.transpiler import Layout
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler import PassManager, TranspilerError
from qiskit.circuit.library import SwapGate, CXGate
from qiskit.circuit.library import RXXGate, RYYGate, RZZGate
from qiskit.quantum_info.operators import Operator
from qiskit.quantum_info.synthesis import TwoQubitBasisDecomposer
from qiskit.quantum_info.synthesis.two_qubit_decompose import TwoQubitWeylDecomposition
from qiskit.extensions import UnitaryGate
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.passes import Unroller, Optimize1qGates
from qiskit.transpiler.passes import BarrierBeforeFinalMeasurements, RemoveDiagonalGatesBeforeMeasure
from qiskit.transpiler.passes import Collect2qBlocks, ConsolidateBlocks
def preset_passes(optimization_level: str,
transpile_options: dict):
"""
Preset pass options.
Args:
optimization_level: 'low' = combine SQ gates, relabel qubits to avoid first transport
'medium' = low + combine adjacent SU(4) blocks
'high' = medium + approximate blocks
'rob' = SU(4) gates with robustness to rotation errors (untested)
0, 1, 2 = standard qiskit level of optimization
transpile_options: dictionary of other transpiler options for 'high' or 'rob'
Returns:
(PassManager): PassManager object for compilation
"""
pm = PassManager()
if optimization_level == 'low':
pm.append([
Unroller(['u1','u2','u3','cx']),
Optimize1qGates(),
RemoveDiagonalGatesBeforeMeasure(),
BarrierBeforeFinalMeasurements()
])
elif optimization_level == 'medium':
pm.append([
Unroller(['u1','u2','u3','cx']),
Collect2qBlocks(),
ConsolidateBlocks(),
Unroller(['u1','u2','u3','cx']),
Optimize1qGates(),
RemoveDiagonalGatesBeforeMeasure(),
BarrierBeforeFinalMeasurements()
])
elif optimization_level == 'high':
pm.append([
Unroller(['u1','u2','u3','cx']),
Collect2qBlocks(),
ApproxBlocks(**transpile_options),
Optimize1qGates(),
RemoveDiagonalGatesBeforeMeasure(),
BarrierBeforeFinalMeasurements()
])
else:
raise ValueError('Invalid optimization_level selected.')
return pm
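# Hedged usage sketch (the circuit and option values are assumptions chosen for
# illustration, not recommended settings): build the 'high' preset and run it on
# a small circuit; the transpile_options keys mirror ApproxBlocks.__init__.
def _example_preset_passes_usage():
    qc = QuantumCircuit(2)
    qc.h(0)
    qc.cx(0, 1)
    pm = preset_passes('high', {'tol': 0.999, 'mirror': True,
                                'arbitrary_angles': False, 'comp_method': 'fid'})
    return pm.run(qc)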
class ApproxBlocks(TransformationPass):
"""
Replace each block with approximate version
Based on ConsolidateBlocks pass
"""
def __init__(self,
tol: float,
mirror: bool = False,
arbitrary_angles: bool = False,
comp_method: str = 'fid',
kak_basis_gate = CXGate(),
force_consolidate: bool = False):
"""
Args:
tol: tolerance of approximation
mirror: if true then check same SU(4) approx with flipped qubits
arbitrary_angles: if true then use arbitrary angles
comp_method: function used to compare approximations
kak_basis_gate: gate used in decomposition
"""
super().__init__()
self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate)
self.tol = tol
self.mirror = mirror
self.comp_method = comp_method
self.arbitrary_angles = arbitrary_angles
self.force_consolidate = force_consolidate
def run(self, dag):
"""Run the ConsolidateBlocks pass on `dag`.
Iterate over each block and replace it with an equivalent Unitary
on the same wires.
"""
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
# compute ordered indices for the global circuit wires
global_index_map = {wire: idx for idx, wire in enumerate(dag.qubits)}
qubit_map = {wire: wire for wire in dag.qubits}
blocks = self.property_set['block_list']
# just to make checking if a node is in any block easier
all_block_nodes = {nd for bl in blocks for nd in bl}
for node in dag.topological_op_nodes():
if node not in all_block_nodes:
# need to add this node to find out where in the list it goes
preds = [nd for nd in dag.predecessors(node) if nd.type == 'op']
block_count = 0
while preds:
if block_count < len(blocks):
block = blocks[block_count]
# if any of the predecessors are in the block, remove them
preds = [p for p in preds if p not in block]
else:
# should never occur as this would mean not all
# nodes before this one topologically had been added
# so not all predecessors were removed
raise TranspilerError(
"Not all predecessors removed due to error"
" in topological order"
)
block_count += 1
# we have now seen all predecessors
# so update the blocks list to include this block
blocks = blocks[:block_count] + [[node]] + blocks[block_count:]
# create the dag from the updated list of blocks
basis_gate_name = self.decomposer.gate.name
for block in blocks:
if len(block) == 1 and block[0].name != 'cx':
# an intermediate node that was added into the overall list
new_dag.apply_operation_back(
block[0].op,
[qubit_map[q] for q in block[0].qargs],
block[0].cargs, block[0].condition
)
else:
# find the qubits involved in this block
block_qargs = set()
for nd in block:
block_qargs |= set(nd.qargs)
# convert block to a sub-circuit, then simulate unitary and add
block_width = len(block_qargs)
q = QuantumRegister(block_width)
subcirc = QuantumCircuit(q)
block_index_map = self._block_qargs_to_indices(
block_qargs,
global_index_map
)
inv_block_index_map = {
val:key
for key, val in block_index_map.items()
}
basis_count = 0
for nd in block:
if nd.op.name == basis_gate_name:
basis_count += 1
subcirc.append(nd.op, [q[block_index_map[i]] for i in nd.qargs])
unitary = UnitaryGate(Operator(subcirc)) # simulates the circuit
qc, flip_qubits = mirror_decomposer(
unitary.to_matrix(),
self.tol,
self.mirror,
self.arbitrary_angles,
self.comp_method,
self.decomposer
)
if not (self.force_consolidate or unitary.num_qubits > 2):
for inst in qc.data:
qubit_list = [qubit.index for qubit in inst[1]]
new_dag.apply_operation_back(
inst[0],
[qubit_map[inv_block_index_map[i]] for i in qubit_list]
)
if flip_qubits:
qubits = (
qubit_map[inv_block_index_map[1]],
qubit_map[inv_block_index_map[0]]
)
qubit_map.update({
inv_block_index_map[0]: qubits[0],
inv_block_index_map[1]: qubits[1]
})
else:
for nd in block:
new_dag.apply_operation_back(
nd.op,
[qubit_map[q] for q in nd.qargs],
nd.cargs,
nd.condition
)
return new_dag
def _block_qargs_to_indices(self,
block_qargs,
global_index_map):
"""
Map each qubit in block_qargs to its wire position among the block's wires.
Args:
block_qargs (list): list of qubits that a block acts on
global_index_map (dict): mapping from each qubit in the
circuit to its wire position within that circuit
Returns:
dict: mapping from qarg to position in block
"""
block_indices = [global_index_map[q] for q in block_qargs]
ordered_block_indices = sorted(block_indices)
block_positions = {q: ordered_block_indices.index(global_index_map[q])
for q in block_qargs}
return block_positions
def mirror_decomposer(U: np.ndarray,
fidelity: float,
mirror: bool,
arbitrary_angles: bool,
comp_method: str,
basis_decomp: TwoQubitBasisDecomposer):
"""
Decompose unitary in standard and mirror option and return most efficient.
Args:
U: unitary to decomposer
fidelity: tolerance fidelity of decomposition
mirror: option to allow search over U * SWAP in addition
arbitrary_angles: option to do decomposition with arbitrary angles
comp_method: method used to compare decompositions
basis_decomp: SU(4) decomposition object
Returns:
(QuantumCircuit): circuit describing decoposition
"""
target_decomposed = TwoQubitWeylDecomposition(U)
decomp_success = comparison_func(
target_decomposed,
basis_decomp,
fidelity,
comp_method
)
if mirror:
target_decomposed_rev = TwoQubitWeylDecomposition(
SwapGate().to_matrix().dot(U)
)
#traces = get_traces(target_decomposed_rev)
#expected_fidelities += [fidelity_comp(trace_to_fid(traces[i]), fidelity, i) for i in range(4)]
decomp_success += comparison_func(
target_decomposed_rev,
basis_decomp,
fidelity,
comp_method
)
best_decomp = np.argmax(decomp_success)
if best_decomp >= 4:
flip_future = True
best_decomp = best_decomp % 4
target_decomposed = target_decomposed_rev
else:
flip_future = False
return_circuit = write_circuit(
best_decomp,
basis_decomp,
target_decomposed,
arbitrary_angles
)
return return_circuit, flip_future
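# Illustrative sketch (assumed inputs): the decomposer can also be called
# directly on a 4x4 unitary, e.g.
#   decomp = TwoQubitBasisDecomposer(CXGate())
#   qc, flipped = mirror_decomposer(Operator(SwapGate()).data, 0.999, True,
#                                   False, 'fid', decomp)
# where 'flipped' indicates the mirrored (SWAP-absorbed) decomposition was
# chosen, so the caller must relabel the two qubits afterwards.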
def write_circuit(best_decomp: int,
decomp: TwoQubitBasisDecomposer,
target_decomposed: TwoQubitWeylDecomposition,
arbitrary_angles: bool):
"""
Make qiskit circuit out of selected decomp.
Args:
best_decomp: index for decomposition based on criteria
decomp: decomposition object
target_decomposed: decomposition of target unitary
arbitrary_angles: option if using arbitrary angles
Returns:
(QuantumCircuit): Circuit describing target_decomposed
"""
q = QuantumRegister(2)
qc = QuantumCircuit(q)
pm = PassManager()
pm.append(Optimize1qGates())
if not arbitrary_angles:
decomposition = decomp.decomposition_fns[best_decomp](target_decomposed)
decomposition_euler = [decomp._decomposer1q(x) for x in decomposition]
for i in range(best_decomp):
qc.compose(decomposition_euler[2*i], [q[0]], inplace=True)
qc.compose(decomposition_euler[2*i+1], [q[1]], inplace=True)
qc.append(decomp.gate, [q[0], q[1]])
qc.compose(decomposition_euler[2*best_decomp], [q[0]], inplace=True)
qc.compose(decomposition_euler[2*best_decomp+1], [q[1]], inplace=True)
elif arbitrary_angles:
gate_list = [RXXGate, RYYGate, RZZGate]
tq_angles = [-target_decomposed.a, -target_decomposed.b, -target_decomposed.c]
qc.compose(decomp._decomposer1q(target_decomposed.K2r), [q[0]], inplace=True)
qc.compose(decomp._decomposer1q(target_decomposed.K2l), [q[1]], inplace=True)
for i in range(best_decomp):
qc.append(gate_list[i](2*tq_angles[i]), [q[0], q[1]])
qc.compose(decomp._decomposer1q(target_decomposed.K1r), [q[0]], inplace=True)
qc.compose(decomp._decomposer1q(target_decomposed.K1l), [q[1]], inplace=True)
qc_new = pm.run(qc)
return qc_new
def comparison_func(target: TwoQubitWeylDecomposition,
basis: TwoQubitBasisDecomposer,
base_fid: float,
comp_method: str):
"""
Decompose traces for arbitrary angle rotations.
This assumes that the tq angles go from highest to lowest.
"""
dep_param = (4 * base_fid - 1)/3
if comp_method == 'fid':
traces = fixed_traces(target, basis)
values = [((abs(tr)**2 - 1) * dep_param**i + 1)/ 16
for i, tr in enumerate(traces)]
elif comp_method == 'arb_fid':
traces = arb_traces(target)
values = [((abs(tr)**2 - 1) * dep_param**i + 1)/ 16
for i, tr in enumerate(traces)]
elif comp_method == 'arb_total':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**(a/np.pi) + 1)/ 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'arb_total_quad':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**((a/np.pi)**2) + 1) / 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'arb_total_sqrt':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**(np.sqrt(a/np.pi)) + 1) / 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'total_angle':
traces = arb_traces(target)
# negate to find smallest total angle (uses max later)
values = [-10, -10, -10, -abs(target.a) - abs(target.b) - abs(target.c)]
return values
def arb_traces(target: TwoQubitWeylDecomposition):
"""Returns normalized traces for arbitrary angle decomps."""
traces = [
4*(np.cos(target.a)*np.cos(target.b)*np.cos(target.c) +
1j*np.sin(target.a)*np.sin(target.b)*np.sin(target.c)),
4*(np.cos(target.b)*np.cos(target.c)),
4*np.cos(target.c),
4
]
return traces
def fixed_traces(target: TwoQubitWeylDecomposition,
basis: TwoQubitBasisDecomposer):
"""Returns traces for fixed angle decomps."""
traces = [
4*(np.cos(target.a)*np.cos(target.b)* | np.cos(target.c) | numpy.cos |
# -*- coding: utf-8 -*-
"""Trajectory cleaner
This module relies heavily on the example scripts in
the Example gallery of the Mayavi documentation
link : https://tinyurl.com/p6ecx6n
Created on Mon Mar 19 13:17:09 2018
@author: tbeleyur
"""
import easygui as eg
import numpy as np
import pandas as pd
from traits.api import HasTraits, Range, Instance, \
on_trait_change
from traitsui.api import View, Item, Group
from mayavi import mlab
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, \
MlabSceneModel
from tvtk.api import tvtk
class TrajAssigner(HasTraits):
'''
Creates a Mayavi Visualisation window with options to :
1) Display a time range of the trajectory datasets
2) View trajectory point information when a point is left-button clicked
3) Re-assign the *labelled* trajectory points when the point is
right-button clicked. If 'Cancel' is pressed OR the window is closed
then the trajectory tag is set to nan.
Usage :
# Initiate a TrajAssigner instance
traj_cleaner = TrajAssigner()
# assign the labelled and known trajectory datasets to the instance
traj_cleaner.knwntraj_data = kn_data
traj_cleaner.labtraj_data = lab_data
# begin the Mayavi interactive visualisation
traj_cleaner.configure_traits()
# After checking the trajectory assignment close the
# Mayavi window and save the labld_traj pd.DataFrame to a csv
traj_cleaner.labld_traj.to_csv('labelled_traj_verified.csv')
User-controlled parameters :
tag_offset : the distance between the numeric trajectory tag and
the displayed trajectory points
tag_size : size of the numeric trajectory tag
'''
Time_range_start = Range(0, 30.0, 0.000)
Time_range_end = Range(0, 30.0, 29.99)
scene = Instance(MlabSceneModel, ())
labld_glyphs = None
known_glyphs = None
outline = None
labld_glyphcolors = None
trajtags = [0, 1, 2]
tag_size = 0.05
tag_offset = 2*10**-2
@on_trait_change('scene.activated')
def setup(self):
print('running setup')
self.generate_color_and_size()
self.fig = mlab.figure(figure=mlab.gcf())
self.fig.scene.interactor.interactor_style = tvtk.InteractorStyleTerrain()
self.update_plot()
# The general mouse based clicker - which reveals point information
# of the known and labelled datapoints
self.info_picker = self.fig.on_mouse_pick(self.view_point_information)
self.info_picker.tolerance = 0.01
# picker which allows to re-assign the point trajectory number
self.reassign_picker = self.fig.on_mouse_pick(self.reassign_callback,
type='point',
button='Right')
self.reassign_picker.tolerance = 0.01
# outline which indicates which point has been clicked on
self.outline = mlab.outline(line_width=3, color=(0.9, 0.9, 0.9))
self.outline.outline_mode = 'cornered'
self.outline.bounds = (0.05, 0.05,
0.05, 0.05,
0.05, 0.05)
self.click_text = mlab.text(0.8, 0.8, 'STARTING INFO')
self.traj_text = mlab.text(0.8, 0.6, 'Trajectory number')
self.pointtype_text = mlab.text(0.8, 0.87, 'Point Type')
self.pointtype_info = mlab.text(0.8, 0.82, '')
mlab.axes()
@on_trait_change(['Time_range_start', 'Time_range_end'])
def update_plot(self):
'''Makes a 3d plot with known/verified trajecory points
as circles and the corresponding auto/manually labelled points as
squares.
TODO:
1) allow for interactive choosing of points even with tsubsetting - DONE
2) the POINTCOLORS should remain the same
Instance parameters used :
knwn_trajdata : pd.DataFrame with following columns:
x_knwn,y_knwn,z_knwn,t_knwn, traj_num
lab_trajdata : pd.DataFrame with following columns:
x,y,z,t,traj_num
'''
print('updating plotted data')
self.tsubset_knwntraj = self.subset_in_time(self.knwntraj_data)
self.tsubset_labldtraj = self.subset_in_time(self.labtraj_data, False)
self.x_knwn, self.y_knwn, self.z_knwn = conv_to_XYZ(self.tsubset_knwntraj[['x_knwn', 'y_knwn', 'z_knwn']])
self.x, self.y, self.z = conv_to_XYZ(self.tsubset_labldtraj[['x', 'y', 'z']])
#set colors for each point
self.known_glyphcolors = np.array(self.tsubset_knwntraj['colors'])
self.labld_glyphcolors = np.array(self.tsubset_labldtraj['colors'])
# verified points
if self.known_glyphs is None:
# if the glyphs are being called the first time
self.known_glyphs = mlab.points3d(self.x_knwn, self.y_knwn,
self.z_knwn,
scale_factor=0.05,
mode='sphere', colormap='hsv',
figure=self.fig)
# thanks goo.gl/H9mdao
self.known_glyphs.glyph.scale_mode = 'scale_by_vector'
self.known_glyphs.mlab_source.dataset.point_data.scalars = self.known_glyphcolors
else:
# only change the traits of the object while keeping its
# identity in the scene
self.known_glyphs.mlab_source.reset(x=self.x_knwn,
y=self.y_knwn,
z=self.z_knwn,
scale_factor=0.05,
mode='sphere', colormap='hsv',
figure=self.fig)
self.known_glyphs.glyph.scale_mode = 'scale_by_vector'
self.known_glyphs.mlab_source.dataset.point_data.scalars = self.known_glyphcolors
#auto/manually labelled points which need to be checked
if self.labld_glyphs is None:
self.labld_glyphs = mlab.points3d(self.x, self.y, self.z,
scale_factor=0.05,
mode='cube', colormap='hsv',
figure=self.fig)
self.labld_glyphs.glyph.scale_mode = 'scale_by_vector'
self.labld_glyphs.mlab_source.dataset.point_data.scalars = self.labld_glyphcolors
else:
self.labld_glyphs.mlab_source.reset(x=self.x,
y=self.y,
z=self.z,
scale_factor=0.05,
mode='cube', colormap='hsv',
figure=self.fig,
scalars=self.labld_glyphcolors)
self.labld_glyphs.glyph.scale_mode = 'scale_by_vector'
self.labld_glyphs.mlab_source.dataset.point_data.scalars = self.labld_glyphcolors
# get the xyz points of the plotted points
self.labld_points = self.labld_glyphs.glyph.glyph_source.glyph_source.output.points.to_array()
self.knwn_points = self.known_glyphs.glyph.glyph_source.glyph_source.output.points.to_array()
self.create_trajectorytags()
#mlab.gcf().scene.disable_render = False
#mlab.draw(figure=self.fig)
def view_point_information(self, picker):
'''Callback function when a glyph is left-button clicked.
Information on the xyz and time of recording/emission is displayed
'''
#print('MOUSE CALLBACK')
self.click_text.text = ''
all_glyphs = [self.known_glyphs.actor.actors,
self.labld_glyphs.actor.actors]
closest_glyph = [picker.actor in disp_glyphs for disp_glyphs in all_glyphs]
all_pointsxyz = [self.knwn_points, self.labld_points]
try:
which_glyph = int(np.argwhere(closest_glyph))
points_xyz = all_pointsxyz[which_glyph]
except:
return()
if which_glyph == 0:
time_col = 't_knwn'
elif which_glyph == 1:
time_col = 't'
if picker.actor in all_glyphs[which_glyph]:
point_id = picker.point_id // points_xyz.shape[0]
# If no point has been selected, point_id is '-1'
if point_id != -1:
# Retrieve the coordinates corresponding to that data
# point
if which_glyph == 0:
#print('known point chosen')
x_pt, y_pt, z_pt = self.x_knwn[point_id], self.y_knwn[point_id], self.z_knwn[point_id]
pt_type = 'Known'
else:
#print('labelled point chosen')
x_pt, y_pt, z_pt = self.x[point_id], self.y[point_id], self.z[point_id]
pt_type = 'Labelled'
# Move the outline to the data point.
self.outline.bounds = (x_pt-0.05, x_pt+0.05,
y_pt-0.05, y_pt+0.05,
z_pt-0.05, z_pt+0.05)
self.outline.visible = True
#display the x,y,z and time info on the selected point #
if which_glyph == 0:
time_stamp = np.around(self.tsubset_knwntraj[time_col][point_id], 4)
traj_num = self.tsubset_knwntraj['traj_num'][point_id]
else:
time_stamp = np.around(self.tsubset_labldtraj[time_col][point_id], 4)
traj_num = self.tsubset_labldtraj['traj_num'][point_id]
self.click_text.text = str([np.around(x_pt, 2),
np.around(y_pt, 2),
np.around(z_pt, 2),
time_stamp])
#display the trajectory number of the selected point
self.traj_text.text = 'Traj number: ' + str(traj_num)
self.pointtype_info.text = pt_type
else:
print('failed :', point_id)
def reassign_callback(self, picker):
""" Picker callback: this get called when on pick events.
A user prompt appears when the picker is triggered for
entry of the trajectory number. Input >=1 and <=99 is expected.
If the trajectory number needs to be set to a NaN, then simply click
on 'Cancel'
"""
if picker.actor in self.labld_glyphs.actor.actors:
point_id = picker.point_id // self.labld_points.shape[0]
# If no point has been selected, point_id is '-1'
if point_id != -1:
# Retrieve the coordinates corresponding to that data
# point
print('labelled point chosen')
x_pt, y_pt, z_pt = self.x[point_id], self.y[point_id], self.z[point_id]
# Move the outline to the data point.
self.outline.bounds = (x_pt-0.15, x_pt+0.15,
y_pt-0.15, y_pt+0.15,
z_pt-0.15, z_pt+0.15)
self.outline.visible = True
try:
new_trajnum = eg.integerbox('Please enter the re-assigned trajectory number',
lowerbound=1, upperbound=99,
default=None)
print('New traj num', new_trajnum)
self.trajectory_reassignment(new_trajnum, point_id)
except:
print('Unable to re-assign point')
def subset_in_time(self, traj_df, known=True):
'''Make a subset of the known and labelled trajectory datasets
such that the points displayed fall within the start and end time
of the user input.
Parameters:
traj_df : pd.DataFrame with at least one column named either 't'
or 't_knwn'
known : Boolean. Defaults to True.
If True:
the column used for subsetting should be called 't_knwn'
If False:
the column used for subsetting should be called 't'
Returns:
tsubset_df : pd.DataFrame with at least one column named
either 't' or 't_knwn'. See 'known'.
'''
colname = {True:'t_knwn', False:'t'}
if self.Time_range_end <= self.Time_range_start:
print('invalid Time range!')
return(None)
try:
time_after = traj_df[colname[known]] >= self.Time_range_start
time_before = traj_df[colname[known]] <= self.Time_range_end
tsubset_df = traj_df[(time_after) & (time_before)]
tsubset_df = tsubset_df.reset_index(drop=True)
return(tsubset_df)
except:
print('Wrong time ranges !! ')
# The layout of the dialog created
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300, show_label=False),
Group('_', 'Time_range_start', 'Time_range_end'),
resizable=True)
def identify_orig_rowindex(self, orig_df, df_row):
'''When a point has been chosen for trajectory re-assignment,
find its original row index in the dataset and change the value there
Parameters:
orig_df : pd.DataFrame with multiple rows and columns
df_row : 1 x Ncolumns pd.DataFrame.
Returns:
orig_index : int. Row index of the original DataFrame orig_df with
values that match df_row
'''
x_match = orig_df['x'] == df_row.x
y_match = orig_df['y'] == df_row.y
z_match = orig_df['z'] == df_row.z
try:
row_index = orig_df.loc[x_match & y_match & z_match].index
return(row_index)
except:
print('Matching row not found !! Returning None')
def generate_color_and_size(self):
for each_trajtype in [self.knwntraj_data, self.labtraj_data]:
each_trajtype['colors'] = each_trajtype['traj_num'].apply(assign_colors_float, 1)
each_trajtype['size'] = np.tile(0.05, each_trajtype.shape[0])
self.end_time = np.max([np.max(self.labtraj_data['t']),
np.max(self.knwntraj_data['t_knwn'])])
def trajectory_reassignment(self, new_trajnum, pt_id):
'''Re-assigns the trajectory number of a labelled point in the original
labld_traj pd.DataFrame
Parameters:
new_trajnum: int. New trajectory number
pt_id : int. row number of the tsubset_labdtraj which needs to be
accessed
'''
self.current_row = self.tsubset_labldtraj.loc[pt_id]
orig_index = self.identify_orig_rowindex(self.labtraj_data, self.current_row)
try:
self.labtraj_data['traj_num'][orig_index] = new_trajnum
print('Trajectory successfully re-assigned for point #'+str(orig_index))
self.generate_color_and_size()
self.update_plot()
except:
print('Unable to re-assign !!')
def create_trajectorytags(self):
'''Make a label which shows the trajectory number for each plotted point
'''
for each_tag in self.trajtags:
try:
each_tag.visible = False # clear out all traj labels
except:
print('Could not set each_tag.visible to False')
pass
self.trajtags[:] = []
known_data = self.tsubset_knwntraj[['x_knwn', 'y_knwn', 'z_knwn',
'traj_num']]
labld_data = self.tsubset_labldtraj[['x', 'y', 'z', 'traj_num']]
for point_collection in [known_data, labld_data]:
for i, each_row in point_collection.iterrows():
try:
trajtag = mlab.text3d(each_row.x_knwn + self.tag_offset,
each_row.y_knwn + self.tag_offset,
each_row.z_knwn + self.tag_offset,
str(each_row.traj_num),
scale=self.tag_size,
figure=mlab.gcf())
except:
trajtag = mlab.text3d(each_row.x+ self.tag_offset,
each_row.y+ self.tag_offset,
each_row.z+ self.tag_offset,
str(each_row.traj_num),
scale=self.tag_size,
figure=mlab.gcf())
self.trajtags.append(trajtag)
num_colors = 20
traj_2_color_float = {i+1 : (i+0.01)/num_colors for i in range(1, num_colors+1)}
def assign_colors_float(X):
'''Outputs a float value between 0 and 1 for the given trajectory
number, falling back to 0.99 when the number is not in the colormap.
'''
try:
color = traj_2_color_float[X]
return(color)
except:
color = 0.99
return(color)
def conv_to_XYZ(pd_df):
'''
Parameters:
pd_df : npoints x 3 columns with some kind of xyz data
Returns:
x,y,z : 3 columns of npoints length each
'''
xyz_dict = {}
for i, axis in enumerate(['x', 'y', 'z']):
xyz_dict[axis] = np.array(pd_df.iloc[:, i])
return(xyz_dict['x'], xyz_dict['y'], xyz_dict['z'])
if __name__ == '__main__':
lin_inc = np.linspace(0,1.5,25)
lin_inc = np.random.normal(0,1,lin_inc.size)
xyz = np.column_stack((lin_inc, lin_inc, lin_inc))
# model.distributions.py
# copyright 2021 <NAME>
import numpy as np
import pymc3 as pm
from scipy import stats, special
import theano.tensor as tt
from pymc3.distributions.dist_math import bound, logpow, alltrue_elemwise
from pymc3.distributions.continuous import assert_negative_support, PositiveContinuous
from pymc3.distributions.distribution import draw_values, generate_samples
from pymc3.theanof import floatX
RANDOM_SEED = 42
rng = np.random.default_rng(seed=RANDOM_SEED)
# NOTE hack to clip values away from {0, 1} for invcdfs
# Whilst value = {0, 1} is theoretically allowed, it seems to cause a
# numerical computational issue somewhere in tt.erfcinv which throws infs.
# This screws up the downstream, so clip slightly away from {0, 1}
CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS = 1e-15 #1e-18 too small
def boundzero_numpy(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, 0.)
def boundzero_theano(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return tt.switch(alltrue_elemwise(conditions), vals, 0.)
def boundlog_numpy(vals, *conditions):
""" Bound log unit distribution params, return -inf for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, -np.inf)
def logpow_numpy(x, m):
""" Copy from pymc3
Safe calc log(x**m) since m*log(x) will fail when m, x = 0.
"""
return np.where(x == 0, np.where(m == 0, 0.0, -np.inf), m * np.log(x))
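# Illustrative sketch (helper name is mine, not part of the original module):
# a naive m * np.log(x) produces nan at x == 0 (0 * -inf, with a warning),
# while logpow_numpy returns the intended limit of 0.0 when m == 0 as well.
def _demo_logpow_numpy():
    x = np.array([0.0, 0.0, 2.0])
    m = np.array([0.0, 3.0, 3.0])
    naive = m * np.log(x)        # -> [nan, -inf, 3*log(2)]
    safe = logpow_numpy(x, m)    # -> [0.0, -inf, 3*log(2)]
    return naive, safe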
class Gamma(pm.Gamma):
"""Inherit the pymc class, add cdf and invcdf """
def __init__(self):
raise NotImplementedError(
"""Consider that InvCDF is hard to calculate: even scipy uses C functions
Recommend use different dist in practice""")
class GammaNumpy():
"""Gamma PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations used in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
Ref: https://en.wikipedia.org/wiki/Gamma_distribution
Params: x > 0, u in [0, 1], a (shape) > 0, b (rate) > 0
"""
def __init__(self):
self.name = 'Gamma'
self.notation = {'notation': r'x \sim Gamma(\alpha, \beta)'}
self.dist_natural = {
'pdf': r'f(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \beta^{\alpha} x^{\alpha-1} e^{- \beta x}',
'cdf': r'F(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \gamma(\alpha, \beta x)',
'invcdf': r'F^{-1}(u \mid \alpha, \beta) = '}
self.dist_log = {
'logpdf': r'\log f(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \beta^{\alpha} + \log x^{\alpha-1} - \beta x',
'logcdf': r'\log F(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \gamma(\alpha, \beta x)',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, \beta) = '}
self.conditions = {
'parameters': r'\alpha > 0 \, \text{(shape)}, \; \beta > 0 \, \text{(rate)}',
'support': r'x \in (0, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r'\frac{\alpha}{\beta}',
'mode': r'\frac{\alpha - 1}{\beta}, \; \text{for } \alpha \geq 1',
'variance': r'\frac{\alpha}{\beta^{2}}'
}
def pdf(self, x, a, b):
"""Gamma PDF
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2595
"""
fn = (1 / special.gamma(a)) * np.power(b, a) * np.power(x, a-1) * np.exp(-b * x)
return boundzero_numpy(fn, a > 0, b > 0, x >= 0)
def cdf(self, x, a, b):
"""Gamma CDF:
where $\gamma(a, bx)$ is lower incomplete gamma function [0, lim)
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2602
"""
# fn = (1 / special.gamma(a)) * special.gammainc(a, b * x)
fn = special.gammainc(a, b * x)
return boundzero_numpy(fn, a > 0, b > 0, x >= 0)
def invcdf(self, u, a, b):
"""Gamma Inverse CDF aka PPF:
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2608
see sc.gammainc()
"""
raise NotImplementedError('TODO gamma inverse CDF')
def logpdf(self, x, a, b):
"""Gamma log PDF
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2599
"""
fn = -special.gammaln(a) + logpow_numpy(b, a) + logpow_numpy(x, a-1) - b * x
return boundlog_numpy(fn, a > 0, b > 0, x > 0)
def logcdf(self, x, a, b):
"""Gamma log CDF:
where $\gamma(a, bx)$ is lower incomplete gamma function [0, lim)
compare to https://github.com/pymc-devs/pymc3/blob/41a25d561b3aa40c75039955bf071b9632064a66/pymc3/distributions/continuous.py#L2614
"""
return boundlog_numpy((-special.gammaln(a)) + special.gammainc(a, b * x),
a > 0, b > 0, x > 0)
def loginvcdf(self, u, a, b):
"""Gamma log Inverse CDF aka log PPF:
see sc.gammaincinv()
"""
raise NotImplementedError('TODO gamma log inverse CDF')
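# Illustrative sketch (helper names are mine, not part of the original module):
# the TODO inverse CDFs above could plausibly be filled in with scipy's inverse
# of the regularised lower incomplete gamma function. For X ~ Gamma(a, rate=b),
# F(x) = gammainc(a, b*x), hence F^{-1}(u) = gammaincinv(a, u) / b. Written as
# standalone helpers so the class above stays untouched.
def gamma_invcdf_numpy(u, a, b):
    fn = special.gammaincinv(a, u) / b
    return boundzero_numpy(fn, a > 0, b > 0, u >= 0, u <= 1)

def gamma_loginvcdf_numpy(u, a, b):
    fn = np.log(special.gammaincinv(a, u)) - np.log(b)
    return boundlog_numpy(fn, a > 0, b > 0, u >= 0, u <= 1)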
class Gumbel(pm.Gumbel):
"""Inherit the pymc class, add cdf, logcdf and invcdf, loginvcdf
Also clobber logp (!)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
def logp(self, value):
"""
JS patch refactored code to align with other distributions
Calculate log-probability of Gumbel distribution at specified value.
z = (x - mu) / b
pdf = (1 / b) * exp(-z - exp(-z))
logpdf = -log(b) - z - exp(-z)
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
mu = self.mu
beta = self.beta
z = (value - mu) / beta
logp = -tt.log(beta) - z - tt.exp(-z)
return bound(logp, beta > 0)
def logcdf(self, value):
"""
JS patch refactored code to align with other distributions
cdf = exp(-exp(-(X - mu) / b))
logcdf = -exp(-(X-mu)/b)
Compute the log of the cumulative distribution function for
Gumbel distribution at the specified value.
Parameters
----------
value: numeric
Value(s) for which log CDF is calculated. If the log CDF for
multiple values are desired the values must be provided in a
numpy array or theano tensor.
Returns
-------
TensorVariable
"""
beta = self.beta
mu = self.mu
logcdf = -tt.exp(-(value - mu)/beta)
return bound(logcdf, beta > 0)
def loginvcdf(self, value):
"""
JS new function
invcdf = mu - b * log(-log(u))
loginvcdf = log(mu) + log(1 - (b * log(-log(u))/mu))
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
beta = self.beta
mu = self.mu
loginvcdf = tt.log(mu) + tt.log(1 - (beta * tt.log(-tt.log(value))/mu))
return bound(loginvcdf, beta > 0)
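# Illustrative sanity check (helper name is mine): the Gumbel logcdf above is
# logcdf = -exp(-(x - mu) / beta), which should agree with scipy's right-skewed
# Gumbel. This is a plain numpy check and does not exercise the theano graph.
def _check_gumbel_logcdf(mu=1.0, beta=2.0):
    x = np.linspace(-5.0, 10.0, 7)
    manual = -np.exp(-(x - mu) / beta)
    return np.allclose(manual, stats.gumbel_r.logcdf(x, loc=mu, scale=beta))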
class InverseWeibull(PositiveContinuous):
r"""
Inverse Weibull log-likelihood, the reciprocal of the Weibull distribution,
also known as the Fréchet distribution, a special case of the generalized
extreme value distribution.
See scipy for reference
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html
https://github.com/scipy/scipy/blob/v1.6.0/scipy/stats/_continuous_distns.py
The pdf of this distribution is
.. math::
f(x \mid \alpha, s, m) =
\frac{\alpha}{s} \; \left( \frac{x-m}{s} \right)^{-1-\alpha} \; e^{-\left( \frac{x-m}{s} \right)^{-\alpha}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(0, 3, 500)
alphas = [1., 2., 3., 3.]
betas = [1., 1., 1., .5]
for a, b in zip(alphas, betas):
pdf = st.invweibull.pdf(x, a, scale=b)
plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ======================================================
Support :math:`x \in (m, \infty)`
Mean :math:`{\begin{cases}\ m+s\Gamma \left(1-{\frac {1}{\alpha }}\right)&{\text{for }}\alpha >1\\\ \infty &{\text{otherwise}}\end{cases}}`
Variance :math:`{\begin{cases}\ s^{2}\left(\Gamma \left(1-{\frac {2}{\alpha }}\right)-\left(\Gamma \left(1-{\frac{1}{\alpha }}\right)\right)^{2}\right)&{\text{for }}\alpha >2\\\ \infty &{\text{otherwise}}\end{cases}}`
======== ======================================================
Parameters
----------
alpha: float
Shape parameter (alpha > 0).
s: float
Scale parameter (s > 0), default = 1
## m: float
## Location parameter (mu in (-inf, inf)), default = 0
"""
def __init__(self, alpha=None, s=1., *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.s = s = tt.as_tensor_variable(floatX(s))
self.mode = s * tt.power(alpha / (1. + alpha), 1. / alpha)
assert_negative_support(alpha, "alpha", "InverseWeibull")
assert_negative_support(s, "s", "InverseWeibull")
def _distr_parameters_for_repr(self):
return ["alpha", 's']
def random(self, point=None, size=None):
"""
Draw random values from InverseWeibull PDF distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
alpha, s = draw_values([self.alpha, self.s], point=point, size=size)
return generate_samples(stats.invweibull.rvs, c=alpha, scale=s, loc=0.,
dist_shape=self.shape, size=size)
def logp(self, value):
"""
Calculate log-probability of InverseWeibull distribution at specified value.
pdf: https://www.wolframalpha.com/input/?i=%28a%2Fs%29+*+%28x%2Fs%29**%28-1-a%29+*+exp%28-%28x%2Fs%29**-a%29
alt form according to WA: a e^(-(s/x)^a) s^a x^(-1 - a)
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
alpha = self.alpha
s = self.s
return bound(
(
tt.log(alpha) -
tt.log(s) +
logpow(s / value, 1. + alpha) -
tt.power(s / value, alpha) # this term grossly dominates if alpha >> 2
),
value > 0.,
alpha > 0.,
s > 0.
)
def cdf(self, value):
"""InverseWeibull CDF"""
alpha = self.alpha
s = self.s
fn = tt.exp(-tt.power(value / s, -alpha))
return boundzero_theano(fn, alpha > 0, s > 0, value > 0)
def logcdf(self, value):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
alpha = self.alpha
s = self.s
fn = -tt.power(value / s, -alpha)
return bound(fn, alpha > 0, s > 0, value > 0)
def invcdf(self, value):
"""InverseWeibull Inverse CDF aka PPF"""
alpha = self.alpha
s = self.s
value = tt.clip(value, CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS,
1-CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS)
fn = s * tt.power(-tt.log(value), -1. / alpha)
return boundzero_theano(fn, alpha > 0, s > 0, value >= 0, value <= 1)
def loginvcdf(self, value):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
alpha = self.alpha
s = self.s
fn = tt.log(s) - (1./ alpha ) * tt.log(-tt.log(value))
return bound(fn, alpha > 0, s > 0, value >= 0, value <= 1)
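# Illustrative usage sketch (function name, priors and data argument are mine):
# one way the custom InverseWeibull might serve as an observed likelihood in a
# pymc3 model.
def _example_invweibull_model(observed_severity):
    with pm.Model() as model:
        alpha = pm.Gamma('alpha', alpha=2.0, beta=1.0)
        s = pm.Gamma('s', alpha=2.0, beta=1.0)
        sev = InverseWeibull('sev', alpha=alpha, s=s, observed=observed_severity)
    return model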
class InverseWeibullNumpy():
"""Inverse Weibull PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations potentially used if needed in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
NOTE: I'm lazy and have set m=0 throughout: this suits my usecase anyhow
Ref: https://en.wikipedia.org/wiki/Fréchet_distribution
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html?highlight=inverse%20weibull
Params: alpha (shape) > 0, s (scale) > 0, m (location of minimum) = 0
Support: x > 0, u in [0, 1]
"""
def __init__(self):
self.name = 'InverseWeibull'
self.notation = {'notation': r'x \sim InverseWeibull(\alpha, s, m=0)'}
self.dist_natural = {
'pdf': r"""f(x \mid \alpha, s, m=0) = \frac{\alpha}{s} \;
\left( \frac{x}{s} \right)^{-1-\alpha} \;
\exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)""",
'cdf': r'F(x \mid \alpha, s, m=0) = \exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)',
'invcdf': r"""F^{-1}(u \mid \alpha, s, m=0) = s \log(u)^{-\frac{1}{\alpha}}"""}
self.dist_log = {
'logpdf': r"""\log f(x \mid \alpha, s, m=0) = \log{\alpha} - (1+\alpha)\log{x} +
\alpha \log{s} - \left( \frac{x}{s} \right)^{-\alpha}""",
'logcdf': r'\log F(x \mid \alpha, s, m=0) = - \left( \frac{x}{s} \right)^{-\alpha}',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, s, m=0) = \log(s) - \frac{1}{\alpha} * \log(-\log(u))'}
self.conditions = {
'parameters': r"""\alpha > 0 \, \text{(shape)}, \;
s > 0 \, \text{(scale, default } s=1 \text{)}, \;
m \in (-\infty, \infty) \, \text{(location of minimum, default } m=0 \text{)}""",
'support': r'x \in (m, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r"""
\begin{cases}
m + s \Gamma \left( 1 - \frac{1}{\alpha} \right) & \text{for } \alpha > 1 \\
\infty & \text{otherwise} \\
\end{cases}""",
'mode': r'm + s \left( \frac{\alpha}{1+\alpha} \right)^{1/\alpha}',
'variance': r"""
\begin{cases}
s^{2} \left( \Gamma \left( 1-\frac{2}{\alpha} \right) -
\left( \Gamma \left( 1-\frac{1}{\alpha} \right) \right)^{2}
\right) & \text{for } \alpha > 2 \\
\infty & \text{otherwise}
\end{cases}"""
}
def pdf(self, x, a, s):
"""InverseWeibull PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3919
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
(a/s) *
np.power(x/s, -1.-a) *
np.exp(-np.power(x/s, -a))
)
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def cdf(self, x, a, s):
"""InverseWeibull CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3926
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.exp(-np.power(x/s, -a))
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def invcdf(self, u, a, s):
"""InverseWeibull Inverse CDF aka PPF:
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3930
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = s * np.power(-np.log(u), -1./a)
return boundzero_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
def logpdf(self, x, a, s):
"""InverseWeibull log PDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
np.log(a) - np.log(s) +
logpow_numpy(x/s, -1.-a) -
np.power(x/s, -a) # this term grossly dominates if a >> 2
)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def logcdf(self, x, a, s):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = -np.power(x/s, -a)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def loginvcdf(self, u, a, s):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.log(s) - (1./a) * np.log(-np.log(u))
return boundlog_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
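# Illustrative validation sketch (helper name is mine): the class docstring
# suggests comparing the manual implementation against scipy; a minimal version
# of that check. scipy's invweibull names its shape parameter c.
def _check_invweibull_numpy(a=2.5, s=1.5):
    d = InverseWeibullNumpy()
    x = np.linspace(0.1, 5.0, 9)
    ok_pdf = np.allclose(d.pdf(x, a, s), stats.invweibull.pdf(x, c=a, scale=s))
    ok_cdf = np.allclose(d.cdf(x, a, s), stats.invweibull.cdf(x, c=a, scale=s))
    return ok_pdf and ok_cdf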
class ZeroInflatedInverseWeibull(PositiveContinuous):
r"""
ZeroInflatedInverseWeibull log-likelihood
WIP! Mixture model to allow for observations dominated by zeros such as sev
also see
+ McElreath 2014, http://xcelab.net/rmpubs/Mcelreath%20Koster%202014.pdf,
https://github.com/rmcelreath/mcelreath-koster-human-nature-2014
+ Jones 2013, https://royalsocietypublishing.org/doi/10.1098/rspb.2013.1210
+ https://stackoverflow.com/questions/42409761/pymc3-nuts-has-difficulty-sampling-from-a-hierarchical-zero-inflated-gamma-mode
The pmf of this distribution is
.. math::
f(x \mid \psi, \alpha, s) = \left\{
\begin{array}{l}
(1 - \psi), & \text{if } x = 0 \\
\psi \, \text{InverseWeibull}(\alpha, s), & \text{if } x > 0
\end{array}
\right.
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi \, \text{InverseWeibull}(\mu, \sigma)`
Variance :math: TODO
======== ==========================
Parameters
----------
psi: float
Expected proportion of InverseWeibull variates (0 <= psi <= 1)
alpha: float
s: float
"""
def __init__(self, psi, alpha, s, *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
self.psi = psi = tt.as_tensor_variable(floatX(psi))
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.s = s = tt.as_tensor_variable(floatX(s))
self.invweibull = InverseWeibull.dist(alpha=alpha, s=s)
# TODO
#self.mean = self.psi * self.invweibull.mean
self.mode = self.psi * self.invweibull.mode
assert_negative_support(alpha, "alpha", "ZeroInflatedInverseWeibull")
assert_negative_support(s, "s", "ZeroInflatedInverseWeibull")
# def _random(self, psi, size=None):
# """Note by definition any rvs_ from invweibull that are zero will
# correctly remain zero, covering the case x = 0"""
# rvs_ = self.invweibull.random(size=size)
# return rvs_ * psi
def _random(self, psi, size=None):
"""Inputs are numpy arrays"""
rvs_ = self.invweibull.random(size=size)
pi = stats.binom(n=np.repeat([1], len(psi)), p=psi).rvs(len(psi))
return rvs_ * pi
def random(self, point=None, size=None):
"""
Draw random values from ZeroInflatedInverseWeibull PDF distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
psi, alpha, s = draw_values([self.psi, self.alpha, self.s],
point=point, size=size)
return generate_samples(self._random, psi,
dist_shape=self.shape, size=size)
def logp(self, value):
"""LogPDF"""
psi = self.psi
logp_ = tt.switch(tt.neq(value, 0), # or use tt.gt(value, 0), dunno which faster
tt.log(psi) + self.invweibull.logp(value),
tt.log1p(-psi))
return bound(logp_, value >=0, psi > 0, psi < 1)
def cdf(self, value):
"""CDF"""
psi = self.psi
cdf_ = (1. - psi) * 1 + psi * self.invweibull.cdf(value)
return boundzero_theano(cdf_, value >=0, psi > 0, psi < 1)
def invcdf(self, value):
"""InvCDF aka PPF"""
psi = self.psi
invcdf_ = self.invweibull.invcdf((value + psi - 1) / psi)
return boundzero_theano(invcdf_, value>=0, value<=1, psi > 0, psi < 1)
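# Illustrative sketch (helper name and the clipping constant are mine): the
# invcdf above maps u <= 1 - psi to exact zeros and pushes the remaining mass
# through the InverseWeibull quantile function. The same idea as plain numpy
# inverse-transform sampling:
def _sample_zi_invweibull_numpy(psi, a, s, size, random_state=rng):
    u = random_state.uniform(size=size)
    v = np.clip((u + psi - 1.) / psi, 1e-12, 1. - 1e-12)  # guard log() edge cases
    return np.where(u <= 1. - psi, 0., s * np.power(-np.log(v), -1. / a))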
class ZeroInflatedInverseWeibullNumpy():
"""Zero-inflated Inverse Weibull PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations potentially used if needed in pymc3 custom distributions
Helpful to compare these ? seems rare
NOTE: I'm lazy and have set m=0 throughout: this suits my usecase anyhow
Ref: https://en.wikipedia.org/wiki/Fréchet_distribution
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html?highlight=inverse%20weibull
Params: 0 < psi < 1 (prop invweibull), alpha (shape) > 0, s (scale) > 0, m (location of minimum) = 0
Support: x > 0, u in [0, 1]
"""
def __init__(self):
self.name = 'InverseWeibull'
self.notation = {'notation': r'x \sim InverseWeibull(\alpha, s, m=0)'}
self.dist_natural = {
'pdf': r"""f(x \mid \alpha, s, m=0) = \frac{\alpha}{s} \;
\left( \frac{x}{s} \right)^{-1-\alpha} \;
\exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)""",
'cdf': r'F(x \mid \alpha, s, m=0) = \exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)',
'invcdf': r"""F^{-1}(u \mid \alpha, s, m=0) = s \log(u)^{-\frac{1}{\alpha}}"""}
self.dist_log = {
'logpdf': r"""\log f(x \mid \alpha, s, m=0) = \log{\alpha} - (1+\alpha)\log{x} +
\alpha \log{s} - \left( \frac{x}{s} \right)^{-\alpha}""",
'logcdf': r'\log F(x \mid \alpha, s, m=0) = - \left( \frac{x}{s} \right)^{-\alpha}',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, s, m=0) = \log(s) - \frac{1}{\alpha} * \log(-\log(u))'}
self.conditions = {
'parameters': r"""\alpha > 0 \, \text{(shape)}, \;
s > 0 \, \text{(scale, default } s=1 \text{)}, \;
m \in (-\infty, \infty) \, \text{(location of minimum, default } m=0 \text{)}""",
'support': r'x \in (m, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r"""
\begin{cases}
m + s \Gamma \left( 1 - \frac{1}{\alpha} \right) & \text{for } \alpha > 1 \\
\infty & \text{otherwise} \\
\end{cases}""",
'mode': r'm + s \left( \frac{\alpha}{1+\alpha} \right)^{1/\alpha}',
'variance': r"""
\begin{cases}
s^{2} \left( \Gamma \left( 1-\frac{2}{\alpha} \right) -
\left( \Gamma \left( 1-\frac{1}{\alpha} \right) \right)^{2}
\right) & \text{for } \alpha > 2 \\
\infty & \text{otherwise}
\end{cases}"""
}
def pdf(self, x, a, s):
"""InverseWeibull PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3919
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
(a/s) *
np.power(x/s, -1.-a) *
np.exp(-np.power(x/s, -a))
)
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def cdf(self, x, a, s):
"""InverseWeibull CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3926
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.exp(-np.power(x/s, -a))
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def invcdf(self, u, a, s):
"""InverseWeibull Inverse CDF aka PPF:
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3930
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = s * np.power(-np.log(u), -1./a)
return boundzero_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
def logpdf(self, x, a, s):
"""InverseWeibull log PDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
np.log(a) - np.log(s) +
logpow_numpy(x/s, -1.-a) -
np.power(x/s, -a) # this term grossly dominates if a >> 2
)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def logcdf(self, x, a, s):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = -np.power(x/s, -a)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def loginvcdf(self, u, a, s):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.log(s) - (1./a) * np.log(-np.log(u))
return boundlog_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
import h5py
import numpy as np
import datetime
import matplotlib.pyplot as plt
from matplotlib import dates
import pyresample as pr
from scipy.spatial import cKDTree
from pyproj import Proj
from scipy.interpolate import interp1d
import scipy
import pandas as pd
import netCDF4
def apr3tocit(apr3filename,fl,sphere_size,psd_filename_2ds,psd_filename_HVPS,query_k = 1,plotson=False,QC=False,slimfast=True,cit_aver=False,cit_aver2=False,
attenuation_correct=False,O2H2O={},per_for_atten = 50,
return_indices=False,BB=True,bbguess=500,
cal_adj_bool = False,cal_adj=0,
cloudtop=True,rollfix=True):
"""
=================
This function finds either the closest gate or averages over a number of gates (query_k) nearest to
the citation aircraft in the radar volume of the APR3. It can return a dict of the original full length
arrays and the matched arrays.
=====
Vars:
=====
apr3filename = str, filename of the apr hdf file
fl = awot object, the citation awot object
sphere_size = int, maximum distance allowed in the kdTree search
psd_filename_2ds = str, filename of the processed 2DS file
psd_filename_HVPS = str, filename of the processed HVPS3 file
query_k = int, number of gates considered in the average (if 1, use closest)
plotson = boolean, will create some premade plots that describe the matched data
QC = boolean, will apply a simple QC method: eliminates any gate within 0.5 km to the surface and the outliers
(plus/minus 1.5IQR)
slimfast = boolean, will not save original data. Cuts down on output file size by only outputting the matched data and the citation data.
cit_aver = boolean, averages the citation data variables using a 5 second moving average (there is overlap)
cit_aver2 = boolean, averages the citation data variables using a 5 second discrete average (there is NO overlap)
O2H2O = dict, data from sounding to correct for attenuation from O2 and H2O vapor
attenuation_correct = boolean, corrects for attenuation using LWC prof and Sounding. Uses 50th percentile of LWC Prof
per_for_atten = int, the percentile for the supercooled liquid water profile used in the attenuation correction.
return_indices = boolean, returns the indices of the matched gates in 1d coords
BB = boolean, mask gates from the BB and lower. Masks data using the BB_alt algorithm
bbguess = int, give your first guess of where the Bright Band is to assist the BB_alt algorithm
cal_adj_bool = bool, turn on calibration adjustment or not.
cal_adj = array, array of the adjustment needed for correct calibration between frequencies. [ka_adj, w_adj]
cloudtop = bool, eliminates sensitivity issues with the Ku-band data (~ < 10 dBZ) by masking out the cloudtop noise using a Gaussian filter
rollfix = bool, turn on or off the masking of data where the plane is rolling more than 10 degrees (the roll threshold in degrees can be changed).
=================
"""
#get citation times (datetimes)
cit_time = fl['time']['data']
#Eliminate BB?
if BB:
#Get rid of anything below the melting level + 250 m
apr = apr3read(apr3filename)
#there are two methods to this. One is more conservative (using mean Ku) the other more intense with LDR Ku
#apr = BB_alt(apr,bbguess) #old
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
###new BB tech 2/27/18 RJC
print('Removing BB and below')
apr = mask_surf(apr)
apr['ldr'] = np.ma.masked_where(apr['Ku'].mask,apr['ldr'])
#find bb profs
bb = precip_echo_filt3D(apr['ldr'],thresh=7)
ind1 = np.where(bb[12,:] == 1) #BB profiles based on LDR
top_a = find_bb(apr,ind1)
bb_long = extend_bb(ind1,apr['timedates'][12,:],top_a)
apr['Ku'][:,:,:] = np.ma.masked_where(apr['alt_gate'][:,:,:] <= bb_long,apr['Ku'][:,:,:])
apr['Ka'] = np.ma.masked_where(apr['Ku'].mask,apr['Ka'])
apr['W'] = np.ma.masked_where(apr['Ku'].mask,apr['W'])
###
#correct for attenuation using SLW and Ku
if attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor3(apr,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
elif attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor2(apr3filename,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
else:
apr = apr3read(apr3filename)
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
if cal_adj_bool:
print('adding calibration means...')
# These values come from the analysis performed by 3 research groups: NASA JPL, University of Leister, and the University of Illinois. Techniques use sigma_0 of the ocean surface, comparison of frequencies at low Z and numerical simulations of particles. (error/uncertainty: +- 0.5 dB)
apr['Ku'] = apr['Ku'] + 0.8
apr['Ka'] = apr['Ka'] + 1
#Whh is the only one with a time-variant calibration adjustment
apr['W'] = apr['W'] + cal_adj
#While calibrating the data, radar artifacts showed up when the roll of the aircraft was > 10degrees.
if rollfix:
roll = apr['roll']
roll3d = np.zeros(apr['Ku'].shape)
for i in np.arange(0,apr['Ku'].shape[1]):
for j in np.arange(0,apr['Ku'].shape[2]):
roll3d[:,i,j] = roll[i,j]
apr['Ku'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ku'])
apr['Ka'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ka'])
apr['W'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['W'])
#Get APR3 times (datetimes)
time_dates = apr['timedates'][:,:]
#fix a few radar files where w-band disapears
if time_dates[12,0] >= datetime.datetime(2015,12,18,6,58):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,18,7,6),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
if time_dates[12,0] >= datetime.datetime(2015,12,1,23,43,48) and time_dates[12,0] <=datetime.datetime(2015,12,1,23,43,49):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,2,0,1,40),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
#Check if radar file is large enough to use (50 gates is arbitrary)
if time_dates[12,:].shape[0] < 50:
print('Limited radar gates in time')
#return
#
#Load PSD
dtime_psd,ND,dD,midpoints = PSD_load(psd_filename_2ds,psd_filename_HVPS,day = time_dates[0,0].day,month=time_dates[0,0].month)
#
#Make ND a masked array (i.e. get rid of nans from loading it in)
ind = np.isnan(ND)
ND = np.ma.masked_where(ind,ND)
#for plotting routine
fontsize=14
#
#Varibles needed for the kdtree
leafsize = 16
query_eps = 0
query_p=2
query_distance_upper_bound = sphere_size
query_n_jobs =1
Barnes = True
K_d = sphere_size
#
#Pre-Determine arrays
Ku_gate = np.ma.array([])
Ka_gate = np.ma.array([])
W_gate = np.ma.array([])
DFR_gate = np.ma.array([])
DFR2_gate = np.ma.array([])
DFR3_gate = np.ma.array([])
lon_c = np.ma.array([])
lat_c = np.ma.array([])
from __future__ import print_function
import numpy as np
import cv2
import math
#global fg-bg subtractor based on Mixture of 2 Gaussians for modelling fg-bg clusters
fgbg = cv2.createBackgroundSubtractorMOG2(history=50)
def init_xywh_kalman():
#creating kalman with states = [x,y,w,h,vx,vy,vw,vh]
#measurement = [x,y,w,h]
kalman = cv2.KalmanFilter(8, 4)
# velocity of x & y is twice that of w & h, to model a larger prior deviation over translation than over scale
kalman.transitionMatrix = np.array([
[1.,0.,0.,0., .01,0.,0.,0.],
[0.,1.,0.,0., 0.,.01,0.,0.],
[0.,0.,1.,0., 0.,0.,.005,0.],
[0.,0.,0.,1., 0.,0.,0.,.005],
#
[0.,0.,0.,0., 1.,0.,0.,0.],
[0.,0.,0.,0., 0.,1.,0.,0.],
[0.,0.,0.,0., 0.,0.,1.,0.],
[0.,0.,0.,0., 0.,0.,0.,1.],
],dtype=np.float32)
kalman.measurementMatrix = np.array([
[1.,0.,0.,0., 0.,0.,0.,0.],
[0.,1.,0.,0., 0.,0.,0.,0.],
[0.,0.,1.,0., 0.,0.,0.,0.],
[0.,0.,0.,1., 0.,0.,0.,0.],
], dtype=np.float32)
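# Illustrative usage sketch (function name and detector input are mine): a
# minimal predict/correct loop for the filter configured above. It assumes
# init_xywh_kalman() returns the cv2.KalmanFilter and that a detector supplies
# one (x, y, w, h) box per frame; process/measurement noise covariances would
# still need tuning for real use.
def track_boxes(detections):
    kalman = init_xywh_kalman()
    tracked = []
    for (x, y, w, h) in detections:
        kalman.predict()
        measurement = np.array([[x], [y], [w], [h]], dtype=np.float32)
        state = kalman.correct(measurement)
        tracked.append(state[:4].flatten())
    return tracked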
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import json
import cv2
import numpy as np
import math
import tensorflow as tf
import multiprocessing
from Architecture import Architecture
from Architecture import FeaturePredictionType
from RenderPasses import RenderPasses
from Naming import Naming
from OpenEXRDirectory import OpenEXRDirectory
parser = argparse.ArgumentParser(description='Prediction for the DeepDenoiser.')
parser.add_argument(
'json_filename',
help='The json specifying all the relevant details.')
parser.add_argument(
'--input', type=str,
help='Make a prediction for the files in this directory.')
parser.add_argument(
'--tile_size', default=128,
help='Width and heights of the tiles into which the image is split before denoising.')
parser.add_argument(
'--tile_overlap_size', default=14,
help='Border size of the tiles that is overlapping to avoid artifacts.')
parser.add_argument(
'--threads', default=multiprocessing.cpu_count() + 1,
help='Number of threads to use.')
parser.add_argument(
'--data_format', type=str, default='channels_first',
choices=['channels_first', 'channels_last'],
help='A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
class FeatureLoader:
def __init__(self, feature_prediction):
self.feature_prediction = feature_prediction
def add_to_parse_dictionary(self, dictionary):
if self.feature_prediction.load_data:
dictionary[Naming.source_feature_name(self.feature_prediction.name, index=0)] = tf.FixedLenFeature([], tf.string)
def deserialize(self, parsed_features, height, width):
if self.feature_prediction.load_data:
internal_source = tf.decode_raw(
parsed_features[Naming.source_feature_name(self.feature_prediction.name, index=0)], tf.float32)
#internal_source = tf.reshape(internal_source, [height, width, self.feature_prediction.number_of_channels])
internal_source = tf.reshape(internal_source, [height, width, 3])
self.source = internal_source
def add_to_sources_dictionary(self, sources, height, width):
if self.feature_prediction.load_data:
sources[Naming.source_feature_name(self.feature_prediction.name, index=0)] = self.source
else:
assert self.feature_prediction.feature_prediction_type != FeaturePredictionType.AUXILIARY
source = tf.ones([height, width, self.feature_prediction.number_of_channels])
if self.feature_prediction.feature_prediction_type != FeaturePredictionType.COLOR:
# Direct and indirect need to be 0.5.
source = tf.scalar_mul(0.5, source)
sources[Naming.source_feature_name(self.feature_prediction.name, index=0)] = source
def input_fn_tfrecords(
files, features_loader, feature_flags,
tiles_height_width, batch_size, threads, data_format='channels_last'):
def fast_feature_parser(serialized_example):
# Load all the required indices.
features = {}
for feature_loader in features_loader:
feature_loader.add_to_parse_dictionary(features)
parsed_features = tf.parse_single_example(serialized_example, features)
for feature_loader in features_loader:
feature_loader.deserialize(parsed_features, tiles_height_width, tiles_height_width)
# Prepare the examples.
sources = {}
for feature_loader in features_loader:
feature_loader.add_to_sources_dictionary(sources, tiles_height_width, tiles_height_width)
if feature_flags != None:
feature_flags.add_to_source_dictionary(sources, tiles_height_width, tiles_height_width)
return sources
def feature_parser(serialized_example):
dataset = None
# Load all the required indices.
features = {}
for feature_loader in features_loader:
feature_loader.add_to_parse_dictionary(features)
parsed_features = tf.parse_single_example(serialized_example, features)
for feature_loader in features_loader:
feature_loader.deserialize(parsed_features, tiles_height_width, tiles_height_width)
# Prepare the examples.
sources = {}
for feature_loader in features_loader:
feature_loader.add_to_sources_dictionary(sources, tiles_height_width, tiles_height_width)
if feature_flags != None:
feature_flags.add_to_source_dictionary(sources, tiles_height_width, tiles_height_width)
if dataset == None:
dataset = tf.data.Dataset.from_tensors((sources))
else:
dataset = dataset.concatenate(tf.data.Dataset.from_tensors((sources)))
return dataset
dataset = tf.data.TFRecordDataset(files, compression_type=None, buffer_size=None, num_parallel_reads=threads)
dataset = dataset.map(map_func=fast_feature_parser, num_parallel_calls=threads)
#dataset = dataset.flat_map(map_func=feature_parser)
dataset = dataset.batch(batch_size)
prefetch_buffer_size = 5
dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
return features
def slow_direct_input_fn_predict(features_list, height, width):
dataset = None
for features in features_list:
for feature_name in features:
image = features[feature_name]
image = tf.convert_to_tensor(image, np.float32)
if len(image.shape) == 2:
image = tf.reshape(image, [-1, height, width, 1])
else:
image = tf.reshape(image, [-1, height, width, 3])
features[feature_name] = image
current_dataset = tf.data.Dataset.from_tensor_slices(features)
if dataset == None:
dataset = current_dataset
else:
dataset = dataset.concatenate(current_dataset)
dataset = dataset.batch(1)
iterator = dataset.make_one_shot_iterator()
result = iterator.get_next()
return result
def model_fn(features, labels, mode, params):
architecture = params['architecture']
predictions = architecture.predict(features, mode)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = predictions[0]
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
def main(parsed_arguments):
# Eager execution was faster, but the reason was not clear. (DeepBlender)
tf.enable_eager_execution()
if not isinstance(parsed_arguments.threads, int):
parsed_arguments.threads = int(parsed_arguments.threads)
try:
architecture_json_filename = parsed_arguments.json_filename
architecture_json_content = open(architecture_json_filename, 'r').read()
parsed_architecture_json = json.loads(architecture_json_content)
except:
print('Expected a valid architecture json file.')
assert os.path.isdir(parsed_arguments.input)
if not isinstance(parsed_arguments.tile_size, int):
parsed_arguments.tile_size = int(parsed_arguments.tile_size)
if not isinstance(parsed_arguments.tile_overlap_size, int):
parsed_arguments.tile_overlap_size = int(parsed_arguments.tile_overlap_size)
tile_size = parsed_arguments.tile_size
tile_overlap_size = parsed_arguments.tile_overlap_size
data_format = parsed_arguments.data_format
architecture = Architecture(parsed_architecture_json, source_data_format='channels_last', data_format=data_format)
if architecture.data_format == 'channels_first':
use_CPU_only = False
else:
use_CPU_only = True
height = None
width = None
exr_files = OpenEXRDirectory._exr_files(parsed_arguments.input)
features = {}
required_features = architecture.auxiliary_features + architecture.feature_predictions
for feature_prediction in required_features:
exr_loaded = False
if feature_prediction.load_data:
for exr_file in exr_files:
if feature_prediction.name in exr_file:
image = OpenEXRDirectory._load_exr(exr_file)
# HACK: Assume just one source input!
features[Naming.source_feature_name(feature_prediction.name, index=0)] = image
exr_loaded = True
if height == None:
height = image.shape[0]
width = image.shape[1]
else:
assert height == image.shape[0]
assert width == image.shape[1]
break
else:
image = tf.ones([height, width, feature_prediction.number_of_channels])
if feature_prediction.feature_prediction_type != FeaturePredictionType.COLOR:
# Direct and indirect need to be 0.5.
image = tf.scalar_mul(0.5, image)
features[Naming.source_feature_name(feature_prediction.name, index=0)] = image
exr_loaded = True
if not exr_loaded:
# TODO: Improve (DeepBlender)
raise Exception('Image for \'' + feature_prediction.name + '\' could not be loaded or does not exist.')
smaller_side_length = min(height, width)
if smaller_side_length < 16:
raise Exception('The image needs to have at least a side length of 16 pixels.')
if smaller_side_length < tile_size:
ratio = tile_overlap_size / tile_size
tile_size = smaller_side_length
tile_overlap_size = int(tile_size * ratio)
# Split the images into tiles.
iteration_delta = tile_size - (2 * tile_overlap_size)
width_count = width - (2 * tile_overlap_size) - (2 * iteration_delta)
width_count = width_count / iteration_delta
width_count = math.ceil(width_count) + 2
height_count = height - (2 * tile_overlap_size) - (2 * iteration_delta)
height_count = height_count / iteration_delta
height_count = math.ceil(height_count) + 2
tiled_features_grid = [[None for _ in range(width_count) ] for _ in range(height_count)]
for height_index in range(height_count):
if height_index == 0:
lower_height = 0
upper_height = tile_size
elif height_index == height_count - 1:
upper_height = height
lower_height = upper_height - tile_size
else:
lower_height = height_index * iteration_delta
upper_height = lower_height + tile_size
for width_index in range(width_count):
if width_index == 0:
lower_width = 0
upper_width = tile_size
elif width_index == width_count - 1:
upper_width = width
lower_width = upper_width - tile_size
else:
lower_width = width_index * iteration_delta
upper_width = lower_width + tile_size
tiled_features = {}
for feature_name in features:
feature = features[feature_name]
tiled_feature = feature[lower_height:upper_height, lower_width:upper_width]
tiled_features[feature_name] = tiled_feature
tiled_features_grid[height_index][width_index] = tiled_features
# We don't need the features anymore.
features = None
# Directly predicting the results by creating a dataset from the tiled features resulted
# in a huge computational overhead.
# Converting the tiled features to tfrecords and predicting with a tfrecords dataset
# is a questionable approach, but it is significantly faster.
use_tfrecords = True
if use_tfrecords:
temporary_tfrecords_filename = './tmp.tfrecords'
tfrecords_writer = tf.python_io.TFRecordWriter(temporary_tfrecords_filename)
for height_index in range(height_count):
for width_index in range(width_count):
tiled_features = tiled_features_grid[height_index][width_index]
serializable_features = {}
for tiled_feature_name in tiled_features:
tiled_feature = tiled_features[tiled_feature_name]
tiled_feature = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(tiled_feature.tostring())]))
serializable_features[tiled_feature_name] = tiled_feature
example = tf.train.Example(features=tf.train.Features(feature=serializable_features))
tfrecords_writer.write(example.SerializeToString())
tfrecords_writer.close()
if use_CPU_only:
session_config = tf.ConfigProto(device_count = {'GPU': 0})
else:
session_config = tf.ConfigProto()
use_XLA = True
if use_XLA:
session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
run_config = tf.estimator.RunConfig(session_config=session_config)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=architecture.model_directory,
config=run_config,
params={'architecture': architecture})
if use_tfrecords:
features_loader = []
required_features = architecture.auxiliary_features + architecture.feature_predictions
for feature_prediction in required_features:
features_loader.append(FeatureLoader(feature_prediction))
tfrecords_files = [os.path.abspath(temporary_tfrecords_filename)]
batch_size = 1
threads = 1
predictions = estimator.predict(input_fn=lambda:
input_fn_tfrecords(
tfrecords_files, features_loader, architecture.feature_flags,
tile_size, batch_size, threads))
else:
tiled_features_list = []
for height_index in range(height_count):
for width_index in range(width_count):
tiled_features = tiled_features_grid[height_index][width_index]
tiled_features_list.append(tiled_features)
predictions = estimator.predict(input_fn=lambda:
slow_direct_input_fn_predict(tiled_features_list, tile_size, tile_size))
for height_index in range(height_count):
for width_index in range(width_count):
tiled_features_grid[height_index][width_index] = next(predictions)
predictions = {}
for feature_prediction_tuple in architecture.feature_prediction_tuples:
for feature_prediction in feature_prediction_tuple.feature_predictions:
if feature_prediction.load_data:
horizontal_feature_stripes = []
for height_index in range(height_count):
horizontal_feature_elements = []
for width_index in range(width_count):
tiled_predictions = tiled_features_grid[height_index][width_index]
prediction_name = Naming.feature_prediction_name(feature_prediction.name)
prediction = tiled_predictions[prediction_name]
lower_height = 0
upper_height = tile_size
lower_width = 0
upper_width = tile_size
if width_index != 0 and width_index != width_count - 1:
lower_width = tile_overlap_size
upper_width = upper_width - tile_overlap_size
elif width_index == 0 and width_index == width_count - 1:
pass
elif width_index == 0:
upper_width = upper_width - tile_overlap_size
else:
assert width_index == width_count - 1
existing_width = tile_overlap_size + ((width_count - 1) * (tile_size - (2 * tile_overlap_size)))
remaining_width = width - existing_width
lower_width = upper_width - remaining_width
if height_index != 0 and height_index != height_count - 1:
lower_height = tile_overlap_size
upper_height = upper_height - tile_overlap_size
elif height_index == 0 and height_index == height_count - 1:
pass
elif height_index == 0:
upper_height = upper_height - tile_overlap_size
else:
assert height_index == height_count - 1
existing_height = tile_overlap_size + ((height_count - 1) * (tile_size - (2 * tile_overlap_size)))
remaining_height = height - existing_height
lower_height = upper_height - remaining_height
prediction = prediction[lower_height:upper_height, lower_width:upper_width]
horizontal_feature_elements.append(prediction)
if len(horizontal_feature_elements) > 1:
horizontal_feature_stripe = np.concatenate(horizontal_feature_elements, 1)
else:
horizontal_feature_stripe = horizontal_feature_elements[0]
horizontal_feature_stripes.append(horizontal_feature_stripe)
if len(horizontal_feature_stripes) > 1:
prediction = np.concatenate(horizontal_feature_stripes, 0)
else:
prediction = horizontal_feature_stripes[0]
prediction_name = Naming.feature_prediction_name(feature_prediction.name)
predictions[prediction_name] = prediction
diffuse_direct = predictions[Naming.feature_prediction_name(RenderPasses.DIFFUSE_DIRECT)]
diffuse_indirect = predictions[Naming.feature_prediction_name(RenderPasses.DIFFUSE_INDIRECT)]
diffuse_color = predictions[Naming.feature_prediction_name(RenderPasses.DIFFUSE_COLOR)]
glossy_direct = predictions[Naming.feature_prediction_name(RenderPasses.GLOSSY_DIRECT)]
glossy_indirect = predictions[Naming.feature_prediction_name(RenderPasses.GLOSSY_INDIRECT)]
glossy_color = predictions[Naming.feature_prediction_name(RenderPasses.GLOSSY_COLOR)]
subsurface_direct = predictions[Naming.feature_prediction_name(RenderPasses.SUBSURFACE_DIRECT)]
subsurface_indirect = predictions[Naming.feature_prediction_name(RenderPasses.SUBSURFACE_INDIRECT)]
subsurface_color = predictions[Naming.feature_prediction_name(RenderPasses.SUBSURFACE_COLOR)]
transmission_direct = predictions[Naming.feature_prediction_name(RenderPasses.TRANSMISSION_DIRECT)]
transmission_indirect = predictions[Naming.feature_prediction_name(RenderPasses.TRANSMISSION_INDIRECT)]
transmission_color = predictions[Naming.feature_prediction_name(RenderPasses.TRANSMISSION_COLOR)]
volume_direct = predictions[Naming.feature_prediction_name(RenderPasses.VOLUME_DIRECT)]
volume_indirect = predictions[Naming.feature_prediction_name(RenderPasses.VOLUME_INDIRECT)]
environment = predictions[Naming.feature_prediction_name(RenderPasses.ENVIRONMENT)]
emission = predictions[Naming.feature_prediction_name(RenderPasses.EMISSION)]
alpha = predictions[Naming.feature_prediction_name(RenderPasses.ALPHA)]
# Combined features
diffuse = np.multiply(diffuse_color, np.add(diffuse_direct, diffuse_indirect))
glossy = np.multiply(glossy_color, np.add(glossy_direct, glossy_indirect))
subsurface = np.multiply(subsurface_color, np.add(subsurface_direct, subsurface_indirect))
transmission = np.multiply(transmission_color, np.add(transmission_direct, transmission_indirect))
# Combined image
image = np.add(diffuse, glossy)
image = np.add(image, subsurface)
image = np.add(image, transmission)
image = np.add(image, volume_direct)
image = np.add(image, volume_indirect)
image = np.add(image, environment)
image = np.add(image, emission)
# TODO: Alpha currently ignored for the combined image. (DeepBlender)
# Store as npy to open in Blender.
np.save(parsed_arguments.input + '/' + RenderPasses.COMBINED + '.npy', image)
np.save(parsed_arguments.input + '/' + RenderPasses.DIFFUSE_DIRECT + '.npy', diffuse_direct)
np.save(parsed_arguments.input + '/' + RenderPasses.DIFFUSE_INDIRECT + '.npy', diffuse_indirect)
np.save(parsed_arguments.input + '/' + RenderPasses.DIFFUSE_COLOR + '.npy', diffuse_color)
np.save(parsed_arguments.input + '/' + RenderPasses.GLOSSY_DIRECT + '.npy', glossy_direct)
np.save(parsed_arguments.input + '/' + RenderPasses.GLOSSY_INDIRECT + '.npy', glossy_indirect)
np.save(parsed_arguments.input + '/' + RenderPasses.GLOSSY_COLOR + '.npy', glossy_color)
import collections
import configparser
import csv
import datetime
import gzip
import itertools
import json
import logging
import pathlib
import pickle
import urllib.request
from typing import Tuple
import foolbox
import numpy as np
import sklearn.metrics as metrics
logger = logging.getLogger(__name__)
class AverageMeter(object):
"""
Computes and stores the average and current value.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count > 0:
self.avg = self.sum / self.count
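# Illustrative usage sketch (function name and inputs are mine): keep a running
# average of a per-batch metric, weighting each update by the (possibly
# varying) batch size.
def _example_average_meter(batch_losses, batch_sizes):
    loss_meter = AverageMeter()
    for loss, n in zip(batch_losses, batch_sizes):
        loss_meter.update(loss, n=n)
    return loss_meter.avg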
def save_zip(object, path, protocol=0):
"""
Saves a compressed object to disk
"""
# Create the folder, if necessary
pathlib.Path(path).parent.mkdir(parents=True, exist_ok=True)
file = gzip.GzipFile(path, 'wb')
pickled = pickle.dumps(object, protocol)
file.write(pickled)
file.close()
def load_zip(path):
"""
Loads a compressed object from disk
"""
file = gzip.GzipFile(path, 'rb')
buffer = b""
while True:
data = file.read()
if data == b"":
break
buffer += data
object = pickle.loads(buffer)
file.close()
return object
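# Illustrative usage sketch (function name and path are mine): round-tripping an
# object through the gzip-pickle helpers above.
def _example_zip_roundtrip(obj, path='/tmp/example_object.gz'):
    save_zip(obj, path, protocol=2)
    return load_zip(path)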
def lp_norm(array, p):
# L_infinity: Maximum difference
if np.isinf(p):
value = np.max(np.abs(array))
# No normalisation for L-inf
# L_0: Count of different values
elif p == 0:
value = np.count_nonzero(np.reshape(array, -1))
# L_p: p-root of the sum of diff^p
else:
        value = np.power(np.sum(np.power(np.abs(array), p)), 1 / p)
    return value
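# Illustrative sketch (addition, not part of the original module): lp_norm for
# a few common choices of p on a small difference array.
def _example_lp_norm():
    diff = np.array([0.0, -0.5, 0.25, 0.0])
    print(lp_norm(diff, np.inf))  # 0.5, the largest absolute difference
    print(lp_norm(diff, 0))       # 2, the number of non-zero entries
    print(lp_norm(diff, 2))       # ~0.559, the Euclidean norm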
from __future__ import absolute_import, division, print_function, unicode_literals
from keras.utils import to_categorical
import numpy as np
import tensorflow as tf
import datetime
import scipy.io as sio
import math
from matplotlib.pyplot import pause
import os
import glob
class CFA_process:
    # sets neighbor indexes for k-regular networks (the number of neighbors is 'neighbors')
def get_connectivity(self, ii_saved_local, neighbors, devices):
if (ii_saved_local == 0):
sets_neighbors_final = np.arange(ii_saved_local + 1, ii_saved_local + neighbors + 1)
elif (ii_saved_local == devices - 1):
sets_neighbors_final = np.arange(ii_saved_local - neighbors, ii_saved_local)
elif (ii_saved_local >= math.ceil(neighbors / 2)) and (
ii_saved_local <= devices - math.ceil(neighbors / 2) - 1):
sets_neighbors = np.arange(ii_saved_local - math.floor(neighbors / 2),
ii_saved_local + math.floor(neighbors / 2) + 1)
# print("set_neighbors:", sets_neighbors)
            index_ii = np.where(sets_neighbors == ii_saved_local)
# Code from Hayes 2016- k- fingerprinting
import math
import numpy as np
# re-seed the generator
#np.random.seed(1234)
#1. dictionary_() will extract features and write them to a target file (kFPdict) in the data folder
#2. calls RF_openworld(), which starts by dividing kFPdict into training and testing sets
#3. # -1 is IN, 1 is OUT
#file format: "direction time size"
"""Feeder functions"""
def neighborhood(iterable):
iterator = iter(iterable)
prev = (0)
item = next(iterator) # throws StopIteration if empty.
for nex in iterator:
yield (prev,item,nex)
prev = item
item = nex
yield (prev,item,None)
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
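# Illustrative sketch (addition, not part of the original script): the feeder
# helpers. neighborhood() yields (previous, current, next) triples and
# chunkIt() splits a sequence into roughly equal parts.
def _example_feeders():
    print(list(neighborhood([10, 20, 30])))
    # [(0, 10, 20), (10, 20, 30), (20, 30, None)]
    print(chunkIt(list(range(7)), 3))
    # [[0, 1], [2, 3], [4, 5, 6]]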
"""Non-feeder functions"""
def get_pkt_list(trace_data):
first_line = trace_data[0].rstrip()
first_line = first_line.split("\t")
first_time = float(first_line[0])
dta = []
for line in trace_data:
if "##HOST_FTS" in line:
continue
a = line.rstrip()
b = a.split("\t")
if "e-" in b[0]:
dr = b[1]
print("Exponent in total seconds: ", b)
b = [0.0, dr]
#print(b, float(b[0])- first_time)
if float(b[1]) > 0:
#dta.append(((float(b[0])- first_time), abs(int(b[2])), 1))
dta.append(((float(b[0])- first_time), 1))
else:
#dta.append(((float(b[1]) - first_time), abs(int(b[2])), -1))
dta.append(((float(b[0]) - first_time), -1))
return dta
def In_Out(list_data):
In = []
Out = []
for p in list_data:
if p[1] == -1:
In.append(p)
if p[1] == 1:
Out.append(p)
return In, Out
############### TIME FEATURES #####################
def inter_pkt_time(list_data):
times = [x[0] for x in list_data]
temp = []
#print(times)
#print(times[1:]+[times[0]])
for elem,next_elem in zip(times, times[1:]+[times[0]]):
temp.append(next_elem-elem)
return temp[:-1]
def interarrival_times(list_data):
In, Out = In_Out(list_data)
IN = inter_pkt_time(In)
OUT = inter_pkt_time(Out)
TOTAL = inter_pkt_time(list_data)
return IN, OUT, TOTAL
def interarrival_maxminmeansd_stats(list_data):
interstats = []
In, Out, Total = interarrival_times(list_data)
if In and Out:
avg_in = sum(In)/float(len(In))
avg_out = sum(Out)/float(len(Out))
avg_total = sum(Total)/float(len(Total))
        interstats.append((max(In), max(Out), max(Total), avg_in, avg_out,
                           avg_total, np.std(In), np.std(Out), np.std(Total)))
    return interstats
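# Illustrative sketch (addition, not part of the original script): interarrival
# times on a tiny synthetic packet list in (time, direction) form, where -1
# marks an incoming packet and 1 an outgoing one.
def _example_interarrival():
    trace = [(0.00, 1), (0.05, -1), (0.10, 1), (0.30, -1), (0.35, 1)]
    In, Out, Total = interarrival_times(trace)
    print(In)     # gaps between consecutive incoming packets
    print(Out)    # gaps between consecutive outgoing packets
    print(Total)  # gaps between consecutive packets overall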
"""
Train a logistic regression model.
Input:
data:
Output:
params: parameters of logistic regression model
"""
import os
import pandas as pd
from bokeh.plotting import figure
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn.utils import weight_norm
from torch.nn import ReLU
import numpy as np
import streamlit as st
import yaml
import pickle
from pathlib import Path, PurePath
import argparse
from datetime import datetime
from collections import OrderedDict
serverdir = Path(os.path.realpath(__file__)).parent.parent
he_for_medical_data = serverdir.parent.parent
def generate_random_data(num_data_samp, data_dim):
"Generate some random data for log reg."
a = np.random.rand(data_dim)+5
x_noise = 0.1*np.random.randn(num_data_samp,1)
x = 10*np.random.rand(num_data_samp,data_dim) - 5
b = np.array([-np.dot(a,x[row,...])-x_noise[row,...] for row in range(0,num_data_samp)])
b = np.exp(b)
y_float = 1/(1+b)
y = np.rint(y_float)
return {"x": x, "y": y}
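# Illustrative sketch (addition, not part of the original module): draw a small
# synthetic dataset for logistic regression and check its shapes.
def _example_generate_data(num_samples=100, dim=5):
    data = generate_random_data(num_samples, dim)
    # x is (num_samples, dim); y holds 0/1 labels from a noisy linear rule.
    print(data["x"].shape, data["y"].shape, np.unique(data["y"]))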
class poly(nn.Module):
"""Polynomial activation function.
degreelist: list of powers of the polynomial.
"""
def __init__(self, degreelist):
super(poly,self).__init__()
self.degreelist = degreelist
p = len(degreelist)
arr = np.ones(p,dtype=np.float32)
coeff = torch.nn.Parameter(torch.tensor(arr), requires_grad=True)
self.register_parameter("coefficients", coeff)
def forward(self,x):
out = [torch.pow(x,n) for n in self.degreelist]
shape = x.shape
out = torch.cat([j.reshape(*shape,1) for j in out],dim=-1)
out = out * self.coefficients
out = out.sum(-1)
return out
class fully_conn(nn.Module):
"""Creates a fully connected neural network according to specs.
input_size: features length
layers: list of how many neurons per layer
activation: "relu" or "poly"
    degrees: optional. If choosing activation=poly you must specify degrees.
The activation polynomial will have trainable coefficients but only
for the degrees specified. E.g.: [2,3]-> activation= ax^2 +bx^3. """
def __init__(self, input_size, layers, activation, degrees = None):
super(fully_conn, self).__init__()
network = [("weightedLinear0", weight_norm(nn.Linear(input_size,layers[0])))]
numlayer = len(layers)
if activation == "relu":
if numlayer > 1:
Relu = ("relu0", ReLU())
network.append(Relu)
for i in range(numlayer-1):
l = (f"weightedLinear{i+1}", weight_norm(nn.Linear(layers[i],layers[i+1])))
if i < numlayer-2:
Relu = (f"relu{i+1}", ReLU())
network.extend([l, Relu])
else:
network.append(l)
if activation == "poly":
if numlayer > 1:
Poly = (f"poly0", poly(degrees))
network.append(Poly)
p = len(degrees)
for i in range(numlayer-1):
l = (f"weightedLinear{i+1}",weight_norm(nn.Linear(layers[i],layers[i+1])))
if i < numlayer-2:
Poly = (f"poly{i+1}", poly(degrees))
network.extend([l,Poly])
else:
network.append(l)
self.nnet = nn.Sequential(OrderedDict(network))
def forward(self,x):
logits = self.nnet(x)
return logits
def predict(self,x):
return torch.sigmoid(self.forward(x))
class logreg(nn.Module):
def __init__(self, input_size, classes):
super(logreg, self).__init__()
linear = nn.Linear(input_size, classes)
self.logistic_reg = weight_norm(linear,name = "weight")
def forward(self, x):
return self.logistic_reg(x)
def predict(self,x):
return torch.sigmoid(self.forward(x))
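# Illustrative sketch (addition, not part of the original module): a forward
# pass through the logistic-regression and polynomial-activation models on
# random inputs. The layer sizes and degrees below are arbitrary examples.
def _example_models(num_features=8):
    x = torch.randn(4, num_features)
    lr_model = logreg(num_features, classes=1)
    print(lr_model.predict(x).shape)  # probabilities, shape (4, 1)
    nn_model = fully_conn(num_features, layers=[16, 1],
                          activation="poly", degrees=[1, 2])
    print(nn_model.predict(x).shape)  # probabilities, shape (4, 1)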
def train(config, train_data, model, optimizer_state=None):
"""
Training for mortality models.
config: dict of learning parameters
train_dict: dict {"x":ndarray, "y": ndarray}
"""
num_epochs = config["num_epochs"]
batch_size = config["batch_size"]
lr = config["learning_rate"]
train_x = train_data["train_x"]
train_y = train_data["train_y"]
test_x = train_data["test_x"]
test_y = train_data["test_y"]
train_tensors = torch.utils.data.TensorDataset(train_x,train_y)
train_loader = torch.utils.data.DataLoader(train_tensors,
batch_size = batch_size,
shuffle = True,
)
optimizer = torch.optim.Adam(model.parameters(),lr = lr)
if optimizer_state != None:
optimizer.load_state_dict(optimizer_state)
loss_values = []
pd_loss_value = pd.DataFrame(columns = ["loss", "test_loss","step"])
round = 0
placeholderpath = st.empty()
placeholdergraph = st.empty()
placeholder = st.empty()
for epoch in range(num_epochs):
for (x,y) in train_loader:
outputs = model(x)
optimizer.zero_grad()
loss = torch.nn.functional.binary_cross_entropy_with_logits(outputs,y)
loss.backward()
optimizer.step()
if round % 50 == 0:
pred = model(test_x)
test_loss = torch.nn.functional.binary_cross_entropy_with_logits(pred,test_y)
print(f"epoch: {epoch}/{num_epochs}; step: {round}; loss: {loss}; test_loss: {test_loss}")
lossdict = {"epoch": epoch,
"step": round,
"loss": loss.detach().numpy(),
"test_loss": test_loss.detach().numpy(),
}
loss_values.append(lossdict)
pd_loss_value = pd_loss_value.append(lossdict,ignore_index=True)
#df = pd_loss_value[["loss","test_loss","step"]].set_index('step')
p = figure(title="Loss/test loss")
p.line(pd_loss_value.step,pd_loss_value.loss,line_width=2, color="firebrick", legend="loss")
p.line(pd_loss_value.step,pd_loss_value.test_loss, line_width=2, legend="test_loss")
placeholdergraph.bokeh_chart(p)
placeholder.table(pd_loss_value)
round+=1
return model, optimizer, loss_values, placeholderpath
def convert_mortality_data(train_dict, test=False):
    """Converts a mortality data dictionary with keys ("train", "test"), or
    with just ("test") when test is True (testing only).
"""
#Hack for now
if "test_x" in train_dict.keys():
if test == False:
train_dict["train_x"] = torch.Tensor(train_dict["train_x"].values)
train_dict["train_y"] = torch.Tensor(train_dict["train_y"].values).unsqueeze_(1)
train_dict["test_x"] = torch.Tensor(train_dict["test_x"].values)
train_dict["test_y"] = torch.Tensor(train_dict["test_y"].values).unsqueeze_(1)
else:
if test == False:
trainset = train_dict.pop("train")
train_dict["train_x"] = torch.Tensor(trainset.drop(columns = ["expire"]).values)
train_dict["train_y"] = torch.Tensor(trainset.expire.values).unsqueeze_(1)
testset = train_dict.pop("test")
train_dict["test_x"] = torch.Tensor(testset.drop(columns = ["expire"]).values)
train_dict["test_y"] = torch.Tensor(testset.expire.values).unsqueeze_(1)
train_dict["num_features"] = train_dict["test_x"].shape[1]
return train_dict
def main(modeldir = None, datadir = None, continuetrain = None, test = False):
#Get all parsed arguments
modeldir = serverdir.joinpath("model_params",modeldir)
data_pickle = he_for_medical_data.joinpath("data",datadir,"train_dict.pkl") #moved data path
#Load the training configs
cfgs = modeldir.joinpath("configs.yaml")
try:
with open(cfgs) as f:
configs = yaml.load(f,Loader = yaml.FullLoader)
except FileNotFoundError as e:
raise ValueError("There was a problem finding configs.yaml.")
except Exception as e:
raise ValueError(f"There was an exception: {e}")
#Load the data
try:
with open(data_pickle,'rb') as f:
data_dict = pickle.load(f)
except Exception as e:
raise ValueError(f"There was an exception raised when trying to load the data: {e}")
#Turn data into torch.tensor. For the future: can remove this to processing pipeline.
try:
train_data = convert_mortality_data(data_dict, test = test)
except Exception as e:
raise ValueError(f"There was an issue with the data format: {e}")
#Put together the model either nn or logreg
modeltype = configs["modeltype"]
if modeltype == "nn":
try:
layers = configs["layers"]
activation = configs["activation"]
degrees = configs["degrees"]
input_size = train_data["num_features"]
model = fully_conn(input_size,
layers,
activation,
degrees=degrees,
)
except Exception as e:
raise ValueError(f"The model couldn't load: {e}")
if modeltype == "logreg":
try:
layers = configs["layers"]
input_size = train_data["num_features"]
model = logreg(input_size, layers)
except Exception as e:
raise ValueError(f"The model couldn't load: {e}")
#Initialize model with pretrained params to continue training or test ...
if continuetrain == True or test == True:
list_of_paths = modeldir.glob("*")
paths = sorted(list_of_paths, key=lambda p: p.stat().st_ctime)
paths.reverse()
for path in paths:
if path.name[0:5] == "model":
latest_path = path
break
checkpoint = torch.load(latest_path)
model_state = checkpoint["model_state_dict"]
optimizer_state = checkpoint["optimizer_state_dict"]
model.load_state_dict(model_state)
else:
optimizer_state = None
#Predict only
if test == True:
test_x = train_data["test_x"]
test_y = train_data["test_y"].squeeze().numpy()
st.write("Model loaded. Now making predictions...")
y = model.predict(test_x).squeeze().detach().numpy()
        predictions = np.stack([test_y, y], axis=-1)
# 1. Creating an array
import numpy as np
a = np.array([[1, 2], [3, 4]])
print(a)
# 2. Basic arithmetic operations
print('====================================================================================================')
print('== Problem 1. Print the result of adding 5 to every element of the array a below!')
print('====================================================================================================\n')
a = np.array([[1, 2], [3, 4]])
print(a + 5)
print('====================================================================================================')
print('== Problem 2. Print the mean of the elements of the array below!')
print('====================================================================================================\n')
a = np.array([1,2,4,5,5,7,10,13,18,21])
print(np.mean(a))
print('====================================================================================================')
print('== Problem 3. Print the median of the array a!')
print('====================================================================================================\n')
print(np.median(a))
print('====================================================================================================')
print('== Problem 4. Print the maximum and minimum of the array a!')
print('====================================================================================================\n')
print(np.max(a), np.min(a))
print('====================================================================================================')
print('== Problem 5. Print the standard deviation and variance of the array a!')
print('====================================================================================================\n')
print(np.std(a), np.var(a))
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Löwdin population analysis."""
import random
import numpy
from cclib.method.population import Population
class LPA(Population):
"""The Löwdin population analysis"""
def __init__(self, *args):
# Call the __init__ method of the superclass.
super(LPA, self).__init__(logname="LPA", *args)
def __str__(self):
"""Return a string representation of the object."""
return "LPA of %s" % (self.data)
def __repr__(self):
"""Return a representation of the object."""
return 'LPA("%s")' % (self.data)
def calculate(self, indices=None, x=0.5, fupdate=0.05):
"""Perform a calculation of Löwdin population analysis.
Inputs:
indices - list of lists containing atomic orbital indices of fragments
        x - overlap matrix exponent in wavefunction projection (x=0.5 for Lowdin)
"""
unrestricted = (len(self.data.mocoeffs) == 2)
nbasis = self.data.nbasis
# Determine number of steps, and whether process involves beta orbitals.
self.logger.info("Creating attribute aoresults: [array[2]]")
alpha = len(self.data.mocoeffs[0])
self.aoresults = [ numpy.zeros([alpha, nbasis], "d") ]
nstep = alpha
if unrestricted:
beta = len(self.data.mocoeffs[1])
self.aoresults.append(numpy.zeros([beta, nbasis], "d"))
nstep += beta
        # initialize progress if available
if self.progress:
self.progress.initialize(nstep)
if hasattr(self.data, "aooverlaps"):
S = self.data.aooverlaps
elif hasattr(self.data, "fooverlaps"):
S = self.data.fooverlaps
# Get eigenvalues and matrix of eigenvectors for transformation decomposition (U).
        # Find roots of diagonal elements, and transform backwards using eigenvectors.
# We need two matrices here, one for S^x, another for S^(1-x).
# We don't need to invert U, since S is symmetrical.
eigenvalues, U = numpy.linalg.eig(S)
UI = U.transpose()
Sdiagroot1 = numpy.identity(len(S))*numpy.power(eigenvalues, x)
Sdiagroot2 = numpy.identity(len(S))*numpy.power(eigenvalues, 1-x)
Sroot1 = numpy.dot(U, numpy.dot(Sdiagroot1, UI))
Sroot2 = numpy.dot(U, numpy.dot(Sdiagroot2, UI))
step = 0
for spin in range(len(self.data.mocoeffs)):
for i in range(len(self.data.mocoeffs[spin])):
if self.progress and random.random() < fupdate:
self.progress.update(step, "Lowdin Population Analysis")
ci = self.data.mocoeffs[spin][i]
temp1 = numpy.dot(ci, Sroot1)
temp2 = numpy.dot(ci, Sroot2)
self.aoresults[spin][i] = numpy.multiply(temp1, temp2).astype("d")
step += 1
if self.progress:
self.progress.update(nstep, "Done")
retval = super(LPA, self).partition(indices)
if not retval:
self.logger.error("Error in partitioning results")
return False
# Create array for charges.
self.logger.info("Creating fragcharges: array[1]")
size = len(self.fragresults[0][0])
self.fragcharges = numpy.zeros([size], "d")
alpha = numpy.zeros([size], "d")
if unrestricted:
beta = numpy.zeros([size], "d")
for spin in range(len(self.fragresults)):
for i in range(self.data.homos[spin] + 1):
temp = numpy.reshape(self.fragresults[spin][i], (size,))
self.fragcharges = numpy.add(self.fragcharges, temp)
if spin == 0:
alpha = numpy.add(alpha, temp)
elif spin == 1:
beta = numpy.add(beta, temp)
if not unrestricted:
            self.fragcharges = numpy.multiply(self.fragcharges, 2)
"""
Contours analysis based mainly on moments
"""
# Import required packages:
import numpy as np
import cv2
from matplotlib import pyplot as plt
# the aspect ratio is defined as: width/height
def aspect_ratio(contour):
"""Returns the aspect ratio of the contour based on the dimensions of the bounding rect"""
x, y, w, h = cv2.boundingRect(contour)
res = float(w) / h
return res
def roundness(contour, moments):
"""Calculates the roundness of a contour"""
length = cv2.arcLength(contour, True)
k = (length * length) / (moments['m00'] * 4 * np.pi)
return k
def eccentricity_from_ellipse(contour):
"""Calculates the eccentricity fitting an ellipse from a contour"""
(x, y), (MA, ma), angle = cv2.fitEllipse(contour)
a = ma / 2
b = MA / 2
ecc = np.sqrt(a ** 2 - b ** 2) / a
return ecc
def eccentricity_from_moments(moments):
"""Calculates the eccentricity from the moments of the contour"""
a1 = (moments['mu20'] + moments['mu02']) / 2
a2 = np.sqrt(4 * moments['mu11'] ** 2 + (moments['mu20'] - moments['mu02']) ** 2) / 2
    ecc = np.sqrt(1 - (a1 - a2) / (a1 + a2))
    return ecc
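# Illustrative sketch (addition, not part of the original script): shape
# descriptors for a synthetic filled circle drawn with OpenCV. A circle should
# give an aspect ratio and roundness close to 1 and eccentricities close to 0.
def _example_shape_descriptors(radius=50):
    canvas = np.zeros((200, 200), dtype=np.uint8)
    cv2.circle(canvas, (100, 100), radius, 255, -1)
    # findContours returns (contours, hierarchy) on OpenCV 4 and
    # (image, contours, hierarchy) on OpenCV 3; [-2] picks contours either way.
    contours = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
    contour = contours[0]
    moments = cv2.moments(contour)
    print(aspect_ratio(contour), roundness(contour, moments))
    print(eccentricity_from_ellipse(contour), eccentricity_from_moments(moments))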
import pandas as pd
from datetime import datetime, timedelta
import random
import numpy as np
import yfinance as yf
class Env:
def __init__(self, Enviorment, ticker=None):
self.switch_eviorment(Enviorment, ticker=ticker)
    # set up environment variables
def switch_eviorment(self, Enviorment, ticker=None):
        print('Setting Up Environment ;)')
eviorment_dictionary ={ "Standard": 0 , "Days":1, "Trading":2}
while (Enviorment not in eviorment_dictionary):
            Enviorment = str(input("Enter the environment you want to choose\n"))
self.typee = eviorment_dictionary.get(Enviorment)
if(ticker == None ):
self.ticker= str(input('Ticker of stock (All Caps)\n'))
else:
self.ticker = ticker
not_done=True
while (not_done):
try:
yf.download(self.ticker, (datetime.now()-timedelta(days = 7)) , (datetime.now()))
not_done=False
except:
self.ticker= str(input('Enter Valid ticker of stock (All Caps)\n'))
    # get data frame with specifications
def setdf(self, start_date=None, end_date=None, agent_memory=None, days_ahead=None):
if (self.typee!=4):
if(self.typee==1):
if not all([start_date, end_date, agent_memory, days_ahead]):
print('Some info missing, please enter it below \n')
else:
if not all([start_date, end_date, agent_memory]):
print('Some info missing, please enter it below \n')
if(start_date == None ):
self.start_date = datetime.strptime(str(input("Start date: Year-Month-Day (ex. '2007-01-01') \n")), '%Y-%m-%d')
else:
self.start_date = start_date
if(end_date == None ):
end = str(input("End date: Year-Month-Day (ex. '2007-01-01') or 'now' for current date \n"))
if end != 'now':
self.end_date = datetime.strptime(end, '%Y-%m-%d')
else:
self.end_date = datetime.now()
else:
if(type(end_date) is str):
self.end_date = datetime.now()
else:
self.end_date = end_date
if(agent_memory == None ):
self.agent_memory = int(input('Agent memory: days\n'))
else:
self.agent_memory = agent_memory
if(self.typee == 0 or self.typee == 2):
self.days_ahead = 1
elif(self.typee == 1):
if(days_ahead == None):
self.days_ahead = int(input('Days ahead to predict\n'))
else:
self.days_ahead = days_ahead
self.df = yf.download(self.ticker, self.start_date, self.end_date)
    # Data comes as: [High, Low, Open, Close, Volume, Adj Close] numpy array
#Used to predict close of next day
#returns data as needed
def getdata(self, shuffle=True, seed= 42):
datalist=[]
anslist =[]
if(self.typee == 0):
for i in range(len(self.df)-self.agent_memory):
datalist.append(self.df.iloc[i:i+self.agent_memory].to_numpy())
anslist.append(self.df.iloc[i+self.agent_memory, 3])
elif(self.typee == 1):
for i in range(len(self.df)-(self.agent_memory+self.days_ahead)+1):
datalist.append(self.df.iloc[i:i+self.agent_memory].to_numpy())
anslist.append(self.df.iloc[i+self.agent_memory:i+self.agent_memory+self.days_ahead , 3].to_numpy().reshape(self.days_ahead,1))
if(shuffle):
shufflelist = list(zip(datalist, anslist))
random.seed(seed)
random.shuffle(shufflelist)
datalist, anslist = zip(*shufflelist)
return datalist, anslist
#data with train_test split
def train_test(self, test_percent=None, shuffle = True, seed = 42, start_date=None, end_date=None, agent_memory=None, days_ahead=None):
self.setdf(start_date, end_date, agent_memory, days_ahead)
if (test_percent == None or test_percent >=1):
test_percent =.20
datalist , anslist = self.getdata(shuffle = shuffle, seed=seed)
test_data = np.asarray(datalist[int(len(datalist)*(1-test_percent)):])
train_data = np.asarray(datalist[:int(len(datalist)*(1-test_percent))])
test_ans = np.asarray(anslist[int(len(anslist)*(1-test_percent)):])
train_ans = np.asarray(anslist[:int(len(anslist)*(1-test_percent))])
return train_data, test_data, train_ans,test_ans
#data to test with if needed
def get_testdata(self, start_date=None, end_date =None, agent_memory=None, days_ahead=None, shuffle = True, seed=42):
self.setdf(start_date, end_date, agent_memory, days_ahead)
datalist , anslist = self.getdata(shuffle = shuffle, seed=seed)
        return np.asarray(datalist), np.asarray(anslist)
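# Illustrative usage sketch (addition, not part of the original module):
# building the environment and pulling a train/test split. This downloads data
# through yfinance, so it needs network access; the ticker, dates and window
# length are arbitrary examples.
def _example_env_split():
    env = Env("Standard", ticker="AAPL")
    train_x, test_x, train_y, test_y = env.train_test(
        test_percent=0.2,
        start_date=datetime(2020, 1, 1),
        end_date="now",
        agent_memory=10,
    )
    print(train_x.shape, test_x.shape, train_y.shape, test_y.shape)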
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Get basic statistics describing the database
# Compare a structure to a database
from tqdm.autonotebook import tqdm
import logging
from pymatgen import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import JmolNN
from .utils import (
get_structure_list,
get_rmsd,
closest_index,
tanimoto_distance,
get_number_bins,
)
import random
from scipy.spatial import distance
from sklearn.linear_model import HuberRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.metrics.pairwise import euclidean_distances
from scipy.stats import (
pearsonr,
ks_2samp,
mannwhitneyu,
ttest_ind,
anderson_ksamp,
gmean,
kurtosis,
mode,
variation,
skew,
normaltest,
kruskal,
median_absolute_deviation,
)
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from scipy import ndimage
import concurrent.futures
from functools import partial
from numba import jit
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("RemoveDuplicates")
logger.setLevel(logging.DEBUG)
# ToDo (maybe) make sure that input data is numeric?
# Todo: grid search for kernel width in MMD test
class Statistics:
def __init__(self):
pass
@staticmethod
def _get_one_graph_comparison(
structure_list_a: list, structure_list_b: list, _
) -> float:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
_:
Returns:
Jaccard distance between two random structure graphs
"""
logger.debug("i am in the graph comparison routine")
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
crystal_a = Structure.from_file(random_selection_1)
crystal_b = Structure.from_file(random_selection_2)
nn_strategy = JmolNN()
sgraph_a = StructureGraph.with_local_env_strategy(crystal_a, nn_strategy)
sgraph_b = StructureGraph.with_local_env_strategy(crystal_b, nn_strategy)
return sgraph_a.diff(sgraph_b, strict=False)["dist"]
except Exception:
return np.nan
@staticmethod
@jit
def euclidean_distance(u: np.ndarray, v: np.ndarray) -> float:
"""
Args:
u:
v:
Returns:
"""
return np.linalg.norm(u - v)
@staticmethod
def _randomized_graphs(
structure_list_a: list,
structure_list_b: list,
iterations: int = 5000,
njobs: int = 2,
) -> list:
"""
Randomly sample structures from the structure list and compare their Jaccard graph distance.
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
iterations (int): Number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of workers
Returns:
list of length iterations of the Jaccard distances
"""
diffs = []
get_one_graph_comparison_partial = partial(
Statistics._get_one_graph_comparison, structure_list_a, structure_list_b
)
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for graph comparisons")
for diff in tqdm(
executor.map(get_one_graph_comparison_partial, range(iterations)),
total=len(range(iterations)),
):
diffs.append(diff)
return diffs
@staticmethod
def _get_one_randomized_structure_property(
structure_list_a: list, structure_list_b: list, feature: str, _
) -> float:
"""
Returns difference between the selected property for two random structures.
Args:
structure_list_a (list): list of paths (str) to structures
structure_list_b (list): list of paths (str) to structures
feature (str): feature that shall be compared, available are 'density', 'num_sites'
and 'volume
_:
Returns:
difference of feature for two randomly selected structures
"""
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
crystal_a = Structure.from_file(random_selection_1)
crystal_b = Structure.from_file(random_selection_2)
if feature == "density":
diff = np.abs(crystal_a.density - crystal_b.density)
elif feature == "num_sites":
diff = np.abs(crystal_a.num_sites - crystal_b.num_sites)
elif feature == "volume":
diff = np.abs(crystal_a.volume - crystal_b.volume)
return diff
except Exception:
return np.nan
@staticmethod
def _randomized_structure_property(
structure_list_a: list,
structure_list_b: list,
feature: str = "density",
iterations: int = 5000,
njobs: int = 2,
) -> list:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
feature (str): property that is used for the structure comparisons, available options are
density, num_sites, volume. Default is density.
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of concurrent workers
Returns:
list with rmsds
"""
diffs = []
get_one_randomized_structure_property_partial = partial(
Statistics._get_one_randomized_structure_property,
structure_list_a,
structure_list_b,
feature,
)
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for graph comparisons")
for diff in tqdm(
executor.map(
get_one_randomized_structure_property_partial, range(iterations)
),
total=len(range(iterations)),
):
diffs.append(diff)
return diffs
@staticmethod
def _get_one_rmsd(structure_list_a: list, structure_list_b: list, _) -> float:
logger.debug("i am in the _get_one_rmsd routine")
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
a = get_rmsd(random_selection_1, random_selection_2)
return a
except Exception as e:
logger.error("Exception %s occured", e)
return np.nan
@staticmethod
def _randomized_rmsd(
structure_list_a: list,
structure_list_b: list,
iterations: float = 5000,
njobs: int = 2,
) -> list:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of concurrent workers
Returns:
"""
rmsds = []
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for rmsd comparisons")
get_one_rmsd_partial = partial(
Statistics._get_one_rmsd, structure_list_a, structure_list_b
)
for rmsd in tqdm(
executor.map(get_one_rmsd_partial, range(iterations)),
total=len(range(iterations)),
):
rmsds.append(rmsd)
return rmsds
@staticmethod
def optimal_knn(data, max_cluster: int = 20):
"""
use silhouette scores to find the optimal number of clusters.
we use silhouette scores as they are easier to use in a algorithm
than the "elbow criterion"
Args:
data (np.array): data matrix
max_cluster (int): maximum number of clusters. Optimization will happen
for all cluster numbers k in (2, min(len(data), max_cluster))
Returns:
"""
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
logger.debug("searching for optimal knn clustering")
silhouette_scores = []
n_clusters = []
# avoid that k > len(data)
upper_boundary = np.min([len(data), max_cluster])
sc = StandardScaler()
data = sc.fit_transform(data)
for n_cluster in range(2, upper_boundary):
kmeans = KMeans(n_clusters=n_cluster).fit(data)
label = kmeans.labels_
sil_coeff = silhouette_score(data, label, metric="euclidean")
silhouette_scores.append(sil_coeff)
n_clusters.append(n_cluster)
optimal_n_cluster = n_clusters[np.argmax(silhouette_scores)]
kmeans = KMeans(n_clusters=optimal_n_cluster).fit(data)
logger.info("found optimal knn clustering with %s clusters", optimal_n_cluster)
return kmeans, optimal_n_cluster
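    @staticmethod
    def _example_optimal_knn():
        """Illustrative sketch (addition, not part of the original class):
        run optimal_knn on three well-separated synthetic 2-D blobs; it is
        expected to report three clusters.
        """
        rng = np.random.RandomState(0)
        blobs = np.vstack(
            [rng.normal(loc=c, scale=0.1, size=(30, 2)) for c in (0.0, 5.0, 10.0)]
        )
        _, n_clusters = Statistics.optimal_knn(blobs, max_cluster=10)
        print(n_clusters)  # expected: 3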
@staticmethod
def trimean(data):
"""
Args:
data: numeric data
Returns:
trimean (float) for data
"""
q1 = np.quantile(data, 0.25)
q3 = np.quantile(data, 0.75)
        return (q1 + 2 * np.median(data) + q3) / 4
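# Illustrative sketch (addition, not part of the original module):
# Statistics.trimean on a small sample, i.e. (Q1 + 2 * median + Q3) / 4.
def _example_trimean():
    sample = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    print(Statistics.trimean(sample))  # Q1=3, median=5, Q3=7 -> 5.0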
import numpy as np
from ...dimensions.dim_linear import DimLinear
from ...dimensions.dim_angular import DimAngular
from ...dimensions import DimDegree
from ...dimensions import DimRadian
from ..cross_sect_base import CrossSectBase, CrossSectToken
__all__ = ['CrossSectOuterRotor']
class CrossSectOuterRotor(CrossSectBase):
def __init__(self, **kwargs: any) -> None:
'''
Initialization function for Outer Rotor class. This function takes in
arguments and saves the information passed to private variable to make
them read-only
Parameters
----------
**kwargs : any
DESCRIPTION. Keyword arguments provided to the initialization function.
The following argument names have to be included in order for the code
to execute: name, dim_l, dim_t, dim_theta, location.
Returns
-------
None
'''
self._create_attr(kwargs)
super()._validate_attr()
self._validate_attr()
@property
def dim_alpha_rs(self):
return self._dim_alpha_rs
@property
def dim_alpha_rm(self):
return self._dim_alpha_rm
@property
def dim_r_ro(self):
return self._dim_r_ro
@property
def dim_d_rp(self):
return self._dim_d_rp
@property
def dim_d_ri(self):
return self._dim_d_ri
@property
def dim_d_rs(self):
return self._dim_d_rs
@property
def dim_p(self):
return self._dim_p
@property
def dim_S(self):
return self._dim_S
def draw(self, drawer):
alpha_rs = DimRadian(self.dim_alpha_rs)
alpha_rm = DimRadian(self.dim_alpha_rm)
r_ro = self.dim_r_ro
d_rp = self.dim_d_rp
d_ri = self.dim_d_ri
d_rs = self.dim_d_rs
pole_pair = self.dim_p
segments = self.dim_S
alpha_total = DimRadian(DimDegree(180 / pole_pair))
# outer arc
r = r_ro
x1 = r * np.cos(alpha_total / 2)
y1 = r * np.sin(alpha_total / 2)
# inner arc between poles
r = r_ro - d_ri - d_rp
x2 = r * np.cos(alpha_rm / 2)
y2 = r * np.sin(alpha_rm / 2)
x3 = r * np.cos(alpha_total / 2)
y3 = r * np.sin(alpha_total / 2)
# line containing region between poles
r = r_ro - d_ri
        x4 = r * np.cos(alpha_rm / 2)
"""Generate coil geometries.
This module provides functions to generate various coil geometries
that can be used in conjunction with the eppy module to calculate eddy
currents in flat plates.
"""
import numpy as np
import numpy.typing as npt
# ----------------------------------------------------------------------
# User defined types
#
ArrayFloat = npt.NDArray[np.float_]
def straight_wire(start: ArrayFloat, end: ArrayFloat,
n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
"""Return position vectors and line segments for straight line.
Parameters
----------
start : ndarray(dtype=float, dim=1)
Coordinate of start point (x, y, z).
end : ndarray(dtype=float, dim=1)
Coordinate of end point (x, y, z).
n : int, defaults to 40
Number of line segments.
Returns
-------
R : ndarray(dtype=float, dim=2)
Array of position vectors for each small line segment.
dl : ndarray(dtype=float, dim=2)
Array of line segment vectors.
"""
points = np.array([start, end])
line = np.array([0, 1])
line = line[None, :]
L = np.linalg.norm(end - start)
esize = L/n
R, dl = coil_segments(points, esize, lines=line)
return R, dl
def circular_coil(center: ArrayFloat, radius: float, plane: str="XY",
n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
"""Return position vectors and line segments for circular coil.
Parameters
----------
center : ndarray(dtype=float, dim=1)
Coordinate of the center (x, y, z).
radius : float
Radius of the circular coil.
plane : {'XY', 'YZ'}, defaults to 'XY'
Plane in which the circular coil is defined.
n : int, defaults to 40
Number of line segments.
Returns
-------
R : ndarray(dtype=float, dim=2)
Array of position vectors for each small line segment.
dl : ndarray(dtype=float, dim=2)
Array of line segment vectors.
"""
P = np.zeros((3, 3))
if plane == "XY":
P[0] = center + np.array([radius, 0, 0])
P[1] = center + np.array([0, radius, 0])
P[2] = center - np.array([radius, 0, 0])
elif plane == "YZ":
P[0] = center + np.array([0, radius, 0])
P[1] = center + np.array([0, 0, radius])
        P[2] = center - np.array([0, radius, 0])
# Copyright (C) 2020 <NAME>
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import numpy as np
from phonopy.phonon.degeneracy import degenerate_sets
from phono3py.phonon3.conductivity import (Conductivity, all_bands_exist,
unit_to_WmK)
from phono3py.phonon3.conductivity import write_pp as _write_pp
from phono3py.phonon3.collision_matrix import CollisionMatrix
from phono3py.phonon.grid import get_grid_points_by_rotations
from phono3py.file_IO import (write_kappa_to_hdf5,
write_collision_to_hdf5,
read_collision_from_hdf5,
write_collision_eigenvalues_to_hdf5,
write_unitary_matrix_to_hdf5,
read_pp_from_hdf5)
from phonopy.units import THzToEv, Kb
def get_thermal_conductivity_LBTE(
interaction,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
grid_points=None,
boundary_mfp=None, # in micrometre
solve_collective_phonon=False,
is_reducible_collision_matrix=False,
is_kappa_star=True,
gv_delta_q=1e-4, # for group velocity
is_full_pp=False,
pinv_cutoff=1.0e-8,
pinv_solver=0, # default: dsyev in lapacke
write_collision=False,
read_collision=False,
write_kappa=False,
write_pp=False,
read_pp=False,
write_LBTE_solution=False,
compression="gzip",
input_filename=None,
output_filename=None,
log_level=0):
if temperatures is None:
_temperatures = [300, ]
else:
_temperatures = temperatures
if sigmas is None:
sigmas = []
if log_level:
        print("-" * 19 + " Lattice thermal conductivity (LBTE) " + "-" * 19)
print("Cutoff frequency of pseudo inversion of collision matrix: %s" %
pinv_cutoff)
if read_collision:
temps = None
else:
temps = _temperatures
lbte = Conductivity_LBTE(
interaction,
grid_points=grid_points,
temperatures=temps,
sigmas=sigmas,
sigma_cutoff=sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
boundary_mfp=boundary_mfp,
solve_collective_phonon=solve_collective_phonon,
is_reducible_collision_matrix=is_reducible_collision_matrix,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
read_pp=read_pp,
pp_filename=input_filename,
pinv_cutoff=pinv_cutoff,
pinv_solver=pinv_solver,
log_level=log_level)
if read_collision:
read_from = _set_collision_from_file(
lbte,
interaction.bz_grid,
indices=read_collision,
is_reducible_collision_matrix=is_reducible_collision_matrix,
filename=input_filename,
log_level=log_level)
if not read_from:
print("Reading collision failed.")
return False
if log_level:
temps_read = lbte.temperatures
if len(temps_read) > 5:
text = (" %.1f " * 5 + "...") % tuple(temps_read[:5])
text += " %.1f" % temps_read[-1]
else:
text = (" %.1f " * len(temps_read)) % tuple(temps_read)
print("Temperature: " + text)
for i in lbte:
if write_pp:
_write_pp(lbte,
interaction,
i,
filename=output_filename,
compression=compression)
if write_collision:
_write_collision(
lbte,
interaction,
i=i,
is_reducible_collision_matrix=is_reducible_collision_matrix,
is_one_gp_colmat=(grid_points is not None),
filename=output_filename)
lbte.delete_gp_collision_and_pp()
# Write full collision matrix
if write_LBTE_solution:
if ((read_collision and
all_bands_exist(interaction) and
read_from == "grid_points" and
grid_points is None) or
(not read_collision)):
_write_collision(lbte, interaction, filename=output_filename)
if grid_points is None and all_bands_exist(interaction):
lbte.set_kappa_at_sigmas()
if write_kappa:
_write_kappa(
lbte,
interaction.primitive.volume,
is_reducible_collision_matrix=is_reducible_collision_matrix,
write_LBTE_solution=write_LBTE_solution,
pinv_solver=pinv_solver,
compression=compression,
filename=output_filename,
log_level=log_level)
return lbte
def _write_collision(lbte,
interaction,
i=None,
is_reducible_collision_matrix=False,
is_one_gp_colmat=False,
filename=None):
grid_points = lbte.get_grid_points()
temperatures = lbte.temperatures
sigmas = lbte.get_sigmas()
sigma_cutoff = lbte.get_sigma_cutoff_width()
gamma = lbte.gamma
gamma_isotope = lbte.gamma_isotope
collision_matrix = lbte.collision_matrix
mesh = lbte.mesh_numbers
if i is not None:
gp = grid_points[i]
if is_one_gp_colmat:
igp = 0
else:
if is_reducible_collision_matrix:
igp = interaction.bz_grid.bzg2grg[gp]
else:
igp = i
if all_bands_exist(interaction):
for j, sigma in enumerate(sigmas):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[j, igp]
else:
gamma_isotope_at_sigma = None
write_collision_to_hdf5(
temperatures,
mesh,
gamma=gamma[j, :, igp],
gamma_isotope=gamma_isotope_at_sigma,
collision_matrix=collision_matrix[j, :, igp],
grid_point=gp,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
else:
for j, sigma in enumerate(sigmas):
for k, bi in enumerate(interaction.band_indices):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[j, igp, k]
else:
gamma_isotope_at_sigma = None
write_collision_to_hdf5(
temperatures,
mesh,
gamma=gamma[j, :, igp, k],
gamma_isotope=gamma_isotope_at_sigma,
collision_matrix=collision_matrix[j, :, igp, k],
grid_point=gp,
band_index=bi,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
else:
for j, sigma in enumerate(sigmas):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[j]
else:
gamma_isotope_at_sigma = None
write_collision_to_hdf5(temperatures,
mesh,
gamma=gamma[j],
gamma_isotope=gamma_isotope_at_sigma,
collision_matrix=collision_matrix[j],
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
def _write_kappa(lbte,
volume,
is_reducible_collision_matrix=False,
write_LBTE_solution=False,
pinv_solver=None,
compression="gzip",
filename=None,
log_level=0):
temperatures = lbte.temperatures
sigmas = lbte.get_sigmas()
sigma_cutoff = lbte.get_sigma_cutoff_width()
mesh = lbte.mesh_numbers
weights = lbte.get_grid_weights()
frequencies = lbte.get_frequencies()
ave_pp = lbte.get_averaged_pp_interaction()
qpoints = lbte.get_qpoints()
kappa = lbte.kappa
kappa_RTA = lbte.get_kappa_RTA()
gamma = lbte.gamma
gamma_isotope = lbte.gamma_isotope
gv = lbte.get_group_velocities()
f_vector = lbte.get_f_vectors()
gv_by_gv = lbte.get_gv_by_gv()
mode_cv = lbte.get_mode_heat_capacities()
mode_kappa = lbte.get_mode_kappa()
mode_kappa_RTA = lbte.get_mode_kappa_RTA()
mfp = lbte.get_mean_free_path()
coleigs = lbte.get_collision_eigenvalues()
# After kappa calculation, the variable is overwritten by unitary matrix
unitary_matrix = lbte.collision_matrix
if is_reducible_collision_matrix:
frequencies = lbte.get_frequencies_all()
else:
frequencies = lbte.get_frequencies()
for i, sigma in enumerate(sigmas):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[i]
else:
gamma_isotope_at_sigma = None
write_kappa_to_hdf5(temperatures,
mesh,
frequency=frequencies,
group_velocity=gv,
gv_by_gv=gv_by_gv,
mean_free_path=mfp[i],
heat_capacity=mode_cv,
kappa=kappa[i],
mode_kappa=mode_kappa[i],
kappa_RTA=kappa_RTA[i],
mode_kappa_RTA=mode_kappa_RTA[i],
f_vector=f_vector,
gamma=gamma[i],
gamma_isotope=gamma_isotope_at_sigma,
averaged_pp_interaction=ave_pp,
qpoint=qpoints,
weight=weights,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
kappa_unit_conversion=unit_to_WmK / volume,
compression=compression,
filename=filename,
verbose=log_level)
if coleigs is not None:
write_collision_eigenvalues_to_hdf5(temperatures,
mesh,
coleigs[i],
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=log_level)
if write_LBTE_solution:
if pinv_solver is not None:
solver = _select_solver(pinv_solver)
if solver in [1, 2, 3, 4, 5]:
write_unitary_matrix_to_hdf5(
temperatures,
mesh,
unitary_matrix=unitary_matrix,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
solver=solver,
filename=filename,
verbose=log_level)
def _set_collision_from_file(lbte,
bz_grid,
indices='all',
is_reducible_collision_matrix=False,
filename=None,
log_level=0):
sigmas = lbte.get_sigmas()
sigma_cutoff = lbte.get_sigma_cutoff_width()
mesh = lbte.mesh_numbers
grid_points = lbte.get_grid_points()
indices = indices
if len(sigmas) > 1:
gamma = []
collision_matrix = []
read_from = None
if log_level:
print("---------------------- Reading collision data from file "
"----------------------")
sys.stdout.flush()
for j, sigma in enumerate(sigmas):
collisions = read_collision_from_hdf5(mesh,
indices=indices,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=(log_level > 0))
if log_level:
sys.stdout.flush()
if collisions:
(colmat_at_sigma,
gamma_at_sigma,
temperatures) = collisions
if len(sigmas) == 1:
collision_matrix = colmat_at_sigma
gamma = np.zeros((1,) + gamma_at_sigma.shape,
dtype='double', order='C')
gamma[0] = gamma_at_sigma
else:
collision_matrix.append(colmat_at_sigma)
gamma.append(gamma_at_sigma)
read_from = "full_matrix"
else:
vals = _allocate_collision(True,
mesh,
sigma,
sigma_cutoff,
grid_points,
indices,
is_reducible_collision_matrix,
filename)
if vals:
colmat_at_sigma, gamma_at_sigma, temperatures = vals
else:
if log_level:
print("Collision at grid point %d doesn't exist." %
grid_points[0])
vals = _allocate_collision(False,
mesh,
sigma,
sigma_cutoff,
grid_points,
indices,
is_reducible_collision_matrix,
filename)
if vals:
colmat_at_sigma, gamma_at_sigma, temperatures = vals
else:
if log_level:
print("Collision at (grid point %d, band index %d) "
"doesn't exist." % (grid_points[0], 1))
return False
for i, gp in enumerate(grid_points):
if not _collect_collision_gp(colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
bz_grid.bzg2grg,
indices,
is_reducible_collision_matrix,
filename,
log_level):
num_band = colmat_at_sigma.shape[3]
for j in range(num_band):
if not _collect_collision_band(
colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
bz_grid.bzg2grg,
j,
indices,
is_reducible_collision_matrix,
filename,
log_level):
return False
if len(sigmas) == 1:
gamma = gamma_at_sigma
collision_matrix = colmat_at_sigma
else:
gamma.append(gamma_at_sigma[0])
collision_matrix.append(colmat_at_sigma[0])
read_from = "grid_points"
if len(sigmas) > 1:
temperatures = np.array(temperatures, dtype='double', order='C')
gamma = np.array(gamma, dtype='double', order='C')
collision_matrix = np.array(collision_matrix,
dtype='double', order='C')
lbte.set_gamma(gamma)
lbte.set_collision_matrix(collision_matrix)
# lbte.set_temperatures invokes allocation of arrays. So this must
# be called after setting collision_matrix for saving memory
# space.
lbte.set_temperatures(temperatures)
return read_from
def _allocate_collision(for_gps,
mesh,
sigma,
sigma_cutoff,
grid_points,
indices,
is_reducible_collision_matrix,
filename):
num_mesh_points = np.prod(mesh)
if for_gps:
collision = read_collision_from_hdf5(mesh,
indices=indices,
grid_point=grid_points[0],
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=False)
else:
collision = read_collision_from_hdf5(mesh,
indices=indices,
grid_point=grid_points[0],
band_index=0,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=False)
if collision is None:
return False
num_temp = len(collision[2]) # This is to treat indices="all".
if is_reducible_collision_matrix:
if for_gps:
num_band = collision[0].shape[4] # for gps (s,T,b,irgp,b)
else:
num_band = collision[0].shape[3] # for bands (s,T,irgp,b)
gamma_at_sigma = np.zeros(
(1, num_temp, num_mesh_points, num_band),
dtype='double', order='C')
colmat_at_sigma = np.zeros(
(1, num_temp,
num_mesh_points, num_band,
num_mesh_points, num_band),
dtype='double', order='C')
else:
if for_gps:
num_band = collision[0].shape[5] # for gps (s,T,b0,3,irgp,b,3)
else:
num_band = collision[0].shape[4] # for bands (s,T,3,irgp,b,3)
gamma_at_sigma = np.zeros(
(1, num_temp, len(grid_points), num_band),
dtype='double', order='C')
colmat_at_sigma = np.zeros(
(1, num_temp,
len(grid_points), num_band, 3,
len(grid_points), num_band, 3),
dtype='double', order='C')
temperatures = np.zeros(num_temp, dtype='double', order='C')
return colmat_at_sigma, gamma_at_sigma, temperatures
def _collect_collision_gp(colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
bzg2grg,
indices,
is_reducible_collision_matrix,
filename,
log_level):
collision_gp = read_collision_from_hdf5(
mesh,
indices=indices,
grid_point=gp,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=(log_level > 0))
if log_level:
sys.stdout.flush()
if not collision_gp:
return False
(colmat_at_gp,
gamma_at_gp,
temperatures_at_gp) = collision_gp
if is_reducible_collision_matrix:
igp = bzg2grg[gp]
else:
igp = i
gamma_at_sigma[0, :, igp] = gamma_at_gp
colmat_at_sigma[0, :, igp] = colmat_at_gp[0]
temperatures[:] = temperatures_at_gp
return True
def _collect_collision_band(colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
bzg2grg,
j,
indices,
is_reducible_collision_matrix,
filename,
log_level):
collision_band = read_collision_from_hdf5(
mesh,
indices=indices,
grid_point=gp,
band_index=j,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=(log_level > 0))
if log_level:
sys.stdout.flush()
if collision_band is False:
return False
(colmat_at_band,
gamma_at_band,
temperatures_at_band) = collision_band
if is_reducible_collision_matrix:
igp = bzg2grg[gp]
else:
igp = i
gamma_at_sigma[0, :, igp, j] = gamma_at_band
colmat_at_sigma[0, :, igp, j] = colmat_at_band[0]
temperatures[:] = temperatures_at_band
return True
def _select_solver(pinv_solver):
try:
import phono3py._phono3py as phono3c
default_solver = phono3c.default_colmat_solver()
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
default_solver = 4
solver_numbers = (1, 2, 3, 4, 5, 6)
solver = pinv_solver
if solver == 0: # default solver
if default_solver in (4, 5, 6):
try:
import scipy.linalg
except ImportError:
solver = 1
else:
solver = default_solver
else:
solver = default_solver
elif solver not in solver_numbers:
solver = default_solver
return solver
def diagonalize_collision_matrix(collision_matrices,
i_sigma=None,
i_temp=None,
pinv_solver=0,
log_level=0):
"""Diagonalize collision matrices.
Note
----
    collision_matrices is overwritten by eigenvectors.
Parameters
----------
    collision_matrices : ndarray, optional
Collision matrix. This ndarray has to have the following size and
flags.
shapes:
(sigmas, temperatures, prod(mesh), num_band, prod(mesh), num_band)
(sigmas, temperatures, ir_grid_points, num_band, 3,
ir_grid_points, num_band, 3)
(size, size)
dtype='double', order='C'
i_sigma : int, optional
Index of BZ integration methods, tetrahedron method and smearing
method with widths. Default is None.
i_temp : int, optional
Index of temperature. Default is None.
pinv_solver : int, optional
        Diagonalization solver choice.
log_level : int, optional
Verbosity level. Smaller is more quiet. Default is 0.
Returns
-------
w : ndarray, optional
Eigenvalues.
shape=(size_of_collision_matrix,), dtype='double'
"""
start = time.time()
# Matrix size of collision matrix to be diagonalized.
# The following value is expected:
# ir-colmat: num_ir_grid_points * num_band * 3
# red-colmat: num_mesh_points * num_band
shape = collision_matrices.shape
if len(shape) == 6:
size = shape[2] * shape[3]
assert size == shape[4] * shape[5]
elif len(shape) == 8:
size = np.prod(shape[2:5])
assert size == np.prod(shape[5:8])
elif len(shape) == 2:
size = shape[0]
assert size == shape[1]
solver = _select_solver(pinv_solver)
# [1] dsyev: safer and slower than dsyevd and smallest memory usage
# [2] dsyevd: faster than dsyev and largest memory usage
if solver in [1, 2]:
if log_level:
routine = ['dsyev', 'dsyevd'][solver - 1]
sys.stdout.write("Diagonalizing by lapacke %s... " % routine)
sys.stdout.flush()
import phono3py._phono3py as phono3c
w = np.zeros(size, dtype='double')
if i_sigma is None:
_i_sigma = 0
else:
_i_sigma = i_sigma
if i_temp is None:
_i_temp = 0
else:
_i_temp = i_temp
phono3c.diagonalize_collision_matrix(collision_matrices,
w,
_i_sigma,
_i_temp,
0.0,
(solver + 1) % 2,
0) # only diagonalization
elif solver == 3: # np.linalg.eigh depends on dsyevd.
if log_level:
sys.stdout.write("Diagonalizing by np.linalg.eigh... ")
sys.stdout.flush()
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, col_mat[:] = np.linalg.eigh(col_mat)
elif solver == 4: # fully scipy dsyev
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyev... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyev(col_mat.T, overwrite_a=1)
elif solver == 5: # fully scipy dsyevd
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyevd... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyevd(col_mat.T, overwrite_a=1)
if log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
return w
class Conductivity_LBTE(Conductivity):
def __init__(self,
interaction,
grid_points=None,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
boundary_mfp=None, # in micrometre
solve_collective_phonon=False,
is_reducible_collision_matrix=False,
is_kappa_star=True,
                 gv_delta_q=None, # finite difference for group velocity
is_full_pp=False,
read_pp=False,
pp_filename=None,
pinv_cutoff=1.0e-8,
pinv_solver=0,
log_level=0):
self._pp = None
self._temperatures = None
self._sigmas = None
self._sigma_cutoff = None
self._is_kappa_star = None
self._gv_delta_q = None
self._is_full_pp = None
self._log_level = None
self._primitive = None
self._dm = None
self._frequency_factor_to_THz = None
self._cutoff_frequency = None
self._boundary_mfp = None
self._point_operations = None
self._rotations_cartesian = None
self._grid_points = None
self._grid_weights = None
self._bz_grid = None
self._ir_grid_points = None
self._ir_grid_weights = None
self._kappa = None
self._mode_kappa = None
self._kappa_RTA = None
self._mode_kappa_RTA = None
self._read_gamma = False
self._read_gamma_iso = False
self._frequencies = None
self._cv = None
self._gv = None
self._f_vectors = None
self._gv_sum2 = None
self._mfp = None
self._gamma = None
self._gamma_iso = None
self._averaged_pp_interaction = None
self._conversion_factor = None
self._is_isotope = None
self._isotope = None
self._mass_variances = None
self._grid_point_count = None
self._collision_eigenvalues = None
Conductivity.__init__(self,
interaction,
grid_points=grid_points,
temperatures=temperatures,
sigmas=sigmas,
sigma_cutoff=sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
boundary_mfp=boundary_mfp,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
log_level=log_level)
self._is_reducible_collision_matrix = is_reducible_collision_matrix
self._solve_collective_phonon = solve_collective_phonon
if not self._is_kappa_star:
self._is_reducible_collision_matrix = True
self._collision_matrix = None
self._read_pp = read_pp
self._pp_filename = pp_filename
self._pinv_cutoff = pinv_cutoff
self._pinv_solver = pinv_solver
if grid_points is None:
self._all_grid_points = True
else:
self._all_grid_points = False
if self._temperatures is not None:
self._allocate_values()
def set_kappa_at_sigmas(self):
if len(self._grid_points) != len(self._ir_grid_points):
print("Collision matrix is not well created.")
import sys
sys.exit(1)
else:
weights = self._prepare_collision_matrix()
self._set_kappa_at_sigmas(weights)
def set_collision_matrix(self, collision_matrix):
self._collision_matrix = collision_matrix
def get_f_vectors(self):
return self._f_vectors
@property
def collision_matrix(self):
return self._collision_matrix
def get_collision_matrix(self):
return self.collision_matrix
def get_collision_eigenvalues(self):
return self._collision_eigenvalues
def get_mean_free_path(self):
return self._mfp
def get_frequencies_all(self):
return self._frequencies[self._bz_grid.grg2bzg]
def get_kappa_RTA(self):
return self._kappa_RTA
def get_mode_kappa_RTA(self):
return self._mode_kappa_RTA
def delete_gp_collision_and_pp(self):
"""Deallocate large arrays"""
self._collision.delete_integration_weights()
self._pp.delete_interaction_strength()
def _run_at_grid_point(self):
"""Calculate properties at a grid point"""
i = self._grid_point_count
self._show_log_header(i)
gp = self._grid_points[i]
if not self._all_grid_points:
self._collision_matrix[:] = 0
if not self._read_gamma:
self._collision.set_grid_point(gp)
if self._log_level:
print("Number of triplets: %d" %
len(self._pp.get_triplets_at_q()[0]))
self._set_collision_matrix_at_sigmas(i)
if self._is_reducible_collision_matrix:
i_data = self._bz_grid.bzg2grg[gp]
else:
i_data = i
self._set_harmonic_properties(i, i_data)
self._set_gv_by_gv(i, i_data)
if self._isotope is not None:
gamma_iso = self._get_gamma_isotope_at_sigmas(i)
band_indices = self._pp.band_indices
self._gamma_iso[:, i_data, :] = gamma_iso[:, band_indices]
if self._log_level:
self._show_log(i)
def _allocate_values(self):
"""Allocate arrays."""
num_band0 = len(self._pp.band_indices)
num_band = len(self._primitive) * 3
num_temp = len(self._temperatures)
if self._is_reducible_collision_matrix:
self._allocate_reducible_colmat_values(
num_temp, num_band0, num_band)
else:
self._allocate_ir_colmat_values(num_temp, num_band0, num_band)
def _allocate_local_values(self, num_temp, num_band0, num_grid_points):
"""Allocate grid point local arrays."""
self._kappa = np.zeros((len(self._sigmas), num_temp, 6),
dtype='double', order='C')
self._kappa_RTA = np.zeros((len(self._sigmas), num_temp, 6),
dtype='double', order='C')
self._gv = np.zeros((num_grid_points, num_band0, 3),
dtype='double', order='C')
self._f_vectors = np.zeros((num_grid_points, num_band0, 3),
dtype='double', order='C')
self._gv_sum2 = np.zeros((num_grid_points, num_band0, 6),
dtype='double', order='C')
self._mfp = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
3), dtype='double', order='C')
self._cv = np.zeros((num_temp, num_grid_points, num_band0),
dtype='double', order='C')
if self._is_full_pp:
self._averaged_pp_interaction = np.zeros(
(num_grid_points, num_band0), dtype='double', order='C')
if self._gamma is None:
self._gamma = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0), dtype='double', order='C')
if self._isotope is not None:
self._gamma_iso = np.zeros((len(self._sigmas),
num_grid_points,
num_band0), dtype='double', order='C')
self._mode_kappa = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
6), dtype='double')
self._mode_kappa_RTA = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
6), dtype='double')
def _allocate_reducible_colmat_values(self, num_temp, num_band0, num_band):
"""Allocate arrays for reducilble collision matrix."""
num_mesh_points = np.prod(self._pp.mesh_numbers)
if self._all_grid_points:
num_stored_grid_points = num_mesh_points
else:
num_stored_grid_points = 1
self._allocate_local_values(num_temp, num_band0, num_mesh_points)
self._collision = CollisionMatrix(
self._pp,
is_reducible_collision_matrix=True,
log_level=self._log_level)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(len(self._sigmas), num_temp,
num_stored_grid_points, num_band0,
num_mesh_points, num_band),
dtype='double', order='C')
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas), num_temp, num_mesh_points * num_band),
dtype='double', order='C')
def _allocate_ir_colmat_values(self, num_temp, num_band0, num_band):
"""Allocate arrays for ir collision matrix."""
num_ir_grid_points = len(self._ir_grid_points)
num_grid_points = len(self._grid_points)
if self._all_grid_points:
num_stored_grid_points = num_grid_points
else:
num_stored_grid_points = 1
self._allocate_local_values(num_temp, num_band0, num_grid_points)
self._rot_grid_points = np.zeros(
(num_ir_grid_points, len(self._point_operations)), dtype='int_')
for i, ir_gp in enumerate(self._ir_grid_points):
self._rot_grid_points[i] = get_grid_points_by_rotations(
ir_gp,
self._bz_grid)
self._collision = CollisionMatrix(
self._pp,
rotations_cartesian=self._rotations_cartesian,
num_ir_grid_points=num_ir_grid_points,
rot_grid_points=self._rot_grid_points,
log_level=self._log_level)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(len(self._sigmas),
num_temp,
num_stored_grid_points, num_band0, 3,
num_ir_grid_points, num_band, 3),
dtype='double', order='C')
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas),
num_temp,
num_ir_grid_points * num_band * 3),
dtype='double', order='C')
def _set_collision_matrix_at_sigmas(self, i):
"""Calculate collision matrices at grid point
i : int
Grid point count.
"""
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "Calculating collision matrix with "
if sigma is None:
text += "tetrahedron method."
else:
text += "sigma=%s" % sigma
if self._sigma_cutoff is None:
text += "."
else:
text += "(%4.2f SD)." % self._sigma_cutoff
print(text)
self._collision.set_sigma(sigma, sigma_cutoff=self._sigma_cutoff)
self._collision.set_integration_weights()
if self._read_pp:
pp, _g_zero = read_pp_from_hdf5(
self._pp.mesh_numbers,
grid_point=self._grid_points[i],
sigma=sigma,
sigma_cutoff=self._sigma_cutoff,
filename=self._pp_filename,
verbose=(self._log_level > 0))
_, g_zero = self._collision.get_integration_weights()
if self._log_level:
if len(self._sigmas) > 1:
print("Multiple sigmas or mixing smearing and "
"tetrahedron method is not supported.")
if _g_zero is not None and (_g_zero != g_zero).any():
raise ValueError("Inconsistency found in g_zero.")
self._collision.set_interaction_strength(pp)
elif j != 0 and (self._is_full_pp or self._sigma_cutoff is None):
if self._log_level:
print("Existing ph-ph interaction is used.")
else:
if self._log_level:
print("Calculating ph-ph interaction...")
self._collision.run_interaction(is_full_pp=self._is_full_pp)
if self._is_full_pp and j == 0:
self._averaged_pp_interaction[i] = (
self._pp.get_averaged_interaction())
for k, t in enumerate(self._temperatures):
self._collision.set_temperature(t)
self._collision.run()
if self._all_grid_points:
if self._is_reducible_collision_matrix:
i_data = self._bz_grid.bzg2grg[self._grid_points[i]]
else:
i_data = i
else:
i_data = 0
self._gamma[j, k, i_data] = (
self._collision.get_imag_self_energy())
self._collision_matrix[j, k, i_data] = (
self._collision.get_collision_matrix())
def _prepare_collision_matrix(self):
"""Prepare collision matrix to be solved."""
if self._is_reducible_collision_matrix:
if self._is_kappa_star:
self._average_collision_matrix_by_degeneracy()
num_mesh_points = np.prod(self._pp.mesh_numbers)
num_rot = len(self._point_operations)
rot_grid_points = np.zeros(
(num_rot, num_mesh_points), dtype='int_')
# Ir-grid points and rot_grid_points in generalized regular grid
ir_gr_grid_points = np.array(
self._bz_grid.bzg2grg[self._ir_grid_points], dtype='int_')
for i in range(num_mesh_points):
rot_grid_points[:, i] = self._bz_grid.bzg2grg[
get_grid_points_by_rotations(self._bz_grid.grg2bzg[i],
self._bz_grid)]
self._expand_collisions(ir_gr_grid_points, rot_grid_points)
self._expand_local_values(ir_gr_grid_points, rot_grid_points)
self._combine_reducible_collisions()
weights = np.ones(np.prod(self._pp.mesh_numbers), dtype='int_')
self._symmetrize_collision_matrix()
else:
self._combine_collisions()
weights = self._get_weights()
for i, w_i in enumerate(weights):
for j, w_j in enumerate(weights):
self._collision_matrix[:, :, i, :, :, j, :, :] *= w_i * w_j
self._average_collision_matrix_by_degeneracy()
self._symmetrize_collision_matrix()
return weights
def _set_kappa_at_sigmas(self, weights):
"""Calculate thermal conductivity"""
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "----------- Thermal conductivity (W/m-k) "
if sigma:
text += "for sigma=%s -----------" % sigma
else:
text += "with tetrahedron method -----------"
print(text)
sys.stdout.flush()
for k, t in enumerate(self._temperatures):
if t > 0:
self._set_kappa_RTA(j, k, weights)
w = diagonalize_collision_matrix(
self._collision_matrix,
i_sigma=j,
i_temp=k,
pinv_solver=self._pinv_solver,
log_level=self._log_level)
self._collision_eigenvalues[j, k] = w
self._set_kappa(j, k, weights)
if self._log_level:
print(("#%6s " + " %-10s" * 6) %
("T(K)", "xx", "yy", "zz", "yz", "xz", "xy"))
print(("%7.1f " + " %10.3f" * 6) %
((t,) + tuple(self._kappa[j, k])))
print((" %6s " + " %10.3f" * 6) %
(("(RTA)",) + tuple(self._kappa_RTA[j, k])))
print("-" * 76)
sys.stdout.flush()
if self._log_level:
print('')
def _combine_collisions(self):
"""Include diagonal elements into collision matrix."""
num_band = len(self._primitive) * 3
for j, k in list(np.ndindex(
(len(self._sigmas), len(self._temperatures)))):
for i, ir_gp in enumerate(self._ir_grid_points):
for r, r_gp in zip(
self._rotations_cartesian, self._rot_grid_points[i]):
if ir_gp != r_gp:
continue
main_diagonal = self._get_main_diagonal(i, j, k)
for l in range(num_band):
self._collision_matrix[
j, k, i, l, :, i, l, :] += main_diagonal[l] * r
def _combine_reducible_collisions(self):
"""Include diagonal elements into collision matrix."""
num_band = len(self._primitive) * 3
num_mesh_points = np.prod(self._pp.mesh_numbers)
for j, k in list(
np.ndindex((len(self._sigmas), len(self._temperatures)))):
for i in range(num_mesh_points):
main_diagonal = self._get_main_diagonal(i, j, k)
for l in range(num_band):
self._collision_matrix[
j, k, i, l, i, l] += main_diagonal[l]
def _expand_collisions(self, ir_gr_grid_points, rot_grid_points):
"""Fill elements of full collision matrix by symmetry"""
start = time.time()
if self._log_level:
sys.stdout.write("- Expanding properties to all grid points ")
sys.stdout.flush()
try:
import phono3py._phono3py as phono3c
phono3c.expand_collision_matrix(self._collision_matrix,
ir_gr_grid_points,
rot_grid_points)
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
for i, ir_gp in enumerate(ir_gr_grid_points):
multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
colmat_irgp = self._collision_matrix[:, :, ir_gp, :, :, :].copy()
colmat_irgp /= multi
self._collision_matrix[:, :, ir_gp, :, :, :] = 0
for j, r in enumerate(self._rotations_cartesian):
gp_r = rot_grid_points[j, ir_gp]
for k in range(num_mesh_points):
gp_c = rot_grid_points[j, k]
self._collision_matrix[:, :, gp_r, :, gp_c, :] += (
colmat_irgp[:, :, :, k, :])
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
def _expand_local_values(self, ir_gr_grid_points, rot_grid_points):
"""Fill elements of local properties at grid points"""
for i, ir_gp in enumerate(ir_gr_grid_points):
gv_irgp = self._gv[ir_gp].copy()
self._gv[ir_gp] = 0
cv_irgp = self._cv[:, ir_gp, :].copy()
self._cv[:, ir_gp, :] = 0
gamma_irgp = self._gamma[:, :, ir_gp, :].copy()
self._gamma[:, :, ir_gp, :] = 0
multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
if self._gamma_iso is not None:
gamma_iso_irgp = self._gamma_iso[:, ir_gp, :].copy()
self._gamma_iso[:, ir_gp, :] = 0
for j, r in enumerate(self._rotations_cartesian):
gp_r = rot_grid_points[j, ir_gp]
self._gamma[:, :, gp_r, :] += gamma_irgp / multi
if self._gamma_iso is not None:
self._gamma_iso[:, gp_r, :] += gamma_iso_irgp / multi
self._gv[gp_r] += np.dot(gv_irgp, r.T) / multi
self._cv[:, gp_r, :] += cv_irgp / multi
def _get_weights(self):
"""Returns weights used for collision matrix and |X> and |f>
self._rot_grid_points : ndarray
shape=(ir_grid_points, point_operations), dtype='int_'
r_gps : grid points of arms of k-star with duplicates
len(r_gps) == order of crystallographic point group
len(unique(r_gps)) == number of arms of the k-star
Returns
-------
weights : list
sqrt(g_k)/|g|, where g is the crystallographic point group and
g_k is the number of arms of k-star.
"""
weights = []
n = float(self._rot_grid_points.shape[1])
for r_gps in self._rot_grid_points:
weights.append(np.sqrt(len(np.unique(r_gps)) / n))
sym_broken = False
for gp in np.unique(r_gps):
if (len(np.where(r_gps == gp)
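# Illustrative, self-contained sketch (not from phono3py) of the k-star
# weighting described in the _get_weights docstring above: each row of
# rot_grid_points lists the grid points reached from one ir-point by every
# point-group operation, and the weight is sqrt(number of unique arms / |g|).
import numpy as np
toy_rot_grid_points = np.array([[0, 0, 0, 0],    # ir-point mapped onto itself -> 1 arm
                                [1, 2, 1, 2]])   # ir-point with a 2-arm star
n_ops = float(toy_rot_grid_points.shape[1])
toy_weights = [np.sqrt(len(np.unique(r_gps)) / n_ops) for r_gps in toy_rot_grid_points]
print(toy_weights)   # [0.5, 0.707...]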
"""
Compare various IIR filters
"""
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
def freq2rad(freq, fs):
return freq * np.pi / (fs/2)
def rad2freq(rad, fs):
return rad * (fs/2) / np.pi
# MAIN PARAMETER
pole_coef = 0.95
fs = 16000
# prepare figure
ALPHA = 0.8
f_max = 4000
plt.figure()
# simple filter
b = np.array([1, -1])
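# Hedged sketch, not part of the original script: one plausible next step is to
# pair the first-difference numerator b above with a pole at pole_coef to form a
# DC-blocking filter and plot its magnitude response with scipy.signal.freqz,
# reusing the rad2freq helper for the frequency axis. The denominator `a` below
# is an assumption, not taken from the original file.
a = np.array([1, -pole_coef])
w, h = signal.freqz(b, a, worN=1024)
plt.plot(rad2freq(w, fs), 20 * np.log10(np.abs(h) + 1e-12),
         label='DC blocker (pole=%.2f)' % pole_coef, alpha=ALPHA)
plt.xlim([0, f_max])
plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitude [dB]')
plt.legend()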
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import numpy as np
df = pd.read_csv('Iris.csv')
df = df.drop(['Id'],axis=1)
target = df['Species']
s = set()
for val in target:
s.add(val)
s = list(s)
rows = list(range(100,150))
df = df.drop(df.index[rows])
import matplotlib.pyplot as plt
x = df['SepalLengthCm']
y = df['PetalLengthCm']
setosa_x = x[:50]
setosa_y = y[:50]
versicolor_x = x[50:]
versicolor_y = y[50:]
plt.figure(figsize=(8,6))
plt.scatter(setosa_x,setosa_y,marker='+',color='green')
plt.scatter(versicolor_x,versicolor_y,marker='_',color='red')
plt.show()
###
## Drop rest of the features and extract the target values
df = df.drop(['SepalWidthCm','PetalWidthCm'],axis=1)
Y = []
target = df['Species']
for val in target:
if(val == 'Iris-setosa'):
Y.append(-1)
else:
Y.append(1)
df = df.drop(['Species'],axis=1)
X = df.values.tolist()
## Shuffle and split the data into training and test set
X, Y = shuffle(X,Y)
x_train = []
y_train = []
x_test = []
y_test = []
x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.9)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
## Support Vector Machine
#import numpy as np
train_f1 = x_train[:,0]
train_f2 = x_train[:,1]
train_f1 = train_f1.reshape(90,1)
train_f2 = train_f2.reshape(90,1)
w1 = np.zeros((90,1))
w2 = np.zeros((90,1))
epochs = 1
alpha = 0.0001
while(epochs < 10000):
y = w1 * train_f1 + w2 * train_f2
prod = y * y_train
print(epochs)
count = 0
for val in prod:
if(val >= 1):
cost = 0
w1 = w1 - alpha * (2 * 1/epochs * w1)
w2 = w2 - alpha * (2 * 1/epochs * w2)
else:
cost = 1 - val
w1 = w1 + alpha * (train_f1[count] * y_train[count] - 2 * 1/epochs * w1)
w2 = w2 + alpha * (train_f2[count] * y_train[count] - 2 * 1/epochs * w2)
count += 1
epochs += 1
from sklearn.metrics import accuracy_score
## Clip the weights
index = list(range(10,90))
w1 = np.delete(w1,index)
w2 = np.delete(w2,index)
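# Hedged sketch: the evaluation step of this tutorial is not shown above, but
# with train_size=0.9 the test set has 10 rows, matching the 10 weights kept by
# np.delete, so one plausible continuation scores each test row against its
# paired weight and thresholds the result the same way as the training loop.
test_f1 = x_test[:, 0].reshape(10, 1)
test_f2 = x_test[:, 1].reshape(10, 1)
w1 = w1.reshape(10, 1)
w2 = w2.reshape(10, 1)
y_pred = w1 * test_f1 + w2 * test_f2
predictions = [1 if val.item() > 1 else -1 for val in y_pred]
print(accuracy_score(y_test, predictions))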
import numpy as np
import torch
import torch.nn as nn
from scipy.stats import multivariate_normal
from ..components.snake import Snake
_nine_offsets = [
( 0, 0),
( 1, 1),
( 0, 1),
(-1, 1),
(-1, 0),
(-1, -1),
( 0, -1),
( 1, -1),
( 1, 0),
]
class GaussianField():
def __init__(self, diam, cov=0.05):
assert (diam % 2 == 1), 'diam must be an odd number'
self.diam = diam
self.cov = cov # .05 leaves about 95% prob mass within central block
# only consider the 3x3 region
self.increment = 1 / diam
# compute 3 units
self.l, self.r = -1.5, 1.5
self.field_shape = (3 * diam, 3 * diam)
self.unit_area = self.increment ** 2
self.prob_field = self.compute_prob_field()
def compute_prob_field(self):
cov = self.cov
increment = self.increment
l, r = self.l, self.r
cov_mat = np.array([
[cov, 0],
[0, cov]
])
rv = multivariate_normal([0, 0], cov_mat)
half_increment = increment / 2
xs, ys = np.mgrid[
l + half_increment: r: increment,
l + half_increment: r: increment
] # use half increment to make things properly centered
pos = np.dstack((xs, ys))
prob_field = rv.pdf(pos).astype(np.float32)
assert prob_field.shape == self.field_shape
return prob_field
@torch.no_grad()
def compute_local_mass(self):
kernel_size = self.diam
pad = (kernel_size - 1) // 2
prob_field = self.prob_field
conv = nn.Conv2d(
in_channels=1, out_channels=1, kernel_size=kernel_size,
padding=pad, bias=False
) # do not use cuda for now; no point
conv.weight.data.copy_(torch.tensor(1.0))
prob_field = torch.as_tensor(
prob_field, device=conv.weight.device
)[(None,) * 2] # [1, 1, h, w]
local_sum = conv(prob_field).squeeze().cpu().numpy()
local_sum = local_sum * self.unit_area
return local_sum
class MakeProbTsr():
'''
Make a probability tensor of shape [h, w, num_votes] filled with the
corresponding spatial voting probability.
'''
def __init__(self, spec, diam, grid_spec, vote_mask, var=0.05):
# indices grid of shape [2, H, W], where first dim is y, x; swap them
# obtain [H, W, 2] where last channel is (y, x)
self.spec = spec
self.diam = diam
self.vote_mask = vote_mask
self.var = var
# process grid spec to 0 based indexing and change radius to diam
radius = (diam - 1) // 2
center = np.array((radius, radius))
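# Hedged usage sketch for the GaussianField class above. This module normally
# lives inside a package (note the relative Snake import), so the quick check
# below assumes the class is available in a standalone session; diam=5 is an
# arbitrary odd example value.
field = GaussianField(diam=5, cov=0.05)
local_mass = field.compute_local_mass()
print(local_mass.shape)    # (15, 15): the 3*diam x 3*diam field
print(local_mass[7, 7])    # ~0.95 at the centre, per the cov=0.05 comment above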
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 31 12:37:00 2017
@author: <NAME>
Either import CSVConverter or design your own module that provides a parse() API,
which in turn returns a two-dimensional array of input data where each row is an array of size 5
(4 columns of attributes of type float/int and a 5th column holding the class name).
Module CSVConverter.py is kept separate to reduce the dependency on the storage structure of the data set.
"""
import CSVConverter as source
import numpy
import math
training_set = []
testing_set = []
classes = ["I. setosa","I. versicolor","I. virginica"]
attribute_size = 4
class_size = 3;
X = []
Y = []
mean_and_varience_array = []
def loadData() :
print("\n------------------------[loadData()-------------------------");
data_set = source.parse()
print("\nSource data set size:{0}".format(len(data_set)))
index = 0
for row in data_set:
index+=1
#Every 3rd row is a testing set...
if(index%3==0):
testing_set.append(row);
#Rest are training set..
else:
training_set.append(row);
print("\nTesting set size:{0}".format(len(testing_set)))
print("Training set size:{0}".format(len(training_set)))
print("------------------------loadData()]-------------------------\n\n");
def model() :
print("\n------------------------[Model-------------------------");
global X
global Y
global mean_and_varience_array
for row in training_set:
X.append(row[:-1])
Y.append(row[-1])
X = numpy.asarray(X)
Y = numpy.asarray(Y)
X = X.astype(float)
[m, n] = X.shape
# Manually classifying:
class1 = X[0:34]
class2 = X[34:67]
class3 = X[67:100]
classes = [class1,class2,class3]
print("Training set size m:{0}".format(len(training_set)))
print("Training set attribute_size:{0}".format(attribute_size))
# Calculate mean & variance:
for classIndex in range(0,class_size) :
mean_and_varience_array.append([])
for sample in range(0,n) :
mean = numpy.mean(classes[classIndex][:,sample])
varience = numpy.var(classes[classIndex][:,sample])
mean_and_varience_array[classIndex].append([mean, varience])
print("\nmean & varience for class-{0}'s attributes:\n{1}".format(classIndex,mean_and_varience_array[classIndex]))
print("------------------------Model]-------------------------\n");
def computeProb(mean,varience,attribute) :
exponent = math.exp(-(math.pow(attribute-mean,2)/(2*varience)))
return (1.0 / (math.sqrt((2.0*math.pi) * varience))) * exponent
def classify() :
print("\n\n------------------------[Classify-------------------------");
test_set_size = len(testing_set)
correct_prediction_count = 0
probabilityOfEachClass = 1/float(class_size)
for testItem in testing_set:
class_probability_of_item = []
for classIndex in range(0,class_size) :
#Attribute-1
probAttribute1 = computeProb(mean_and_varience_array[classIndex][0][0],mean_and_varience_array[classIndex][0][1],float(testItem[0]))
#Attribute-2
probAttribute2 = computeProb(mean_and_varience_array[classIndex][1][0],mean_and_varience_array[classIndex][1][1],float(testItem[1]))
#Attribute-3
probAttribute3 = computeProb(mean_and_varience_array[classIndex][2][0],mean_and_varience_array[classIndex][2][1],float(testItem[2]))
#Attribute-4
probAttribute4 = computeProb(mean_and_varience_array[classIndex][3][0],mean_and_varience_array[classIndex][3][1],float(testItem[3]))
#Total proability of class - classIndex:
classProbability = probAttribute1 * probAttribute2 * probAttribute3 * probAttribute4 * probabilityOfEachClass
class_probability_of_item.append(classProbability)
#Now, get most probable class index..
maxProbValue = max(class_probability_of_item)
maxProbClassIndex = class_probability_of_item.index(maxProbValue)
# print("{0} vs {1}".format(classes[maxProbClassIndex],testItem[4]))
if(classes[maxProbClassIndex] == testItem[4]) :
correct_prediction_count += 1
#Calculate percentage of right classifications..
correctnessPercentage = correct_prediction_count/float(test_set_size) * 100
print('\nCorrectness of classifying testing set : {0}%'.format(correctnessPercentage))
print("------------------------Classify]-------------------------\n");
def predict(predictItemAttributes) :
print("\n\n------------------------[predict-------------------------");
print("\npredictItemAttributes: {0}".format(predictItemAttributes))
if(len(predictItemAttributes) == attribute_size):
probabilityOfEachClass = 1/float(class_size)
class_probability_of_item = []
for classIndex in range(0,class_size) :
#Attribute-1
probAttribute1 = computeProb(mean_and_varience_array[classIndex][0][0],mean_and_varience_array[classIndex][0][1],float(predictItemAttributes[0]))
#Attribute-2
probAttribute2 = computeProb(mean_and_varience_array[classIndex][1][0],mean_and_varience_array[classIndex][1][1],float(predictItemAttributes[1]))
#Attribute-3
probAttribute3 = computeProb(mean_and_varience_array[classIndex][2][0],mean_and_varience_array[classIndex][2][1],float(predictItemAttributes[2]))
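# Illustrative, self-contained check of the Gaussian likelihood used by
# computeProb above (made-up numbers, not Iris statistics): for mean=5.0,
# variance=0.25 and attribute value 5.5 the density is
# exp(-0.5) / sqrt(2*pi*0.25) ~= 0.4839.
print(computeProb(5.0, 0.25, 5.5))                        # ~0.4839
print(math.exp(-0.5) / math.sqrt(2 * math.pi * 0.25))     # same value, by hand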
import numpy as np
from sklearn.model_selection import StratifiedKFold, train_test_split, GroupShuffleSplit, ShuffleSplit
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, roc_curve
from sklearn.ensemble import RandomForestClassifier
def block_hour(dataset, labels, patient_id_array, hours=4):
new_dataset = []
for column in dataset.T:
new_feature = []
new_labels = []
for patient_id in np.unique(patient_id_array):
print("Patient ID: ", patient_id)
patient_index = np.where(patient_id_array == patient_id)[0]
patient = dataset[patient_index]
patient_labels = labels[patient_index]
for i in range(len(patient_labels) - hours):
if 1 in patient_labels[i:i + hours]:
num = 1
else:
num = 0
new_feature.append(column[i:i + hours])
new_labels.append(num)
new_dataset.append(np.array(new_feature))
return np.array(new_dataset), np.array(new_labels)
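# Illustrative, self-contained check of block_hour() with made-up data (one
# patient, one feature, hours=2): each window holds 2 consecutive values of the
# feature and is labelled 1 whenever a positive label falls inside the window.
toy_X = np.arange(6, dtype=float).reshape(6, 1)
toy_y = np.array([0, 0, 1, 0, 0, 0])
toy_ids = np.zeros(6, dtype=int)
toy_windows, toy_labels = block_hour(toy_X, toy_y, toy_ids, hours=2)
print(toy_windows.shape)   # (1, 4, 2): one feature, four windows of length 2
print(toy_labels)          # [0 1 1 0]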
features = np.nan_to_num(np.load('Datasets/training_1_nanfill.npy'))
labels = np.load('Datasets/training_1_Y.npy')
patient_id = np.load('Datasets/training_1_patient.npy')
X, y = block_hour(features, labels, patient_id, 4)
# X, y = features, labels
subs = ['MEAN', 'NORMAL', 'ZERO']
i = 0
acc = {}
f1 = {}
auc = {}
skf = StratifiedKFold(n_splits=10)
acc[subs[i]] = []
f1[subs[i]] = []
auc[subs[i]] = []
res = []
y_test_all = []
for train_index, test_index in skf.split(X[0], y):
print("TRAIN:", train_index, "TEST:", test_index)
aux_res = []
for feature in X:
X_train, X_test = feature[train_index], feature[test_index]
y_train, y_test = y[train_index], y[test_index]
elf = RandomForestClassifier(n_estimators=20)
print("Start training...")
elf = elf.fit(X_train, y_train)
print("Start testing...")
pred = elf.predict_proba(X_test)[:, 1]
results = elf.predict(X_test)
aux_res.append(pred)
print(subs[i], " Accuracy: ", accuracy_score(results, y_test))
print(subs[i], "F1-Score: ", f1_score(results, y_test))
res.append(pred)
y_test_all.append(y_test)
acc[subs[i]].append(accuracy_score(results, y_test))
f1[subs[i]].append(f1_score(results, y_test))
auc[subs[i]].append(roc_auc_score(y_test, pred))
print(subs[i], " Accuracy: ", accuracy_score(results, y_test))
print(subs[i], "F1-Score: ", f1_score(results, y_test))
print(subs[i], "AUC: ", auc)
res = np.concatenate(res)
y_test_all = np.concatenate(y_test_all)
fpr, tpr, thresholds = roc_curve(y_test_all, res, pos_label=1)
threshold = 0
accuracy = []
f1_score_list = []
step = 0.001
for threshold in np.arange(0, 1, step):
print(threshold)
new_results = np.zeros(len(res))
new_results[np.where(res > threshold)[0]] = 1
new_results[np.where(res <= threshold)[0]] = 0
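# Hedged sketch (the original sweep is cut off above): one way to finish it is
# to score the thresholded predictions at every cut and keep the F1-optimal
# threshold; `res` and `y_test_all` are the pooled CV probabilities and labels,
# and `accuracy` / `f1_score_list` are the empty lists declared earlier.
for threshold in np.arange(0, 1, step):
    thresholded = (res > threshold).astype(int)
    accuracy.append(accuracy_score(y_test_all, thresholded))
    f1_score_list.append(f1_score(y_test_all, thresholded))
best_threshold = np.arange(0, 1, step)[int(np.argmax(f1_score_list))]
print("Best threshold by F1:", best_threshold)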
import geotiler
import geopy.distance
import math
import matplotlib.pyplot as plt
import numpy as np
class DeadEndIdentification(object):
r"""Dead end identification based on coordinates.
Date:
2021
Author:
<NAME>
License:
MIT
Attributes:
None
Description:
This module is intended to be used for the identification and visualization of dead ends in an exercise.
A dead end is a part of an exercise where an athlete suddenly makes a U-turn and retraces the path taken before the U-turn, in the opposite direction.
"""
def __init__(self, positions, distances, tolerance_degrees=5, tolerance_position=5, minimum_distance=500) -> None:
"""Initialization of the object.
return: None
"""
self.reorganize_exercise_data(np.array(positions), np.array(distances), interval_distance=10) # Reorganizing the exercise data in order to achieve better results.
self.reorganize_exercise_data(self.positions, self.distances, interval_distance=1) # Reorganizing the exercise data in order to achieve better results.
self.tolerance_degrees = tolerance_degrees
self.tolerance_position = tolerance_position
self.minimum_distance = minimum_distance
def reorganize_exercise_data(self, positions, distances, interval_distance=1) -> None:
"""Exercise is reorganized in the way that the trackpoints are organized in a constant interval of distance.
return: None
"""
distance = distances[-1]
self.distances = np.arange(math.ceil(distance))
self.positions = np.empty((0, 2), float)
j = 0
for i in np.arange(np.shape(self.distances)[0] - 1):
while i > distances[j + 1]:
j += 1
position1 = positions[j]
position2 = positions[j + 1]
distance1 = distances[j]
distance2 = distances[j + 1]
if distance2 - distance1 == 0.0:
self.positions = np.append(self.positions, [np.array([position1[0], position1[1]])], axis=0)
else:
multiplying_factor = (i - distance1) / (distance2 - distance1)
self.positions = np.append(self.positions, [np.array([position1[0] + multiplying_factor * (position2[0] - position1[0]), position1[1] + multiplying_factor * (position2[1] - position1[1])])], axis=0)
self.positions = self.positions[::interval_distance]
self.distances = self.distances[::interval_distance]
def is_dead_end(self, azimuth1, azimuth2, tolerance_azimuth) -> bool:
"""Checking if two azimuths represent a part of a dead end allowing the given tolerance.
return: bool
"""
if abs(180 - abs(azimuth1 - azimuth2)) < tolerance_azimuth:
return True
return False
def long_enough_to_be_a_dead_end(self, distance1, distance2) -> bool:
"""Checking whether a dead end is long enough to be a dead end.
return: bool
"""
if distance2 - distance1 < self.minimum_distance:
return False
return True
def really_is_dead_end(self, position1, position2, tolerance_coordinates) -> bool:
"""Checking whether a dead end really is a dead end.
return: bool
"""
print(geopy.distance.distance(position1, position2).m)
if geopy.distance.distance(position1, position2).m < tolerance_coordinates:
return True
return False
def identify_dead_ends(self) -> None:
"""Identifying dead ends of the exercise.
return: None
"""
azimuths = np.array([])
self.dead_ends = np.empty((0, 2), int)
# Calculating the azimuths between the pairs of positions.
# https://www.omnicalculator.com/other/azimuth#how-to-calculate-the-azimuth-from-latitude-and-longitude
for i in np.arange(1, np.shape(self.positions)[0]):
latitude1 = self.positions[i - 1][0]
latitude2 = self.positions[i][0]
longitude1 = self.positions[i - 1][1]
longitude2 = self.positions[i][1]
longitude_difference = longitude2 - longitude1
azimuth = math.atan2(math.sin(longitude_difference) * math.cos(latitude2),
math.cos(latitude1) * math.sin(latitude2) - math.sin(latitude1) * math.cos(latitude2) * math.cos(longitude_difference))
azimuth *= 180 / math.pi # Converting the azimuth to degrees.
# If the azimuth's value is negative, the conversion to a positive value is crucial in the next step of the algorithm.
if azimuth < 0:
azimuth += 360
azimuths = np.append(azimuths, azimuth)
# Checking for dead ends in the exercise.
i = 50
while i < np.shape(azimuths)[0]:
print(f"\rProgress: {100 * i // np.shape(azimuths)[0]} %", end='')
for j in np.arange(50):
try:
if self.is_dead_end(azimuths[i - j - 1], azimuths[i + j], self.tolerance_degrees):
previous = i - j - 2
next = i + j + 1
while self.is_dead_end(azimuths[previous], azimuths[next], self.tolerance_degrees):
previous -= 1
next += 1
if np.array([previous, next]) not in self.dead_ends:
if self.long_enough_to_be_a_dead_end(self.distances[previous], self.distances[next]): # and self.really_is_dead_end(self.distances[previous], self.distances[next], self.tolerance_position):
self.dead_ends = np.append(self.dead_ends, [np.array([previous, next])], axis=0)
# i += next - previous
except:
pass
i += 1
print(self.dead_ends)
# Merging the dead ends.
i = 1
number_of_dead_ends = np.shape(self.dead_ends)[0]
while i < number_of_dead_ends:
last_element = self.dead_ends[i - 1][-1] # Retrieving the last index in the previous interval.
first_element = self.dead_ends[i][0] # Retrieving the first index in the current interval.
# If the distance between two dead ends is less than 300 meters, the two dead ends are combined.
if first_element - last_element < 300:
self.dead_ends[i - 1][1] = self.dead_ends[i][1]
self.dead_ends = np.delete(self.dead_ends, i, 0) # Current interval is removed from the list
number_of_dead_ends -= 1
else:
i += 1
print(self.dead_ends)
# Removing the dead ends which are too short to be counted as dead ends.
# i = 0
# while i < np.shape(self.dead_ends)[0]:
# if not self.long_enough_to_be_a_dead_end(self.distances[self.dead_ends[i][0]], self.distances[self.dead_ends[i][1]]):
# self.dead_ends = np.delete(self.dead_ends, i, 0)
# i -= 1
# i += 1
# Removing the dead ends which are not dead ends.
i = 0
while i < np.shape(self.dead_ends)[0]:
print(np.linalg.norm(self.positions[self.dead_ends[i][0]] - self.positions[self.dead_ends[i][1]]))
if np.linalg.norm(self.positions[self.dead_ends[i][0]] - self.positions[self.dead_ends[i][1]]) > self.tolerance_position:
self.dead_ends = np.delete(self.dead_ends, i, 0)
i -= 1
i += 1
print(self.dead_ends)
print("\rProgress: 100 %")
def draw_map(self) -> None:
""" Visualization of the exercise with dead ends.
return: None
"""
if np.shape(self.positions)
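# Illustrative, standalone check of the U-turn criterion used by is_dead_end
# above (made-up azimuths in degrees): bearings roughly 180 degrees apart pass
# the test, while bearings 70 degrees apart do not.
tolerance_azimuth = 5
print(abs(180 - abs(10.0 - 191.0)) < tolerance_azimuth)   # True  -> opposite directions
print(abs(180 - abs(10.0 - 80.0)) < tolerance_azimuth)    # False -> not a U-turn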
import datajoint as dj
import numpy as np
from . import experiment, get_schema_name, foraging_analysis
from .model.bandit_model_comparison import BanditModelComparison
schema = dj.schema(get_schema_name('foraging_model'))
@schema
class ModelClass(dj.Lookup):
definition = """
model_class: varchar(32) # e.g. LossCounting, RW1972, Hattori2019
---
desc='': varchar(100)
"""
contents = [
['LossCounting', 'Count the number of losses and switch when the number exceeds a threshold'],
['RW1972', 'Rescorla–Wagner model (single learning rate)'],
['LNP', 'Linear-nonlinear-Possion (exponential recency-weighted average)'],
['Bari2019', 'Bari et al 2019 (different learning rates for chosen/unchosen)'],
['Hattori2019', 'Hattori et al 2019 (different learning rates for rew/unrew/unchosen)'],
]
@schema
class ModelParam(dj.Lookup):
definition = """
model_param: varchar(32) # e.g. learn_rate, epsilon, w_tau1
---
param_notation: varchar(32) # r'$\tau_1$'
"""
contents = [
['loss_count_threshold_mean', r'$\mu_{LC}$'],
['loss_count_threshold_std', r'$\sigma_{LC}$'],
['tau1', r'$\tau_1$'],
['tau2', r'$\tau_2$'],
['w_tau1', r'$w_{\tau_1}$'],
['learn_rate', r'$\alpha$'],
['learn_rate_rew', r'$\alpha_{rew}$'],
['learn_rate_unrew', r'$\alpha_{unr}$'],
['forget_rate', r'$\delta$'],
['softmax_temperature', r'$\sigma$'],
['epsilon', r'$\epsilon$'],
['biasL', r'$b_L$'],
['biasR', r'$b_R$'],
['choice_step_size', r'$\alpha_c$'],
['choice_softmax_temperature', r'$\sigma_c$'],
]
@schema
class Model(dj.Manual):
definition = """
model_id: int
---
-> ModelClass
model_notation: varchar(500)
n_params: int # Effective param count
is_bias: bool
is_epsilon_greedy: bool
is_softmax: bool
is_choice_kernel: bool
desc='': varchar(500) # Long name
fit_cmd: blob # Fitting command compatible with the Dynamic-Foraging repo
"""
class Param(dj.Part):
definition = """
-> master
-> ModelParam
---
param_idx: int # To keep params the same order as the original definition in MODELS, hence `fit_result.x`
param_lower_bound: float
param_higher_bound: float
"""
@classmethod
def load_models(cls):
# Original definition from the Dynamic-Foraging repo, using the format: [forager, [para_names], [lower bounds], [higher bounds], desc(optional)]
MODELS = [
# No bias
['LossCounting', ['loss_count_threshold_mean', 'loss_count_threshold_std'],
[0, 0], [40, 10], 'LossCounting: mean, std, no bias'],
['RW1972_epsi', ['learn_rate', 'epsilon'],
[0, 0], [1, 1], 'SuttonBarto: epsilon, no bias'],
['RW1972_softmax', ['learn_rate', 'softmax_temperature'],
[0, 1e-2], [1, 15], 'SuttonBarto: softmax, no bias'],
['LNP_softmax', ['tau1', 'softmax_temperature'],
[1e-3, 1e-2], [100, 15], 'Sugrue2004, Corrado2005: one tau, no bias'],
['LNP_softmax', ['tau1', 'tau2', 'w_tau1', 'softmax_temperature'],
[1e-3, 1e-1, 0, 1e-2], [15, 40, 1, 15], 'Corrado2005, Iigaya2019: two taus, no bias'],
['Bari2019', ['learn_rate', 'forget_rate', 'softmax_temperature'],
[0, 0, 1e-2], [1, 1, 15], 'RL: chosen, unchosen, softmax, no bias'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'softmax_temperature'],
[0, 0, 1e-2], [1, 1, 15], 'RL: rew, unrew, softmax, no bias'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature'],
[0, 0, 0, 1e-2], [1, 1, 1, 15], 'RL: rew, unrew, unchosen, softmax, no bias'],
# With bias
['RW1972_epsi', ['learn_rate', 'epsilon', 'biasL'],
[0, 0, -0.5], [1, 1, 0.5], 'SuttonBarto: epsilon'],
['RW1972_softmax', ['learn_rate', 'softmax_temperature', 'biasL'],
[0, 1e-2, -5], [1, 15, 5], 'SuttonBarto: softmax'],
['LNP_softmax', ['tau1', 'softmax_temperature', 'biasL'],
[1e-3, 1e-2, -5], [100, 15, 5], 'Sugrue2004, Corrado2005: one tau'],
['LNP_softmax', ['tau1', 'tau2', 'w_tau1', 'softmax_temperature', 'biasL'],
[1e-3, 1e-1, 0, 1e-2, -5], [15, 40, 1, 15, 5], 'Corrado2005, Iigaya2019: two taus'],
['Bari2019', ['learn_rate', 'forget_rate', 'softmax_temperature', 'biasL'],
[0, 0, 1e-2, -5], [1, 1, 15, 5], 'RL: chosen, unchosen, softmax'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'softmax_temperature', 'biasL'],
[0, 0, 1e-2, -5], [1, 1, 15, 5], 'RL: rew, unrew, softmax'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature', 'biasL'],
[0, 0, 0, 1e-2, -5], [1, 1, 1, 15, 5], '(full Hattori) RL: rew, unrew, unchosen, softmax'],
# With bias and choice kernel
['RW1972_softmax_CK', ['learn_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 1e-2, -5, 0, 1e-2], [1, 15, 5, 1, 20], 'SuttonBarto: softmax, choice kernel'],
['LNP_softmax_CK', ['tau1', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[1e-3, 1e-2, -5, 0, 1e-2], [100, 15, 5, 1, 20], 'Sugrue2004, Corrado2005: one tau, choice kernel'],
['LNP_softmax_CK', ['tau1', 'tau2', 'w_tau1', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[1e-3, 1e-1, 0, 1e-2, -5, 0, 1e-2], [15, 40, 1, 15, 5, 1, 20], 'Corrado2005, Iigaya2019: two taus, choice kernel'],
['Bari2019_CK', ['learn_rate', 'forget_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 1e-2, -5, 0, 1e-2], [1, 1, 15, 5, 1, 20], 'RL: chosen, unchosen, softmax, choice kernel'],
['Hattori2019_CK', ['learn_rate_rew', 'learn_rate_unrew', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 1e-2, -5, 0, 1e-2], [1, 1, 15, 5, 1, 20], 'RL: rew, unrew, softmax, choice kernel'],
['Hattori2019_CK', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 0, 1e-2, -5, 0, 1e-2], [1, 1, 1, 15, 5, 1, 20], 'Hattori + choice kernel'],
['Hattori2019_CK', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 0, 1e-2, -5, 1, 1e-2], [1, 1, 1, 15, 5, 1, 20], 'choice_step_size fixed at 1 --> Bari 2019: only the last choice matters'],
]
# Parse and insert MODELS
for model_id, model in enumerate(MODELS):
# Insert Model
model_class = [mc for mc in ModelClass.fetch("model_class") if mc in model[0]][0]
is_bias = True if any(['bias' in param for param in model[1]]) else False
is_epsilon_greedy = True if 'epsilon' in model[1] else False
is_softmax = True if 'softmax_temperature' in model[1] else False
is_choice_kernel = True if 'choice_step_size' in model[1] else False
n_params = 0
param_notation = []
# Insert Model
for param, lb, ub in zip(*model[1:4]):
if lb < ub:
n_params += 1 # Only count effective params
param_notation.append((ModelParam & f'model_param="{param}"').fetch1("param_notation"))
else:
param_notation.append((ModelParam & f'model_param="{param}"').fetch1("param_notation") + f'= {lb}')
param_notation = ', '.join(param_notation)
model_notation = f'{model[0]} ({param_notation})'
desc = model[4] if len(model) == 5 else '' # model[0] + ': ' + ', '.join(model[1]) # Use the user-defined string if exists
Model.insert1(dict(model_id=model_id, model_class=model_class, model_notation=model_notation, n_params=n_params,
is_bias=is_bias, is_epsilon_greedy=is_epsilon_greedy, is_softmax=is_softmax, is_choice_kernel=is_choice_kernel,
desc=desc, fit_cmd=model[:4]),
skip_duplicates=True)
# Insert Model.Param
for idx, (param, lb, ub) in enumerate(zip(*model[1:4])):
# The result table should save both effective and fixed params
Model.Param.insert1(dict(model_id=model_id, model_param=param, param_idx=idx,
param_lower_bound=lb, param_higher_bound=ub),
skip_duplicates=True)
return
@schema
class FittedSessionModel(dj.Computed):
definition = """
-> experiment.Session
-> Model
---
n_trials: int
n_params: int
log_likelihood: float # raw log likelihood of the model
aic: float # AIC
bic: float # BIC
lpt: float # Likelihood-Per-Trial raw
lpt_aic: float # Likelihood-Per-Trial with AIC penalty
lpt_bic: float # Likelihood-Per-Trial with BIC penalty
prediction_accuracy: float # non-cross-validated prediction accuracy
cross_valid_accuracy_fit: float # cross-validated accuracy (fitting set)
cross_valid_accuracy_test: float # cross-validated accuracy (testing set)
cross_valid_accuracy_test_bias_only = NULL: float # accuracy predicted only by bias (testing set)
"""
key_source = (foraging_analysis.SessionTaskProtocol() & 'session_task_protocol = 100' & 'session_real_foraging'
) * Model() #& (experiment.Session & 'session_date > "2021-01-01"')
class FittedParam(dj.Part):
definition = """
-> master
-> Model.Param
---
fitted_value: float
"""
class PredictiveChoiceProb(dj.Part):
definition = """
# Could be used to compute latent value Q (for models that have Q). Ignored trials are skipped.
-> master
-> experiment.SessionTrial
-> experiment.WaterPort
---
choice_prob: float
"""
def make(self, key):
choice_history, reward_history, p_reward, q_choice_outcome = get_session_history(key)
model_str = (Model & key).fetch('fit_cmd')
# --- Actual fitting ---
model_comparison_this = BanditModelComparison(choice_history, reward_history, model=model_str)
model_comparison_this.fit(pool='', plot_predictive=None, if_verbose=False) # Parallel on sessions, not on DE
model_comparison_this.cross_validate(pool='', k_fold=2, if_verbose=False)
# ------ Grab results ----
fit_result = model_comparison_this.results_raw[0]
cross_valid_result = model_comparison_this.prediction_accuracy_CV.iloc[0]
# Insert session fitted stats
self.insert1(dict(**key,
n_trials=fit_result.n_trials,
n_params=fit_result.k_model,
log_likelihood=fit_result.log_likelihood,
aic=fit_result.AIC,
bic=fit_result.BIC,
lpt=fit_result.LPT,
lpt_aic=fit_result.LPT_AIC,
lpt_bic=fit_result.LPT_BIC,
prediction_accuracy=fit_result.prediction_accuracy,
cross_valid_accuracy_fit=np.mean(cross_valid_result.prediction_accuracy_fit),
cross_valid_accuracy_test=np.mean(cross_valid_result.prediction_accuracy_test)
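# Hedged sketch of the textbook relationships behind the lpt/aic/bic columns
# above; the exact definitions used by BanditModelComparison may differ, so
# treat this as an illustration rather than the repo's implementation.
import numpy as np
def model_comparison_stats(log_likelihood, n_params, n_trials):
    aic = -2 * log_likelihood + 2 * n_params
    bic = -2 * log_likelihood + n_params * np.log(n_trials)
    lpt = np.exp(log_likelihood / n_trials)       # raw likelihood per trial
    lpt_aic = np.exp(-aic / (2 * n_trials))       # AIC-penalised per-trial likelihood
    lpt_bic = np.exp(-bic / (2 * n_trials))       # BIC-penalised per-trial likelihood
    return aic, bic, lpt, lpt_aic, lpt_bic
print(model_comparison_stats(-300.0, 4, 500))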
from scipy.stats import norm, poisson
import numpy as np
def weiner_process(m, N, t0, T):
"""
Function for modelling a Wiener process
Parameters:
m (int): number of sample paths
N (int): number of approximation points
t0 (float): simulation start time
T (float): simulation end time
Returns:
trajectories (numpy.ndarray): a matrix with m rows and N columns where each row corresponds to a different
sample path of a Wiener process from t0 to T, approximated at N points
time (numpy.ndarray): an array equal to numpy.linspace(t0, T, N)
"""
dt = (T - t0) / (N - 1)
trajectories = np.zeros((m, N))
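# Hedged sketch of how the simulation described in the docstring could be
# finished (the original completion is cut off above): cumulative sums of
# independent N(0, dt) increments along each sample path.
import numpy as np
def wiener_paths(m, N, t0, T):
    dt = (T - t0) / (N - 1)
    time = np.linspace(t0, T, N)
    increments = np.sqrt(dt) * np.random.standard_normal((m, N - 1))
    trajectories = np.zeros((m, N))
    trajectories[:, 1:] = np.cumsum(increments, axis=1)
    return trajectories, time
paths, time = wiener_paths(5, 1000, 0.0, 1.0)
print(paths.shape, time.shape)   # (5, 1000) (1000,)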
#!/usr/bin/python
# vim: set expandtab ts=4 sw=4:
# %% -----------------------------------------------------
#
# This script loads the EMD analyses from one run of the LFP data and creates
# figures 6 and 8. Figure 6 shows a segment of the time-series and associated
# EMD metrics and figure 8 shows the single cycle representation of around 2000
# cycles.
# %% -----------------------------------------------------
# Imports and definitions
import os
import emd
import h5py
import sails
import pandas
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from emd_waveform_utils import config
import matplotlib
matplotlib.rc('font', serif=config['fontname'])
# %% ------------------------------------------------------
emd.logger.set_up(level='DEBUG')
run = 2
run_name = config['recordings'][2]
datafile = os.path.join(config['analysisdir'], run_name + '.hdf5')
F = h5py.File(datafile, 'r')
sample_rate = 1250
imf = F['imf'][...]
IP = F['IP'][...]
IA = F['IA'][...]
IF = F['IF'][...]
speed = F['speed'][...]
metricfile = os.path.join(config['analysisdir'], run_name + '.csv')
df = pandas.read_csv(metricfile)
# Carrier frequency histogram definition
edges, bins = emd.spectra.define_hist_bins(2, 128, 128, 'log')
plot_inds = np.arange(7500+1250, 7500+1250+4*1250)
# %% ------------------------------------------
# Create graphical abstract
TINY_SIZE = 6
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=TINY_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=TINY_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
frames = True
def remove_frames(ax, tags=['top', 'right']):
for tag in tags:
ax.spines[tag].set_visible(False)
start = 41000
inds = np.arange(start, start+1250*1).astype(int)
tt = np.linspace(0, 1, len(inds))
plt.figure(figsize=(14, 10))
ax1 = plt.axes([0.05, .775, .125, .1], frameon=frames)
ax2 = plt.axes([.308, .725, .125, .2], frameon=frames)
ax3 = plt.axes([.5666, .725, .125, .2], frameon=frames)
ax4 = plt.axes([.825, .725, .125, .2], frameon=frames)
ax5 = plt.axes([.06, .35, .2, .125], frameon=frames)
ax1.plot(tt, imf[inds, :].sum(axis=1), 'k')
ax1.plot(tt, np.zeros_like(tt)-500, 'k', linewidth=0.5)
remove_frames(ax1, tags=['top', 'right', 'bottom'])
ax1.set_xlim(tt[0], tt[-1])
ax1.set_xticks([0, 0.5, 1])
ax1.set_xlabel('Time (secs)')
ax1.set_ylabel(r'Amp ($\mu V$)')
ax1.spines['left'].set_bounds(-500, 500)
ax1.set_yticks([-500, 0, 500])
remove_frames(ax2, tags=['top', 'right', 'bottom', 'left'])
ax2.set_xlim(tt[0], tt[-1])
ax2.set_xticks([0, 0.5, 1])
for ii in range(4):
ax2.plot(tt, np.zeros_like(tt)-ii*500, 'k', linewidth=0.5)
ax2.plot((0, 0), (-200-ii*500, 200-ii*500), 'k')
ax2.text(-.015, 200-ii*500, '200', va='center', ha='right', fontsize=TINY_SIZE)
ax2.text(-.015, -200-ii*500, '-200', va='center', ha='right', fontsize=TINY_SIZE)
ax2.set_yticks([])
ax2.plot(tt, imf[inds, 2:6] - np.arange(0, 2000, 500)[None, :])
ax2.set_ylabel(r'Amp ($\mu V$)', labelpad=20)
ax2.set_xlabel('Time (secs)')
ip = IP[inds, 5]
ip[np.gradient(ip) < -2] = np.nan
remove_frames(ax3, tags=['top', 'right', 'left'])
ax3.set_yticks([])
ax3.plot(tt, ip)
ax3.set_xlim(tt[0], tt[-1])
ax3.set_xticks([0, 0.5, 1])
ax3.set_xlabel('Time (secs)')
ax3.plot(tt, IF[inds, 5]-14)
ax3.plot((0, 0), (0, np.pi*2), 'k')
ax3.plot((0, 0), (4-14, 10-14), 'k')
ax3.text(-.015, np.pi*2, r'2$\pi$', va='center', ha='right', fontsize=TINY_SIZE)
ax3.text(-.015, 0, r'0', va='center', ha='right', fontsize=TINY_SIZE)
ax3.text(-.015, 10-14, '10', va='center', ha='right', fontsize=TINY_SIZE)
ax3.text(-.015, 7-14, '7', va='center', ha='right', fontsize=TINY_SIZE)
ax3.text(-.015, 4-14, '4', va='center', ha='right', fontsize=TINY_SIZE)
ax3.text(-.1, 7-14, 'Instantaneous\nFrequency (Hz)', va='center', ha='right', fontsize=SMALL_SIZE, rotation=90)
ax3.text(-.1, np.pi, 'Instantaneous\nPhase (rads)', va='center', ha='right', fontsize=SMALL_SIZE, rotation=90)
inds = np.arange(start, start+1250*4).astype(int)
tt = np.linspace(0, 4, len(inds))
ax4.fill_between(tt, speed[inds], 0, alpha=0.5)
ax4.plot((tt[0], tt[-1]), (2, 2), 'k--')
ii = imf[inds, 5]/100 - 3.5
ax4.plot(tt, ii, 'k')
ii[speed[inds] > 2] = np.nan
ax4.plot(tt, ii, 'r')
ax4.set_xlabel('Time (secs)')
ax4.set_xlim(tt[0], tt[-1])
ax4.set_xticks([0, 1, 2, 3, 4])
ax4.set_yticks([])
remove_frames(ax4, tags=['top', 'right', 'left'])
ax4.plot((0, 0), (0, 5), 'k')
ax4.plot((0, 0), (-5.5, -1.5), 'k')
ax4.text(-.03, 0, '0', va='center', ha='right', fontsize=TINY_SIZE)
ax4.text(-.03, 2, '2', va='center', ha='right', fontsize=TINY_SIZE)
ax4.text(-.03, 4, '4', va='center', ha='right', fontsize=TINY_SIZE)
ax4.text(-.015, -1.5, '200', va='center', ha='right', fontsize=TINY_SIZE)
ax4.text(-.015, -3.5, '0', va='center', ha='right', fontsize=TINY_SIZE)
ax4.text(-.015, -5.5, '-200', va='center', ha='right', fontsize=TINY_SIZE)
ax4.text(-.4, -3.5, 'Amp. ($\mu$V)', va='center', ha='right', fontsize=SMALL_SIZE, rotation=90)
ax4.text(-.4, 2.5, 'Movement\nSpeed (cm/s)', va='center', ha='right', fontsize=SMALL_SIZE, rotation=90)
start = 41000
inds = np.arange(start, start+1250*1).astype(int)
tt = np.linspace(0, 4, len(inds))
C = emd.cycles.Cycles(IP[inds, 5], compute_timings=True)
C.compute_cycle_metric('peak', imf[inds, 5], emd.cycles.cf_peak_sample)
C.compute_cycle_metric('desc', imf[inds, 5], emd.cycles.cf_descending_zero_sample)
C.compute_cycle_metric('trough', imf[inds, 5], emd.cycles.cf_trough_sample)
df_abs = C.get_metric_dataframe()
ax5.plot(imf[inds, 5], 'k')
for ii in range(1, len(df_abs)-1):
st = df_abs['start_sample'].values[ii]
pk = st + df_abs['peak'].values[ii]
ax5.plot(pk, imf[inds[int(pk)], 5], '^r')
tr = st + df_abs['trough'].values[ii]
ax5.plot(tr, imf[inds[int(tr)], 5], 'vb')
asc = st + df_abs['desc'].values[ii]
ax5.plot(asc, imf[inds[int(asc)], 5], 'oc')
desc = st
ax5.plot(desc, imf[inds[int(desc)], 5], 'om')
if ii == 1:
plt.legend(['Oscillation', 'Peak', 'Trough', 'Descending Zero', 'Ascending Zero'], frameon=False, bbox_to_anchor=(0.5, -1), loc='center')
remove_frames(ax5, tags=['top', 'right'])
ax5.set_xlim(tt[0], tt[-1])
ax5.set_xticks(np.linspace(0, len(tt), 5))
ax5.set_xticklabels(np.arange(5))
ax5.set_xlabel('Time (secs)')
ax5.set_ylabel(r'Amp ($\mu V$)')
ax5.spines['left'].set_bounds(-300, 300)
ax6 = plt.axes([0.35, 0.42, 0.1, 0.1])
ax7 = plt.axes([0.35, 0.2, 0.1, 0.2])
ax8 = plt.axes([0.495, 0.42, 0.1, 0.1])
ax9 = plt.axes([0.495, 0.2, 0.1, 0.2])
pa = emd.cycles.phase_align(IP[inds, 5], IF[inds, 5], cycles=C)
cind = (3, 7)
ax6.plot(imf[inds[C._slice_cache[cind[0]]], 5], 'r')
ax6.plot(imf[inds[C._slice_cache[cind[1]]], 5], 'b')
remove_frames(ax6, tags=['top', 'right', 'bottom'])
ax6.set_ylabel(r'Amp ($\mu V$)')
ax6.set_xticks([])
ax6.spines['left'].set_bounds(-200, 200)
ax7.plot(IF[inds[C._slice_cache[cind[0]]], 5], 'r')
ax7.plot(IF[inds[C._slice_cache[cind[1]]], 5], 'b')
remove_frames(ax7, tags=['top', 'right'])
ax7.set_xlabel('Time (secs)')
ax7.set_ylabel('Instantaneous\nFrequency (Hz)', rotation=90, fontsize=SMALL_SIZE)
ax8.plot(np.sin(2*np.pi*np.linspace(0, 1)), 'r')
ax8.plot(np.sin(2*np.pi*np.linspace(0, 1)), 'b--')
remove_frames(ax8, tags=['top', 'right', 'bottom'])
ax8.set_ylabel(r'Amp (a.u.)')
ax8.set_xticks([])
ax8.spines['left'].set_bounds(-1, 1)
ax9.plot(pa[0][:, cind[0]], 'r')
ax9.plot(pa[0][:, cind[1]], 'b')
remove_frames(ax9, tags=['top', 'right'])
ax9.set_xlabel('Phase (rads)')
ax9.set_xticks(np.linspace(0, 48, 3))
ax9.set_xticklabels(['0', r'$\pi$', r'2$\pi$'])
inds = np.arange(start, start+1250*12).astype(int)
C = emd.cycles.Cycles(IP[inds, 5], compute_timings=True)
pa, _ = emd.cycles.phase_align(IP[inds, 5], IF[inds, 5], cycles=C)
pa = pa[:, np.isfinite(pa.mean(axis=0))]
goods = np.logical_and((pa.min(axis=0) > 3), (pa.mean(axis=0) <10))
ax10 = plt.axes([0.675, 0.25, .1, .25])
im = ax10.pcolormesh(pa[:, goods].T, vmin=5, vmax=12)
cb = plt.colorbar(im)
cb.set_label('Instantaneous\nFrequency (Hz)')
ax10.set_xlabel('Phase (rads)')
ax10.set_xticks(np.linspace(0, 48, 3))
ax10.set_xticklabels(['0', r'$\pi$', r'2$\pi$'])
ax10.set_ylabel('Cycles')
ax11 = plt.axes([0.9, 0.425, 0.093, 0.12])
ax12 = plt.axes([0.9, 0.25, 0.093, 0.12])
ax13 = plt.axes([0.9, 0.075, 0.093, 0.12])
samples_per_cycle = 480
ncycles = 6
ph = np.linspace(0, np.pi*2*ncycles, samples_per_cycle*ncycles)
t = np.linspace(0, ncycles, samples_per_cycle*ncycles)
basis = np.c_[np.zeros_like(ph),
0.9*np.cos(2*np.pi*1*t)[:, None],
-0.9*np.cos(2*np.pi*1*t)[:, None],
1.55*np.sin(2*np.pi*1*t)[:, None],
-1.55*np.sin(2*np.pi*1*t)[:, None],
np.sin(2*np.pi*2*t)[:, None],
-0.8*np.sin(2*np.pi*2*t)[:, None]]
basis = basis * 1/4
phs = ph[:, None] + basis
X = np.sin(phs)
IP2, IF2, IA2 = emd.spectra.frequency_transform(X, samples_per_cycle, 'hilbert')
cycles = emd.cycles.get_cycle_vector(IP2, return_good=True)
lin_inds = cycles[:, 0] == 1
inds = cycles[:, 1] == 2
ax11.plot(np.linspace(0, 1, inds.sum()), np.sin(phs[inds, 1]))
inds = cycles[:, 2] == 2
ax11.plot(np.linspace(0, 1, inds.sum()), np.sin(phs[inds, 2]))
remove_frames(ax11, tags=['top', 'right'])
ax11.set_yticks([-1, 0, 1])
ax11.set_ylabel('Amp (a.u.)')
ax11.set_xlim(0, 1)
ax11.set_xticks([0, 1])
ax11.set_title('Motif 1', fontsize=MEDIUM_SIZE)
ax11.spines['left'].set_bounds(-1, 1)
inds = cycles[:, 3] == 2
ax12.plot(np.linspace(0, 1, inds.sum()), np.sin(phs[inds, 3]))
inds = cycles[:, 4] == 2
ax12.plot(np.linspace(0, 1, inds.sum()), np.sin(phs[inds, 4]))
remove_frames(ax12, tags=['top', 'right'])
ax12.set_yticks([-1, 0, 1])
ax12.set_xlim(0, 1)
ax12.set_ylabel('Amp (a.u.)')
ax12.set_xticks([0, 1])
ax12.set_title('Motif 2', fontsize=MEDIUM_SIZE)
ax12.spines['left'].set_bounds(-1, 1)
inds = cycles[:, 5] == 2
ax13.plot(np.linspace(0, 1, inds.sum()), np.sin(phs[inds, 5]))
inds = cycles[:, 6] == 2
ax13.plot(np.linspace(0, 1, inds.sum()), np.sin(phs[inds, 6]))
remove_frames(ax13, tags=['top', 'right'])
ax13.set_xlabel('Cycle Duration', fontsize=SMALL_SIZE)
ax13.set_yticks([-1, 0, 1])
ax13.set_ylabel('Amp (a.u.)')
ax13.set_xlim(0, 1)
ax13.set_xticks([0, 1])
ax13.set_title('Motif 3', fontsize=MEDIUM_SIZE)
ax13.spines['left'].set_bounds(-1, 1)
outname = os.path.join(config['figdir'], 'emd_fig1_graphicalabstract.png')
plt.savefig(outname, dpi=300, transparent=True)
plt.style.use('default')
# %% ------------------------------------------
# Create figure 5 time-series
width = config['3col_width'] / 25.4
height = width * .6
plot_horiz = True
sparse_horiz = True
plot_vert = True
fontsize_side = 'large'
fontsize_tick = 10
horiz_width = .35
inds = np.arange(20230, 20000+1250*3).astype(int)
start = 193000
start = 41000
inds = np.arange(start, start+1250*2.8).astype(int)
cmap = plt.cm.Set1
cols = cmap(np.linspace(0, 1, 8))
cols[4, :] = [.5, .5, .2, 1]
indx = [5, 1, 2, 3, 4, 0, 6, 7]
cols = cols[indx, :]
plt.figure(figsize=(width*2, height*2))
plt.axes([.08, .025, .95, .95], frameon=False)
plt.xticks([])
plt.yticks([])
# Plot Data
plt.plot(imf[inds, :6].sum(axis=1), color=[.2, .2, .2], linewidth=.5)
plt.plot(imf[inds, 5], color=cols[5, :], linewidth=1)
plt.plot([0, 0], [-350, 350], 'k')
plt.text(-250, 80, 'LFP', fontsize=fontsize_side,
verticalalignment='center', horizontalalignment='center')
plt.text(-250, 600, 'Cycle No', fontsize=fontsize_side,
verticalalignment='center', horizontalalignment='center')
plt.text(-250, -80, 'Theta', fontsize=fontsize_side,
verticalalignment='center', horizontalalignment='center', color='r')
plt.plot([1.9*1250, 2.9*1250], [800, 800], 'k')
plt.text(2.4*1250, 825, '1 Second', horizontalalignment='center',
verticalalignment='bottom', fontsize=fontsize_side)
# Plot IMFs
step = -500
labels = ['IMF1', 'IMF2', 'IMF3', 'IMF4', 'IMF5', 'IMF6', 'IMF7+']
for ii in range(7):
yind = -300*(1+ii)+step
if plot_horiz:
plt.plot([-10, len(inds)], [yind, yind], color=[.7, .7, .7], linewidth=horiz_width)
plt.plot([-10, 0], [yind, yind], 'k')
if ii < 6:
plt.plot(.5*imf[inds, ii]+yind, color=cols[ii, :])
else:
plt.plot(.5*imf[inds, ii:].sum(axis=1)+yind, color=cols[ii, :])
plt.text(-22, yind, labels[ii], fontsize=fontsize_tick, verticalalignment='center', horizontalalignment='right')
plt.plot([0, 0], [-2800, -600], 'k')
plt.text(-275, -300*(1+3)+step, 'IMFs', fontsize=fontsize_side, verticalalignment='center', horizontalalignment='center')
# Instantaneous Phase
labels = [r'$-\pi$', r'$0$', r'$\pi$']
for ii in range(3):
yind = -3500+ii*75*((2*np.pi)/2)
if sparse_horiz and ii == 1:
plt.plot([-10, len(inds)], [yind, yind], color=[.7, .7, .7], linewidth=horiz_width)
elif plot_horiz and not sparse_horiz:
plt.plot([-10, len(inds)], [yind, yind], color=[.7, .7, .7], linewidth=horiz_width)
plt.plot([-10, 0], [yind, yind], color='k')
plt.text(-22, yind, labels[ii], fontsize=fontsize_tick, verticalalignment='center', horizontalalignment='right')
plt.plot([0, 0], [-3500, -3500+2*np.pi*75], 'k')
ip = IP[inds, 5]
naninds = np.where(np.diff(ip) < -5.5)[0]+1
ip[naninds] = np.nan
plt.plot(ip*75 - 3500, linewidth=1.5)
plt.text(-300, -3500+1*75*((2*np.pi)/2), 'Instantaneous\nPhase (rads)', fontsize=fontsize_side,
verticalalignment='center', horizontalalignment='center')
# Instantaneous Frequency
if_to_plot = IF[inds, 5]
ymin_f = np.nanmin(np.round(if_to_plot))
ymin = np.nanmin(ymin_f*40 - 4200)
ymax_f = np.nanmax(np.round(if_to_plot))
ymax = np.nanmin(ymax_f*40 - 4200)
plt.plot([0, 0], [ymin, ymax], 'k')
indx = np.linspace(ymin, ymax, 3)
indx_f = np.linspace(ymin_f, ymax_f, 3)
for ii in range(3):
if sparse_horiz and ii == 1:
plt.plot([-10, len(inds)], [indx[ii], indx[ii]], color=[.7, .7, .7], linewidth=horiz_width)
elif plot_horiz and not sparse_horiz:
plt.plot([-10, len(inds)], [indx[ii], indx[ii]], color=[.7, .7, .7], linewidth=horiz_width)
plt.plot([-10, 0], [indx[ii], indx[ii]], color='k')
plt.text(-22, indx[ii], indx_f[ii], fontsize=fontsize_tick, verticalalignment='center', horizontalalignment='right')
plt.plot(if_to_plot*40 - 4200)
plt.text(-300, indx[1], 'Instantaneous\nFrequency (Hz)', fontsize=fontsize_side, verticalalignment='center', horizontalalignment='center')
# Plot cycle bounds and compute within cycle frequency variability
cycles_to_plot = emd.cycles.get_cycle_vector(IP[inds, 5, None])
cycle_starts = np.where(np.diff(cycles_to_plot, axis=0))[0]
cm = np.zeros_like(inds)*np.nan
cv = np.zeros_like(inds)*np.nan
for ii in range(len(cycle_starts)):
if plot_vert:
plt.plot((cycle_starts[ii], cycle_starts[ii]), (-4600, 350), color=[.8, .8, .8], linewidth=.5)
if ii < len(cycle_starts)-1:
cm[cycle_starts[ii]:cycle_starts[ii+1]] = IF[inds[cycle_starts[ii]:cycle_starts[ii+1]], 5].mean()
cv[cycle_starts[ii]:cycle_starts[ii+1]] = IF[inds[cycle_starts[ii]:cycle_starts[ii+1]], 5].std()
plt.text((cycle_starts[ii]+cycle_starts[ii+1])/2, 600, ii+1,
fontsize=fontsize_tick, verticalalignment='center', horizontalalignment='center')
# Within cycle frequency variability
plt.fill_between(np.arange(len(inds)), cv*1e2 - 4600, np.ones_like(inds)-4601)
plt.plot((0, 0), (-4601, -4601+300), 'k')
plt.plot([-15, len(inds)], (-4601, -4601), color=[.7, .7, .7], linewidth=.5)
indx = np.linspace(0, 3, 4)*1e2 - 4600
indx_lab = np.round(np.linspace(0, 3, 4), 2).astype(int)
for ii in range(4):
if plot_horiz and sparse_horiz is False:
plt.plot([-10, len(inds)], (indx[ii], indx[ii]), color=[.7, .7, .7], linewidth=horiz_width)
elif ii == 0:
plt.plot([-10, len(inds)], (indx[ii], indx[ii]), color=[.7, .7, .7], linewidth=horiz_width)
plt.plot((-10, 0), (-4601+100*ii, -4601+100*ii), 'k')
plt.text(-22, indx[ii], indx_lab[ii], fontsize=fontsize_tick,
verticalalignment='center', horizontalalignment='right')
plt.text(-300, indx[1:3].mean(), 'Instantaneous\nFrequency\nStd-Dev', fontsize=fontsize_side,
verticalalignment='center', horizontalalignment='center')
outname = os.path.join(config['figdir'], 'emd_fig6_real_sift.png')
plt.savefig(outname, dpi=300, transparent=True)
# %% --------------------------------------------------------------------
# Create figure 5 - Supplemental
inds2 = inds[:600]
tx = np.linspace(0, 2, 512)
plt.figure(figsize=(14, 10))
plt.subplots_adjust(hspace=0.3)
# Harmonic
plt.subplot(221)
a = np.sin(2*np.pi*tx)
b = np.sin(2*np.pi*2*tx)
plt.plot(tx, a)
plt.plot(tx, b)
plt.plot(tx, a+b-3)
plt.ylim(-5, 3)
plt.legend(['Base Signal', 'High Freq Signal', 'Summed Signal'], frameon=False, fontsize='large')
for tag in ['top', 'right', 'left']:
plt.gca().spines[tag].set_visible(False)
plt.yticks([])
plt.title('Simulation A')
plt.xlabel('Time (Seconds)')
plt.subplot(222)
b = 0.2*np.sin(2*np.pi*2*tx)
plt.plot(tx, a)
plt.plot(tx, b)
plt.plot(tx, a+b-3)
plt.ylim(-5, 3)
plt.legend(['Base Signal', 'Harmonic', 'Summed Signal'], frameon=False, fontsize='large')
for tag in ['top', 'right', 'left']:
plt.gca().spines[tag].set_visible(False)
plt.yticks([])
plt.title('Simulation B')
plt.xlabel('Time (Seconds)')
plt.subplot(212)
plt.plot(imf[inds2, :].sum(axis=1), label='Raw Signal')
plt.plot(imf[inds2, 5]-500, label='IMF-6')
plt.plot(imf[inds2, 4]-500, label='IMF-5')
plt.plot(imf[inds2, 4]+imf[inds2, 5]-1000, label='IMF-5 + IMF-6')
plt.legend(frameon=False, fontsize='large')
for tag in ['top', 'right', 'left']:
plt.gca().spines[tag].set_visible(False)
plt.yticks([])
plt.xticks(np.arange(5)*125, np.arange(5)*100)
plt.xlabel('Time (milliseconds)')
plt.title('Real Data')
outname = os.path.join(config['figdir'], 'emd_fig6_supplemental_zoom.png')
plt.savefig(outname, dpi=300, transparent=True)
# %% --------------------------------------------------------------------
# Create figure 6 - spectra
edges, bins = emd.spectra.define_hist_bins(2, 35, 64, 'linear')
cwt = sails.wavelet.morlet(imf[inds, :6].sum(axis=1), bins, sample_rate, normalise='simple', ret_mode='amplitude')
hht = emd.spectra.hilberthuang(IF[inds, :6], IA[inds, :6], edges, mode='amplitude')
hht = ndimage.gaussian_filter(hht, 1)
t = np.arange(len(inds))
plt.figure(figsize=(width*1.925, height*1.25))
plt.axes([.13, .55, .855, .425], frameon=True)
pcm = plt.pcolormesh(t, bins, hht, cmap='hot_r')
for ii in range(len(cycle_starts)):
if plot_vert:
plt.plot((cycle_starts[ii], cycle_starts[ii]), (2, 100), color=[.8, .8, .8], linewidth=.5)
plt.ylim(2, 35)
plt.xticks(np.arange(0, len(inds), sample_rate/2), [])
plt.ylabel('Frequency (Hz)')
for tag in ['top', 'right']:
plt.gca().spines[tag].set_visible(False)
plt.ylabel('Frequency (Hz)'); plt.xlabel('')
ax = plt.axes([.97, .65, .015, .3])
cb = plt.colorbar(pcm, cax=ax)
ax.yaxis.set_ticks_position('left')
cb.set_label('Power')
plt.axes([.13, .095, .855, .425], frameon=True)
pcm = plt.pcolormesh(t, bins, cwt, cmap='hot_r')
for ii in range(len(cycle_starts)):
if plot_vert:
plt.plot((cycle_starts[ii], cycle_starts[ii]), (2, 100), color=[.8, .8, .8], linewidth=.5)
plt.ylim(2, 35)
plt.xticks(np.arange(0, len(inds), sample_rate/2), np.arange(0, len(inds), sample_rate/2)/sample_rate)
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (seconds)')
for tag in ['top', 'right']:
plt.gca().spines[tag].set_visible(False)
plt.ylabel('Frequency (Hz)'); plt.xlabel('Time (seconds)')
ax = plt.axes([.97, .195, .015, .3])
cb = plt.colorbar(pcm, cax=ax)
ax.yaxis.set_ticks_position('left')
cb.set_label('Power')
outname = os.path.join(config['figdir'], 'emd_fig6_real_sift_spec.png')
plt.savefig(outname, dpi=300, transparent=True)
# %% --------------------------------------------------------------------
# Create Figure 8
def decorate_ax(ax):
for tag in ['top', 'right']:
ax.spines[tag].set_visible(False)
waveform = F['zc_waveform'][...]
instfreq = F['zc_instfreq'][...]
pa = F['pa'][...]
ctrl = np.c_[np.zeros_like(df['start_sample']),
df['peak_sample'],
df['desc_sample'],
df['trough_sample'],
df['duration_samples']]
ctrl_mets = np.c_[df['peak2trough'], df['asc2desc']].T
I = np.argsort(ctrl[:, 4])[::-1]
segments = np.zeros((ctrl.shape[0], 400))*np.nan
for ii in range(ctrl.shape[0]):
for jj in range(1, ctrl.shape[1]):
segments[ii, int(np.round(ctrl[ii, jj-1])):int(np.round(ctrl[ii, jj]))] = jj
# Remove cycles with ambiguous peaks
goods = np.setdiff1d(np.arange(segments.shape[0]), np.where(segments[:, 0]==4)[0])
segments = segments[goods, :]
I = np.argsort(ctrl[goods, 4])[::-1]
ctrl_mets = ctrl_mets[:, goods]
pa = pa[:, goods]
instfreq = instfreq[:, goods]
trim = 2700 # Can't see anything if we plot every cycle...
I = I[:-trim]
width = config['2col_width'] / 25.4
height = config['3col_width'] / 25.4
# Figure start
plt.figure(figsize=(width*2, height*2))
# Plot control point segments
plt.axes([.1, .1, .2, .65])
plt.pcolormesh(segments[I, :])
plt.xticks(np.linspace(0, 200, 5), (np.linspace(0, 200, 5)/sample_rate*1000).astype(int))
plt.xlabel('Time (ms)')
plt.xlim(0, 250)
plt.ylabel('# Cycle (Sorted by duration)')
decorate_ax(plt.gca())
plt.axes([.1, .775, .144, .075], frameon=False)
plt.xticks([]);
plt.yticks([])
cols = plt.cm.viridis(np.linspace(0, 1, 4))
for ii in range(4):
xvals = np.linspace(0, .25)+.25*ii
plt.plot(xvals, np.sin(2*np.pi*xvals), linewidth=3, color=cols[ii, :])
# Plot control point metrics
plt.axes([.31, .1, .1, .65])
plt.plot(ctrl_mets[0][I], np.arange(len(ctrl_mets[0])-trim), '.')
plt.plot(ctrl_mets[1][I], np.arange(len(ctrl_mets[0])-trim), '.')
plt.plot(np.zeros_like(ctrl_mets[1][I]), np.arange(len(ctrl_mets[0])-trim), 'k', linewidth=.5)
plt.xlim(0, 1)
plt.ylim(0, len(ctrl_mets[0])-trim)
plt.yticks([])
decorate_ax(plt.gca())
plt.gca().spines['left'].set_visible(False)
plt.axes([.31, .775, .1, .15])
plt.hist(ctrl_mets[0][I], np.linspace(-1, 1), alpha=.5)
plt.hist(ctrl_mets[1][I], np.linspace(-1, 1), alpha=.5)
plt.xticks(np.linspace(-.25, .25, 3), [])
plt.legend(['Peak/Trough', 'Ascent/Descent'], frameon=False,
fontsize=8, loc='center', bbox_to_anchor=(0.5, 0.5, 1, 1))
decorate_ax(plt.gca())
plt.xlim(0, 1)
plt.ylim(0, 800)
plt.title('Control-Point Ratios\n')
# Plot temporally aligned instantaneous frequency
plt.axes([.5, .1, .2, .65])
plt.pcolormesh(instfreq[:, I].T, vmin=6, vmax=14)
decorate_ax(plt.gca())
plt.xticks(np.linspace(0, 200, 5), (np.linspace(0, 200, 5)/sample_rate*1000).astype(int))
plt.xlabel('Time (ms)')
plt.xlim(0, 250)
plt.axes([.5, .775, .2, .15])
plt.plot(np.nanmean(instfreq, axis=1))
decorate_ax(plt.gca())
plt.title('Cycle-Onset Aligned\nInst. Freq')
plt.xticks(np.linspace(0, 200, 5), [])
plt.xlim(0, 250)
# Plot phase aligned instantaneous frequency
plt.axes([.75, .1, .2, .65])
pcm = plt.pcolormesh(pa[:, I].T, vmin=6, vmax=14)
plt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
plt.xlabel('Theta Phase')
plt.yticks(np.arange(8)*200, [])
plt.axes([.75, .775, .2, .15])
plt.plot(np.nanmean(pa, axis=1))
plt.xlim(0, 48)
decorate_ax(plt.gca())
plt.xticks(np.arange(5)*12, [])
plt.title('Phase-Aligned\nInst. Freq')
# Inst. freq colourbar
ax = plt.axes([.685, .45, .015, .18])
cb = plt.colorbar(pcm, cax=ax)
ax.yaxis.set_ticks_position('left')
plt.title('Instantaneous\nFrequency (Hz)', fontsize=9)
outname = os.path.join(config['figdir'], 'emd_fig8_real_phasealign.png')
plt.savefig(outname, dpi=300, transparent=True)
# %% --------------------------------------------------------------------
# Create Figure 8 - REVISED
def decorate_ax(ax):
for tag in ['top', 'right']:
ax.spines[tag].set_visible(False)
waveform = F['zc_waveform'][...]
instfreq = F['zc_instfreq'][...]
pa = F['pa'][...]
ctrl = np.c_[np.zeros_like(df['start_sample']),
df['peak_sample'],
df['desc_sample'],
df['trough_sample'],
df['duration_samples']]
ctrl_mets = np.c_[df['peak2trough'], df['asc2desc']].T
I = np.argsort(ctrl[:, 4])[::-1]
segments = np.zeros((ctrl.shape[0], 400))*np.nan
for ii in range(ctrl.shape[0]):
for jj in range(1, ctrl.shape[1]):
segments[ii, int(np.round(ctrl[ii, jj-1])):int(np.round(ctrl[ii, jj]))] = jj
# Remove cycles with ambiguous peaks
goods = np.setdiff1d(np.arange(segments.shape[0]), np.where(segments[:, 0]==4)[0])
segments = segments[goods, :]
I = np.argsort(ctrl[goods, 4])[::-1]
ctrl_mets = ctrl_mets[:, goods]
pa = pa[:, goods]
instfreq = instfreq[:, goods]
trim = 2700 # Can't see anything if we plot every cycle...
I = I[:-trim]
I2 = I[::15]
width = config['2col_width'] / 25.4
height = config['3col_width'] / 25.4
col_height = 0.45
top_height = 0.3
# Figure start
plt.figure(figsize=(width*3, height*2))
# Plot control point segments
plt.axes([.1, .1, .2, col_height])
#plt.pcolormesh(segments[I2, :])
plt.plot(ctrl[I2, 1], np.arange(len(I2)), '^')
plt.plot(ctrl[I2, 2], np.arange(len(I2)), 'x')
plt.plot(ctrl[I2, 3], np.arange(len(I2)), 'v')
plt.plot(ctrl[I2, 4], np.arange(len(I2)), '.')
plt.legend(['Peak', 'Desc', 'Trough', 'Asc'], frameon=False, loc='center', bbox_to_anchor=(0.4, 0.2, 1, 1))
plt.xticks(np.linspace(0, 200, 5), (np.linspace(0, 200, 5)/sample_rate*1000).astype(int))
plt.xlabel('Time (ms)')
plt.xlim(0, 250)
plt.ylim(0, len(I2))
plt.ylabel('# Cycle (Sorted by duration)')
decorate_ax(plt.gca())
plt.axes([.1, .6, .2, top_height-0.05])
plt.plot((0.5, 0.5), (0, 800), 'k--')
plt.hist(ctrl_mets[0][I], np.linspace(-1, 1), alpha=.5)
plt.hist(ctrl_mets[1][I], np.linspace(-1, 1), alpha=.5)
#plt.xticks(np.linspace(-.25, .25, 3))
plt.legend(['Sinusoid', 'Peak/Trough', 'Ascent/Descent'], frameon=False,
fontsize=10, loc='center', bbox_to_anchor=(0.5, 0.4, 1, 1))
decorate_ax(plt.gca())
plt.xlim(0, 1)
plt.ylim(0, 800)
plt.title('Control-Point Ratios\n')
plt.xlabel('Ratio')
plt.ylabel('Num Cycles')
# Plot temporally aligned instantaneous frequency
plt.axes([.425, .1, .2, col_height])
plt.imshow(instfreq[:, I2].T, interpolation='nearest', vmin=6, vmax=12, origin='lower', aspect='auto')
decorate_ax(plt.gca())
plt.xticks(np.linspace(0, 200, 5), (np.linspace(0, 200, 5)/sample_rate*1000).astype(int))
plt.xlabel('Time (ms)')
plt.xlim(0, 250)
plt.axes([.425, .6, .2, top_height/2])
mn = np.nanmean(instfreq[:, I], axis=1)
sem = np.nanstd(instfreq[:, I], axis=1)
sem = sem / np.sqrt(np.sum(np.isnan(instfreq[:, I])==False, axis=1))
plt.errorbar(np.arange(313), mn, yerr=sem, errorevery=4)
decorate_ax(plt.gca())
plt.xticks(np.linspace(0, 200, 5), (np.linspace(0, 200, 5)/sample_rate*1000).astype(int))
plt.xlim(0, 250)
plt.legend(['Avg IF (std-error of mean)'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)
plt.ylabel('Instantaneous\nFrequency (Hz)')
plt.axes([.425, .8, .2, 0.075])
plt.plot(np.nanmean(waveform[:, I], axis=1), 'k')
for tag in ['top', 'right', 'bottom']:
plt.gca().spines[tag].set_visible(False)
plt.xticks([])
plt.ylim(-200, 200)
plt.xlim(0, 250)
plt.legend(['Avg Waveform'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)
plt.ylabel(r'Amplitude ($\mu$V)')
plt.title('Cycle-Onset Alignment\n\n')#\nInstantaneous. Frequency\n(std-error of mean)')
# Plot phase aligned instantaneous frequency
plt.axes([.75, .1, .2, col_height])
pcm = plt.imshow(pa[:, I2].T, interpolation='nearest', vmin=6, vmax=12, origin='lower', aspect='auto')
plt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
plt.xlabel('Theta Phase (rads)')
decorate_ax(plt.gca())
plt.axes([.75, .6, .2, top_height/2])
mn = np.nanmean(pa[:, I], axis=1)
sem = np.nanstd(pa[:, I], axis=1) / np.sqrt(I.shape[0])
import numpy as np
from tqdm import tqdm
import utils.helper as hlp
def slidewindow(ts, horizon=.2, stride=0.2):
xf = []
yf = []
for i in range(0, ts.shape[0], int(stride * ts.shape[0])):
horizon1 = int(horizon * ts.shape[0])
if (i + horizon1 + horizon1 <= ts.shape[0]):
xf.append(ts[i:i + horizon1,0])
yf.append(ts[i + horizon1:i + horizon1 + horizon1, 0])
xf = np.asarray(xf)
yf = np.asarray(yf)
return xf, yf
def cutPF(ts, perc=.5):
seq_len = ts.shape[0]
new_ts = ts.copy()
t=int(perc*seq_len)
return new_ts[:t, ...], new_ts[t:, ...]
def cutout(ts, perc=.1):
seq_len = ts.shape[0]
new_ts = ts.copy()
win_len = int(perc * seq_len)
start = np.random.randint(0, seq_len-win_len-1)
end = start + win_len
start = max(0, start)
end = min(end, seq_len)
# print("[INFO] start={}, end={}".format(start, end))
new_ts[start:end, ...] = 0
# return new_ts, ts[start:end, ...]
return new_ts
def cut_piece2C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len/(2*2)
if perc<1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len-win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1-start2)<(win_class):
label=0
else:
label=1
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece3C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len/(2*3)
if perc<1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len-win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1-start2)<(win_class):
label=0
elif abs(start1-start2)<(2*win_class):
label=1
else:
label=2
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece4C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 4)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
else:
label = 3
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece5C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 5)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
else:
label = 4
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece6C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 6)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
else:
label = 5
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece7C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 7)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
elif abs(start1 - start2) < (6 * win_class):
label = 5
else:
label = 6
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece8C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 8)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
elif abs(start1 - start2) < (6 * win_class):
label = 5
elif abs(start1 - start2) < (7 * win_class):
label = 6
else:
label = 7
return ts[start1:end1, ...], ts[start2:end2, ...], label
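# The seven cut_pieceNC helpers above differ only in the number of distance
# classes. A single parameterised version (a sketch, not part of the original
# API; the per-class functions above are kept unchanged) expresses the same
# sampling rule:
def cut_piece_nC(ts, n_classes, perc=.1):
    seq_len = ts.shape[0]
    win_class = seq_len / (2 * n_classes)
    win_len = int(perc * seq_len) if perc < 1 else perc
    start1 = np.random.randint(0, seq_len - win_len)
    start2 = np.random.randint(0, seq_len - win_len)
    # label counts how many win_class-sized bands separate the two crop starts,
    # capped at n_classes - 1, matching the if/elif ladders above
    label = min(int(abs(start1 - start2) // win_class), n_classes - 1)
    return ts[start1:start1 + win_len, ...], ts[start2:start2 + win_len, ...], label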
def jitter(x, sigma=0.03):
# https://arxiv.org/pdf/1706.00527.pdf
return x + np.random.normal(loc=0., scale=sigma, size=x.shape)
def scaling(x, sigma=0.1):
# https://arxiv.org/pdf/1706.00527.pdf
factor = np.random.normal(loc=1., scale=sigma, size=(x.shape[0],x.shape[2]))
return np.multiply(x, factor[:,np.newaxis,:])
def rotation(x):
flip = np.random.choice([-1, 1], size=(x.shape[0],x.shape[2]))
rotate_axis = np.arange(x.shape[2])
np.random.shuffle(rotate_axis)
return flip[:,np.newaxis,:] * x[:,:,rotate_axis]
def scaling_s(x, sigma=0.1, plot=False):
# https://arxiv.org/pdf/1706.00527.pdf
factor = np.random.normal(loc=1., scale=sigma, size=(1, x.shape[1]))
x_ = np.multiply(x, factor[:, :])
if plot:
hlp.plot1d(x, x_, save_file='aug_examples/scal.png')
return x_
def rotation_s(x, plot=False):
flip = np.random.choice([-1], size=(1, x.shape[1]))
rotate_axis = np.arange(x.shape[1])
np.random.shuffle(rotate_axis)
x_ = flip[:, :] * x[:, rotate_axis]
if plot:
hlp.plot1d(x, x_, save_file='aug_examples/rotation_s.png')
return x_
def rotation2d(x, sigma=0.2):
thetas = np.random.normal(loc=0, scale=sigma, size=(x.shape[0]))
c = np.cos(thetas)
s = np.sin(thetas)
ret = np.zeros_like(x)
for i, pat in enumerate(x):
rot = np.array(((c[i], -s[i]), (s[i], c[i])))
ret[i] = np.dot(pat, rot)
return ret
def permutation(x, max_segments=5, seg_mode="equal"):
orig_steps = np.arange(x.shape[1])
num_segs = np.random.randint(1, max_segments, size=(x.shape[0]))
ret = np.zeros_like(x)
for i, pat in enumerate(x):
if num_segs[i] > 1:
if seg_mode == "random":
split_points = np.random.choice(x.shape[1]-2, num_segs[i]-1, replace=False)
split_points.sort()
splits = np.split(orig_steps, split_points)
else:
splits = np.array_split(orig_steps, num_segs[i])
warp = np.concatenate(np.random.permutation(splits)).ravel()
ret[i] = pat[warp]
else:
ret[i] = pat
return ret
def magnitude_warp(x, sigma=0.2, knot=4):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[1])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))
warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T
ret = np.zeros_like(x)
for i, pat in enumerate(x):
li = []
for dim in range(x.shape[2]):
li.append(CubicSpline(warp_steps[:, dim], random_warps[i, :, dim])(orig_steps))
warper = np.array(li).T
ret[i] = pat * warper
return ret
def magnitude_warp_s(x, sigma=0.2, knot=4, plot=False):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[0])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(1, knot + 2, x.shape[1]))
warp_steps = (np.ones((x.shape[1], 1)) * (np.linspace(0, x.shape[0] - 1., num=knot + 2))).T
li = []
for dim in range(x.shape[1]):
li.append(CubicSpline(warp_steps[:, dim], random_warps[0, :, dim])(orig_steps))
warper = np.array(li).T
x_ = x * warper
if plot:
hlp.plot1d(x, x_, save_file='aug_examples/magnitude_warp_s.png')
return x_
def time_warp(x, sigma=0.2, knot=4):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[1])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))
warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T
ret = np.zeros_like(x)
for i, pat in enumerate(x):
for dim in range(x.shape[2]):
time_warp = CubicSpline(warp_steps[:,dim], warp_steps[:,dim] * random_warps[i,:,dim])(orig_steps)
scale = (x.shape[1]-1)/time_warp[-1]
ret[i,:,dim] = np.interp(orig_steps, np.clip(scale*time_warp, 0, x.shape[1]-1), pat[:,dim]).T
return ret
def time_warp_s(x, sigma=0.2, knot=4, plot=False):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[0])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(1, knot + 2, x.shape[1]))
warp_steps = (np.ones((x.shape[1], 1)) * (np.linspace(0, x.shape[0] - 1., num=knot + 2))).T
ret = np.zeros_like(x)
for dim in range(x.shape[1]):
time_warp = CubicSpline(warp_steps[:, dim],
warp_steps[:, dim] * random_warps[0, :, dim])(orig_steps)
scale = (x.shape[0] - 1) / time_warp[-1]
ret[:, dim] = np.interp(orig_steps, np.clip(scale * time_warp, 0, x.shape[0] - 1),
x[:, dim]).T
if plot:
hlp.plot1d(x, ret, save_file='aug_examples/time_warp_s.png')
return ret
def window_slice(x, reduce_ratio=0.9):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
target_len = np.ceil(reduce_ratio*x.shape[1]).astype(int)
if target_len >= x.shape[1]:
return x
starts = np.random.randint(low=0, high=x.shape[1]-target_len, size=(x.shape[0])).astype(int)
ends = (target_len + starts).astype(int)
ret = np.zeros_like(x)
for i, pat in enumerate(x):
for dim in range(x.shape[2]):
ret[i,:,dim] = np.interp(np.linspace(0, target_len, num=x.shape[1]), np.arange(target_len), pat[starts[i]:ends[i],dim]).T
return ret
def window_slice_s(x, reduce_ratio=0.9):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
target_len = np.ceil(reduce_ratio * x.shape[0]).astype(int)
if target_len >= x.shape[0]:
return x
starts = np.random.randint(low=0, high=x.shape[0] - target_len, size=(1)).astype(int)
ends = (target_len + starts).astype(int)
ret = np.zeros_like(x)
for dim in range(x.shape[1]):
ret[:, dim] = np.interp(np.linspace(0, target_len, num=x.shape[0]), np.arange(target_len),
x[starts[0]:ends[0], dim]).T
return ret
def window_warp(x, window_ratio=0.1, scales=[0.5, 2.]):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
warp_scales = np.random.choice(scales, x.shape[0])
warp_size = np.ceil(window_ratio*x.shape[1]).astype(int)
window_steps = np.arange(warp_size)
window_starts = np.random.randint(low=1, high=x.shape[1]-warp_size-1, size=(x.shape[0])).astype(int)
window_ends = (window_starts + warp_size).astype(int)
ret = np.zeros_like(x)
for i, pat in enumerate(x):
for dim in range(x.shape[2]):
start_seg = pat[:window_starts[i],dim]
window_seg = np.interp(np.linspace(0, warp_size-1, num=int(warp_size*warp_scales[i])), window_steps, pat[window_starts[i]:window_ends[i],dim])
end_seg = pat[window_ends[i]:,dim]
warped = np.concatenate((start_seg, window_seg, end_seg))
ret[i,:,dim] = np.interp(np.arange(x.shape[1]), np.linspace(0, x.shape[1]-1., num=warped.size), warped).T
return ret
def window_warp_s(x, window_ratio=0.1, scales=[0.5, 2.]):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
warp_scales = np.random.choice(scales, 1)
warp_size = np.ceil(window_ratio * x.shape[0]).astype(int)
window_steps = np.arange(warp_size)
window_starts = np.random.randint(low=1, high=x.shape[0] - warp_size - 1, size=(1)).astype(int)
window_ends = (window_starts + warp_size).astype(int)
    ret = np.zeros_like(x)
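    # Assumed continuation (the original file is truncated at this point),
    # mirroring window_warp above for a single 2D sample: warp one random
    # window per channel and resample back to the original length.
    for dim in range(x.shape[1]):
        start_seg = x[:window_starts[0], dim]
        window_seg = np.interp(np.linspace(0, warp_size - 1, num=int(warp_size * warp_scales[0])),
                               window_steps, x[window_starts[0]:window_ends[0], dim])
        end_seg = x[window_ends[0]:, dim]
        warped = np.concatenate((start_seg, window_seg, end_seg))
        ret[:, dim] = np.interp(np.arange(x.shape[0]),
                                np.linspace(0, x.shape[0] - 1., num=warped.size), warped)
    return ret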
# coding: utf-8
# ## AI for Medicine Course 1 Week 1 lecture exercises
# <a name="counting-labels"></a>
# # Counting labels
#
# As you saw in the lecture videos, one way to avoid having class imbalance impact the loss function is to weight the losses differently. To choose the weights, you first need to calculate the class frequencies.
#
# For this exercise, you'll just get the count of each label. Later on, you'll use the concepts practiced here to calculate frequencies in the assignment!
# In[1]:
# Import the necessary packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
# Read csv file containing the training data
train_df = pd.read_csv("nih/train-small.csv")
# In[4]:
# Count up the number of instances of each class (drop non-class columns from the counts)
class_counts = train_df.sum().drop(['Image','PatientId'])
print(class_counts)
# In[ ]:
for column in class_counts.keys():
print(f"The class {column} has {train_df[column].sum()} samples")
# In[ ]:
# Plot up the distribution of counts
sns.barplot(class_counts.values, class_counts.index, color='b')
plt.title('Distribution of Classes for Training Dataset', fontsize=15)
plt.xlabel('Number of Patients', fontsize=15)
plt.ylabel('Diseases', fontsize=15)
plt.show()
# <a name="weighted-loss"></a>
# # Weighted Loss function
#
# Below is an example of calculating weighted loss. In the assignment, you will calculate a weighted loss function. This sample code will give you some intuition for what the weighted loss function is doing, and also help you practice some syntax you will use in the graded assignment.
#
# For this example, you'll first define a hypothetical set of true labels and then a set of predictions.
#
# Run the next cell to create the 'ground truth' labels.
# In[ ]:
# Generate an array of 4 binary label values, 3 positive and 1 negative
y_true = np.array(
[[1],
[1],
[1],
[0]])
print(f"y_true: \n{y_true}")
# ### Two models
# To better understand the loss function, you will pretend that you have two models.
# - Model 1 always outputs a 0.9 for any example that it's given.
# - Model 2 always outputs a 0.1 for any example that it's given.
# In[ ]:
# Make model predictions that are always 0.9 for all examples
y_pred_1 = 0.9 * np.ones(y_true.shape)
print(f"y_pred_1: \n{y_pred_1}")
print()
y_pred_2 = 0.1 * np.ones(y_true.shape)
print(f"y_pred_2: \n{y_pred_2}")
# ### Problems with the regular loss function
# The learning goal here is to notice that with a regular loss function (not a weighted loss), the model that always outputs 0.9 has a smaller loss (performs better) than model 2.
# - This is because there is a class imbalance, where 3 out of the 4 labels are 1.
# - If the data were perfectly balanced, (two labels were 1, and two labels were 0), model 1 and model 2 would have the same loss. Each would get two examples correct and two examples incorrect.
# - However, since the data is not balanced, the regular loss function implies that model 1 is better than model 2.
# ### Notice the shortcomings of a regular non-weighted loss
#
# See what loss you get from these two models (model 1 always predicts 0.9, and model 2 always predicts 0.1), see what the regular (unweighted) loss function is for each model.
# In[ ]:
loss_reg_1 = -1 * np.sum(y_true * np.log(y_pred_1)) + -1 * np.sum((1 - y_true) * np.log(1 - y_pred_1))
print(f"loss_reg_1: {loss_reg_1:.4f}")
# In[ ]:
loss_reg_2 = -1 * np.sum(y_true * np.log(y_pred_2)) + -1 * np.sum((1 - y_true) * np.log(1 - y_pred_2))
print(f"loss_reg_2: {loss_reg_2:.4f}")
# In[ ]:
print(f"When the model 1 always predicts 0.9, the regular loss is {loss_reg_1:.4f}")
print(f"When the model 2 always predicts 0.1, the regular loss is {loss_reg_2:.4f}")
# Notice that the loss function gives a greater loss when the predictions are always 0.1, because the data is imbalanced, and has three labels of `1` but only one label for `0`.
#
# Given a class imbalance with more positive labels, the regular loss function implies that the model with the higher prediction of 0.9 performs better than the model with the lower prediction of 0.1
# ### How a weighted loss treats both models the same
# With a weighted loss function, you will get the same weighted loss when the predictions are all 0.9 versus when the predictions are all 0.1.
# - Notice how a prediction of 0.9 is 0.1 away from the positive label of 1.
# - Also notice how a prediction of 0.1 is 0.1 away from the negative label of 0
# - So model 1 and 2 are "symmetric" along the midpoint of 0.5, if you plot them on a number line between 0 and 1.
# ### Weighted Loss Equation
# Calculate the loss for the zero-th label (column at index 0)
#
# - The loss is made up of two terms. To make it easier to read the code, you will calculate each of these terms separately. We are giving each of these two terms a name for explanatory purposes, but these are not officially called $loss_{pos}$ or $loss_{neg}$
#
# - $loss_{pos}$: we'll use this to refer to the loss where the actual label is positive (the positive examples).
# - $loss_{neg}$: we'll use this to refer to the loss where the actual label is negative (the negative examples).
#
# $$ loss^{(i)} = loss_{pos}^{(i)} + loss_{neg}^{(i)} $$
#
# $$loss_{pos}^{(i)} = -1 \times weight_{pos}^{(i)} \times y^{(i)} \times log(\hat{y}^{(i)})$$
#
# $$loss_{neg}^{(i)} = -1 \times weight_{neg}^{(i)} \times (1- y^{(i)}) \times log(1 - \hat{y}^{(i)})$$
# Since this sample dataset is small enough, you can calculate the positive weight to be used in the weighted loss function. To get the positive weight, count how many NEGATIVE labels are present, divided by the total number of examples.
#
# In this case, there is one negative label, and four total examples.
#
# Similarly, the negative weight is the fraction of positive labels.
#
# Run the next cell to define positive and negative weights.
# In[ ]:
# calculate the positive weight as the fraction of negative labels
w_p = 1/4
# calculate the negative weight as the fraction of positive labels
w_n = 3/4
print(f"positive weight w_p: {w_p}")
print(f"negative weight w_n {w_n}")
# ### Model 1 weighted loss
# Run the next two cells to calculate the two loss terms separately.
#
# Here, `loss_1_pos` and `loss_1_neg` are calculated using the `y_pred_1` predictions.
# In[ ]:
# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'
loss_1_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_1 ))
print(f"loss_1_pos: {loss_1_pos:.4f}")
# In[ ]:
# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'
loss_1_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_1 ))
print(f"loss_1_neg: {loss_1_neg:.4f}")
# In[ ]:
# Sum positive and negative losses to calculate total loss
loss_1 = loss_1_pos + loss_1_neg
print(f"loss_1: {loss_1:.4f}")
# ### Model 2 weighted loss
#
# Now do the same calculations for when the predictions are from `y_pred_2`. Calculate the two terms of the weighted loss function and add them together.
# In[ ]:
# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'
loss_2_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_2))
print(f"loss_2_pos: {loss_2_pos:.4f}")
# In[ ]:
# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'
loss_2_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_2))
print(f"loss_2_neg: {loss_2_neg:.4f}")
# In[ ]:
# Sum positive and negative losses to calculate total loss when the prediction is y_pred_2
loss_2 = loss_2_pos + loss_2_neg
print(f"loss_2: {loss_2:.4f}")
# ### Compare model 1 and model 2 weighted loss
# In[ ]:
print(f"When the model always predicts 0.9, the total loss is {loss_1:.4f}")
print(f"When the model always predicts 0.1, the total loss is {loss_2:.4f}")
# ### What do you notice?
# Since you used a weighted loss, the calculated loss is the same whether the model always predicts 0.9 or always predicts 0.1.
#
# You may have also noticed that when you calculate each term of the weighted loss separately, there is a bit of symmetry when comparing between the two sets of predictions.
# In[ ]:
print(f"loss_1_pos: {loss_1_pos:.4f} \t loss_1_neg: {loss_1_neg:.4f}")
print()
print(f"loss_2_pos: {loss_2_pos:.4f} \t loss_2_neg: {loss_2_neg:.4f}")
# Even though there is a class imbalance, where there are 3 positive labels but only one negative label, the weighted loss accounts for this by giving more weight to the negative label than to the positive label.
# ### Weighted Loss for more than one class
#
# In this week's assignment, you will calculate the multi-class weighted loss (when there is more than one disease class that your model is learning to predict). Here, you can practice working with 2D numpy arrays, which will help you implement the multi-class weighted loss in the graded assignment.
#
# You will work with a dataset that has two disease classes (two columns)
# In[ ]:
# View the labels (true values) that you will practice with
y_true = np.array(
[[1,0],
[1,0],
[1,0],
[1,0],
[0,1]
])
y_true
# ### Choosing axis=0 or axis=1
# You will use `numpy.sum` to count the number of times column `0` has the value 0.
# First, notice the difference when you set axis=0 versus axis=1
# In[ ]:
# See what happens when you set axis=0
print(f"using axis = 0 {np.sum(y_true,axis=0)}")
# Compare this to what happens when you set axis=1
print(f"using axis = 1 {np.sum(y_true,axis=1)}")
# Notice that if you choose `axis=0`, the sum is taken for each of the two columns. This is what you want to do in this case. If you set `axis=1`, the sum is taken for each row.
# ### Calculate the weights
# Previously, you visually inspected the data to calculate the fraction of negative and positive labels. Here, you can do this programmatically.
# In[ ]:
# set the positive weights as the fraction of negative labels (0) for each class (each column)
w_p = np.sum(y_true == 0, axis=0) / y_true.shape[0]
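# Assumed continuation of this cell (the notebook is truncated here): the negative
# weight is the fraction of positive labels, mirroring the definition given above.
w_n = np.sum(y_true == 1, axis=0) / y_true.shape[0]
print(f"positive weights w_p: {w_p}")
print(f"negative weights w_n: {w_n}")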
import json
import bz2
import gzip
import _pickle as cPickle
import gym
import numpy as np
import quaternion
import skimage.morphology
import habitat
from envs.utils.fmm_planner import FMMPlanner
from constants import coco_categories
import envs.utils.pose as pu
class ObjectGoal_Env(habitat.RLEnv):
"""The Object Goal Navigation environment class. The class is responsible
for loading the dataset, generating episodes, and computing evaluation
metrics.
"""
def __init__(self, args, rank, config_env, dataset):
self.args = args
self.rank = rank
super().__init__(config_env, dataset)
# Loading dataset info file
self.split = config_env.DATASET.SPLIT
self.episodes_dir = config_env.DATASET.EPISODES_DIR.format(
split=self.split)
if args.custom_eps:
with open("{}/train_episode_data.json".format(args.custom_eps), 'r') as f:
episodes_all = json.load(f)
self.episodes_all = {}
for ep in episodes_all:
if ep["scene"] in self.episodes_all:
self.episodes_all[ep["scene"]].append(ep)
else:
self.episodes_all[ep["scene"]] = [ep]
dataset_info_file = self.episodes_dir + \
"{split}_info.pbz2".format(split=self.split)
with bz2.BZ2File(dataset_info_file, 'rb') as f:
self.dataset_info = cPickle.load(f)
# Specifying action and observation space
self.action_space = gym.spaces.Discrete(3)
self.observation_space = gym.spaces.Box(0, 255,
(3, args.frame_height,
args.frame_width),
dtype='uint8')
# Initializations
self.episode_no = 0
# Scene info
self.last_scene_path = None
self.scene_path = None
self.scene_name = None
# Episode Dataset info
self.eps_data = None
self.eps_data_idx = None
self.gen_ep_idx = 1
self.gt_planner = None
self.object_boundary = None
self.goal_idx = None
self.goal_name = None
self.map_obj_origin = None
self.starting_loc = None
self.starting_distance = None
if args.eval and args.shuffle:
self.shuffled_indices = np.arange(args.num_eval_episodes)
np.random.shuffle(self.shuffled_indices)
# Episode tracking info
self.curr_distance = None
self.prev_distance = None
self.timestep = None
self.stopped = None
self.path_length = None
self.last_sim_location = None
self.trajectory_states = []
self.info = {}
self.info['distance_to_goal'] = None
self.info['spl'] = None
self.info['success'] = None
def load_new_episode(self):
"""The function loads a fixed episode from the episode dataset. This
function is used for evaluating a trained model on the val split.
"""
args = self.args
self.scene_path = self.habitat_env.sim.config.SCENE
scene_name = self.scene_path.split("/")[-1].split(".")[0]
if self.scene_path != self.last_scene_path:
if not args.testset:
episodes_file = self.episodes_dir + \
"content/{}_episodes.json.gz".format(scene_name)
print("Loading episodes from: {}".format(episodes_file))
with gzip.open(episodes_file, 'r') as f:
self.eps_data = json.loads(
f.read().decode('utf-8'))["episodes"]
else:
episodes_file = self.episodes_dir + \
"content/{}_test_episodes.json".format(scene_name)
print("Loading episodes from: {}".format(episodes_file))
with open(episodes_file, 'r') as f:
self.eps_data = json.load(f)
self.eps_data_idx = 0
self.last_scene_path = self.scene_path
# Load episode info
if self.args.shuffle:
episode = self.eps_data[self.shuffled_indices[self.eps_data_idx]]
else:
episode = self.eps_data[self.eps_data_idx]
self.info["episode_data"] = episode
self.eps_data_idx += 1
self.eps_data_idx = self.eps_data_idx % len(self.eps_data)
pos = episode["start_position"]
rot = quaternion.from_float_array(episode["start_rotation"])
goal_name = episode["object_category"]
goal_idx = episode["object_id"]
floor_idx = episode["floor_id"]
# Load scene info
scene_info = self.dataset_info[scene_name]
sem_map = scene_info[floor_idx]['sem_map']
map_obj_origin = scene_info[floor_idx]['origin']
# Setup ground truth planner
object_boundary = args.success_dist
map_resolution = args.map_resolution
selem = skimage.morphology.disk(2)
traversible = skimage.morphology.binary_dilation(
sem_map[0], selem) != True
traversible = 1 - traversible
planner = FMMPlanner(traversible)
selem = skimage.morphology.disk(
int(object_boundary * 100. / map_resolution))
goal_map = skimage.morphology.binary_dilation(
sem_map[goal_idx + 1], selem) != True
goal_map = 1 - goal_map
planner.set_multi_goal(goal_map)
# Get starting loc in GT map coordinates
x = -pos[2]
y = -pos[0]
min_x, min_y = map_obj_origin / 100.0
map_loc = int((-y - min_y) * 20.), int((-x - min_x) * 20.)
self.gt_planner = planner
self.starting_loc = map_loc
self.object_boundary = object_boundary
self.goal_idx = goal_idx
self.goal_name = goal_name
self.map_obj_origin = map_obj_origin
self.starting_distance = self.gt_planner.fmm_dist[self.starting_loc]\
/ 20.0 + self.object_boundary
self.info["episode_data"]["shortest_dist"] = self.starting_distance
self.prev_distance = self.starting_distance
self._env.sim.set_agent_state(pos, rot)
self.info["sim_pos"] = pos
self.info["sim_rot"] = rot
self.info["scene"] = scene_name
self.info["floor_idx"] = floor_idx
# The following two should match approximately
#print(self.starting_loc)
#print(self.sim_continuous_to_sim_map(self.get_sim_location()))
self.info['gt_pos'] = self.sim_continuous_to_sim_map(self.get_sim_location())
obs = self._env.sim.get_observations_at(pos, rot)
return obs
def load_incomplete_episode(self):
args = self.args
self.scene_path = self.habitat_env.sim.config.SCENE
scene_name = self.scene_path.split("/")[-1].split(".")[0]
if self.scene_path != self.last_scene_path:
print("Loading episodes from: {}".format(scene_name))
self.eps_data_idx = 0
self.last_scene_path = self.scene_path
episode = self.episodes_all[scene_name][self.eps_data_idx]
self.info["episode_data"] = episode
self.eps_data_idx += 1
self.eps_data_idx = self.eps_data_idx % len(self.episodes_all[scene_name])
pos = episode["sim_pos"]
rot = quaternion.from_rotation_vector(episode["sim_rot"])
goal_name = episode["goal_name"]
goal_idx = episode["goal_cat_id"]
floor_idx = episode["floor_idx"]
# Load scene info
scene_info = self.dataset_info[scene_name]
sem_map = scene_info[floor_idx]['sem_map']
map_obj_origin = scene_info[floor_idx]['origin']
# Setup ground truth planner
object_boundary = args.success_dist
map_resolution = args.map_resolution
selem = skimage.morphology.disk(2)
traversible = skimage.morphology.binary_dilation(
sem_map[0], selem) != True
traversible = 1 - traversible
planner = FMMPlanner(traversible)
selem = skimage.morphology.disk(
int(object_boundary * 100. / map_resolution))
goal_map = skimage.morphology.binary_dilation(
sem_map[goal_idx + 1], selem) != True
goal_map = 1 - goal_map
planner.set_multi_goal(goal_map)
# Get starting loc in GT map coordinates
x = -pos[2]
y = -pos[0]
min_x, min_y = map_obj_origin / 100.0
map_loc = int((-y - min_y) * 20.), int((-x - min_x) * 20.)
self.gt_planner = planner
self.starting_loc = map_loc
self.object_boundary = object_boundary
self.goal_idx = goal_idx
self.goal_name = goal_name
self.map_obj_origin = map_obj_origin
self.starting_distance = self.gt_planner.fmm_dist[self.starting_loc]\
/ 20.0 + self.object_boundary
self.info["episode_data"]["shortest_dist"] = self.starting_distance
self.prev_distance = self.starting_distance
self._env.sim.set_agent_state(pos, rot)
self.info["sim_pos"] = pos
self.info["sim_rot"] = rot
# The following two should match approximately
#print(self.starting_loc)
#print(self.sim_continuous_to_sim_map(self.get_sim_location()))
self.info['gt_pos'] = self.sim_continuous_to_sim_map(self.get_sim_location())
obs = self._env.sim.get_observations_at(pos, rot)
return obs
def generate_new_episode(self):
"""The function generates a random valid episode. This function is used
for training a model on the train split.
"""
args = self.args
self.scene_path = self.habitat_env.sim.config.SCENE
scene_name = self.scene_path.split("/")[-1].split(".")[0]
scene_info = self.dataset_info[scene_name]
map_resolution = args.map_resolution
floor_idx = np.random.randint(len(scene_info.keys()))
floor_height = scene_info[floor_idx]['floor_height']
sem_map = scene_info[floor_idx]['sem_map']
map_obj_origin = scene_info[floor_idx]['origin']
cat_counts = sem_map.sum(2).sum(1)
possible_cats = list(np.arange(6))
for i in range(6):
if cat_counts[i + 1] == 0:
possible_cats.remove(i)
object_boundary = args.success_dist
loc_found = False
while not loc_found:
            if len(possible_cats) == 0:
                # No valid goal categories on this floor: sample a different floor
                # and rebuild the candidate category list before retrying
                print("No valid objects for {}".format(floor_height))
                floor_idx = np.random.randint(len(scene_info.keys()))
                floor_height = scene_info[floor_idx]['floor_height']
                sem_map = scene_info[floor_idx]['sem_map']
                map_obj_origin = scene_info[floor_idx]['origin']
                cat_counts = sem_map.sum(2).sum(1)
                possible_cats = [i for i in range(6) if cat_counts[i + 1] > 0]
                continue
goal_idx = np.random.choice(possible_cats)
for key, value in coco_categories.items():
if value == goal_idx:
goal_name = key
selem = skimage.morphology.disk(2)
traversible = skimage.morphology.binary_dilation(
sem_map[0], selem) != True
traversible = 1 - traversible
planner = FMMPlanner(traversible)
selem = skimage.morphology.disk(
int(object_boundary * 100. / map_resolution))
goal_map = skimage.morphology.binary_dilation(
sem_map[goal_idx + 1], selem) != True
goal_map = 1 - goal_map
planner.set_multi_goal(goal_map)
m1 = sem_map[0] > 0
m2 = planner.fmm_dist > (args.min_d - object_boundary) * 20.0
m3 = planner.fmm_dist < (args.max_d - object_boundary) * 20.0
possible_starting_locs = np.logical_and(m1, m2)
possible_starting_locs = np.logical_and(
possible_starting_locs, m3) * 1.
if possible_starting_locs.sum() != 0:
loc_found = True
else:
print("Invalid object: {} / {} / {}".format(
scene_name, floor_height, goal_name))
possible_cats.remove(goal_idx)
scene_info[floor_idx]["sem_map"][goal_idx + 1, :, :] = 0.
self.dataset_info[scene_name][floor_idx][
"sem_map"][goal_idx + 1, :, :] = 0.
loc_found = False
while not loc_found:
pos = self._env.sim.sample_navigable_point()
x = -pos[2]
y = -pos[0]
min_x, min_y = map_obj_origin / 100.0
map_loc = int((-y - min_y) * 20.), int((-x - min_x) * 20.)
if abs(pos[1] - floor_height) < args.floor_thr / 100.0 and \
possible_starting_locs[map_loc[0], map_loc[1]] == 1:
loc_found = True
agent_state = self._env.sim.get_agent_state(0)
rotation = agent_state.rotation
rvec = quaternion.as_rotation_vector(rotation)
rvec[1] = np.random.rand() * 2 * np.pi
rot = quaternion.from_rotation_vector(rvec)
self.gt_planner = planner
self.starting_loc = map_loc
self.object_boundary = object_boundary
self.goal_idx = goal_idx
self.goal_name = goal_name
self.map_obj_origin = map_obj_origin
self.starting_distance = self.gt_planner.fmm_dist[self.starting_loc] \
/ 20.0 + self.object_boundary
self.prev_distance = self.starting_distance
self._env.sim.set_agent_state(pos, rot)
self.info["sim_pos"] = pos
self.info["sim_rot"] = quaternion.as_float_array(rot)
self.info["episode_id"] = self.gen_ep_idx
self.gen_ep_idx += 1
self.info["scene"] = scene_name
self.info["floor_idx"] = floor_idx
self.info["goal_name"] = goal_name
# The following two should match approximately
# print(starting_loc)
# print(self.sim_continuous_to_sim_map(self.get_sim_location()))
self.info['gt_pos'] = self.sim_continuous_to_sim_map(self.get_sim_location())
obs = self._env.sim.get_observations_at(pos, rot)
return obs
def sim_map_to_sim_continuous(self, coords):
"""Converts ground-truth 2D Map coordinates to absolute Habitat
simulator position and rotation.
"""
agent_state = self._env.sim.get_agent_state(0)
y, x = coords
min_x, min_y = self.map_obj_origin / 100.0
cont_x = x / 20. + min_x
cont_y = y / 20. + min_y
agent_state.position[0] = cont_y
agent_state.position[2] = cont_x
rotation = agent_state.rotation
rvec = quaternion.as_rotation_vector(rotation)
if self.args.train_single_eps:
rvec[1] = 0.0
else:
rvec[1] = np.random.rand() * 2 * np.pi
rot = quaternion.from_rotation_vector(rvec)
return agent_state.position, rot
def sim_continuous_to_sim_map(self, sim_loc):
"""Converts absolute Habitat simulator pose to ground-truth 2D Map
coordinates.
"""
x, y, o = sim_loc
min_x, min_y = self.map_obj_origin / 100.0
x, y = int((-x - min_x) * 20.), int((-y - min_y) * 20.)
o = np.rad2deg(o) + 180.0
return y, x, o
def reset(self):
"""Resets the environment to a new episode.
Returns:
obs (ndarray): RGBD observations (4 x H x W)
info (dict): contains timestep, pose, goal category and
evaluation metric info
"""
args = self.args
new_scene = self.episode_no % args.num_train_episodes == 0
self.episode_no += 1
# Initializations
self.timestep = 0
self.stopped = False
self.path_length = 1e-5
self.trajectory_states = []
if new_scene:
obs = super().reset()
self.scene_name = self.habitat_env.sim.config.SCENE
print("Changing scene: {}/{}".format(self.rank, self.scene_name))
self.scene_path = self.habitat_env.sim.config.SCENE
if args.gen_episode:
obs = self.generate_new_episode()
elif args.custom_eps:
obs = self.load_incomplete_episode()
elif self.split == "val":
obs = self.load_new_episode()
else:
obs = self.generate_new_episode()
rgb = obs['rgb'].astype(np.uint8)
depth = obs['depth']
        state = np.concatenate((rgb, depth), axis=2)
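        # Assumed continuation (the source is truncated here): the docstring above
        # specifies channel-first (4 x H x W) RGBD observations plus the info dict;
        # any further per-episode bookkeeping from the original is omitted.
        state = state.transpose(2, 0, 1)
        return state, self.info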
from . import Image
import matplotlib.pyplot as plt
import numpy as np
import re
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import SkyCoord
from .fluxes import ApertureFluxes
from . import viz
from astropy.io import fits
from .telescope import Telescope
from . import utils
from astroquery.mast import Catalogs
from astropy.wcs import WCS, utils as wcsutils
import pandas as pd
from scipy.stats import binned_statistic
from .blocks.psf import Gaussian2D, Moffat2D,cutouts
from .console_utils import INFO_LABEL
from astropy.stats import sigma_clipped_stats
from astropy.io.fits.verify import VerifyWarning
from datetime import datetime
import warnings
from .blocks.registration import distances
import requests
import shutil
from pathlib import Path
from . import twirl
import io
from .utils import fast_binning, z_scale
from .console_utils import info
warnings.simplefilter('ignore', category=VerifyWarning)
class Observation(ApertureFluxes):
"""
Class to load and analyze photometry products
Parameters
----------
    photfile : str
        path of the `.phot` file to load
    ignore_time : bool, optional
        if True, the info messages about the automatic BJD TDB time conversion are silenced, by default False
"""
def __init__(self, photfile, ignore_time=False):
super().__init__(photfile)
utils.remove_sip(self.xarray.attrs)
self.phot = photfile
self.telescope = Telescope.from_name(self.telescope)
self.gaia_data = None
self.tic_data = None
self.wcs = WCS(utils.remove_arrays(self.xarray.attrs))
self._meridian_flip = None
has_bjd = hasattr(self.xarray, "bjd_tdb")
if has_bjd:
has_bjd = ~np.all(self.xarray.bjd_tdb.isnull().values)
if not has_bjd:
try:
self.compute_bjd()
if not ignore_time:
print(f"{INFO_LABEL} Time converted to BJD TDB")
except:
if not ignore_time:
print(f"{INFO_LABEL} Could not convert time to BJD TDB")
def _check_stack(self):
        assert 'stack' in self.xarray, "No stack found"
# Loaders and savers (files and data)
# ------------------------------------
def __copy__(self):
copied = Observation(self.xarray.copy(), ignore_time=True)
copied.phot = self.phot
copied.telescope = self.telescope
copied.gaia_data = self.gaia_data
copied.tic_data = self.tic_data
copied.wcs = self.wcs
return copied
def copy(self):
return self.__copy__()
def to_csv(self, destination, sep=" "):
"""Export a typical csv of the observation's data
Parameters
----------
destination : str
Path of the csv file to save
sep : str, optional
separation string within csv, by default " "
"""
df = pd.DataFrame(
{
"BJD-TDB" if self.time_format == "bjd_tdb" else "JD-UTC": self.time,
"DIFF_FLUX": self.diff_flux,
"ERROR": self.diff_error,
"dx_MOVE": self.dx,
"dy_MOVE": self.dy,
"FWHM": self.fwhm,
"FWHMx": self.fwhm,
"FWHMy": self.fwhm,
"SKYLEVEL": self.sky,
"AIRMASS": self.airmass,
"EXPOSURE": self.exptime,
}
)
df.to_csv(destination, sep=sep, index=False)
def save(self, destination=None):
"""Save current observation
Parameters
----------
destination : str, optional
path to phot file, by default None
"""
self.xarray.to_netcdf(self.phot if destination is None else destination)
info(f"saved {self.phot}")
def export_stack(self, destination, **kwargs):
"""Export stack to FITS file
Parameters
----------
destination : str
path of FITS to export
"""
header = {name: value for name, value in self.xarray.attrs.items() if name.isupper()}
data = self.stack
hdul = fits.HDUList([fits.PrimaryHDU(data=data, header=fits.Header(header))])
hdul.writeto(destination, **kwargs)
def import_stack(self, fitsfile):
"""Import FITS as stack to current obs (including WCS) - do not forget to save to keep it
Parameters
----------
fitsfile : str
path of FITS stack to import
"""
data = fits.getdata(fitsfile)
header = fits.getheader(fitsfile)
self.wcs = WCS(header)
self.xarray.attrs.update(utils.header_to_cdf4_dict(header))
self.xarray["stack"] = (('w', 'h'), data)
# Convenience
# -----------
@property
def skycoord(self):
"""astropy SkyCoord object for the target
"""
return SkyCoord(self.RA, self.DEC, frame='icrs', unit=(self.telescope.ra_unit, self.telescope.dec_unit))
@property
def simbad_url(self):
"""
[notebook feature] clickable simbad query url for specified target
"""
from IPython.core.display import display, HTML
display(HTML('<a href="{}">{}</a>'.format(self.simbad, self.simbad)))
@property
def simbad(self):
"""
simbad query url for specified target
"""
return f"http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={self.RA}+{self.DEC}&CooFrame=FK5&CooEpoch=2000&CooEqui=" \
"2000&CooDefinedFrames=none&Radius=2&Radius.unit=arcmin&submit=submit+query&CoordList="
@property
def denominator(self):
"""A conveniant name for the observation: {telescope}_{date}_{name}_{filter}
Returns
-------
[type]
[description]
"""
return f"{self.telescope.name}_{self.date}_{self.name}_{self.filter}"
@property
def meridian_flip(self):
"""Meridian flip time. Supposing EAST and WEST encode orientation
"""
if self._meridian_flip is not None:
return self._meridian_flip
else:
has_flip = hasattr(self.xarray, "flip")
if has_flip:
try:
np.all(np.isnan(self.flip))
return None
except TypeError:
pass
if has_flip:
if "WEST" in self.flip:
flip = (self.flip.copy() == "WEST").astype(int)
diffs = np.abs(np.diff(flip))
if np.any(diffs):
self._meridian_flip = self.time[np.argmax(diffs).flatten()]
else:
self._meridian_flip = None
return self._meridian_flip
else:
return None
else:
return None
# TESS specific methods
# --------------------
@property
def tic_id(self):
"""TIC id from digits found in target name
"""
try:
            nb = re.findall(r'\d*\.?\d+', self.name)
df = pd.read_csv("https://exofop.ipac.caltech.edu/tess/download_toi?toi=%s&output=csv" % nb[0])
tic = df["TIC ID"][0]
return f"{tic}"
except KeyError:
print('TIC ID not found')
return None
@property
def gaia_from_toi(self):
"""Gaia id from TOI id if TOI is in target name
"""
if self.tic_id is not None:
tic_id = ("TIC " + self.tic_id)
catalog_data = Catalogs.query_object(tic_id, radius=.001, catalog="TIC")
return f"{catalog_data['GAIA'][0]}"
else:
return None
@property
def tfop_prefix(self):
return f"TIC{self.tic_id}_{self.date}_{self.telescope.name}_{self.filter}"
# Methods
# -------
def compute_bjd(self, version="prose"):
"""Compute BJD_tdb based on current time
Once this is done self.time is BJD tdb and time format can be checked in self.time_format. Note that half the
exposure time is added to the JD times before conversion. The precision of the returned time is not
guaranteed, especially with "prose" method (~30ms). "eastman" option accuracy is 20ms. See
http://astroutils.astronomy.ohio-state.edu/time/utc2bjd.html for more details.
Parameters
----------
        version : str, optional
- "prose": uses an astropy method
- "eastman": uses the web applet http://astroutils.astronomy.ohio-state.edu (Eastman et al. 2010) [requires
an internet connection]
by default "prose"
"""
assert self.telescope is not None
assert self.skycoord is not None
exposure_days = self.xarray.exposure.values/60/60/24
# For backward compatibility
# --------------------------
if "time_format" not in self.xarray.attrs:
self.xarray.attrs["time_format"] = "jd_utc"
self.xarray["jd_utc"] = ("time", self.time)
if "jd_utc" not in self:
self.xarray["jd_utc"] = ("time", self.jd)
            self.xarray = self.xarray.drop("jd")
# -------------------------
if version == "prose":
time = Time(self.jd_utc + exposure_days/2, format="jd", scale="utc", location=self.telescope.earth_location).tdb
light_travel_tbd = time.light_travel_time(self.skycoord, location=self.telescope.earth_location)
bjd_time = (time + light_travel_tbd).value
elif version == "eastman":
bjd_time = utils.jd_to_bjd(self.jd_utc + exposure_days/2, self.skycoord.ra.deg, self.skycoord.dec.deg)
self.xarray = self.xarray.assign_coords(time=bjd_time)
self.xarray["bjd_tdb"] = ("time", bjd_time)
self.xarray.attrs["time_format"] = "bjd_tdb"
# Catalog queries
# ---------------
def query_gaia(self, limit=-1, cone_radius=None):
"""Query gaia catalog for stars in the field
"""
from astroquery.gaia import Gaia
Gaia.ROW_LIMIT = limit
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
gaia_query = Gaia.cone_search_async(coord, radius, verbose=False, )
self.gaia_data = gaia_query.get_results()
self.gaia_data.sort("phot_g_mean_flux", reverse=True)
delta_years = (utils.datetime_to_years(datetime.strptime(self.date, "%Y%m%d")) - \
self.gaia_data["ref_epoch"].data.data) * u.year
dra = delta_years * self.gaia_data["pmra"].to(u.deg / u.year)
ddec = delta_years * self.gaia_data["pmdec"].to(u.deg / u.year)
skycoords = SkyCoord(
ra=self.gaia_data['ra'].quantity + dra,
dec=self.gaia_data['dec'].quantity + ddec,
pm_ra_cosdec=self.gaia_data['pmra'],
pm_dec=self.gaia_data['pmdec'],
radial_velocity=self.gaia_data['radial_velocity'],
obstime=Time(2015.0, format='decimalyear'))
gaias = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs)).T
gaias[np.any(np.isnan(gaias), 1), :] = [0, 0]
self.gaia_data["x"], self.gaia_data["y"] = gaias.T
inside = np.all((np.array([0, 0]) < gaias) & (gaias < np.array(self.stack.shape)), 1)
self.gaia_data = self.gaia_data[np.argwhere(inside).squeeze()]
w, h = self.stack.shape
if np.abs(np.mean(self.gaia_data["x"])) > w or np.abs(np.mean(self.gaia_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
def query_tic(self,cone_radius=None):
"""Query TIC catalog (through MAST) for stars in the field
"""
from astroquery.mast import Catalogs
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
self.tic_data = Catalogs.query_region(coord, radius, "TIC", verbose=False)
self.tic_data.sort("Jmag")
skycoords = SkyCoord(
ra=self.tic_data['ra'],
dec=self.tic_data['dec'], unit="deg")
self.tic_data["x"], self.tic_data["y"] = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs))
w, h = self.stack.shape
if np.abs(np.mean(self.tic_data["x"])) > w or np.abs(np.mean(self.tic_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
@property
def gaia_target(self):
return None
@gaia_target.setter
def gaia_target(self, gaia_id):
"""Set target with a gaia id
Parameters
----------
gaia_id : int
gaia id
"""
if self.gaia_data is None:
self.query_gaia()
_ = self.gaia_data.to_pandas()[["source_id", "x", "y"]].to_numpy()
ids = _[:, 0]
positions = _[:, 1:3]
gaia_i = np.argmin(np.abs(gaia_id - ids))
self.target = np.argmin(np.power(positions[gaia_i, :] - self.stars[:, ::-1], 2).sum(1))
# Plot
# ----
def show(self, size=10, flip=False, zoom=False, contrast=0.05, wcs=False, cmap="Greys_r", sigclip=None,vmin=None,vmax=None):
"""Show stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
            whether to flip the image along both axes, by default False
zoom : bool, optional
whether to include a zoom inlay in the image, by default False
contrast : float, optional
contrast for the Zscale of image, by default 0.05
wcs : bool, optional
            whether to show the grid and axes in world coordinates
"""
if self.target == -1:
zoom = False
self._check_stack()
fig = plt.figure(figsize=(size, size))
fig.patch.set_facecolor('white')
image = self.stack.copy()
if flip:
image = image[::-1, ::-1]
if sigclip is not None:
mean, median, std = sigma_clipped_stats(image)
image[image - median < 2 * std] = median
if wcs:
ax = plt.subplot(projection=self.wcs, label='overlays')
else:
ax = fig.add_subplot(111)
if all([vmin, vmax]) is False:
_ = ax.imshow(utils.z_scale(image,c=contrast), cmap=cmap, origin="lower")
else:
_ = ax.imshow(image, cmap=cmap, origin="lower",vmin=vmin,vmax=vmax)
if wcs:
ax.coords.grid(True, color='white', ls='solid', alpha=0.3)
ax.coords[0].set_axislabel('Galactic Longitude')
ax.coords[1].set_axislabel('Galactic Latitude')
overlay = ax.get_coords_overlay('fk5')
overlay.grid(color='white', ls='--', alpha=0.3)
overlay[0].set_axislabel('Right Ascension (J2000)')
overlay[1].set_axislabel('Declination (J2000)')
def _check_show(self, **kwargs):
axes = plt.gcf().axes
if len(axes) == 0:
self.show(**kwargs)
def show_stars(self, size=10, view=None, n=None, flip=False,
comp_color="yellow", color=[0.51, 0.86, 1.], stars=None, legend=True, **kwargs):
"""Show detected stars over stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
whether to flip image, by default False
view : str, optional
"all" to see all stars OR "reference" to have target and comparison stars hilighted, by default None
n : int, optional
max number of stars to show, by default None,
Raises
------
AssertionError
[description]
"""
self._check_show(flip=flip, size=size, **kwargs)
if stars is None:
stars = self.stars
if n is not None:
if view == "reference":
raise AssertionError("'n_stars' kwargs is incompatible with 'reference' view that will display all stars")
else:
n = len(stars)
stars = stars[0:n]
if view is None:
view = "reference" if 'comps' in self else "all"
image_size = np.array(np.shape(self.stack))[::-1]
if flip:
stars = np.array(image_size) - stars
if view == "all":
viz.plot_marks(*stars.T, np.arange(len(stars)), color=color)
if "stars" in self.xarray:
others = np.arange(n, len(self.stars))
others = np.setdiff1d(others, self.target)
viz.plot_marks(*self.stars[others].T, alpha=0.4, color=color)
elif view == "reference":
x = self.xarray.isel(apertures=self.aperture)
assert 'comps' in self, "No differential photometry"
comps = x.comps.values
others = np.setdiff1d(np.arange(len(stars)), x.comps.values)
others = np.setdiff1d(others, self.target)
_ = viz.plot_marks(*stars[self.target], self.target, color=color)
_ = viz.plot_marks(*stars[comps].T, comps, color=comp_color)
_ = viz.plot_marks(*stars[others].T, alpha=0.4, color=color)
if legend:
colors = [comp_color, color]
texts = ["Comparison stars", "Target"]
viz.circles_legend(colors, texts)
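# Example (hypothetical usage, assuming `obs` is an Observation with
# differential photometry computed): highlight the target and its comparison
# stars over the stack image
# obs.show_stars(size=8, view="reference")
# plt.savefig("stars.png")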
def show_gaia(self, color="yellow", alpha=1, n=None, idxs=True, limit=-1, fontsize=8, align=False):
"""Overlay Gaia objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "yellow"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show, by default None for all stars
idxs : bool, optional
whether to show gaia ids, by default True
"""
self._check_show()
if self.gaia_data is None:
self.query_gaia(limit=limit)
gaias = np.vstack([self.gaia_data["x"].data, self.gaia_data["y"].data]).T
defined = ~np.any(np.isnan(gaias), 1)
gaias = gaias[defined]
labels = self.gaia_data["source_id"].data.astype(str)[defined]
if align:
X = twirl.find_transform(gaias[0:30], self.stars, n=15)
gaias = twirl.affine_transform(X)(gaias)
labels = [f"{_id[0:len(_id) // 2]}\n{_id[len(_id) // 2::]}" for _id in labels]
_ = viz.plot_marks(*gaias.T, labels if idxs else None, color=color, alpha=alpha, n=n, position="top",
fontsize=fontsize)
def show_tic(self, color="white", alpha=1, n=None, idxs=True, align=True):
"""Overlay TIC objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "white"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show, by default None for all stars
idxs : bool, optional
whether to show TIC ids, by default True
"""
self._check_show()
if self.tic_data is None:
self.query_tic()
x = self.tic_data["x"].data
y = self.tic_data["y"].data
tics = np.vstack([x, y]).T
ID = self.tic_data["ID"].data
if align:
X = twirl.find_transform(tics[0:30], self.stars, n=15)
tics = twirl.affine_transform(X)(tics)
_ = viz.plot_marks(*tics.T, ID if idxs else None, color=color, alpha=alpha, n=n, position="top", fontsize=9, offset=10)
def show_cutout(self, star=None, size=200, marks=True,**kwargs):
"""
Show a zoomed cutout around a detected star or coordinates
Parameters
----------
star : int, tuple, list or np.ndarray, optional
detected star id or (x, y) coordinate, by default None
size : int, optional
side size of square cutout in pixel, by default 200
"""
if star is None:
x, y = self.stars[self.target]
elif isinstance(star, int):
x, y = self.stars[star]
elif isinstance(star, (tuple, list, np.ndarray)):
x, y = star
else:
raise ValueError("star type not understood")
self.show(**kwargs)
plt.xlim(np.array([-size / 2, size / 2]) + x)
plt.ylim(np.array([-size / 2, size / 2]) + y)
if marks:
idxs = np.argwhere(np.max(np.abs(self.stars - [x, y]), axis=1) < size).squeeze()
viz.plot_marks(*self.stars[idxs].T, label=idxs)
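# Example (hypothetical usage): zoom on the target with a 100 pixel cutout
# obs.show_cutout(star=None, size=100) # star=None defaults to the target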
def plot_comps_lcs(self, n=15, ylim=(0.98, 1.02)):
"""Plot comparison stars light curves along target star light curve
Parameters
----------
n : int, optional
maximum number of comparison stars to show, by default 15
ylim : tuple, optional
ylim of the plot, by default (0.98, 1.02)
"""
idxs = [self.target, *self.xarray.comps.isel(apertures=self.aperture).values[0:n]]
lcs = [self.xarray.diff_fluxes.isel(star=i, apertures=self.aperture).values for i in idxs]
if ylim is None:
ylim = (self.diff_flux.min() * 0.99, self.diff_flux.max() * 1.01)
offset = ylim[1] - ylim[0]
if len(plt.gcf().axes) == 0:
plt.figure(figsize=(5, 10))
for i, lc in enumerate(lcs):
color = "grey" if i != 0 else "black"
viz.plot(self.time, lc - i * offset, bincolor=color)
plt.annotate(idxs[i], (self.time.min() + 0.005, 1 - i * offset + offset / 3))
plt.ylim(1 - (i + 0.5) * offset, ylim[1])
plt.title("Comparison stars", loc="left")
plt.grid(color="whitesmoke")
plt.tight_layout()
def plot_psf_fit(self, size=21, cmap="inferno", c="blueviolet", model=Gaussian2D):
"""Plot a 2D gaussian fit of the global psf (extracted from stack fits)
Parameters
----------
size : int, optional
square size of extracted PSF, by default 21
cmap : str, optional
color map of psf image, by default "inferno"
c : str, optional
color of model plot line, by default "blueviolet"
model : prose.blocks, optional
a PsfFit block, by default Gaussian2D
Returns
-------
dict
PSF fit info (theta, std_x, std_y, fwhm_x, fwhm_y)
"""
psf_fit = model()
image = Image(data=self.stack, stars_coords=self.stars, header=self.xarray.attrs)
psf_fit.run(image)
if len(plt.gcf().get_axes()) == 0:
plt.figure(figsize=(12, 4))
viz.plot_marginal_model(psf_fit.epsf, psf_fit.optimized_model, cmap=cmap, c=c)
return {"theta": image.theta,
"std_x": image.psf_sigma_x,
"std_y": image.psf_sigma_y,
"fwhm_x": image.fwhmx,
"fwhm_y": image.fwhmy }
def plot_star_psf(self,star=None,cutout_size=21,print_values=True,plot=True):
if star is None:
star = self.target
cutout = cutouts(self.stack, [self.stars[star]], size=cutout_size)
psf_fit = Moffat2D(cutout_size=cutout_size)
params = ['fwhmx =', 'fwhmy =', 'theta =']
values = []
for i in range(len(params)):
if print_values is True:
print(params[i], psf_fit(cutout.data[0])[i])
values.append(psf_fit(cutout.data[0])[i])
if plot is True:
viz.plot_marginal_model(psf_fit.epsf, psf_fit.optimized_model)
return values
def plot_rms(self, bins=0.005):
"""Plot binned rms of lightcurves vs the CCD equation
Parameters
----------
bins : float, optional
bin size used to compute error, by default 0.005 (in days)
"""
self._check_diff()
viz.plot_rms(
self.diff_fluxes,
self.lcs,
bins=bins,
target=self.target["id"],
highlights=self.comparison_stars)
def plot_systematics(self, fields=None, ylim=(0.98, 1.02)):
"""Plot systematics measurements along target light curve
Parameters
----------
fields : list of str, optional
list of systematic to include (must be in self), by default None
ylim : tuple, optional
plot ylim, by default (0.98, 1.02)
"""
if fields is None:
fields = ["dx", "dy", "fwhm", "airmass", "sky"]
flux = self.diff_flux.copy()
flux /= np.nanmean(flux)
if ylim is None:
ylim = (flux.nanmin() * 0.99, flux.nanmax() * 1.01)
offset = ylim[1] - ylim[0]
if len(plt.gcf().axes) == 0:
plt.figure(figsize=(5 ,10))
viz.plot(self.time, flux, bincolor="black")
for i, field in enumerate(fields):
if field in self:
scaled_data = self.xarray[field].values.copy()
scaled_data = np.nan_to_num(scaled_data, -1)
scaled_data[scaled_data - np.nanmean(scaled_data) > 5*np.nanstd(scaled_data)] = -1
scaled_data = scaled_data - np.median(scaled_data)
scaled_data = scaled_data / np.std(scaled_data)
scaled_data *= np.std(flux)
scaled_data += 1 - (i + 1) * offset
viz.plot(self.time, scaled_data, bincolor="grey")
plt.annotate(field, (self.time.min() + 0.005, 1 - (i + 1) * offset + offset / 3))
else:
i -= 1
plt.ylim(1 - (i + 1.5) * offset, ylim[1])
plt.title("Systematics", loc="left")
plt.grid(color="whitesmoke")
plt.tight_layout()
def plot_raw_diff(self):
"""Plot raw target flux and differantial flux
"""
plt.subplot(211)
plt.title("Differential lightcurve", loc="left")
self.plot()
plt.grid(color="whitesmoke")
plt.subplot(212)
plt.title("Normalized flux", loc="left")
flux = self.xarray.raw_fluxes.isel(star=self.target, apertures=self.aperture).values
plt.plot(self.time, flux, ".", ms=3, label="target", c="C0")
if 'alc' in self:
plt.plot(self.time, self.xarray.alc.isel(apertures=self.aperture).values*np.median(flux), ".", ms=3, c="k", label="artificial star")
plt.legend()
plt.grid(color="whitesmoke")
plt.xlim([np.min(self.time), np.max(self.time)])
plt.tight_layout()
def plot_precision(self, bins=0.005, aperture=None):
"""Plot observation precision estimate against theorethical error (background noise, photon noise and CCD equation)
Parameters
----------
bins : float, optional
bin size used to estimate error, by default 0.005 (in days)
aperture : int, optional
chosen aperture, by default None
"""
n_bin = int(bins / (np.mean(self.exptime) / (60 * 60 * 24)))
assert len(self.time) > n_bin, "Your 'bins' size is less than the total exposure"
x = self.xarray.isel(apertures=self.aperture if aperture is None else aperture).copy()
fluxes = x.raw_fluxes.values
errors = x.raw_errors.values
mean_fluxes = np.mean(fluxes, axis=1)
mean_errors = np.mean(errors, axis=1)
error_estimate = [np.median(binned_statistic(self.time, f, statistic='std', bins=n_bin)[0]) for f in fluxes]
area = x.apertures_area[0].values
# ccd_equation = phot_prose.telescope.error(
# prose_fluxes, tp_area, np.mean(self.sky), np.mean(self.exptime), np.mean(self.airmass))
ccd_equation = (mean_errors / mean_fluxes)
inv_snr_estimate = error_estimate / mean_fluxes
positive_est = inv_snr_estimate > 0
mean_fluxes = mean_fluxes[positive_est]
inv_snr_estimate = inv_snr_estimate[positive_est]
ccd_equation = ccd_equation[positive_est]
sorted_fluxes_idxs = np.argsort(mean_fluxes)
plt.plot(np.log(mean_fluxes), inv_snr_estimate, ".", alpha=0.5, ms=2, c="k",
label=f"flux rms ({bins * (60 * 24):.1f} min bins)")
plt.plot(np.log(mean_fluxes)[sorted_fluxes_idxs], (np.sqrt(mean_fluxes) / mean_fluxes)[sorted_fluxes_idxs],
"--", c="k", label="photon noise", alpha=0.5)
plt.plot(np.log(mean_fluxes)[sorted_fluxes_idxs],
(np.sqrt(np.mean(self.sky) * area) / mean_fluxes)[sorted_fluxes_idxs], c="k", label="background noise",
alpha=0.5)
# plt.plot(np.log(prose_fluxes)[s], (prose_e/prose_fluxes)[s], label="CCD equation")
plt.plot(np.log(mean_fluxes)[sorted_fluxes_idxs], ccd_equation[sorted_fluxes_idxs], label="CCD equation")
plt.legend()
plt.ylim(
0.5 * np.percentile(inv_snr_estimate, 2),
1.5 * np.percentile(inv_snr_estimate, 98))
plt.xlim(np.min(np.log(mean_fluxes)), np.max(np.log(mean_fluxes)))
plt.yscale("log")
plt.xlabel("log(ADU)")
plt.ylabel("$SNR^{-1}$")
plt.title("Photometric precision (raw fluxes)", loc="left")
def plot_meridian_flip(self):
"""Plot meridian flip line over existing axe
"""
if self.meridian_flip is not None:
plt.axvline(self.meridian_flip, c="k", alpha=0.15)
_, ylim = plt.ylim()
plt.text(self.meridian_flip, ylim, "meridian flip ", ha="right", rotation="vertical", va="top", color="0.7")
def plot(self, star=None, meridian_flip=True, bins=0.005, color="k", std=True):
"""Plot observation light curve
Parameters
----------
star : int, optional
index of the star to plot, by default None for the target star
meridian_flip : bool, optional
whether to show meridian flip, by default True
bins : float, optional
bin size in same unit as Observation.time, by default 0.005
color : str, optional
binned points color, by default "k"
std : bool, optional
whether to show the standard deviation of each bin as the error bar, by default True; otherwise the theoretical error bar is shown
"""
super().plot(star=star, bins=bins, color=color, std=std)
if meridian_flip:
self.plot_meridian_flip()
def plot_psf(self, star=None, n=40, zscale=False, aperture=None, rin=None, rout=None):
"""Plot star cutout overalid with aperture and radial flux.
Parameters
----------
star : int or list like, optional
if int: star to plot cutout on, if list like (tuple, np.ndarray) of size 2: coords of cutout, by default None
n : int, optional
cutout width and height, by default 40
zscale : bool, optional
whether to apply a zscale to cutout image, by default False
aperture : float, optional
radius of aperture to display, by default None corresponds to best target aperture
rin : float, optional
radius of inner annulus to display, by default None corresponds to the saved inner radius
rout : float, optional
radius of outer annulus to display, by default None corresponds to the saved outer radius
"""
n /= np.sqrt(2)
if isinstance(star, (tuple, list, np.ndarray)):
x, y = star
else:
if star is None:
star = 0
assert isinstance(star, int), "star must be star coordinates or integer index"
x, y = self.stars[star]
Y, X = np.indices(self.stack.shape)
cutout_mask = (np.abs(X - x + 0.5) < n) & (np.abs(Y - y + 0.5) < n)
inside = np.argwhere((cutout_mask).flatten()).flatten()
radii = (np.sqrt((X - x) ** 2 + (Y - y) ** 2)).flatten()[inside]
idxs = np.argsort(radii)
radii = radii[idxs]
pixels = self.stack.flatten()[inside]
pixels = pixels[idxs]
binned_radii, binned_pixels, _ = fast_binning(radii, pixels, bins=1)
fig = plt.figure(figsize=(9.5, 4))
fig.patch.set_facecolor('xkcd:white')
_ = plt.subplot(1, 5, (1, 3))
plt.plot(radii, pixels, "o", fillstyle='none', c="0.7", ms=4)
plt.plot(binned_radii, binned_pixels, c="k")
plt.xlabel("distance from center (pixels)")
plt.ylabel("ADUs")
_, ylim = plt.ylim()
if "apertures_radii" in self and self.aperture != -1:
apertures = self.apertures_radii[:, 0]
aperture = apertures[self.aperture]
if "annulus_rin" in self:
if rin is None:
rin = self.annulus_rin.mean()
if rout is None:
rout = self.annulus_rout.mean()
if aperture is not None:
plt.xlim(0)
plt.text(aperture, ylim, "APERTURE", ha="right", rotation="vertical", va="top")
plt.axvline(aperture, c="k", alpha=0.1)
plt.axvspan(0, aperture, color="0.9", alpha=0.1)
if rin is not None:
plt.axvline(rin, color="k", alpha=0.2)
if rout is not None:
plt.axvline(rout, color="k", alpha=0.2)
if rin is not None:
plt.axvspan(rin, rout, color="0.9", alpha=0.2)
_ = plt.text(rout, ylim, "ANNULUS", ha="right", rotation="vertical", va="top")
n = np.max([np.max(radii), rout +2 if rout else 0])
plt.xlim(0, n)
ax2 = plt.subplot(1, 5, (4, 5))
im = self.stack[int(y - n):int(y + n), int(x - n):int(x + n)]
if zscale:
im = z_scale(im)
plt.imshow(im, cmap="Greys_r", aspect="auto", origin="lower")
plt.axis("off")
if aperture is not None:
ax2.add_patch(plt.Circle((n, n), aperture, ec='grey', fill=False, lw=2))
if rin is not None:
ax2.add_patch(plt.Circle((n, n), rin, ec='grey', fill=False, lw=2))
if rout is not None:
ax2.add_patch(plt.Circle((n, n), rout, ec='grey', fill=False, lw=2))
ax2.text(0.05, 0.05, f"{star}", fontsize=12, color="white", transform=ax2.transAxes)
plt.tight_layout()
def plot_systematics_signal(self, systematics, signal=None, ylim=None, offset=None, figsize=(6, 7)):
"""Plot a systematics and signal model over diff_flux. systeamtics + signal is plotted on top, signal alone on detrended
data on bottom
Parameters
----------
systematics : np.ndarray
signal : np.ndarray
ylim : tuple, optional
ylim of the plot, by default None, using the dispersion of y
offset : tuple, optional
offset between the two panels, by default None
figsize : tuple, optional
figure size as in in plt.figure, by default (6, 7)
"""
viz.plot_systematics_signal(self.time, self.diff_flux, systematics, signal, ylim=ylim, offset=offset,
figsize=figsize)
self.plot_meridian_flip()
plt.legend()
self.xlabel()
plt.ylabel("diff. flux")
plt.tight_layout()
viz.paper_style()
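# Example (hypothetical usage, model arrays are illustrative): a linear
# systematics model together with a simple box-shaped transit signal
# trend = np.polyval(np.polyfit(obs.time, obs.diff_flux, 1), obs.time)
# box = 1. - 0.01 * (np.abs(obs.time - np.mean(obs.time)) < 0.02)
# obs.plot_systematics_signal(trend, box)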
def xlabel(self):
"""Plot xlabel (time) according to its units
"""
plt.xlabel(self.time_format.upper().replace("_", "-"))
def where(self, condition):
"""return filtered observation given a boolean mask of time
Parameters
----------
condition : np.ndarray
boolean mask over Observation.time selecting the epochs to keep
Returns
-------
Observation
a copy of the observation restricted to times where `condition` is True
"""
new_obs = self.copy()
new_obs.xarray = new_obs.xarray.sel(time=self.time[condition])
return new_obs
def keep_good_stars(self, lower_threshold=3., upper_threshold=35000., trim=10, keep=None, inplace=True):
"""Keep only stars with a median flux higher than `threshold`*sky.
This action will reorganize stars indexes (target id will be recomputed) and reset the differential fluxes to raw.
Parameters
----------
lower_threshold : float
threshold for which stars with flux/sky > threshold are kept, default is 3
trim : float
margin in pixels from the image edges; stars closer than this to an edge are discarded, default is 10
keep : int or list
stars to keep regardless of the thresholds (indexes 0 to `keep` if int, or an explicit list of indexes).
inplace: bool
whether to replace current object or return a new one
"""
good_stars = np.argwhere((np.median(self.peaks, 1)/np.median(self.sky) > lower_threshold) & (np.median(self.peaks, 1) < upper_threshold)).squeeze()
mask = np.any(np.abs(self.stars[good_stars] - max(self.stack.shape) / 2) > (max(self.stack.shape) - 2 * trim) / 2, axis=1)
bad_stars = np.argwhere(mask).flatten()
final_stars = np.delete(good_stars, bad_stars)
if isinstance(keep,int):
final_stars = np.concatenate([final_stars,np.arange(0,keep+1)],axis=0)
final_stars = np.unique(final_stars)
if isinstance(keep,list):
final_stars = np.concatenate([final_stars,keep ], axis=0)
final_stars = np.unique(final_stars)
#!/usr/bin/env python3
"""Where library module including functions for GNSS modeling
Example:
--------
from where.lib import gnss
...
Description:
------------
This module will provide functions for GNSS modeling.
TODO: How to move routines to Midgard?
========================================
check_satellite_eclipse(dset)
findsun(time) -> Midgard: planetary_motion?
gsdtime_sun(time) -> Midgard: planetary_motion?
get_earth_rotation(dset) -> Midgard: PosVel (see function)
get_code_observation(dset) -> Midgard: gnss
get_flight_time(dset) -> Midgard: PosVel (see function)
obstype_to_freq(sys, obstype) -> Midgard: gnss
get_initial_flight_time(dset, sat_clock_corr=None, rel_clock_corr=None) -> Midgard: gnss
get_line_of_sight(dset) -> Midgard: Position library
get_rinex_file_version(file_key, file_vars) -> Is that needed in the future?
gpssec2jd(wwww, sec) -> in time.gps_ws
Example:
from where.data import time
t = time.Time([2000, 2000, 2000, 2004], [0, 100000, 200000, 86400], fmt="gps_ws", scale="gps")
t.gps_ws
jd2gps(jd) -> in time
linear_combination(dset) -> Midgard: gnss
llh2xyz(lat, lon, h) -> midgard.math.transformation.llh2trs
plot_skyplot(dset) -> Midgard: plot
Should we be more specific in the arguments we use, instead of passing the whole 'dset'? -> Maybe
"""
# Standard library imports
from typing import Tuple
# External library imports
import numpy as np
import matplotlib.pyplot as plt
# Midgard imports
from midgard.collections import enums
from midgard.math.constant import constant
# Where imports
from where.lib import config
from where.lib import log
from where.lib import mathp
from where.lib import rotation
from where.data.time import TimeDelta
def check_satellite_eclipse(dset):
"""Check if a satellite is an eclipse
TODO: Check if a better algorithm exists (e.g. based on beta angle).
Args:
dset(Dataset): Model data
"""
cos_gamma = np.einsum(
"ij,ij->i", mathp.unit_vector(dset.sat_posvel.itrs_pos), dset.sat_posvel.itrs_pos_sun
) # TODO: dot product -> better solution dot() function in mathp
h = np.linalg.norm(dset.sat_posvel.itrs_pos, axis=1) * np.sqrt(1.0 - cos_gamma ** 2)
satellites_in_eclipse = list()
for satellite in dset.unique("satellite"):
idx = dset.filter(satellite=satellite)
satellite_eclipse = np.logical_and(cos_gamma[idx] < 0, h[idx] < constant.a)
if np.any(satellite_eclipse == True):
satellites_in_eclipse.append(satellite)
return satellites_in_eclipse
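# Geometric note on the test above: assuming `itrs_pos_sun` holds the unit
# vector from the Earth's centre towards the Sun, a satellite is flagged as
# eclipsed when it lies on the night side of the Earth (cos_gamma < 0) and
# closer than one Earth radius `constant.a` to the Earth-Sun axis (h < a),
# i.e. a simple cylindrical shadow model without penumbra.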
def findsun(time):
"""Obtains the position vector of the Sun in relation to Earth (in ECEF).
This routine is a reimplementation of routine findSun() in model.c of gLAB 3.0.0 software.
Args:
time(Time): Time object
Returns:
numpy.ndarray: Sun position vector given in ECEF [m]
"""
AU = 1.495_978_70e8
gstr, slong, sra, sdec = gsdtime_sun(time)
sun_pos_x = np.cos(np.deg2rad(sdec)) * np.cos(np.deg2rad(sra)) * AU
sun_pos_y = np.cos(np.deg2rad(sdec)) * np.sin(np.deg2rad(sra)) * AU
sun_pos_z = np.sin(np.deg2rad(sdec)) * AU
sun_pos_eci = np.vstack((sun_pos_x, sun_pos_y, sun_pos_z)).T
# Rotate from inertial to non inertial system (ECI to ECEF)
sun_pos_ecef = (rotation.R3(np.deg2rad(gstr)) @ sun_pos_eci.T)[:, :, 0] # remove 1 dimension
return sun_pos_ecef
def gsdtime_sun(time):
"""Get position of the sun (low-precision)
This routine is a reimplementation of routine GSDtime_sun() in model.c of gLAB 3.0.0 software.
Args:
time(Time): Time object
Returns:
tuple: with following entries
=============== =============== ==================================================================================
Elements Type Description
=============== =============== ==================================================================================
gstr numpy.ndarray GMST0 (to go from ECEF to inertial) [deg]
slong numpy.ndarray Sun longitude [deg]
sra numpy.ndarray Sun right Ascension [deg]
sdec numpy.ndarray Sun declination in [deg]
=============== =============== ==================================================================================
"""
jd = time.mjd_int - 15019.5
frac = time.jd_frac
vl = np.mod(279.696_678 + 0.985_647_335_4 * jd, 360)
gstr = np.mod(279.690_983 + 0.985_647_335_4 * jd + 360 * frac + 180, 360)
g = np.deg2rad(np.mod(358.475_845 + 0.985_600_267 * jd, 360))
slong = vl + (1.91946 - 0.004_789 * jd / 36525) * np.sin(g) + 0.020_094 * np.sin(2 * g)
obliq = np.deg2rad(23.45229 - 0.013_012_5 * jd / 36525)
slp = np.deg2rad(slong - 0.005_686)
sind = np.sin(obliq) * np.sin(slp)
cosd = np.sqrt(1 - sind * sind)
sdec = np.rad2deg(np.arctan2(sind, cosd))
sra = 180 - np.rad2deg(np.arctan2(sind / cosd / np.tan(obliq), -np.cos(slp) / cosd))
return gstr, slong, sra, sdec
# TODO: pv.trs.observed - pv.trs # calculate property 'observed' = rotation.R3(rotation_angle[idx]).dot(dset.sat_posvel.itrs_pos[idx])
# def get_earth_rotation(posvel: PositionVelocityArray, flight_time: np.ndarray):
def get_earth_rotation(dset):
"""Get corrections for satellite position and velocity by Earth rotation
In an Earth-fixed reference system the Earth's rotation has to be applied, which accounts for the rotation of the
Earth during the time the signal propagates from the satellite to the receiver. Eq. 5.11 in :cite:`subirana2013` is used
for correcting the satellite position and velocity in the Dataset field 'sat_posvel' about the Earth's rotation
effect.
Args:
dset(Dataset): Model data
Returns:
tuple: with following entries
=============== =============== ==================================================================================
Elements Type Description
=============== =============== ==================================================================================
sat_pos numpy.ndarray Satellite position vector corrections in ITRS [m]
sat_vel numpy.ndarray Satellite velocity vector corrections in ITRS [m/s]
=============== =============== ==================================================================================
"""
sat_pos = np.zeros((dset.num_obs, 3))
sat_vel = np.zeros((dset.num_obs, 3))
flight_time = get_flight_time(dset)
rotation_angle = flight_time * constant.omega
sat_pos = (
rotation.R3(rotation_angle) @ dset.sat_posvel.trs.pos.val[:, :, None] - dset.sat_posvel.trs.pos.val[:, :, None]
)
sat_vel = (
rotation.R3(rotation_angle) @ dset.sat_posvel.trs.vel.val[:, :, None] - dset.sat_posvel.trs.vel.val[:, :, None]
)
return sat_pos[:, :, 0], sat_vel[:, :, 0]
def get_code_observation(dset):
"""Get pseudo-range (code) observations depending on given observation types
The first element of the observation type variable `dset.meta['obstypes'][sys]` is selected as observation for
single frequency solution. The order of the observation type variable `dset.meta['obstypes'][sys]` depends on
the priority list given in the configuration file and the given observations.
The ionospheric-free linear combination is applied for dual frequency solution.
Args:
dset: Dataset
Returns:
numpy.ndarray: Pseudo-range (code) observation chosen depending on priority list and for dual frequency
solution given as ionospheric-free linear combination
"""
freq_type = config.tech.freq_type.str
code_obs = np.zeros(dset.num_obs)
if freq_type == "single":
for sys in dset.unique("system"):
idx = dset.filter(system=sys)
obstypes = dset.meta["obstypes"][sys]
# TODO: Exists a better solution. Complete routine should be named get_observation(dset).
if dset.vars["pipeline"] == "gnss_spv":
obstype = None
for type_ in obstypes:
if type_.startswith("D"):
obstype = type_
break
if not obstype:
log.fatal(f"No Doppler observations are defined for {sys}: {', '.join(obstypes)}")
else:
obstype = obstypes[0]
code_obs = dset.obs[obstype][idx]
elif freq_type == "dual":
code_obs, _ = linear_combination("ionosphere-free", dset)
else:
log.fatal(
f"Configuration option 'freq_type = {freq_type}' is not valid (Note: Triple frequency solution is not in use.)."
)
return code_obs
# TODO: Connect needed between station and satellite position
# Already part of Position library: posistion.distance / constant.c
def get_flight_time(dset):
"""Get flight time of GNSS signal between satellite and receiver
Args:
dset(Dataset): Model data
Return:
numpy.ndarray: Flight time of GNSS signal between satellite and receiver in [s]
"""
from where.models.delay import gnss_range # Local import to avoid cyclical import
# Get geometric range between satellite and receiver position
geometric_range = gnss_range.gnss_range(dset)
return geometric_range / constant.c
def obstype_to_freq(sys, obstype):
"""Get GNSS frequency based on given GNSS observation type
Args:
sys(str): GNSS identifier (e.g. 'E', 'G', ...)
obstype(str): Observation type (e.g. 'L1', 'P1', 'C1X', ...)
Return:
float: GNSS frequency in [Hz]
"""
try:
freq = getattr(enums, "gnss_freq_" + sys)[getattr(enums, "gnss_num2freq_" + sys)["f" + obstype[1]]]
except KeyError:
log.fatal(f"Frequency for GNSS '{sys}' and observation type '{obstype}' is not defined.")
return freq
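# Illustrative example (assuming the standard frequency enums): for GPS the
# observation type 'C1C' lies in frequency band 1 (L1), so
# obstype_to_freq("G", "C1C") returns 1575.42e+06 [Hz].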
def get_initial_flight_time(dset, sat_clock_corr=None, rel_clock_corr=None):
r"""Get initial flight time of GNSS signal between satellite and receiver
In the following it is described how the satellite transmission time is determined. The GNSS receiver
registers the observation time, i.e. when the satellite signal is tracked by the receiver. In addition the
pseudorange :math:`P_r^s` between the satellite and the receiver is observed by the GNSS receiver. The first guess
of time of transmission :math:`t^s` can be determined if we subtract from the receiver time :math:`t_r` the time of
flight of the GNSS signal based on the pseudorange as follows:
.. math::
t_0^s = t_r - \frac{P_r^s}{c}
with the speed of light :math:`c` and the flight time of the GNSS signal from the satellite to the receiver
:math:`\frac{P_r^s}{c}`, which is determined in this function.
The time of satellite transmission has to be corrected like:
.. math::
\Delta t^s = t_0^s - \Delta t_{sv} - \Delta t_r,
with the satellite clock correction :math:`\Delta t_{sv}`:
.. math::
\Delta t_{sv} = a_0 + a_1 (t_0^s) + a_2 (t_0^s)^2,
and the relativistic correction due to orbit eccentricity :math:`\Delta t_r`.
The satellite clock correction and the relativistic eccentricity correction are applied, if this information is
already available by the routine call.
Args:
dset (Dataset): Model data.
sat_clock_corr (numpy.ndarray): Satellite clock correction
rel_clock_corr (numpy.ndarray): Relativistic clock correction due to orbit eccentricity corrections for each
observation
Return:
TimeDelta: Flight time of GNSS signal between satellite and receiver
"""
# Note: It can be that the observation table 'obs' is not given. For example if different orbit solutions are
# compared, it is not necessary to read GNSS observation data. In this case the Dataset time entries
# are not corrected for time of flight determined based on pseudorange observations. Instead the given
# Dataset time entries are directly used.
flight_time = np.zeros(dset.num_obs)
if "obs" in dset.fields:
for sys in dset.unique("system"):
# Get code observation type defined by given observation and observation type priority list
# Note: First element of GNSS observation type list should be used.
obstype = dset.meta["obstypes"][sys][0]
log.debug(
f"Code observation '{obstype}' for GNSS '{sys}' is selected for determination of initial flight time."
)
idx = dset.filter(system=sys)
flight_time[idx] = dset.obs[obstype][idx] / constant.c
if sat_clock_corr is not None:
flight_time += sat_clock_corr / constant.c
if rel_clock_corr is not None:
flight_time += rel_clock_corr / constant.c
return TimeDelta(flight_time, fmt="seconds", scale="gps")
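# Order of magnitude of the result above: a pseudorange of about 20 000 km
# gives 2.0e7 m / c ~= 0.067 s, so initial flight times for MEO GNSS
# satellites typically fall between roughly 0.06 s and 0.09 s.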
# TODO: already in Position via 'direction'
def get_line_of_sight(dset):
"""Get the Line of Sight vector from receiver to satellite in the ITRS.
"""
# TODO: Other solution dset.site_pos.convert_gcrs_to_itrs(dset.site_pos.direction)
return mathp.unit_vector(dset.sat_posvel.itrs_pos - dset.site_pos.itrs)
def get_rinex_file_version(file_key=None, file_vars=None, file_path=None):
""" Get RINEX file version for a given file key
Args:
file_key: File key defined in files.conf file (e.g. given for RINEX navigation or observation file)
file_vars: Variables needed to identify RINEX file based on definition in files.conf file.
file_path (pathlib.PosixPath): File path to broadcast orbit file.
Returns:
tuple: with following elements
=============== ==================================================================================
Elements Description
=============== ==================================================================================
version RINEX file version
filepath RINEX file path
=============== ==================================================================================
"""
file_vars = dict() if file_vars is None else file_vars
if file_path is None:
file_path = config.files.path(file_key, file_vars=file_vars)
with config.files.open_path(file_path, mode="rt") as infile:
try:
version = infile.readline().split()[0]
except IndexError:
log.fatal(f"Could not find Rinex version in file {file_path}")
return version, file_path
def gpssec2jd(wwww, sec):
"""
FUNCTION: gpsSec2jd(wwww,sec)
PURPOSE: Conversion from GPS week and second to Julian Date (JD)
RETURN: (float) jd_day, jd_frac - Julian Day and fractional part
INPUT: (int) wwww, (float) sec - GPS week and second
"""
SEC_OF_DAY = 86400.0
JD_1980_01_06 = 2_444_244 # Julian date of 6-Jan-1980 + 0.5 d
# .. Determine GPS day
wd = np.floor((sec + 43200.0) / 3600.0 / 24.0) # 0.5 d = 43200.0 s
# .. Determine remainder
fracSec = sec + 43200.0 - wd * 3600.0 * 24.0
# .. Conversion from GPS week and day to Julian Date (JD)
jd_day = wwww * 7.0 + wd + JD_1980_01_06
jd_frac = fracSec / SEC_OF_DAY
return jd_day, jd_frac
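# Worked example for the conversion above: GPS week 2000, second-of-week 0.0
# (Sunday 2018-05-06 00:00:00 GPS time) gives
# gpssec2jd(2000, 0.0) -> (2458244.0, 0.5), i.e. Julian Date 2458244.5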
def jd2gps(jd):
"""
FUNCTION: jd2gps(jd)
PURPOSE: Conversion from Julian Date (JD) to GPS week and day (started 6-Jan-1980).
RETURN: (int) wwww, wd, frac - GPS week, GPS day and fractional part / GPS seconds
INPUT: (float) jd - Julian Date
"""
JD_1980_01_06 = 2_444_244.5 # Julian date of 6-Jan-1980
if np.any(jd < JD_1980_01_06):
log.fatal("Julian Day exceeds the GPS time start date of 6-Jan-1980 (JD 2444244.5).")
# .. Conversion from Julian Date (JD) to GPS week and day
wwww = np.floor((jd - JD_1980_01_06) / 7)
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
import ExpUtils as util
class MFNN:
def __init__(self, config, TF_GPU_USAGE=0.25):
self.config = config
self.SynData = self.config['SynData']
self.dim = self.SynData.dim
self.M = self.SynData.Nfid
self.MfData = self.SynData.data
self.encode = self.config['feature']
# Train/Test Input/Output holders
self.tf_Xtrain_list = []
self.tf_ytrain_list = []
self.tf_Xtest_list = []
self.tf_ytest_list = []
for m in range(self.M):
self.tf_Xtrain_list.append(tf.compat.v1.placeholder(util.tf_type, [None, self.dim]))
self.tf_ytrain_list.append(tf.compat.v1.placeholder(util.tf_type, [None, 1]))
self.tf_Xtest_list.append(tf.compat.v1.placeholder(util.tf_type, [None, self.dim]))
self.tf_ytest_list.append(tf.compat.v1.placeholder(util.tf_type, [None, 1]))
# Linear Mapping Weights
self.tf_Wvar_Chol_list = []
for m in range(self.M):
Km = self.encode['Klist'][m]
scale=1.0 # initialize with smaller values when there are numerical errors
Lm = tf.linalg.band_part(scale*tf.Variable(tf.eye(Km+1), dtype=util.tf_type), -1, 0)
self.tf_Wvar_Chol_list.append(Lm)
#
self.tf_W_list = []
self.tf_Wm_list = []
for m in range(self.M):
Km = self.encode['Klist'][m]
dist_noise = tfp.distributions.MultivariateNormalDiag(loc=tf.zeros([Km+1]), scale_diag=tf.ones([Km+1]))
Wm = tf.Variable(tf.random.truncated_normal([Km+1,1]), dtype=util.tf_type)
self.tf_Wm_list.append(Wm)
self.tf_W_list.append(Wm+self.tf_Wvar_Chol_list[m]@tf.reshape(dist_noise.sample(),[-1,1]))
# noise prior
self.tf_log_gam_a = tf.Variable(-10, dtype=util.tf_type)
self.tf_log_gam_b = tf.Variable(-10, dtype=util.tf_type)
self.noise_gam_prior = tfp.distributions.Gamma(
tf.exp(self.tf_log_gam_a), tf.exp(self.tf_log_gam_b)
)
# noise observations
self.tf_tau_list = []
for m in range(self.M):
logtau_m = tf.Variable(0.0, dtype=util.tf_type)
self.tf_tau_list.append(tf.exp(logtau_m))
# initialize NN
self.mf_encode_list = self.init_feature_encode(self.encode)
# concatenate NN with linear projection
self.mf_outputs, self.mf_aug_features = self.init_mf_outputs(self.tf_Xtrain_list, self.tf_W_list)
self.mf_pred_outputs, self.mf_pred_aug_features = self.init_mf_outputs(self.tf_Xtest_list, self.tf_W_list)
self.expect_llh = self.eval_expect_llh()
self.KL = self.eval_divergence()
# negative evidence lower bound
self.nelbo = -(self.expect_llh - self.KL)
self.optimizer = tf.compat.v1.train.AdamOptimizer(self.config['learning_rate'])
self.minimizer = self.optimizer.minimize(self.nelbo)
self.Xquery = tf.Variable(tf.random.uniform(minval=self.SynData.lb, maxval=self.SynData.ub, shape=[1,self.dim]), dtype=util.tf_type)
self.tf_Ws_list = []
for m in range(self.M):
Km = self.encode['Klist'][m]
self.tf_Ws_list.append(tf.compat.v1.placeholder(util.tf_type, [Km+1, 1]))
self.ws_fstar, self.ws_aug_feature = self.mf_output(self.Xquery, self.M-1, self.tf_Ws_list)
self.nfstar = -tf.squeeze(self.ws_fstar)
self.nfstar_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.nfstar,
method='L-BFGS-B',
var_to_bounds={self.Xquery: [self.SynData.lb, self.SynData.ub]},
var_list=[self.Xquery],
options={'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'eps':self.config['Fstar']['rate'],
'ftol' : 1.0 * np.finfo(float).eps},)
# finding inference maximum
self.Xinfer = tf.Variable(tf.random.uniform(minval=self.SynData.lb, maxval=self.SynData.ub, shape=[1,self.dim]), dtype=util.tf_type)
self.infer_star, self.infer_aug_feature = self.mf_output(self.Xinfer, self.M-1, self.tf_W_list)
self.neg_infer_maximum = -tf.squeeze(self.infer_star)
self.neg_infer_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.neg_infer_maximum,
method='L-BFGS-B',
var_to_bounds={self.Xinfer: [self.SynData.lb, self.SynData.ub]},
var_list=[self.Xinfer],
options={'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'eps':self.config['Infer']['rate'],
'ftol' : 1.0 * np.finfo(float).eps},)
gpu_options =tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=TF_GPU_USAGE)
self.sess = tf.compat.v1.Session(
config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
gpu_options=gpu_options,
)
)
self.sess.run(tf.compat.v1.global_variables_initializer())
def init_feature_encode(self, encode):
"""Initialize the feature encoding(NN) weights and biases"""
feature_encode_list = []
for m in range(self.M):
if m == 0:
layers = [self.dim] + encode['hlayers'][m] + [encode['Klist'][m]]
else:
layers = [self.dim+1] + encode['hlayers'][m] + [encode['Klist'][m]]
# end if
nn = util.EncodeNN(layers, init=encode['init'], activation=encode['activation'])
feature_encode_list.append(nn)
# end for
return feature_encode_list
def mf_output(self, X, m, Wlist):
# base fidelity
feature = self.mf_encode_list[0].forward(X)
augment_feature = tf.pad(feature, tf.constant([[0,0],[0,1]]), constant_values=1.0)
output = tf.matmul(augment_feature, Wlist[0])
for l in range(1, m+1):
augment_input = tf.concat([output, X], axis=1)
feature = self.mf_encode_list[l].forward(augment_input)
augment_feature = tf.pad(feature, tf.constant([[0,0],[0,1]]), constant_values=1.0)
output = tf.matmul(augment_feature, Wlist[l])
# end for
return output, augment_feature
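# Illustrative walk-through of the composition above (assuming dim=2 and
# encode['Klist'] = [20, 20]): mf_output(X, 1, Wlist) encodes X into 20
# features, pads a constant-1 bias column (21 columns) and projects with
# Wlist[0] to a scalar base-fidelity output; that scalar is concatenated with
# X (3 inputs) and fed to the next encoder, whose padded 21-column features
# are projected with Wlist[1] to give the fidelity-1 output returned together
# with its augmented feature matrix.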
def init_mf_outputs(self, Xlist, Wlist):
outputs = []
features = []
for m in range(self.M):
output, feature = self.mf_output(Xlist[m], m, Wlist)
outputs.append(output)
features.append(feature)
return outputs, features
def eval_divergence(self):
expect = []
for m in range(self.M):
Km = self.encode['Klist'][m]
Lm = self.tf_Wvar_Chol_list[m]
mu = self.tf_W_list[m]
log_det_Lm = -0.5*tf.reduce_sum(tf.math.log(tf.square(tf.linalg.diag_part(Lm))))
log_expect_m = -0.5*(Km+1)*tf.math.log(2*np.pi) -\
0.5*(tf.linalg.trace(tf.matmul(Lm, tf.transpose(Lm))) + tf.reduce_sum(mu*mu))
expect.append(log_det_Lm - log_expect_m)
return tf.add_n(expect)
def eval_expect_llh(self):
expect = []
Nlist = self.config['SynData'].Ntrain_list
for m in range(self.M):
Nm = Nlist[m]
phi_m = self.mf_aug_features[m]
mu_m = self.tf_W_list[m]
Lm = self.tf_Wvar_Chol_list[m]
tau_m = self.tf_tau_list[m]
ym = self.tf_ytrain_list[m]
LmLmT = tf.matmul(Lm, tf.transpose(Lm))
mumuT = tf.matmul(mu_m, tf.transpose(mu_m))
tr_phi_Lm_mu = tf.linalg.trace(tf.transpose(phi_m) @ phi_m @ (LmLmT + mumuT))
ym_phi_mu = tf.squeeze(ym*ym - 2*ym*tf.matmul(phi_m, mu_m))
expect_m = 0.5*Nm*tf.math.log(tau_m) - 0.5*tau_m*(tf.reduce_sum(ym_phi_mu) + tr_phi_Lm_mu) +\
self.noise_gam_prior.log_prob(tau_m)
expect.append(expect_m)
# end for
return tf.add_n(expect)
def train(self):
hist_train_err = []
hist_test_err = []
fdict = {}
for m in range(self.M):
Dm = self.MfData[m]
fdict[self.tf_Xtrain_list[m]] = Dm['Xtrain']
fdict[self.tf_ytrain_list[m]] = Dm['ytrain']
fdict[self.tf_Xtest_list[m]] = Dm['Xtest']
for it in tqdm(range(self.config['epochs'] + 1)):
self.sess.run(self.minimizer, feed_dict = fdict)
if it % 100 == 0:
nelbo = self.sess.run(self.nelbo, feed_dict=fdict)
mf_pred = self.sess.run(self.mf_pred_outputs, feed_dict=fdict)
mf_pred_train = self.sess.run(self.mf_outputs, feed_dict=fdict)
if self.config['verbose']:
print('it %d: nelbo = %.5f' % (it, nelbo))
for m in range(self.M):
pred_m = mf_pred[m]
pred_m_train = mf_pred_train[m]
ground_ytest = self.MfData[m]['ytest']
ground_ytrain = self.MfData[m]['ytrain']
err_test = np.sqrt(np.mean(np.square(pred_m - ground_ytest)))
err_train = np.sqrt(np.mean(np.square(pred_m_train - ground_ytrain)))
hist_train_err.append(err_train)
hist_test_err.append(err_test)
if self.config['verbose'] or it == self.config['epochs']:
print(' - fid %d: train_nrmse = %.5f, test_nrmse = %.5f' % (m, err_train, err_test))
Fstar, Xstar = self.collect_fstar()
infer_opt, infer_optser = self.eval_infer_opt()
return Fstar, infer_optser, self.sess
def collect_fstar(self):
Wpost = []
Lpost = []
for m in range(self.M):
Wpost.append(self.sess.run(self.tf_W_list[m]))
Lpost.append(self.sess.run(self.tf_Wvar_Chol_list[m]))
Fstar = []
Xstar = []
for s in range(self.config['Fstar']['Ns']):
fdict = {}
for m in range(self.M):
Ws = np.random.multivariate_normal(np.squeeze(Wpost[m]), np.matmul(Lpost[m], Lpost[m].T))
fdict[self.tf_Ws_list[m]] = Ws.reshape([-1,1])
Fs = []
Xs = []
for t in range(self.config['Fstar']['RandomStart']):
self.sess.run(tf.compat.v1.variables_initializer(var_list=[self.Xquery]))
self.nfstar_optimizer.minimize(self.sess, feed_dict=fdict)
fstar = -self.sess.run(self.nfstar, feed_dict=fdict)
xstar = self.sess.run(self.Xquery, feed_dict=fdict)
Fs.append(fstar)
Xs.append(xstar)
argx = np.argmax(np.array(Fs))
Fstar.append(Fs[argx])
Xstar.append(Xs[argx])
return Fstar, Xstar
def eval_infer_opt(self):
infer_opt = []
infer_optser = []
for t in range(self.config['Infer']['RandomStart']):
self.sess.run(tf.compat.v1.variables_initializer(var_list=[self.Xinfer]))
self.neg_infer_optimizer.minimize(self.sess)
infer_fstar = -self.sess.run(self.neg_infer_maximum)
infer_xstar = self.sess.run(self.Xinfer)
infer_opt.append(infer_fstar)
infer_optser.append(infer_xstar)
argx = np.argmax(np.array(infer_opt))
# flake8: noqa: F821,F841
import itertools
import re
from typing import Optional, Union
import numpy as np
import pytest
import torch
from numpy.random import RandomState
import triton
import triton._C.libtriton.triton as _triton
import triton.language as tl
from triton.code_gen import JITFunction, TensorWrapper, reinterpret
int_dtypes = ['int8', 'int16', 'int32', 'int64']
uint_dtypes = ['uint8', 'uint16', 'uint32', 'uint64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + uint_dtypes + float_dtypes
def _bitwidth(dtype: str) -> int:
# ex.: "int64" -> 64
return int(re.search(r'(\d+)$', dtype).group(1))
def numpy_random(shape, dtype_str, rs: Optional[RandomState] = None, low=None, high=None):
"""
Override `rs` if you're calling this function twice and don't want the same
result for both calls.
"""
if isinstance(shape, int):
shape = (shape, )
if rs is None:
rs = RandomState(seed=17)
dtype = getattr(np, dtype_str)
if dtype_str in int_dtypes + uint_dtypes:
iinfo = np.iinfo(getattr(np, dtype_str))
low = iinfo.min if low is None else max(low, iinfo.min)
high = iinfo.max if high is None else min(high, iinfo.max)
x = rs.randint(low, high, shape, dtype=dtype)
x[x == 0] = 1 # Hack. Never return zero so tests of division don't error out.
return x
elif dtype_str in float_dtypes:
return rs.normal(0, 1, shape).astype(dtype)
else:
raise RuntimeError(f'Unknown dtype {dtype_str}')
def to_triton(x: np.ndarray, device='cuda') -> Union[TensorWrapper, torch.Tensor]:
t = x.dtype.name
if t in uint_dtypes:
signed_type_name = t.lstrip('u') # e.g. "uint16" -> "int16"
x_signed = x.astype(getattr(np, signed_type_name))
return reinterpret(torch.tensor(x_signed, device=device), getattr(tl, t))
else:
return torch.tensor(x, device=device)
def torch_dtype_name(dtype) -> str:
if isinstance(dtype, triton.language.dtype):
return dtype.name
elif isinstance(dtype, torch.dtype):
# 'torch.int64' -> 'int64'
m = re.match(r'^torch\.(\w+)$', str(dtype))
return m.group(1)
else:
raise TypeError(f'not a triton or torch dtype: {type(dtype)}')
def to_numpy(x):
if isinstance(x, TensorWrapper):
return x.base.cpu().numpy().astype(getattr(np, torch_dtype_name(x.dtype)))
elif isinstance(x, torch.Tensor):
return x.cpu().numpy()
else:
raise ValueError(f"Not a triton-compatible tensor: {x}")
def patch_kernel(template, to_replace):
kernel = triton.JITFunction(template.fn)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
@pytest.mark.parametrize("dtype_x", [dtype_x for dtype_x in dtypes])
def test_empty_kernel(dtype_x, device='cuda'):
SIZE = 128
@triton.jit
def kernel(X, SIZE: tl.constexpr):
pass
x = to_triton(numpy_random(SIZE, dtype_str=dtype_x), device=device)
kernel[(1, )](x, SIZE=SIZE, num_warps=4)
# generic test functions
def _test_unary(dtype_x, expr, numpy_expr=None, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = numpy_random(SIZE, dtype_str=dtype_x)
if 'log' in expr:
x = np.abs(x) + 0.01
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
# triton result
x_tri = to_triton(x, device=device)
z_tri = to_triton(np.empty_like(z_ref), device=device)
kernel[(1, )](z_tri, x_tri, SIZE=SIZE, num_warps=4)
# compare
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
def _binary_op_dtype_override(a: str, b: str) -> Optional[np.dtype]:
"""
Given two dtype strings, returns the numpy dtype Triton thinks binary
operations on the two types should return. Returns None if the return value
matches numpy. This is generally needed because Triton and pytorch return
narrower floating point types than numpy in mixed operations, and because
Triton follows C/C++ semantics around mixed signed/unsigned operations, and
numpy/pytorch do not.
"""
overrides = {
('float16', 'int16'): np.float16,
('float16', 'int32'): np.float16,
('float16', 'int64'): np.float16,
('float16', 'uint16'): np.float16,
('float16', 'uint32'): np.float16,
('float16', 'uint64'): np.float16,
('int8', 'uint8'): np.uint8,
('int8', 'uint16'): np.uint16,
('int8', 'uint32'): np.uint32,
('int8', 'uint64'): np.uint64,
('int16', 'uint16'): np.uint16,
('int16', 'uint32'): np.uint32,
('int16', 'uint64'): np.uint64,
('int32', 'uint32'): np.uint32,
('int32', 'uint64'): np.uint64,
('int64', 'uint64'): np.uint64,
}
key = (a, b) if a < b else (b, a)
return overrides.get(key)
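# For example (mirrors the table above): int32 + uint32 follows C semantics in
# Triton and yields uint32, whereas numpy promotes to int64, so the reference
# result is cast accordingly:
# _binary_op_dtype_override('int32', 'uint32') # -> np.uint32
# _binary_op_dtype_override('int16', 'int32') # -> None (matches numpy)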
def _test_binary(dtype_x, dtype_y, expr, numpy_expr=None, mode_x='real', mode_y='real', device='cuda', y_low=None, y_high=None):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
rs = RandomState(17)
x = numpy_random(SIZE, dtype_str=dtype_x, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype_y, rs=rs, low=y_low, high=y_high)
if mode_x == 'nan':
x[:] = float('nan')
if mode_y == 'nan':
y[:] = float('nan')
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
dtype_z = _binary_op_dtype_override(dtype_x, dtype_y)
if dtype_z is not None:
z_ref = z_ref.astype(dtype_z)
# triton result
x_tri = to_triton(x, device=device)
y_tri = to_triton(y, device=device)
z_tri = to_triton(np.empty(SIZE, dtype=z_ref.dtype), device=device)
kernel[(1, )](z_tri, x_tri, y_tri, SIZE=SIZE, num_warps=4)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), err_msg=expr, rtol=0.01)
def _mod_operation_ill_conditioned(dtype_x, dtype_y) -> bool:
# The result of x % y is ill-conditioned if x % y is much smaller than x.
# pytorch/CUDA has slightly different (probably better) rounding on
# remainders than stock LLVM. We currently don't expect to match it
# bit-for-bit.
return (dtype_x, dtype_y) in [
('int32', 'float16'),
('int32', 'float32'),
('int64', 'float16'),
('int64', 'float32'),
('int64', 'float64'),
('uint16', 'float16'),
('uint16', 'float32'),
('uint32', 'float16'),
('uint32', 'float32'),
('uint64', 'float16'),
('uint64', 'float32'),
('uint64', 'float64'),
]
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['+', '-', '*', '/', '%']
for dtype_x in dtypes
for dtype_y in dtypes
])
def test_bin_op(dtype_x, dtype_y, op, device='cuda'):
expr = f' x {op} y'
if op == '%' and dtype_x in int_dtypes + uint_dtypes and dtype_y in int_dtypes + uint_dtypes:
# LLVM has 'numpy.fmod', not 'numpy.remainder', semantics on integer remainders.
numpy_expr = 'np.fmod(x, y)'
elif op in ('/', '%') and dtype_x in ('int16', 'float16') and dtype_y in ('int16', 'float16'):
# Triton promotes 16-bit floating-point / and % to 32-bit because there
# are no native div or FRem operations on float16. Since we have to
# convert anyway, we may as well take the accuracy bump.
numpy_expr = f'x.astype(np.float32) {op} y.astype(np.float32)'
elif (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if op == '%' and _mod_operation_ill_conditioned(dtype_x, dtype_y):
with pytest.raises(AssertionError, match='Not equal to tolerance'):
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
elif (op in ('%', '/') and
((dtype_x in int_dtypes and dtype_y in uint_dtypes) or
(dtype_x in uint_dtypes and dtype_y in int_dtypes))):
with pytest.raises(triton.code_gen.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
assert re.match('Cannot use .* because they have different signedness', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y",
[(dtype_x, dtype_y) for dtype_x in int_dtypes for dtype_y in int_dtypes] +
[(dtype_x, dtype_y) for dtype_x in uint_dtypes for dtype_y in uint_dtypes]
)
def test_floordiv(dtype_x, dtype_y, device='cuda'):
# Triton has IEEE, not numpy/torch, semantics for %, and those carry
# through to //, so we have to use a nonstandard expression to get a
# reference result for //.
expr = 'x // y'
numpy_expr = '((x - np.fmod(x, y)) / y)'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['&', '|', '^']
for dtype_x in dtypes
for dtype_y in dtypes
])
def test_bitwise_op(dtype_x, dtype_y, op, device='cuda'):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if 'float' in dtype_x + dtype_y:
with pytest.raises(triton.code_gen.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr='np.array([])', device=device)
# The CompilationError must have been caused by a C++ exception with this text.
assert re.match('invalid operands of type', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['<<', '>>']
for dtype_x in int_dtypes + uint_dtypes
for dtype_y in int_dtypes + uint_dtypes
])
def test_shift_op(dtype_x, dtype_y, op, device='cuda'):
expr = f'x {op} y'
bw = max(_bitwidth(dtype_x), _bitwidth(dtype_y))
dtype_z = f'uint{bw}'
numpy_expr = f'x.astype(np.{dtype_z}) {op} y.astype(np.{dtype_z})'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device, y_low=0, y_high=65)
# ---------------
# test compare ops
# ---------------
ops = ['==', '!=', '>', '<', '>=', '<=']
@pytest.mark.parametrize("dtype_x, dtype_y, op, mode_x, mode_y",
# real
[
(dtype_x, dtype_y, op, 'real', 'real')
for op in ops
for dtype_x in dtypes
for dtype_y in dtypes
] +
# NaNs
[('float32', 'float32', op, mode_x, mode_y)
for op in ops
for mode_x, mode_y in [('nan', 'real'),
('real', 'nan'),
('nan', 'nan')]
])
def test_compare_op(dtype_x, dtype_y, op, mode_x, mode_y, device='cuda'):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
_test_binary(dtype_x, dtype_y, expr, numpy_expr, mode_x=mode_x, mode_y=mode_y, device=device)
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, ' -x') for dtype_x in dtypes
] + [
(dtype_x, ' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'np.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
@pytest.mark.parametrize("expr, dtype_str", [
(f'x[{s}]', d)
for s in ['None, :', ':, None', 'None, :, :', ':, :, None']
for d in ['int32', 'uint32', 'uint16']
])
def test_index1d(expr, dtype_str, device='cuda'):
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
kernel = patch_kernel(kernel, to_replace)
# torch result
x = numpy_random(shape_x, dtype_str=dtype_str)
y = np.zeros(shape_z, dtype=getattr(np, dtype_str))
z_ref = eval(expr) + y
# triton result
z_tri = to_triton(np.empty_like(z_ref), device=device)
x_tri = to_triton(x)
kernel[(1, )](z_tri, x_tri, num_warps=1, SIZE=shape_x[0])
# compare
assert (z_ref == to_numpy(z_tri)).all()
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x_str, mode", itertools.chain.from_iterable([
[
('add', 'float16', mode),
('add', 'uint32', mode), ('add', 'int32', mode), ('add', 'float32', mode),
('max', 'uint32', mode), ('max', 'int32', mode), ('max', 'float32', mode),
('min', 'uint32', mode), ('min', 'int32', mode), ('min', 'float32', mode),
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x_str, mode, device='cuda'):
n_programs = 5
# triton kernel
@triton.jit
def kernel(X, Z):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
numpy_op = {'add': np.sum, 'max': np.max, 'min': np.min}[op]
max_neutral = float('-inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).min
min_neutral = float('inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
rs = RandomState(17)
x = numpy_random((n_programs, ), dtype_str=dtype_x_str, rs=rs)
if mode == 'all_neg':
x = -np.abs(x)
if mode == 'all_pos':
x = np.abs(x)
if mode == 'min_neg':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = -np.max(np.abs(x)) - 1
if mode == 'max_pos':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = np.max(np.abs(x)) + 1
x_tri = to_triton(x, device=device)
z_tri = to_triton(np.array([neutral], dtype=getattr(np, dtype_x_str)), device=device)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = numpy_op(x).astype(getattr(np, dtype_x_str))
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == to_numpy(z_tri).item()
else:
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False)
for dtype_x in dtypes
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True),
] + [
(f'uint{x}', f'int{x}', True) for x in [8, 16, 32, 64]
] + [
(f'int{x}', f'uint{x}', True) for x in [8, 16, 32, 64]
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
# This is tricky because numpy doesn't have bfloat, and torch doesn't have uints.
x0 = 43 if dtype_x in int_dtypes else 43.5
if dtype_x.startswith('bfloat'):
x_tri = torch.tensor([x0], dtype=getattr(torch, dtype_x), device=device)
else:
x = np.array([x0], dtype=getattr(np, dtype_x))
x_tri = to_triton(x)
# triton kernel
@triton.jit
def kernel(X, Z, BITCAST: tl.constexpr):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=BITCAST)
tl.store(Z, z)
# triton result
if dtype_z.startswith('bfloat'):
z_tri = torch.empty((1,), dtype=getattr(torch, dtype_z), device=device)
else:
z_tri = to_triton(np.empty((1, ), dtype=getattr(np, dtype_z)), device=device)
kernel[(1, )](x_tri, z_tri, BITCAST=bitcast)
# torch result
if dtype_z.startswith('bfloat') or dtype_x.startswith('bfloat'):
assert bitcast is False
z_ref = x_tri.to(z_tri.dtype)
assert z_tri == z_ref
else:
if bitcast:
z_ref = x.view(getattr(np, dtype_z))
else:
z_ref = x.astype(getattr(np, dtype_z))
assert to_numpy(z_tri) == z_ref
def test_f8_f16_roundtrip():
"""Tests that converting an f8 to f16 and back to f8 doesn't change its value"""
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
f8_tensor = torch.tensor(range(-128, 128), dtype=torch.int8, device='cuda')
f8 = triton.reinterpret(f8_tensor, tl.float8)
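    # triton.reinterpret only changes the element type the kernel sees (the int8
    # storage is viewed as float8); no copy or numeric conversion happens here.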
n_elements = f8_tensor.numel()
f16 = torch.empty_like(f8_tensor, dtype=torch.float16)
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](f8, f16, n_elements, BLOCK_SIZE=1024)
f8_output_tensor = torch.empty_like(f16, dtype=torch.int8)
f8_output = triton.reinterpret(f8_output_tensor, tl.float8)
copy_kernel[grid](f16, f8_output, n_elements, BLOCK_SIZE=1024)
assert torch.all(f8_tensor == f8_output_tensor)
def test_f16_to_f8_rounding():
"""Takes all float16s, converts them to float8 and back to float16. Checks that the absolute
error is the minimum over all float8.
Or the same explanation a bit mathier:
for all f16 |f16 - fromf8(tof8(f16))| == min over all f8 |f16 - fromf8(f8)|"""
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
# torch.view with a dtype isn't supported in triton's torch yet so use numpy's view
f16_input_np = (
np.array(
range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=np.int16,
)
.view(np.float16)
)
f16_input = torch.tensor(f16_input_np, dtype=torch.float16, device='cuda')
n_elements = f16_input.numel()
f8_output_tensor = torch.empty_like(f16_input, dtype=torch.int8)
f8_output = triton.reinterpret(f8_output_tensor, tl.float8)
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](f16_input, f8_output, n_elements, BLOCK_SIZE=1024)
f16_output = torch.empty_like(f16_input, dtype=torch.float16)
copy_kernel[grid](f8_output, f16_output, n_elements, BLOCK_SIZE=1024)
abs_error = torch.abs(f16_input - f16_output)
all_f8_vals_tensor = torch.tensor(range(2 ** 8), dtype=torch.uint8, device='cuda')
all_f8_vals = triton.reinterpret(all_f8_vals_tensor, tl.float8)
all_f8_vals_in_f16 = torch.empty_like(all_f8_vals_tensor, dtype=torch.float16)
copy_kernel[grid](all_f8_vals, all_f8_vals_in_f16, n_elements=256, BLOCK_SIZE=1024)
all_finite_f8_vals_in_f16 = all_f8_vals_in_f16[
torch.isfinite(all_f8_vals_in_f16)
]
min_error = torch.min(
torch.abs(
f16_input.reshape((-1, 1))
- all_finite_f8_vals_in_f16.reshape((1, -1))
),
dim=1,
)[0]
# 1.9375 is float8 max
mismatch = torch.logical_and(
abs_error != min_error, torch.logical_and(torch.isfinite(f16_input), torch.abs(f16_input) < 1.9375)
)
assert torch.all(
torch.logical_not(mismatch)
), f"f16_input[mismatch]={f16_input[mismatch]} f16_output[mismatch]={f16_output[mismatch]} abs_error[mismatch]={abs_error[mismatch]} min_error[mismatch]={min_error[mismatch]}"
# ---------------
# test reduce
# ---------------
@pytest.mark.parametrize("dtype_str, shape",
[(dtype, shape)
for dtype in dtypes
for shape in [128, 512]])
def test_reduce1d(dtype_str, shape, device='cuda'):
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.store(Z, tl.sum(x, axis=0))
rs = RandomState(17)
x = numpy_random((shape,), dtype_str=dtype_str, rs=rs)
# numpy result
z_ref = np.sum(x).astype(getattr(np, dtype_str))
# triton result
x_tri = to_triton(x, device=device)
z_tri = to_triton(numpy_random((1,), dtype_str=dtype_str, rs=rs), device=device)
kernel[(1,)](x_tri, z_tri, BLOCK=shape)
# compare
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
reduce_configs1 = [
(dtype, (1, 1024), axis) for dtype in ['float32', 'uint32']
for axis in [1]
]
reduce_configs2 = [
('float32', shape, 1) for shape in [(2, 32), (4, 128), (32, 64), (64, 128), (128, 256), (32, 1024)]
]
@pytest.mark.parametrize("dtype_str, shape, axis", reduce_configs1 + reduce_configs2)
def test_reduce2d(dtype_str, shape, axis, device='cuda'):
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexpr):
range_m = tl.arange(0, BLOCK_M)
range_n = tl.arange(0, BLOCK_N)
x = tl.load(X + range_m[:, None] * BLOCK_N + range_n[None, :])
z = tl.sum(x, axis=AXIS)
tl.store(Z + range_m, z)
# input
x = numpy_random(shape, dtype_str=dtype_str)
# triton result
x_tri = to_triton(x)
z_tri = to_triton(np.empty((shape[0],), dtype=getattr(np, dtype_str)), device=device)
kernel[(1,)](x_tri, z_tri, BLOCK_M=shape[0], BLOCK_N=shape[1], AXIS=axis)
# numpy reference result
z_ref = np.sum(x, axis=axis).astype(x.dtype)
# compare
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
# ---------------
# test permute
# ---------------
@pytest.mark.parametrize("dtype_str, shape, perm",
[(dtype, shape, perm)
for dtype in ['float32']
for shape in [(128, 128)]
for perm in [(1, 0)]])
def test_permute(dtype_str, shape, perm, device='cuda'):
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xn,
Z, stride_zm, stride_zn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
tl.store(Zs, tl.load(Xs))
# input
x = numpy_random(shape, dtype_str=dtype_str)
# triton result
    z_tri = to_triton(np.empty_like(x), device=device)
# -*- coding: UTF-8 -*
""" Transform the classification score (especially the one of the SVM) """
import numpy
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.prediction_vector import PredictionVector
class EmptyBinException(Exception): pass
class PlattsSigmoidFitNode(BaseNode):
""" Map prediction scores to probability estimates with a sigmoid fit
This node uses a sigmoid fit to map a prediction score to a class
probability estimate, i.e. a value between 0 and 1, where e.g. 0.5 means
50% probability of the positive class which must not necessarily correspond
to a SVM score of 0.
For more information see 'Probabilistic Outputs for Support Vector
Machines and Comparisons to Regularized Likelihood Methods' (Platt) 1999.
The parametric form of the sigmoid function is:
.. math::
        P(c|x) \\approx P_{A,B}(s(x)) = \\frac{1}{1+e^{As(x)+B}}
where c is the actual class, x the data, s(x) the prediction score and
A and B are calculated through the training examples.
.. note:: Learning this transformation on the same training data
              on which the classifier was trained is not recommended
for non-linear kernels (due to over-fitting).
The best parameter setting z*=(A*,B*) is determined by solving the
following regularized maximum likelihood problem:
.. math::
\\min F(z)= - \\sum_{i=1}^l{(t_i \\log(p_i) + (1-t_i) \\log(1-p_i))},
    where :math:`p_i=P_{A,B}(s(x_i))` and the :math:`t_i` are target probabilities defined according
to *priors* and :math:`c_i`.
The implementation is improved to ensure convergence and to avoid numerical
difficulties (see 'A Note on Platt's Probabilistic Outputs for Support
Vector Machines' (HT Lin, RC Weng) 2007).
**Parameters**
:priors:
        A tuple that consists of the number of examples expected for
each class (first element negative class, second element
positive class). If the parameter is not specified, the numbers in
the training set are used.
(*optional, default: None*)
:class_labels:
Determines the order of classes, i.e. the mapping of class labels
onto integers. The first element of the list should be the negative
class, the second should be the positive class.
If this parameter is not specified, the order is determined based on
the order of occurrence in the training data (which is more or less
arbitrary).
(*optional, default: []*)
:oversampling:
If True different class distributions are balanced by oversampling
and random drawing where appropriate (if the overrepresented class
is not divisible by the underrepresented class).
(*optional, default: False*)
:store_plots:
If True 'reliable diagrams' of the training and test data are stored.
A discretization of the scores is made to calculate empirical
probabilities. The number of scores per bin is displayed on every
data point in the figure and shows how accurate the estimate
is (the higher the number the better). If the fit is reliable the
empirical probabilities should scatter around the diagonal in the
        right plots. The store variable is also set to True if this
        variable is set.
:store_probabilities:
If True the calculated probability and the corresponding label for
        each prediction is pickled and saved in the results directory.
        The store variable is also set to True if this variable is set.
(*optional, default: False*)
:store:
If True store_plots and store_probabilities are set to True.
This is the "simple" way to store both the plots and the
probabilities.
**Exemplary Call**
.. code-block:: yaml
-
node : PSF
parameters :
class_labels : ['Target','Standard']
"""
def __init__(self, priors = None, class_labels = [], oversampling = False,
store_plots = False, store_probabilities = False, **kwargs):
super(PlattsSigmoidFitNode, self).__init__(**kwargs)
if ( store_plots or store_probabilities ):
self.store = True
elif(self.store):
store_plots = True
store_probabilities = True
self.set_permanent_attributes(priors = priors,
class_labels = class_labels,
oversampling = oversampling,
scores = [],
labels = [],
probabilities = [],
store_plots = store_plots,
store_probabilities = store_probabilities)
def is_trainable(self):
return True
def is_supervised(self):
return True
def _train(self, data, class_label):
""" Collect SVM output and true labels. """
self._train_phase_started = True
self.scores.append(data.prediction)
if class_label not in self.class_labels:
self.class_labels.append(class_label)
self.labels.append(self.class_labels.index(class_label))
def _stop_training(self):
""" Compute parameter A and B for sigmoid fit."""
def func_value(s,t,a,b):
""" Compute the function value avoiding 'catastrophic cancellation'.
-(t_i log(p_i) + (1-t_i)log(1- p_i))
= t_i log(1+exp(as_i+b)) + (t_i-1)((as_i+b)-log(1+exp(as_i+b)))
= t_i log(1+exp(as_i+b)) + (t_i-1)(as_i+b) -
t_i log(1+exp(as_i+b)) + log(1+exp(as_i+b))
= (t_i-1)(as_i+b) + log(1+exp(as_i+b)) *
= t_i(as_i+b) + log(exp(-as_i-b)) + log(1+exp(as_i+b))
= t_i(as_i+b) + log((1+exp(as_i+b))/exp(as_i+b))
= t_i(as_i+b) + log(1+exp(-as_i-b)) **
"""
fapb = s*a+b
# * is used if fapb[i]<0, ** otherwise
f = sum([(t[i]-1)*fapb[i] + numpy.log1p(numpy.exp(fapb[i])) if fapb[i]<0 \
else t[i]*fapb[i] + numpy.log1p(numpy.exp(-fapb[i])) \
for i in range(len(fapb))])
return f
self._log("Performing training of sigmoid mapping.")
if self.oversampling:
# first assume that the negative class is the overrepresented class
overrepresented_inst = [score for score,label in \
zip(self.scores,self.labels) if label==0]
underrepresented_inst = [score for score,label in \
zip(self.scores,self.labels) if label==1]
if len(overrepresented_inst) != len(underrepresented_inst):
# check if assumption was correct
if len(overrepresented_inst) < len(underrepresented_inst):
tmp = overrepresented_inst
overrepresented_inst = underrepresented_inst
underrepresented_inst = tmp
oversampling_factor = len(overrepresented_inst) / \
len(underrepresented_inst)
self.scores.extend((oversampling_factor-1)*underrepresented_inst)
self.labels.extend((oversampling_factor-1) * \
len(underrepresented_inst) * range(1,2))
if len(overrepresented_inst) % len(underrepresented_inst) != 0:
num_draw_random = len(overrepresented_inst) - \
oversampling_factor * len(underrepresented_inst)
# Randomizer has to be fixed for reproducibility
import random
randomizer = random.Random(self.run_number)
for i in range(num_draw_random):
selected_score=randomizer.choice(underrepresented_inst)
self.scores.append(selected_score)
underrepresented_inst.remove(selected_score)
self.labels.extend(num_draw_random * range(1,2))
if self.priors == None:
self.priors = (self.labels.count(0),self.labels.count(1))
self.scores = numpy.array(self.scores)
# Parameter settings
maxiter = 100 # Maximum number of iterations
minstep = 1.0e-10 # Minimum step taken in line search
sigma = 1.0e-12 # Set to any value > 0
# Construct initial value: target support in array targets,
hiTarget = (self.priors[1]+1.0)/(self.priors[1]+2.0)
loTarget = 1/(self.priors[0]+2.0)
h = (loTarget,hiTarget)
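        # Platt's regularized targets: the positive class gets (N+ + 1)/(N+ + 2)
        # and the negative class 1/(N- + 2) instead of hard 1/0 labels, so the
        # fit never has to reproduce probabilities of exactly 0 or 1.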
targets = numpy.array([h[index] for index in self.labels])
# initial function value in fval
self.A = 0
self.B = numpy.log((self.priors[0]+1.0)/(self.priors[1]+1.0))
fval = func_value(self.scores,targets,self.A,self.B)
for it in range(maxiter):
# update gradient and Hessian (use H' = H + sigma I)
h11 = h22 = sigma
h21 = g1 = g2 = 0.0
fApB = self.scores*self.A+self.B
pq= numpy.array([[1/(1.0+numpy.exp(fApB[i])),
numpy.exp(fApB[i])/(1.0+numpy.exp(fApB[i]))] if fApB[i]<0 \
else [numpy.exp(-fApB[i])/(1.0+numpy.exp(-fApB[i])),
1/(1.0+numpy.exp(-fApB[i]))] \
for i in range(len(fApB))])
d1 = targets - pq[:,0]
d2 = pq[:,0] * pq[:,1]
h11 = sum(self.scores**2*d2)
h21 = sum(self.scores*d2)
h22 = sum(d2)
g1 = sum(self.scores*d1)
g2 = sum(d1)
# stopping criteria: if gradient is really tiny, stop
if (abs(g1) < 1.0e-5) and (abs(g2) < 1.0e-5):
break
# finding Newton direction: -inv(H')*g
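            # the 2x2 system H' * [dA, dB]^T = -[g1, g2]^T with
            # H' = [[h11, h21], [h21, h22]] is solved in closed form (Cramer's rule)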
det = h11*h22-h21**2
dA = -(h22*g1-h21*g2)/det
dB = -(-h21*g1+h11*g2)/det
gd = g1*dA+g2*dB
# line search
stepsize = 1
while stepsize >= minstep:
newA = self.A+stepsize*dA
newB = self.B+stepsize*dB
newf = func_value(self.scores,targets,newA,newB)
# check sufficient decrease
if (newf < fval+0.0001*stepsize*gd):
self.A = newA
self.B = newB
fval = newf
break
else:
stepsize /= 2.0
if stepsize < minstep:
import logging
self._log("Line search fails. A= "+str(self.A)+" B= " \
+str(self.B)+" g1= "+str(g1)+" g2= "+str(g2) \
+" dA= "+str(dA)+" dB= "+str(dB)+" gd= "+str(gd),
level = logging.WARNING)
break
if it>=maxiter-1:
import logging
self._log("Reaching maximal iterations. g1= "+str(g1)+" g2= " \
+str(g2), level=logging.WARNING)
self._log("Finished training of sigmoid mapping in %d iterations." % it)
# Clean up of not needed variables
self.scores = []
self.labels = []
def _execute(self, x):
""" Evaluate each prediction with the sigmoid mapping learned."""
fApB = x.prediction * self.A + self.B
if fApB<0:
new_prediction=1/(1.0+numpy.exp(fApB))
else:
new_prediction=numpy.exp(-fApB)/(numpy.exp(-fApB)+1.0)
# enforce mapping to interval [0,1]
new_prediction = max(0,min(1,new_prediction))
new_label = self.class_labels[0] if new_prediction <= 0.5 \
else self.class_labels[1]
        # Save the newly calculated probabilities
if self.store_probabilities:
self.probabilities.append( [new_prediction , new_label] )
return PredictionVector(label=new_label,
prediction=new_prediction,
predictor=x.predictor)
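    # Worked example with assumed fit parameters: for A = -1.2, B = 0.1 a raw
    # score of 2.0 gives fApB = -2.3 < 0, hence
    #   P(positive) = 1 / (1 + exp(-2.3)) ~ 0.909,
    # which is above 0.5, so the second (positive) class label is returned.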
def _discretize(self, predictions, labels, bins=12):
""" Discretize predictions into bins.
Return bin scores and 2d list of discretized labels. """
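        # Start with `bins` equal-width bins over the sorted score range; if a
        # score skips past an entire bin (so that bin would stay empty), an
        # EmptyBinException is raised and the loop retries with one bin fewer.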
while(True):
try:
cut = (abs(predictions[0])+ abs(predictions[-1]))/bins
current_bin=0
l_discrete={0:[]}
bin_scores = [predictions[0]+cut/2.0]
for p,l in zip(predictions,labels):
if p > predictions[0]+cut*(current_bin+2):
raise EmptyBinException("One bin without any examples!")
if p > predictions[0]+cut*(current_bin+1):
current_bin += 1
bin_scores.append(bin_scores[-1]+cut)
l_discrete[current_bin]=[l]
else:
l_discrete[current_bin].append(l)
if len(l_discrete)==bins+1:
l_discrete[bins-1].extend(l_discrete[bins])
del l_discrete[bins]
del bin_scores[-1]
bin_scores[-1]= bin_scores[-1]-cut/2.0 + \
(predictions[-1]-(bin_scores[-1]-cut/2.0))/2.0
except EmptyBinException:
if bins>1:
bins-=1
else:
raise Exception("Could not discretize data!")
else:
return bin_scores, l_discrete
def _empirical_probability(self, l_discrete):
""" Return dictionary of empirical class probabilities for discretized label list."""
plot_emp_prob = {}
len_list = {}
for label in range(len(self.class_labels)):
plot_emp_prob[label]=[]
len_list[label]=[]
for score_list in l_discrete.values():
len_list[label].append(len(score_list))
plot_emp_prob[label].append(score_list.count(label)/ \
float(len(score_list)))
return len_list, plot_emp_prob
def store_state(self, result_dir, index=None):
""" Stores plots of score distribution and sigmoid fit or/and
the calculated probabilities with the corresponding label.
.. todo:: change plot calculations to upper if else syntax
.. todo:: add the corresponding data point to the saved probabilities
"""
if self.store :
# Create the directory for the stored results
from pySPACE.tools.filesystem import create_directory
import os
node_dir = os.path.join(result_dir, self.__class__.__name__)
create_directory(node_dir)
            # Save the probabilities in a pickle file
if( self.store_probabilities ):
import pickle
f_name=node_dir + "/probabilities_%d.pickle" % self.current_split
pickle.dump(self.probabilities, open(f_name,'w'))
if self.store_plots:
# reliable plot of training (before sigmoid fit)
sort_index = numpy.argsort(self.scores)
labels = numpy.array(self.labels)[sort_index]
predictions = numpy.array(self.scores)[sort_index]
plot_scores_train,l_discrete_train=self._discretize(predictions, labels)
len_list_train, plot_emp_prob_train = self._empirical_probability(l_discrete_train)
# training data after sigmoid fit
fApB = predictions * self.A + self.B
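                # numerically stable evaluation of 1/(1 + exp(fApB)): the expression
                # below uses exp(fApB) when fApB < 0 and exp(-fApB) otherwise,
                # avoiding overflow in either direction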
new_predictions = [(int(fApB[i]<0)+int(fApB[i]>=0)*numpy.exp(-fApB[i]))/ \
(1.0+numpy.exp((-1)**int(fApB[i]>=0)*fApB[i])) \
for i in range(len(fApB))]
plot_scores_train_fit, l_discrete_train_fit = \
self._discretize(new_predictions,labels)
len_list_train_fit, plot_emp_prob_train_fit = \
self._empirical_probability(l_discrete_train_fit)
# test data before sigmoid fit
test_scores = []
test_labels = []
for data, label in self.input_node.request_data_for_testing():
test_scores.append(data.prediction)
test_labels.append(self.class_labels.index(label))
sort_index = numpy.argsort(test_scores)
labels = numpy.array(test_labels)[sort_index]
predictions = numpy.array(test_scores)[sort_index]
plot_scores_test,l_discrete_test = self._discretize(predictions, labels)
len_list_test, plot_emp_prob_test = self._empirical_probability(l_discrete_test)
# test data after sigmoid fit
fApB = predictions * self.A + self.B
new_predictions = [(int(fApB[i]<0)+int(fApB[i]>=0)*numpy.exp(-fApB[i]))/ \
(1.0+numpy.exp((-1)**int(fApB[i]>=0)*fApB[i])) \
for i in range(len(fApB))]
plot_scores_test_fit, l_discrete_test_fit = \
self._discretize(new_predictions,labels)
len_list_test_fit, plot_emp_prob_test_fit = \
self._empirical_probability(l_discrete_test_fit)
import pylab
from matplotlib.transforms import offset_copy
pylab.close()
fig = pylab.figure(figsize=(10,10))
ax = pylab.subplot(2,2,1)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x,y,s in zip(plot_scores_train,plot_emp_prob_train[1],len_list_train[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_train[0],plot_scores_train[-1]),(0,1),'-')
x = numpy.arange(plot_scores_train[0],plot_scores_train[-1],.02)
y = 1/(1+numpy.exp(self.A*x+self.B))
pylab.plot(x,y,'-')
pylab.xlim(plot_scores_train[0],plot_scores_train[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM prediction Score (training data)")
pylab.ylabel("Empirical Probability")
ax = pylab.subplot(2,2,2)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x, y, s in zip(plot_scores_train_fit, plot_emp_prob_train_fit[1],
len_list_train_fit[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_train_fit[0],plot_scores_train_fit[-1]),(0,1),'-')
pylab.xlim(plot_scores_train_fit[0],plot_scores_train_fit[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM Probability (training data)")
pylab.ylabel("Empirical Probability")
ax = pylab.subplot(2,2,3)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x,y,s in zip(plot_scores_test,plot_emp_prob_test[1],len_list_test[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_test[0],plot_scores_test[-1]),(0,1),'-')
x = numpy.arange(plot_scores_test[0],plot_scores_test[-1],.02)
y = 1/(1+numpy.exp(self.A*x+self.B))
pylab.plot(x,y,'-')
pylab.xlim(plot_scores_test[0],plot_scores_test[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM prediction Scores (test data)")
pylab.ylabel("Empirical Probability")
ax = pylab.subplot(2,2,4)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x, y, s in zip(plot_scores_test_fit, plot_emp_prob_test_fit[1],
len_list_test_fit[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_test_fit[0],plot_scores_test_fit[-1]),(0,1),'-')
pylab.xlim(plot_scores_test_fit[0],plot_scores_test_fit[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM Probability (test data)")
pylab.ylabel("Empirical Probability")
pylab.savefig(node_dir + "/reliable_diagrams_%d.png" % self.current_split)
class SigmoidTransformationNode(BaseNode):
""" Transform score to interval [0,1] with a sigmoid function
The new decision border will be at 0.5.
.. warning::
This is NOT a probability mapping and parameters should be set for
the function.
This node is intended to be externally optimized, such that it
generalizes the threshold optimization for soft metrics.
The used sigmoid fit function is :math:`\\frac{1}{1+e^{Ax+B}}`.
It is 0.5 at :math:`x = -\\frac{B}{A}`.
**Parameters**
:A:
Scaling of prediction value. See above.
(*optional, default: -1*)
:B:
Shifting of scaled prediction. See above.
(*optional, default: 0*)
:offset:
Has the meaning of :math:`-\\frac{B}{A}` and replaces the parameter B if used.
(*optional, default: None*)
:class_labels:
Determines the order of classes, i.e. the mapping of class labels
onto integers. The first element of the list should be the negative
class, the second should be the positive class.
In the context positive should be the class mapped greater than 0.5
and the other class should be the negative one.
If the original prediction value had the same orientation,
*A* should be chosen negative.
(*optional, default: ['Standard','Target']*)
**Exemplary Call**
.. code-block:: yaml
-
node : SigTrans
parameters :
class_labels : ['Standard','Target']
"""
input_types=["PredictionVector"]
def __init__(self, class_labels = ['Standard','Target'],
A = -1, B = 0, offset = None,
**kwargs):
super(SigmoidTransformationNode, self).__init__(**kwargs)
if not(offset is None):
B = -A *offset
self.set_permanent_attributes(class_labels = class_labels,
A = A,
B = B)
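    # Example: A = -1 with offset = 2 yields B = 2, so the mapping
    # 1 / (1 + exp(-x + 2)) crosses 0.5 exactly at a raw score of x = 2.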
def is_trainable(self):
return False
def is_supervised(self):
return False
def _execute(self, data):
""" Evaluate each prediction with the sigmoid mapping learned. """
        # code simply copied from PlattsSigmoidFitNode for eventual future changes
fApB = data.prediction * self.A + self.B
if fApB<0:
new_prediction=1/(1.0+numpy.exp(fApB))
else:
new_prediction=numpy.exp(-fApB)/(numpy.exp(-fApB)+1.0)
# enforce mapping to interval [0,1]
new_prediction = max(0,min(1,new_prediction))
new_label = self.class_labels[0] if new_prediction <= 0.5 \
else self.class_labels[1]
return PredictionVector(label=new_label,
prediction=new_prediction,
predictor=data.predictor)
class LinearTransformationNode(BaseNode):
""" Scaling and offset shift, and relabeling due to new decision boundary
    A prediction value x is mapped to (x+*offset*)*scaling*.
If the result is lower than the *decision boundary* it is mapped to the
first class label for the negative class and otherwise to the second
positive class.
**Parameters**
    :class_labels: This parameter defines the ordering of class
                   labels for the mapping after the transformation.
If this parameter is not specified, the label remains
unchanged. This is for example feasible for regression
mappings.
.. note:: This parameter could be also used to change
class label strings, but this would probably
cause problems in the evaluation step.
(*recommended, default: None*)
:offset: Shift of the prediction value.
(*optional, default: 0*)
:scaling: Scaling factor applied after offset shift.
(*optional, default: 1*)
:decision_boundary: Everything lower this value is classified as
class one and everything else as class two. By default
no labels are changed.
**Exemplary Call**
.. code-block:: yaml
- node : LinearTransformation
parameters :
class_labels : ['Standard', 'Target']
offset : 1
scaling : 42
decision_boundary : 3
"""
def __init__(self, class_labels=None, offset=0, scaling=1,
decision_boundary=None, **kwargs):
super(LinearTransformationNode, self).__init__(**kwargs)
if class_labels is None or decision_boundary is None:
decision_boundary = None
class_labels = None
self.set_permanent_attributes(class_labels=class_labels,
scaling=scaling,
offset=offset,
decision_boundary=decision_boundary,
)
def _execute(self, x):
""" (x+o)*s < d """
p = x.prediction
prediction = (p+self.offset)*self.scaling
if self.decision_boundary is None:
label = x.label
        elif prediction < self.decision_boundary:
label = self.class_labels[0]
else:
label = self.class_labels[1]
return PredictionVector(prediction=prediction, label=label,
predictor=x.predictor)
class LinearFitNode(BaseNode):
""" Linear mapping between score and [0,1]
    This node maps the unbounded SVM score linearly to the interval [0, 1].
    Whether the result can be interpreted as a probability can be seen in the
    reliable diagrams.
**Parameters**
:class_labels:
Determines the order of classes, i.e. the mapping of class labels
onto integers. The first element of the list should be the negative
class, the second should be the positive class.
If this parameter is not specified, the order is determined based on
the order of occurrence in the training data (which is more or less
arbitrary).
(*optional, default: []*)
:store:
If True 'reliable diagrams' of the training and test data are stored.
A discretization of the scores is made to calculate empirical
probabilities. The number of scores per bin is displayed on every
data point in the figure and shows how accurate the estimate
is (the higher the number the better). If the fit is reliable the
empirical probabilities should scatter around the diagonal in the
right plots.
**Exemplary Call**
.. code-block:: yaml
-
node : LinearFit
parameters :
class_labels : ['Standard','Target']
"""
def __init__(self, class_labels = [], **kwargs):
super(LinearFitNode, self).__init__(**kwargs)
self.set_permanent_attributes(class_labels = class_labels,
scores = [],
labels = [])
def is_trainable(self):
return True
def is_supervised(self):
return True
def _train(self, data, class_label):
""" Collect SVM output and true labels. """
self._train_phase_started = True
self.scores.append(data.prediction)
if class_label not in self.class_labels:
self.class_labels.append(class_label)
self.labels.append(self.class_labels.index(class_label))
def _stop_training(self):
""" Compute max range of the score according to the class."""
positive_inst = [score for score,label in \
zip(self.scores,self.labels) if label==1]
negative_inst = [score for score,label in \
zip(self.scores,self.labels) if label==0]
self.max_range = (abs(min(negative_inst)),max(positive_inst))
def _execute(self, x):
""" Evaluate each prediction with the linear mapping learned."""
if x.prediction < -1.0*self.max_range[0]:
new_prediction = 0.0
elif x.prediction < self.max_range[1]:
new_prediction = (x.prediction + \
self.max_range[self.class_labels.index(x.label)]) / \
(2.0 * self.max_range[self.class_labels.index(x.label)])
else:
new_prediction = 1.0
return PredictionVector(label=x.label,
prediction=new_prediction,
predictor=x.predictor)
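    # Example with an assumed fit: max_range = (3.0, 5.0). A negative-class score
    # of -1.5 maps to (-1.5 + 3.0) / 6.0 = 0.25, while a positive-class score of
    # 2.5 maps to (2.5 + 5.0) / 10.0 = 0.75.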
def _discretize(self, predictions, labels, bins=12):
""" Discretize predictions into bins. Return bin scores and 2d list of discretized labels. """
while(True):
try:
cut = (abs(predictions[0])+ abs(predictions[-1]))/bins
current_bin=0
l_discrete={0:[]}
bin_scores = [predictions[0]+cut/2.0]
for p,l in zip(predictions,labels):
if p > predictions[0]+cut*(current_bin+2):
raise EmptyBinException("One bin without any examples!")
if p > predictions[0]+cut*(current_bin+1):
current_bin += 1
bin_scores.append(bin_scores[-1]+cut)
l_discrete[current_bin]=[l]
else:
l_discrete[current_bin].append(l)
if len(l_discrete)==bins+1:
l_discrete[bins-1].extend(l_discrete[bins])
del l_discrete[bins]
del bin_scores[-1]
bin_scores[-1]= bin_scores[-1]-cut/2.0 + \
(predictions[-1]-(bin_scores[-1]-cut/2.0))/2.0
except EmptyBinException:
if bins>1:
bins-=1
else:
raise Exception("Could not discretize data!")
else:
return bin_scores, l_discrete
def _empirical_probability(self, l_discrete):
""" Return dictionary of empirical class probabilities for discretized label list."""
plot_emp_prob = {}
len_list = {}
for label in range(len(self.class_labels)):
plot_emp_prob[label]=[]
len_list[label]=[]
for score_list in l_discrete.values():
len_list[label].append(len(score_list))
plot_emp_prob[label].append(score_list.count(label)/ \
float(len(score_list)))
return len_list, plot_emp_prob
def store_state(self, result_dir, index=None):
""" Stores plots of score distribution and sigmoid fit. """
if self.store :
# reliable plot of training (before linear fit)
sort_index = numpy.argsort(self.scores)
labels = numpy.array(self.labels)[sort_index]
predictions = numpy.array(self.scores)[sort_index]
plot_scores_train,l_discrete_train=self._discretize(predictions, labels)
len_list_train, plot_emp_prob_train = self._empirical_probability(l_discrete_train)
# training data after linear fit
new_predictions = []
for score in predictions:
if score < 0.0:
new_predictions.append((score + self.max_range[0]) / \
(2.0 * self.max_range[0]))
else:
new_predictions.append((score + self.max_range[1]) / \
(2.0 * self.max_range[1]))
plot_scores_train_fit, l_discrete_train_fit = \
self._discretize(new_predictions,labels)
len_list_train_fit, plot_emp_prob_train_fit = \
self._empirical_probability(l_discrete_train_fit)
# test data before sigmoid fit
test_scores = []
test_labels = []
for data, label in self.input_node.request_data_for_testing():
test_scores.append(data.prediction)
test_labels.append(self.class_labels.index(label))
            sort_index = numpy.argsort(test_scores)
import os
import glob
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch.utils.data as data
from xml.etree.ElementTree import parse
from matplotlib.patches import Rectangle
from dataset.trasform import transform
class VOC_Dataset(data.Dataset):
class_names = ('aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
    def __init__(self, root=r"D:\Data\VOC_ROOT", split='TRAIN'):
super(VOC_Dataset, self).__init__()
root = os.path.join(root, split)
self.img_list = sorted(glob.glob(os.path.join(root, '*/JPEGImages/*.jpg')))
self.anno_list = sorted(glob.glob(os.path.join(root, '*/Annotations/*.xml')))
self.class_dict = {class_name: i for i, class_name in enumerate(self.class_names)}
self.class_dict_inv = {i : class_name for i, class_name in enumerate(self.class_names)}
self.split = split
self.img_size = 416
def __getitem__(self, idx):
visualize = False
# load img
image = Image.open(self.img_list[idx]).convert('RGB')
# load labels
boxes, labels, is_difficult = self.parse_voc(self.anno_list[idx])
# load img name for string
img_name = os.path.basename(self.anno_list[idx]).split('.')[0]
img_name_to_ascii = [ord(c) for c in img_name]
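        # tensors cannot hold strings, so the file name is stored as its ASCII
        # codes (e.g. '000012' -> [48, 48, 48, 48, 49, 50]) and can be decoded
        # back with chr() during evaluation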
# load img width and height
img_width, img_height = float(image.size[0]), float(image.size[1])
# convert to tensor
boxes = torch.FloatTensor(boxes)
labels = torch.LongTensor(labels)
difficulties = torch.ByteTensor(is_difficult) # (n_objects)
img_name = torch.FloatTensor([img_name_to_ascii])
additional_info = torch.FloatTensor([img_width, img_height])
# data augmentation
image, boxes, labels, difficulties = transform(image, boxes, labels, difficulties, self.split)
# visualization
if visualize:
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
# tensor to img
img_vis = np.array(image.permute(1, 2, 0), np.float32) # C, W, H
# img_vis += np.array([123, 117, 104], np.float32)
img_vis *= std
img_vis += mean
img_vis = np.clip(img_vis, 0, 1)
plt.figure('img')
plt.imshow(img_vis)
print('num objects : {}'.format(len(boxes)))
for i in range(len(boxes)):
print(boxes[i], labels[i])
plt.gca().add_patch(Rectangle((boxes[i][0] * self.img_size, boxes[i][1] * self.img_size),
boxes[i][2] * self.img_size - boxes[i][0] * self.img_size,
boxes[i][3] * self.img_size - boxes[i][1] * self.img_size,
linewidth=1, edgecolor='r', facecolor='none'))
plt.text(boxes[i][0] * self.img_size - 10, boxes[i][1] * self.img_size - 10,
str(self.class_dict_inv[labels[i].item()]),
bbox=dict(boxstyle='round4', color='grey'))
plt.show()
if self.split == "TEST":
return image, boxes, labels, difficulties, img_name, additional_info # for evaluations
return image, boxes, labels, difficulties
def __len__(self):
return len(self.img_list)
def set_image_size(self, img_size):
self.img_size = img_size
def parse_voc(self, xml_file_path):
tree = parse(xml_file_path)
root = tree.getroot()
boxes = []
labels = []
is_difficult = []
for obj in root.iter("object"):
            # read the 'name' tag
name = obj.find('./name')
class_name = name.text.lower().strip()
labels.append(self.class_dict[class_name])
            # read the 'bndbox' tag
bbox = obj.find('./bndbox')
x_min = bbox.find('./xmin')
y_min = bbox.find('./ymin')
x_max = bbox.find('./xmax')
y_max = bbox.find('./ymax')
            # from str to float
x_min = float(x_min.text) - 1
y_min = float(y_min.text) - 1
x_max = float(x_max.text) - 1
y_max = float(y_max.text) - 1
boxes.append([x_min, y_min, x_max, y_max])
# is_difficult
is_difficult_str = obj.find('difficult').text
is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)
return (np.array(boxes, dtype=np.float32),
                np.array(labels, dtype=np.int64),
                np.array(is_difficult, dtype=np.uint8))
import numpy as np
# get_bin_digits(6) -> array([0, 1, 1])
# get_bin_digits(6, 5) -> array([0, 1, 1, 0, 0])
def get_bin_digits(num, min_digits = None):
    digits = np.array([], dtype=int)
    for i in range(max(int(num).bit_length(), min_digits or 0)):
        digits = np.append(digits, (num >> i) & 1)
    return digits
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
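# Usage sketch: pick a group from the dictionary filled below and expand a
# reflection into its symmetry mates, e.g.
#   sg = space_groups['P 21 21 21']
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
# hkls then holds one Miller-index triple per symmetry operation and phases the
# matching structure-factor phase factors.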
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
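# Illustrative note (not part of the generated table): the face-centred group
# 'F 2 2 2' above repeats its four two-fold rotations once for each centring
# translation (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).  A minimal
# sketch of that expansion, assuming N is the numpy module:
def _expand_with_centring(primitive_ops, centring_vectors):
    expanded = []
    for shift in centring_vectors:
        for rot, trans_num, trans_den in primitive_ops:
            translation = N.array(trans_num, float) / N.array(trans_den, float)
            expanded.append((rot, translation + N.array(shift, float)))
    return expanded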
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
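# Illustrative note (not part of the generated table): translations in the
# body-centred group 'I 21 21 21' above are stored unreduced, so entries such
# as trans_num [1,1,1] with trans_den [2,2,1] mean (1/2, 1/2, 1), i.e. the
# primitive translation (0, 0, 1/2) plus the centring vector (1/2, 1/2, 1/2)
# before reduction modulo the unit cell.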
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
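# Illustrative note (not part of the generated table): 'P m m m' above is
# centrosymmetric, so its last four rotation matrices are the negatives of the
# first four (each operation composed with the inversion -x,-y,-z), all with
# zero translation.  A minimal check of that pairing, assuming N is numpy:
def _is_centrosymmetric_ordering(ops):
    half = len(ops) // 2
    return all(N.array_equal(ops[half + i][0], -ops[i][0]) for i in range(half))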
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
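# Illustrative note (not part of the generated table): the ':2' suffix in
# 'P n n n :2' above denotes the International Tables origin choice 2 setting
# (origin on an inversion centre).  Negative numerators such as [0,-1,-1] are
# unreduced translations of the inversion-related operations; modulo the unit
# cell, (0,-1/2,-1/2) is the same shift as (0,1/2,1/2).  A minimal reduction
# helper, assuming N is the numpy module:
def _reduce_translation(trans_num, trans_den):
    return N.remainder(N.array(trans_num, float) / N.array(trans_den, float), 1.0)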
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))