prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars)
---|---|---|
# TODO better way of storing data inside nodes.
from numpy import array,concatenate
from copy import deepcopy
from pydot import Dot,Edge,Node,Subgraph
from math import ceil
# Algorithm credits:
# http://www.cburch.com/cs/340/reading/btree/index.html
def main():
''' Entry point: run the removal test. '''
test_remove()
### Testing and example suite ################################
def test_insert(size=4,ninsert=100):
''' Test insertions by adding a bunch of things and checking that they are there.
Args:
size (int): max size of the nodes.
ninsert (int): number of insertions to attempt.
'''
from random import randint,random,seed
from time import sleep
seed(10)
tree = BplusTree(size)
ans = {}
for i in range(ninsert):
key = randint(0,size*30)
val = round(random(),3)
ans[key] = val
print()
print("({}) About to insert {},{}".format(i,key,val))
#input("Press enter to continue")
tree.insert(key,val)
tree.make_pydot_graph()
shakedown(tree,ans)
def test_scan(size=3,ninsert=25):
''' Test scanning by inserting sequential keys and checking the serialized leaf order.
Args:
size (int): max size of the nodes.
ninsert (int): number of insertions to attempt.
'''
tree = BplusTree(size)
for key in range(ninsert):
val = str(chr(key+97))
tree.insert(key,val)
tree.make_pydot_graph()
print(tree.serialize())
def test_remove(size=4,ninsert=40):
''' Test removals by inserting a batch of entries, then deleting them one at a time and verifying the remaining contents.
Args:
size (int): max size of the nodes.
ninsert (int): number of insertions to attempt.
'''
from random import randint,random,seed,shuffle
seed(1023)
tree = BplusTree(size)
ans = {}
for i in range(ninsert):
key = randint(0,size*30)
val = round(random(),3)
ans[key] = val
tree.insert(key,val)
shakedown(tree,ans)
tree.make_pydot_graph()
keys = list(ans.keys())
shuffle(keys)
for key in keys:
print("About to remove {}".format(key))
input("Press enter to continue.")
ans.pop(key)
tree.remove(key)
tree.make_pydot_graph()
shakedown(tree,ans)
def shakedown(tree,solutions):
for key in solutions:
print(key)
guess = tree.query(key)
assert guess == solutions[key],'{} yields {} instead of {}'.format(key,guess,solutions[key])
print("Shakedown!")
### Implementation suite #####################################
class BplusTree:
''' Contains all nodes and facilitates storing, querying, and deletion.'''
def __init__(self,maxsize):
'''
Args:
maxsize (int): largest node size for all nodes.
'''
self.maxsize = maxsize
self.root = LeafNode(maxsize)
def make_pydot_graph(self,figname='tree'):
''' Use Graphviz to generate a graphical representation of the tree.
Args:
figname (str): where to store the figure.
'''
graph = Dot(graph_type='graph')
#graph.add_node(Node(self.root.display(),shape='diamond'))
edgelist = self.root.append_edgelist()
for edge in edgelist:
graph.add_node(edge[0])
graph.add_node(edge[1])
graph.add_edge(Edge(*edge))
graph.write_png(figname+'.png')
def insert(self,key,value):
''' Insert new data into tree.
Args:
key (orderable): label for data. Must interact with < sign (i.e. be orderable).
value: Data to store.
'''
self.root.insert(key,value)
if self.root.parent is not None:
#print("Root update.")
self.root = self.root.parent
def query(self,key):
''' Recall data stored under key.
Args:
key (orderable): key where data was stored previously.
Returns:
data or None: the data that was stored, or None if data is missing.
'''
return self.root.query(key)
def serialize(self):
''' Return all the leaves data as an array.'''
focus = self.root
while isinstance(focus,InternalNode):
focus = focus.children[0]
leaves = [focus.data]
while focus.rsibling is not None:
leaves.append(focus.rsibling.data)
focus = focus.rsibling
return concatenate(leaves)
def remove(self,key):
''' Remove data stored under key from the tree. If key doesn't exist, does nothing.
Args:
key (orderable): key where data was stored previously.
'''
self.root.remove(key)
if isinstance(self.root,LeafNode) and self.root.keys.shape[0]==0:
raise AssertionError("Database is empty. I ain't coding for this edge (corner!) case, so fuck off...")
elif len(self.root.children) == 1:
print("Root update.")
self.root = self.root.children[0]
self.root.parent = None
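# Illustrative only: a minimal usage sketch of the public BplusTree API defined above
# (the keys and values here are made up and are not part of the original test suite).
def example_usage():
    tree = BplusTree(4)             # nodes hold at most 4 keys
    for key in range(10):
        tree.insert(key, str(key))  # store string payloads under integer keys
    assert tree.query(7) == '7'     # query() returns the stored value (None if missing)
    print(tree.serialize())         # all leaf data concatenated in key order
    tree.remove(7)                  # remove() deletes the entry stored under a key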
class InternalNode:
''' Internal direction-finding class. Never exists at the bottom of the tree.'''
def __init__(self,maxsize,parent):
'''
Args:
maxsize (int): Maximum leaf size.
parent (InternalNode): node which directs queries to this node.
'''
self.parent = parent
self.maxsize = maxsize
if self.parent is None:
self.minsize = 1
else:
self.minsize = maxsize//2
self.keys = array([])
self.children = array([])
def display(self):
''' Return string of contents '''
return '[ ' + ' v '.join(self.keys.astype(int).astype(str)) + ' ]'
def append_edgelist(self):
''' Internal function for generating Graphviz tree.'''
edgelist = []
for child in self.children:
edgelist.append((
Node(self.display(),shape='invhouse',fontname='monospace'),
Node(child.display(),shape='rectangle',fontname='monospace')
))
edgelist += child.append_edgelist()
return edgelist
# There is such beauty in this world!
def birth(self,key,child):
''' Insert a key to this node.
Args:
key (orderable): smallest value larger than keys in child.
child (LeafNode or InternalNode): node to keep track of.
'''
place = (self.keys < key).sum()
# Case: normal insertion. TODO should insert after split for small speedup.
self.keys = concatenate((self.keys[:place],array([key]),self.keys[place:]))
self.children = concatenate((self.children[:place],array([child]),self.children[place:]))
# Case: split required.
if self.keys.shape[0] > self.maxsize:
median = self.keys.shape[0]//2
if self.parent is None:
#print("Root split.")
self.parent = InternalNode(self.maxsize,parent=None)
self.parent.children = array([self])
newleft = InternalNode(self.maxsize,parent=self.parent)
newkey = self.keys[median]
newleft.keys = self.keys[:median]
newleft.children = self.children[:median+1]
for child in newleft.children:
child.parent = newleft # How sad that they are separated!
self.keys = self.keys[median+1:]
self.children = self.children[median+1:]
self.parent.birth(newkey,newleft)
def insert(self,key,value):
''' Insert a value into the correct descendant node.
Args:
key (orderable): label for the data.
value: Data to be stored.
'''
place = (self.keys <= key).sum()
self.children[place].insert(key,value)
def query(self,key):
''' Direct query for key to the next step. '''
place = (self.keys <= key).sum()
#print(key)
#print(self.keys,self.children)
return self.children[place].query(key)
def remove(self,key):
''' Direct the appropriate descendant to remove a data entry and its key from the tree.
Args:
key (orderable): label for the data.
'''
place = (self.keys <= key).sum()
self.children[place].remove(key)
# What is created will be destroyed.
def filicide(self,place):
''' Remove a child.'''
kplace = max(0,place-1)
key = self.keys[kplace]
self.keys = concatenate((self.keys[:kplace],self.keys[kplace+1:]))
self.children = concatenate((self.children[:place],self.children[place+1:]))
# If this node is below its minimum size and is not the root, rebalance.
if self.parent is not None and self.keys.shape[0] < self.minsize:
parplace = (self.parent.keys <= key).sum()
if parplace+1 < self.parent.children.shape[0] and \
self.parent.children[parplace+1].keys.shape[0] > self.parent.children[parplace+1].minsize:
print("Internal rotating from right.")
self.rotate_right(parplace)
elif parplace != 0 and \
self.parent.children[parplace-1].keys.shape[0] > self.parent.children[parplace-1].minsize:
print("Internal rotating from left.")
self.rotate_left(parplace)
elif parplace+1 < self.parent.children.shape[0]:
print("Internal merging right.")
self.merge_right(parplace)
elif parplace != 0: # a left sibling exists to merge with.
print("Internal merging left.")
self.merge_left(parplace)
else:
raise AssertionError("Must either have siblings or be root. Fuck off!")
def rotate_right(self,parplace):
''' Take a key from a right neighbor by rotating through parent.
Args:
parplace (int): index of self in parent.
'''
right = self.parent.children[parplace+1]
# Move parent key here.
self.keys = concatenate((self.keys,
|
array([self.parent.keys[parplace]])
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 00:01:50 2020
@author: <NAME>
@version: 5.1
"""
import sys
import numpy as np
from sklearn import preprocessing # only needed with older scikit-learn versions
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
train=sys.argv[1]
test=sys.argv[2]
num1=int(sys.argv[3])
out=open(sys.argv[4],'a')
val=sys.argv[5]
out2=sys.argv[6]
out3=sys.argv[7]
out4=sys.argv[8]
out5=sys.argv[9]
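# Command-line arguments (inferred from how they are used below):
#   argv[1] training CSV, argv[2] test CSV, argv[3] number of feature columns,
#   argv[4] accuracy output file (opened in append mode), argv[5] validation CSV,
#   argv[6] path to save the Keras model, argv[7] test-prediction output CSV,
#   argv[8] validation-prediction output CSV, argv[9] train-prediction output CSV.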
#train set preparation
X1 = np.loadtxt(train,usecols=range(num1) , delimiter=',')
Y1= np.loadtxt(train, usecols=[num1],dtype='str',delimiter=',')
nclass=len(np.unique(Y1))
le = preprocessing.LabelEncoder()
le.fit(Y1)
Y1=le.transform(Y1)
enc=OneHotEncoder(handle_unknown='ignore')
Y1=enc.fit_transform(Y1.reshape(-1,1)).toarray()
#test set preparation
Tx1 = np.loadtxt(test,usecols=range(num1) , delimiter=',')
Ty1= np.loadtxt(test, usecols=[num1],dtype='str',delimiter=',')
# reuse the encoders fitted on the training labels so class indices stay consistent
Ty1=le.transform(Ty1)
Ty1=enc.transform(Ty1.reshape(-1,1)).toarray()
#model creation
early_stopping_monitor = EarlyStopping(patience=5)
n_feat=X1.shape[1]
model9=Sequential()
model9.add(Dense(800,activation='relu',input_shape=(n_feat,)))
model9.add(Dense(200,activation='relu'))
model9.add(Dense(nclass))
model9.compile(optimizer='sgd', loss='mean_squared_error',metrics=['accuracy'])
model9.fit(X1, Y1, validation_split=0, epochs=300, callbacks=[early_stopping_monitor])
_,accuracy_pred9 = model9.evaluate(Tx1, Ty1)
test_prediction=model9.predict(Tx1)
model9.save(out2)
print(accuracy_pred9*100 , file = out)
#validation set preparation and testing
val=np.loadtxt(val,usecols=range(num1) , delimiter=',')
val_prediction=model9.predict(val)
train_prediction=model9.predict(X1)
#results export
np.savetxt(out5, train_prediction,delimiter=",",fmt='%s')
np.savetxt(out3,test_prediction, delimiter=",",fmt='%s')
|
np.savetxt(out4, val_prediction, delimiter=",",fmt='%s')
|
numpy.savetxt
|
#!/usr/local/sci/python
# python3
from __future__ import absolute_import
from __future__ import print_function
#************************************************************************
#
# Plot figures and output numbers for Phenology (PHEN) section.
# For BAMS SotC 2016
#
#************************************************************************
# SVN Info
# $Rev:: 21 $: Revision of last commit
# $Author:: rdunn $: Author of last commit
# $Date:: 2017-12-22 11:57:17 +0000 (Fri, 22 Dec #$: Date of last commit
#************************************************************************
# START
#************************************************************************
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.path as mpath
from matplotlib.ticker import MultipleLocator
import matplotlib.image as mpimg
import iris
import cartopy
import utils # RJHD utilities
import settings
data_loc = "{}/{}/data/PHEN/".format(settings.ROOTLOC, settings.YEAR)
reanalysis_loc = "{}/{}/data/RNL/".format(settings.ROOTLOC, settings.YEAR)
image_loc = "{}/{}/images/".format(settings.ROOTLOC, settings.YEAR)
LEGEND_LOC = 'lower left'
#************************************************************************
def read_modis_ts(filename):
'''
Read the timeseries data and return Timeseries objects.
:param str filename: file to read
:returns: Timeseries objects
'''
raw_data = np.genfromtxt(filename, dtype=(float), skip_header=1, delimiter=",")
years = raw_data[:, 0]
# 2018 entries
sos_nh = utils.Timeseries("SOS", years, raw_data[:, 1])
sos_na = utils.Timeseries("SOS", years, raw_data[:, 2])
sos_ea = utils.Timeseries("SOS", years, raw_data[:, 3])
eos_nh = utils.Timeseries("EOS", years, raw_data[:, 5])
eos_na = utils.Timeseries("EOS", years, raw_data[:, 6])
eos_ea = utils.Timeseries("EOS", years, raw_data[:, 7])
sprt_nh = utils.Timeseries("Spring T", years, raw_data[:, 9])
sprt_na = utils.Timeseries("Spring T", years, raw_data[:, 10])
sprt_ea = utils.Timeseries("Spring T", years, raw_data[:, 11])
falt_nh = utils.Timeseries("Autumn T", years, raw_data[:, 13])
falt_na = utils.Timeseries("Autumn T", years, raw_data[:, 14])
falt_ea = utils.Timeseries("Autumn T", years, raw_data[:, 15])
return sos_na, sos_ea, sprt_na, sprt_ea # read_modis_ts
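# Example call (the file name is hypothetical; the expected CSV layout is the one parsed above):
#   sos_na, sos_ea, sprt_na, sprt_ea = read_modis_ts(data_loc + "modis_phenology_ts.csv")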
#************************************************************************
def read_us_phenocam(filename):
raw_data = np.genfromtxt(filename, dtype=(str), skip_header=1)
lat = raw_data[:, 1].astype(float)
lon = raw_data[:, 2].astype(float)
return lat, lon # read_us_phenocam
#************************************************************************
def plot_modis_ts(axl, sos, sprt, dummy, label, anomalies, legend_loc):
utils.plot_ts_panel(axl, [sos, dummy], "-", "phenological", loc=legend_loc)
# make twin
axr = axl.twinx()
utils.plot_ts_panel(axr, [sprt], "-", "phenological", loc="")
# prettify
axl.set_ylim([-10, 10])
axr.set_ylim([3, -3])
# labels
axl.text(0.02, 0.83, label, transform=axl.transAxes, fontsize=settings.FONTSIZE*0.8)
axl.text(0.47, 0.88, anomalies[0], transform=axl.transAxes)
axl.text(0.47, 0.78, anomalies[1], transform=axl.transAxes)
# ticks etc
minorLocator = MultipleLocator(1)
for ax in [axl]:
utils.thicken_panel_border(ax)
ax.set_yticks(ax.get_yticks()[1:-1])
ax.xaxis.set_minor_locator(minorLocator)
for ax in [axr]:
ax.yaxis.tick_right()
utils.thicken_panel_border(ax)
ax.set_yticks(ax.get_yticks()[1:-1])
ax.xaxis.set_minor_locator(minorLocator)
axl.set_xlim([1999, 2020])
return # plot_modis_ts
#************************************************************************
def read_uk_oak_csv(filename):
'''
Read the timeseries data and return a Timeseries object.
:param str filename: file to read
:returns: Timeseries object
'''
raw_data = np.genfromtxt(filename, dtype=(str), skip_header=2, delimiter=",")
indata = raw_data[:, 1].astype(float)
indata = np.ma.masked_where(indata == -99, indata)
oak = utils.Timeseries("<NAME>", raw_data[:, 0].astype(int), indata)
return oak # read_uk_oak_csv
#************************************************************************
def read_windermere_csv(filename):
'''
Read the timeseries data and return Timeseries objects.
:param str filename: file to read
:returns: Timeseries objects
'''
raw_data = np.genfromtxt(filename, dtype=(int), skip_header=1, delimiter=",")
times = raw_data[:, 0]
north = utils.Timeseries("North Basin", times, raw_data[:, 1])
south = utils.Timeseries("South Basin", times, raw_data[:, 2])
return north, south # read_windermere_csv
#************************************************************************
def read_us_phenocam_csv(filename):
raw_data = np.genfromtxt(filename, dtype=(str), skip_header=1, delimiter=",")
# remove "" or NA
locs = np.where(raw_data == "")
raw_data[locs] = "-99.9"
locs =
|
np.where(raw_data == "NA")
|
numpy.where
|
# The Hazard Library
# Copyright (C) 2012-2018 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import math
import numpy
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.polygon import Polygon
from openquake.hazardlib.geo.mesh import Mesh, RectangularMesh
from openquake.hazardlib.geo import utils as geo_utils
from openquake.hazardlib.tests import assert_angles_equal
from openquake.hazardlib.tests.geo import _mesh_test_data
aac = numpy.testing.assert_allclose
class _BaseMeshTestCase(unittest.TestCase):
def _make_mesh(self, lons, lats, depths=None):
mesh = Mesh(lons, lats, depths)
self.assertIs(mesh.lons, lons)
self.assertIs(mesh.lats, lats)
self.assertIs(mesh.depths, depths)
return mesh
class MeshCreationTestCase(_BaseMeshTestCase):
def test_1d(self):
mesh = self._make_mesh(numpy.array([1, 2, 3, 5]),
numpy.array([-1, -2, 4, 0]))
self.assertEqual(len(mesh), 4)
mesh = self._make_mesh(numpy.array([1, 2]), numpy.array([0, 0]),
numpy.array([10, 10]))
self.assertEqual(len(mesh), 2)
def test_2d(self):
mesh = self._make_mesh(numpy.array([[1, 2], [3, 5]]),
numpy.array([[-1, -2], [4, 0]]))
self.assertEqual(len(mesh), 4)
mesh = self._make_mesh(numpy.array([[1, 2], [5, 6]]),
numpy.array([[0, 0], [10, 10]]),
numpy.array([[10, 10], [30, 30]]))
self.assertEqual(len(mesh), 4)
def test_one_point(self):
co = numpy.array([0])
mesh = self._make_mesh(co, co, co)
self.assertEqual(len(mesh), 1)
def test_wrong_arguments(self):
self.assertRaises(AttributeError, self._make_mesh, [1, 2], [2, 3])
self.assertRaises(AssertionError, self._make_mesh,
numpy.array([1, 2]), numpy.array([2, 3, 4]))
self.assertRaises(AssertionError, self._make_mesh,
numpy.array([1, 2]), numpy.array([2, 3]),
numpy.array([0]))
self.assertRaises(AssertionError, self._make_mesh,
numpy.array([[1], [2]]), numpy.array([[2], [3]]),
numpy.array([0, 1]))
def test_from_points_list_no_depth(self):
points = [Point(0, 1), Point(2, 3), Point(5, 7)]
mesh = Mesh.from_points_list(points)
self.assertTrue((mesh.lons == [0, 2, 5]).all())
self.assertTrue((mesh.lats == [1, 3, 7]).all())
self.assertEqual(mesh.lons.dtype, numpy.float)
self.assertEqual(mesh.lats.dtype, numpy.float)
self.assertIs(mesh.depths, None)
def test_from_points_list_with_depth(self):
points = [Point(0, 1, 2), Point(2, 3, 4), Point(5, 7, 10)]
mesh = Mesh.from_points_list(points)
self.assertTrue((mesh.depths == [2, 4, 10]).all())
self.assertEqual(mesh.depths.dtype, numpy.float)
class MeshIterTestCase(_BaseMeshTestCase):
def test_1d(self):
mesh = self._make_mesh(numpy.array([1, 2, 3, 5]),
numpy.array([-1, -2, 4, 0]))
self.assertEqual(list(mesh), [Point(1, -1), Point(2, -2),
Point(3, 4), Point(5, 0)])
mesh = self._make_mesh(numpy.array([0.1, 0.2, 0.3]),
numpy.array([0.9, 0.8, 0.7]),
numpy.array([0.4, 0.5, 0.6]))
self.assertEqual(list(mesh),
[Point(0.1, 0.9, 0.4), Point(0.2, 0.8, 0.5),
Point(0.3, 0.7, 0.6)])
def test_2d(self):
lons = numpy.array([[1.1, 2.2], [2.2, 3.3]])
lats = numpy.array([[-7, -8], [-9, -10]])
points = list(self._make_mesh(lons, lats))
self.assertEqual(points, [Point(1.1, -7), Point(2.2, -8),
Point(2.2, -9), Point(3.3, -10)])
depths = numpy.array([[11, 12], [13, 14]])
points = list(self._make_mesh(lons, lats, depths))
self.assertEqual(points, [Point(1.1, -7, 11), Point(2.2, -8, 12),
Point(2.2, -9, 13), Point(3.3, -10, 14)])
class MeshSlicingTestCase(_BaseMeshTestCase):
def test_1d(self):
lons = numpy.array([1, 2, 3, 4, 5, 6])
lats = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
mesh = self._make_mesh(lons, lats)
submesh = mesh[1:4]
self.assertIsNot(submesh, mesh)
self.assertEqual(len(submesh), 3)
self.assertTrue((submesh.lons == [2, 3, 4]).all())
self.assertTrue((submesh.lats == [0.2, 0.3, 0.4]).all())
self.assertIs(submesh.depths, None)
depths = numpy.array([7.1, 7.2, 7.3, 7.4, 7.5, 7.6])
mesh = self._make_mesh(lons, lats, depths)
submesh = mesh[-4:]
self.assertEqual(len(submesh), 4)
self.assertTrue((submesh.lons == [3, 4, 5, 6]).all())
self.assertTrue((submesh.lats == [0.3, 0.4, 0.5, 0.6]).all())
self.assertTrue((submesh.depths == [7.3, 7.4, 7.5, 7.6]).all())
with self.assertRaises(AssertionError):
submesh = mesh[0:0]
def test_2d(self):
lons = lats = numpy.arange(100).reshape((10, 10))
mesh = self._make_mesh(lons, lats)
submesh = mesh[:3, 5:7]
self.assertEqual(submesh.lons.shape, (3, 2))
self.assertEqual(submesh.lats.shape, (3, 2))
self.assertTrue((submesh.lons == [[5, 6], [15, 16], [25, 26]]).all())
self.assertTrue((submesh.lats == submesh.lons).all())
depths = lons + 3.1
mesh = self._make_mesh(lons, lats, depths)
submesh = mesh[2:4, 2:6]
self.assertEqual(submesh.lons.shape, (2, 4))
self.assertEqual(submesh.lats.shape, (2, 4))
self.assertTrue((submesh.lats == submesh.lons).all())
self.assertTrue((submesh.depths == [[25.1, 26.1, 27.1, 28.1],
[35.1, 36.1, 37.1, 38.1]]).all())
def test_wrong_indexing(self):
coords = numpy.arange(16)
mesh = self._make_mesh(coords, coords, coords)
with self.assertRaises(ValueError):
mesh[1]
coords = coords.reshape((4, 4))
mesh = self._make_mesh(coords, coords, coords)
with self.assertRaises(ValueError):
mesh[1]
with self.assertRaises(IndexError):
mesh[1:, 5]
def test_preserving_the_type(self):
lons = lats = numpy.arange(100).reshape((10, 10))
mesh = RectangularMesh(lons, lats, depths=None)
submesh = mesh[1:2, 3:4]
self.assertIsInstance(submesh, RectangularMesh)
class MeshEqualityTestCase(_BaseMeshTestCase):
def test_meshes_equal(self):
"""
Tests if two meshes are equal
"""
mesh1 = self._make_mesh(lons=numpy.array([1., 2., 3., 4.]),
lats=numpy.array([5., 6., 7., 8.]),
depths=numpy.array([0.5, 0.5, 0.5, 0.5]))
mesh2 = self._make_mesh(mesh1.lons, mesh1.lats, mesh1.depths)
self.assertTrue(mesh1 == mesh2)
def test_meshes_unequal(self):
"""
Checks unequal meshes
"""
# Test 1 - depths present but values different
mesh1 = self._make_mesh(lons=numpy.array([1., 2., 3., 4.]),
lats=numpy.array([5., 6., 7., 8.]),
depths=numpy.array([0.5, 0.5, 0.5, 0.5]))
mesh2 = self._make_mesh(lons=numpy.array([1., 2., 3., 4.]),
lats=numpy.array([5.01, 6., 7., 8.3]),
depths=numpy.array([0.5, 0.5, 0.5, 0.5]))
self.assertFalse(mesh1 == mesh2)
# Test 2 - depths present in the first case, missing in the second
mesh3 = self._make_mesh(lons=numpy.array([1., 2., 3., 4.]),
lats=numpy.array([5., 6., 7., 8.]),
depths=None)
self.assertFalse(mesh1 == mesh3)
# Depths missing in first case, present in the second
self.assertFalse(mesh3 == mesh2)
class MeshGetMinDistanceTestCase(unittest.TestCase):
# test case depends on Point.distance() working right
def _test(self, mesh, target_mesh, expected_distance_indices):
mesh_points = list(mesh)
target_points = list(target_mesh)
dists = mesh.get_min_distance(target_mesh)
expected_dists = [mesh_points[mi].distance(target_points[ti])
for ti, mi in enumerate(expected_distance_indices)]
aac(dists.flat, expected_dists, atol=1)
closest_points_mesh = mesh.get_closest_points(target_mesh)
numpy.testing.assert_equal(closest_points_mesh.lons.flat,
mesh.lons.take(expected_distance_indices))
numpy.testing.assert_equal(closest_points_mesh.lats.flat,
mesh.lats.take(expected_distance_indices))
if mesh.depths is None:
self.assertIsNone(closest_points_mesh.depths)
else:
numpy.testing.assert_equal(
closest_points_mesh.depths.flat,
mesh.depths.take(expected_distance_indices)
)
self.assertEqual(closest_points_mesh.lats.shape, target_mesh.shape)
def test_mesh_and_point_on_surface(self):
self._test(Mesh.from_points_list([Point(0, 0), Point(0, 1),
Point(0, 2)]),
Mesh.from_points_list([Point(1, 1), Point(-1, 0)]),
expected_distance_indices=[1, 0])
def test_mesh_on_surface(self):
self._test(Mesh.from_points_list([Point(0, 0), Point(0, 1),
Point(0, 2)]),
Mesh.from_points_list([Point(-1, -1, 3.4), Point(2, 5)]),
expected_distance_indices=[0, 2])
def test_point_on_surface(self):
self._test(Mesh.from_points_list([Point(0, 0, 1), Point(0, 1, 2),
Point(0, 2, 3)]),
Mesh.from_points_list([Point(0.5, 1.5)]),
expected_distance_indices=[1])
def test_mesh_and_point_not_on_surface(self):
self._test(Mesh.from_points_list([Point(0, 0, 1), Point(0, 1, 2),
Point(0, 2, 3)]),
Mesh.from_points_list([Point(0, 1.5, 3),
Point(0, 1.5, 0.9)]),
expected_distance_indices=[2, 1])
def test_2d_mesh(self):
mesh = Mesh(numpy.array([[0., 1.], [2., 3.]]),
numpy.array([[0., 0.], [0., 0.]]), None)
target_mesh = Mesh(
numpy.array([[3., 4., 5.], [-6., -7., 8.], [9., 10., 11.]]),
numpy.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]), None
)
self._test(mesh, target_mesh,
expected_distance_indices=[3, 3, 3, 0, 0, 3, 3, 3, 3])
class MeshGetDistanceMatrixTestCase(unittest.TestCase):
def test_zeroes(self):
mesh = Mesh(numpy.zeros(1000), numpy.zeros(1000), None)
matrix = mesh.get_distance_matrix()
self.assertIsInstance(matrix, numpy.matrix)
self.assertEqual(matrix.shape, (1000, 1000))
self.assertTrue((matrix == 0).all())
def test(self):
mesh = Mesh(numpy.array([0., 1., 2., 3.]), numpy.zeros(4), None)
matrix = mesh.get_distance_matrix()
aaae = numpy.testing.assert_array_almost_equal
aaae(matrix[0], [[0, 111.2, 222.4, 333.6]], decimal=1)
aaae(matrix[1], [[111.2, 0, 111.2, 222.4]], decimal=1)
aaae(matrix[2], [[222.4, 111.2, 0, 111.2]], decimal=1)
aaae(matrix[3], [[333.6, 222.4, 111.2, 0]], decimal=1)
for i in range(4):
for j in range(i, 4):
self.assertEqual(matrix[i, j], matrix[j, i])
class MeshConvexHullTestCase(unittest.TestCase):
def test_two_points(self):
mesh = Mesh(numpy.array([-10., -11.]), numpy.array([-12., -13.]), None)
polygon = mesh.get_convex_hull()
self.assertIsInstance(polygon, Polygon)
elons = [-10.99996704, -11.0000323, -11.00003296, -10.00003295,
-9.99996795, -9.99996705]
elats = [-13.00003147, -13.00003212, -12.99996853, -11.99996865,
-11.99996776, -12.00003135]
numpy.testing.assert_allclose(polygon.lons, elons)
numpy.testing.assert_allclose(polygon.lats, elats)
def test_many_points(self):
lons = numpy.array([0.7, 0.6, 0.4, 0.6, 0.3, 0.9, 0.5, 0.4])
lats = numpy.array([0.8, 0.5, 0.2, 0.7, 0.2, 0.4, 0.9, 0.4])
mesh = Mesh(lons, lats, None)
polygon = mesh.get_convex_hull()
elons = [0.4, 0.3, 0.5, 0.7, 0.9]
elats = [0.2, 0.2, 0.9, 0.8, 0.4]
numpy.testing.assert_allclose(polygon.lons, elons)
numpy.testing.assert_allclose(polygon.lats, elats)
def test_one_point(self):
mesh = Mesh.from_points_list([Point(7, 7)])
polygon = mesh.get_convex_hull()
elons = [7.0000453, 7., 6.9999547, 7]
elats = [7., 6.99995503, 7., 7.00004497]
numpy.testing.assert_allclose(polygon.lons, elons)
numpy.testing.assert_allclose(polygon.lats, elats)
class RectangularMeshCreationTestCase(unittest.TestCase):
def test_wrong_shape(self):
with self.assertRaises(AssertionError):
RectangularMesh(numpy.array([0, 1, 2]),
numpy.array([0, 0, 0]), None)
RectangularMesh(numpy.array([0, -1]), numpy.array([2, 10]),
numpy.array([5, 44]))
def test_from_points_list(self):
lons = [[0, 1], [2, 3], [4, 5]]
lats = [[1, 2], [-1, -2], [10, 20]]
depths = [[11.1, 11.2], [11.3, 11.4], [11.5, 11.6]]
points = [
[Point(lons[i][j], lats[i][j], depths[i][j])
for j in range(len(lons[i]))]
for i in range(len(lons))
]
mesh = RectangularMesh.from_points_list(points)
self.assertTrue((mesh.lons == lons).all())
self.assertTrue((mesh.lats == lats).all())
self.assertTrue((mesh.depths == depths).all())
points = [
[Point(lons[i][j], lats[i][j], depth=0)
for j in range(len(lons[i]))]
for i in range(len(lons))
]
mesh = RectangularMesh.from_points_list(points)
self.assertTrue((mesh.lons == lons).all())
self.assertTrue((mesh.lats == lats).all())
self.assertIsNone(mesh.depths)
class MeshJoynerBooreDistanceTestCase(unittest.TestCase):
def test_simple(self):
lons = numpy.array([numpy.arange(-1, 1.2, 0.2)] * 11)
lats = lons.transpose() + 1
depths = lats + 10
mesh = RectangularMesh(lons, lats, depths)
check = lambda lon, lat, depth, expected_distance, **kwargs: \
self.assertAlmostEqual(
mesh.get_joyner_boore_distance(
Mesh.from_points_list([Point(lon, lat, depth)])
)[0],
expected_distance, **kwargs
)
check(lon=0, lat=0.5, depth=0, expected_distance=0)
check(lon=1, lat=1, depth=0, expected_distance=0)
check(lon=0.6, lat=-1, depth=0,
expected_distance=Point(0.6, -1).distance(Point(0.6, 0)),
delta=0.1)
check(lon=-0.8, lat=2.1, depth=10,
expected_distance=Point(-0.8, 2.1).distance(Point(-0.8, 2)),
delta=0.02)
check(lon=0.75, lat=2.3, depth=3,
expected_distance=Point(0.75, 2.3).distance(Point(0.75, 2)),
delta=0.04)
def test_vertical_mesh(self):
lons = numpy.array([[0, 0.5, 1, 2], [0, 0.5, 1, 2]], float)
lats = numpy.array([[0, 0, 0, 0], [0, 0, 0, 0]], float)
depths = numpy.array([[1, 1, 1, 1], [2, 2, 2, 2]], float)
mesh = RectangularMesh(lons, lats, depths)
target_mesh = Mesh.from_points_list([Point(0.5, 0), Point(0.5, 1),
Point(0.5, 5)])
dists = mesh.get_joyner_boore_distance(target_mesh)
expected_dists = [
0, Point(0.5, 1).distance(Point(0.5, 0)),
Point(0.5, 5).distance(Point(0.5, 0))]
aac(dists, expected_dists, atol=1)
def test_mesh_of_two_points(self):
lons = numpy.array([[0, 0.5, 1]], float)
lats = numpy.array([[0, 0, 0]], float)
depths = numpy.array([[1, 0, 1]], float)
mesh = RectangularMesh(lons, lats, depths)
target_mesh = Mesh.from_points_list([Point(0.5, 1), Point(0.5, 0)])
dists = mesh.get_joyner_boore_distance(target_mesh)
expected_dists = [Point(0.5, 1).distance(Point(0.5, 0)), 0]
aac(dists, expected_dists, atol=.01)
def test_mesh_of_one_point(self):
lons = numpy.array([[1.]])
lats = numpy.array([[0.]])
depths = numpy.array([[1.]])
mesh = RectangularMesh(lons, lats, depths)
target_mesh = Mesh.from_points_list([Point(1, 0), Point(0.5, 0)])
dists = mesh.get_joyner_boore_distance(target_mesh)
expected_dists = [0, Point(0.5, 0).distance(Point(1, 0))]
aac(dists, expected_dists, atol=0.2)
def _test(self, points, site, expected_distance):
lons, lats, depths = numpy.array(points).transpose()
lons = lons.transpose()
lats = lats.transpose()
depths = depths.transpose()
mesh = RectangularMesh(lons, lats, depths)
distance = mesh.get_joyner_boore_distance(
Mesh.from_points_list([Point(*site)])
)[0]
self.assertAlmostEqual(distance, expected_distance, delta=0.02)
def test3(self):
self._test(_mesh_test_data.TEST3_MESH, _mesh_test_data.TEST3_SITE,
_mesh_test_data.TEST3_JB_DISTANCE)
def test4(self):
self._test(_mesh_test_data.TEST4_MESH, _mesh_test_data.TEST4_SITE,
_mesh_test_data.TEST4_JB_DISTANCE)
def test5(self):
self._test(_mesh_test_data.TEST5_MESH, _mesh_test_data.TEST5_SITE,
_mesh_test_data.TEST5_JB_DISTANCE)
def test_version(self):
# this test is sensitive to different versions of shapely/libgeos
lons = numpy.array(
[[-121.3956, -121.41050474, -121.42542273, -121.44035399,
-121.45529855, -121.47025643],
[-121.3956, -121.41050474, -121.42542273, -121.44035399,
-121.45529855, -121.47025643]])
lats = numpy.array(
[[36.8257, 36.85963772, 36.89357357, 36.92750756,
36.96143968, 36.99536993],
[36.8257, 36.85963772, 36.89357357, 36.92750756,
36.96143968, 36.99536993]])
mesh = RectangularMesh(lons, lats)
dist = mesh.get_joyner_boore_distance(
Mesh.from_points_list([Point(-121.76, 37.23)]))
dist_ubuntu_12_04 = 36.61260128
dist_ubuntu_14_04 = 36.61389245
self.assertTrue(numpy.allclose(dist, dist_ubuntu_12_04) or
numpy.allclose(dist, dist_ubuntu_14_04))
class RectangularMeshGetMiddlePointTestCase(unittest.TestCase):
def test_odd_rows_odd_columns_no_depths(self):
lons = numpy.array([numpy.arange(-1, 1.2, 0.2)] * 11)
lats = lons.transpose() * 10
mesh = RectangularMesh(lons, lats, depths=None)
self.assertEqual(mesh.get_middle_point(), Point(0, 0, 0))
def test_odd_rows_odd_columns_with_depths(self):
lons = numpy.array([numpy.arange(-1, 1.2, 0.2)] * 11)
lats = lons.transpose() * 10
depths = lats + 10
mesh = RectangularMesh(lons, lats, depths)
self.assertEqual(mesh.get_middle_point(), Point(0, 0, 10))
def test_odd_rows_odd_columns_with_topo(self):
lons = numpy.array([numpy.arange(-1, 1.2, 0.2)] * 11)
lats = lons.transpose()
depths = lats - 1
mesh = RectangularMesh(lons, lats, depths)
self.assertEqual(mesh.get_middle_point(), Point(0, 0, -1))
def test_odd_rows_even_columns_no_depths(self):
lons = numpy.array([[10, 20, 30, 40]])
lats = numpy.array([[30] * 4])
mesh = RectangularMesh(lons, lats, depths=None)
self.assertEqual(mesh.get_middle_point(), Point(25, 30.094679))
def test_odd_rows_even_columns_with_depths(self):
lons = numpy.array([[0, 20, 30, 90]])
lats = numpy.array([[30] * 4])
depths = numpy.array([[2, 7, 8, 10]])
mesh = RectangularMesh(lons, lats, depths=depths)
self.assertEqual(mesh.get_middle_point(), Point(25, 30.094679, 7.5))
def test_odd_rows_even_columns_with_topo(self):
lons = numpy.array([[0, 20, 30, 90]])
lats = numpy.array([[30] * 4])
depths = numpy.array([[2, 1, -3, -5]])
mesh = RectangularMesh(lons, lats, depths=depths)
self.assertEqual(mesh.get_middle_point(), Point(25, 30.094679, -1))
def test_even_rows_odd_columns_no_depths(self):
lons = numpy.array([[-1, 0, 1, 2, 3], [-1.5, 0.5, 1.5, 2.5, 3.5]])
lats = numpy.array([[-0.01] * 5, [-0.015] * 5])
mesh = RectangularMesh(lons, lats, depths=None)
self.assertEqual(mesh.get_middle_point(), Point(1.25, -0.0125, 0))
def test_even_rows_odd_columns_with_depth(self):
lons = numpy.array([[20], [21]])
lats = numpy.array([[-1], [1]])
depths = numpy.array([[11.1], [11.3]])
mesh = RectangularMesh(lons, lats, depths=depths)
self.assertEqual(mesh.get_middle_point(), Point(20.5, 0, 11.2))
def test_even_rows_odd_columns_with_topo(self):
lons = numpy.array([[20], [21]])
lats = numpy.array([[-1], [1]])
depths = numpy.array([[-1.1], [-1.3]])
mesh = RectangularMesh(lons, lats, depths=depths)
self.assertEqual(mesh.get_middle_point(), Point(20.5, 0, -1.2))
def test_even_rows_even_columns_no_depths(self):
lons = numpy.array([[10, 20], [10.002, 20.002]])
lats = numpy.array([[10, -10], [8, -8]])
mesh = RectangularMesh(lons, lats, depths=None)
self.assertEqual(mesh.get_middle_point(), Point(15.001, 0))
def test_even_rows_even_columns_with_depths(self):
lons = numpy.array([[10, 20], [12, 22]])
lats = numpy.array([[10, -10], [8, -9]])
depths = numpy.array([[2, 3], [4, 5]])
mesh = RectangularMesh(lons, lats, depths=depths)
self.assertEqual(mesh.get_middle_point(),
Point(15.996712, -0.250993, 3.5))
def test_even_rows_even_columns_with_topo(self):
lons = numpy.array([[10, 20], [12, 22]])
lats = numpy.array([[10, -10], [8, -9]])
depths = numpy.array([[-2, -3], [-4, -5]])
mesh = RectangularMesh(lons, lats, depths=depths)
self.assertEqual(mesh.get_middle_point(),
Point(15.996712, -0.250993, -3.5))
class RectangularMeshGetMeanInclinationAndAzimuthTestCase(unittest.TestCase):
def test_on_surface(self):
row1 = [Point(0, 0), Point(0, 1)]
row2 = [Point(1, 0), Point(1, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertEqual(dip, 0)
self.assertAlmostEqual(strike, 0, delta=0.5)
row1 = [Point(0, 0), Point(0, -1)]
row2 = [Point(1, 0), Point(1, -1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertEqual(dip, 0)
self.assertAlmostEqual(strike, 180, delta=0.5)
row1 = [Point(0, 0), Point(1, 1)]
row2 = [Point(1, 0), Point(2, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertEqual(dip, 0)
self.assertAlmostEqual(strike, 45, delta=0.01)
row1 = [Point(0, 0), Point(1, -1)]
row2 = [Point(1, 0), Point(2, -1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertEqual(dip, 0)
self.assertAlmostEqual(strike, 135, delta=0.01)
row1 = [Point(0, 0), Point(-1, -1)]
row2 = [Point(-1, 0), Point(-2, -1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertEqual(dip, 0)
self.assertAlmostEqual(strike, 225, delta=0.01)
row1 = [Point(0, 0), Point(-1, 1)]
row2 = [Point(-1, 0), Point(-2, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertEqual(dip, 0)
self.assertAlmostEqual(strike, 315, delta=0.01)
def test_one_cell(self):
top = [Point(0, -0.01), Point(0, 0.01)]
bottom = [Point(0.01, -0.01, 1.11), Point(0.01, 0.01, 1.11)]
mesh = RectangularMesh.from_points_list([top, bottom])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 45, delta=0.05)
self.assertAlmostEqual(strike, 0, delta=0.05)
row1 = [Point(45, -0.1), Point(45.2, 0.1)]
row2 = [Point(45, -0.1, 1), Point(45.2, 0.1, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 90)
self.assertAlmostEqual(strike, 45, delta=0.1)
row1 = [Point(90, -0.1), Point(90, 0.1)]
row2 = [Point(90, -0.1, 1), Point(90, 0.1, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 90)
self.assertAlmostEqual(strike, 0, delta=0.1)
def test_one_cell_topo(self):
top = [Point(0, -0.01, -3.00), Point(0, 0.01, -3.00)]
bottom = [Point(0.01, -0.01, -1.89), Point(0.01, 0.01, -1.89)]
mesh = RectangularMesh.from_points_list([top, bottom])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 45, delta=0.1)
self.assertAlmostEqual(strike, 0, delta=0.05)
row1 = [Point(45, -0.1, -3.00), Point(45.2, 0.1, -3.00)]
row2 = [Point(45, -0.1, -2.00), Point(45.2, 0.1, -2.00)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 90)
self.assertAlmostEqual(strike, 225, delta=0.1)
row1 = [Point(90, -0.1, -3.00), Point(90, 0.1, -3.00)]
row2 = [Point(90, -0.1, -2.00), Point(90, 0.1, -2.00)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 90)
self.assertAlmostEqual(strike, 0, delta=0.1)
def test_two_cells(self):
top = [Point(0, -0.01), Point(0, 0.01)]
middle = [Point(0.01, -0.01, 1.11), Point(0.01, 0.01, 1.11)]
bottom = [Point(0.01, -0.01, 2.22), Point(0.01, 0.01, 2.22)]
mesh = RectangularMesh.from_points_list([top, middle, bottom])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, math.degrees(math.atan2(2, 1)), delta=0.1)
self.assertAlmostEqual(strike, 0, delta=0.02)
bottom = [Point(0.01, -0.01, 3.33), Point(0.01, 0.01, 3.33)]
mesh = RectangularMesh.from_points_list([top, middle, bottom])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, math.degrees(math.atan2(3, 1)), delta=0.1)
self.assertAlmostEqual(strike, 0, delta=0.02)
row1 = [Point(90, -0.1), Point(90, 0), Point(90, 0.1)]
row2 = [Point(90, -0.1, 1), Point(90, 0, 1), Point(90, 0.1, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 90)
assert_angles_equal(self, strike, 360, delta=1e-7)
row1 = [Point(-90.1, -0.1), Point(-90, 0), Point(-89.9, 0.1)]
row2 = [Point(-90.0, -0.1, 1), Point(-89.9, 0, 1),
Point(-89.8, 0.1, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(strike, 45, delta=1e-4)
row1 = [Point(-90.1, -0.1), Point(-90, 0), Point(-89.9, 0.1)]
row2 = [Point(-90.0, -0.1, 1), Point(-89.9, 0, 1),
Point(-89.8, 0.1, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(strike, 45, delta=1e-3)
row1 = [Point(-90.1, -0.1), Point(-90, 0), Point(-89.9, 0.1)]
row2 = [Point(-90.2, -0.1, 1), Point(-90.1, 0, 1), Point(-90, 0.1, 1)]
mesh = RectangularMesh.from_points_list([row1, row2])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(strike, 225, delta=1e-3)
def test_one_cell_unequal_area(self):
# top-left triangle is vertical, has dip of 90 degrees, zero
# strike and area of 1 by 1 over 2. bottom-right one has dip
# of atan2(1, sqrt(2) / 2.0) which is 54.73561 degrees, strike
# of 45 degrees and area that is 1.73246136 times area of the
# first one's. weighted mean dip is 67.5 degrees and weighted
# mean strike is 28.84 degrees
top = [Point(0, -0.01), Point(0, 0.01)]
bottom = [Point(0, -0.01, 2.22), Point(0.02, 0.01, 2.22)]
mesh = RectangularMesh.from_points_list([top, bottom])
dip, strike = mesh.get_mean_inclination_and_azimuth()
self.assertAlmostEqual(dip, 67.5, delta=0.05)
self.assertAlmostEqual(strike, 28.84, delta=0.05)
def test_dip_over_90_degree(self):
top = [Point(0, -0.01), Point(0, 0.01)]
bottom = [Point(-0.01, -0.01, 1.11), Point(-0.01, 0.01, 1.11)]
mesh = RectangularMesh.from_points_list([top, bottom])
dip, strike = mesh.get_mean_inclination_and_azimuth()
# dip must still be in the range 0..90
self.assertAlmostEqual(dip, 45, delta=0.05)
# strike must be reversed
self.assertAlmostEqual(strike, 180, delta=0.05)
class RectangularMeshGetCellDimensionsTestCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.original_spherical_to_cartesian = geo_utils.spherical_to_cartesian
geo_utils.spherical_to_cartesian = lambda lons, lats, depths: (
self.points)
def tearDown(self):
geo_utils.spherical_to_cartesian = self.original_spherical_to_cartesian
def _test(self, points, centroids, lengths, widths, areas):
fake_coords = numpy.array([[0]])
self.points = numpy.array(points, dtype=float)
mesh = RectangularMesh(fake_coords, fake_coords, fake_coords)
cell_center, cell_length, cell_width, cell_area \
= mesh.get_cell_dimensions()
self.assertTrue(numpy.allclose(cell_length, lengths),
'%s != %s' % (cell_length, lengths))
self.assertTrue(numpy.allclose(cell_width, widths),
'%s != %s' % (cell_width, widths))
self.assertTrue(numpy.allclose(cell_area, areas),
'%s != %s' % (cell_area, areas))
self.assertTrue(numpy.allclose(cell_center, centroids),
'%s != %s' % (cell_center, centroids))
def test_one_cell(self):
self._test(
points=[[(1., 1., 1.), (2., 1., 1.)],
[(1., 1., -2.), (2., 1., -2.)]],
centroids=[(1.5, 1, -0.5)],
lengths=[1],
widths=[3],
areas=[3]
)
def test_unequal_triangle_areas(self):
self._test(
points=[[(10, 0, 0), (11, 0, 0)],
[(10, -1, 0), (11, -2, 0)]],
centroids=[(((10 + 1/3.) * 0.5 + (10 + 2/3.) * 1) / 1.5,
((-1/3.) * 0.5 + (-1) * 1) / 1.5,
0)],
lengths=[(1 * 0.5 + math.sqrt(2) * 1) / (0.5 + 1)],
widths=[(1 * 0.5 + 2 * 1) / (0.5 + 1)],
areas=[0.5 + 1]
)
def test_unequal_triangle_areas_topo(self):
self._test(
points=[[(10, 0, 0), (11, 0, 0)],
[(10, 0, -1), (11, 0, -2)]],
centroids=[(((10 + 1/3.) * 0.5 + (10 + 2/3.) * 1) / 1.5, 0,
((-1/3.) * 0.5 + (-1) * 1) / 1.5)],
lengths=[(1 * 0.5 + math.sqrt(2) * 1) / (0.5 + 1)],
widths=[(1 * 0.5 + 2 * 1) / (0.5 + 1)],
areas=[0.5 + 1]
)
def test_two_unequal_cells(self):
self._test(
points=[[(0, 0, 0), (0, 0, 1), (0, 0, 3)],
[(0, 1, 0), (0, 1, 1), (0, 1, 3)]],
centroids=[(0, 0.5, 0.5), (0, 0.5, 2)],
lengths=[1, 2],
widths=[1, 1],
areas=[1, 2]
)
def test_two_unequal_cells_topo(self):
self._test(
points=[[(0, 0, -1), (0, 0, 0), (0, 0, 2)],
[(0, 1, -1), (0, 1, 0), (0, 1, 2)]],
centroids=[(0, 0.5, -0.5), (0, 0.5, 1)],
lengths=[1, 2],
widths=[1, 1],
areas=[1, 2]
)
class RectangularMeshTriangulateTestCase(unittest.TestCase):
def test_simple(self):
lons = numpy.array([[0, 0.0089946277931563321],
[0, 0.0089974527390248322]])
lats = numpy.array([[0, 0], [0, 0]], dtype=float)
depths = numpy.array([[1, 0.99992150706475513],
[3, 2.9999214824129012]])
mesh = RectangularMesh(lons, lats, depths)
points, along_azimuth, updip, diag = mesh.triangulate()
self.assertTrue(numpy.allclose(points, [
[(6370, 0, 0), (6370, 1, 0)],
[(6368, 0, 0), (6368, 1, 0)]
]))
self.assertTrue(numpy.allclose(along_azimuth, [
[(0, 1, 0)], [(0, 1, 0)]
]))
self.assertTrue(numpy.allclose(updip, [
[(2, 0, 0)], [(2, 0, 0)],
]))
self.assertTrue(numpy.allclose(diag, [
[(2, 1, 0)]
]))
class RectangularMeshGetProjectionEnclosingPolygonTestCase(unittest.TestCase):
def _test(self, lons, lats, depths, expected_coords):
mesh = RectangularMesh(lons, lats, depths)
proj, polygon = mesh._get_proj_enclosing_polygon()
self.assertTrue(polygon.is_valid)
self.assertEqual(list(polygon.interiors), [])
coords = numpy.array(proj(*
|
numpy.array(polygon.exterior)
|
numpy.array
|
# Python modules
# 3rd party modules
import numpy as np
from scipy.stats import distributions
from scipy.optimize import minimize
# Our modules
import vespa.analysis.constants as constants
import vespa.analysis.algos.lowess as lowess
import vespa.analysis.algos.splines as splines
import vespa.analysis.util_initial_values as util_initial_values
import vespa.common.constants as common_constants
import vespa.common.util.ppm as util_ppm
import vespa.common.minf_parabolic_info as minf
import vespa.common.util.generic_spectral as util_spectral
from vespa.analysis.constants import FitLineshapeModel
from vespa.analysis.constants import FitMacromoleculeMethod
from vespa.analysis.algos.constrained_levenberg_marquardt import constrained_levenberg_marquardt
def initial_values(chain):
set = chain._block.set
nmet = chain.nmet
dim0 = chain._dataset.spectral_dims[0]
dat = chain.data
util_initial_values.find_initial_values(chain)
# Calculate parameter initial values
a = np.hstack([chain.init_area,
chain.init_freq,
chain.init_tb,
chain.init_ph0,
chain.init_ph1])
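# Parameter vector layout: metabolite areas, then frequencies, then Tb (linewidth) terms,
# then zero- and first-order phase (ph0, ph1). The constraint vectors bot/top built below
# follow the same ordering.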
# chain.init_area = np.array([1.08257415e+01, 2.59075328e+01, 5.13168453e+01])
# chain.init_freq = np.array([1.28908112e+04, 1.29652744e+04, 1.33736172e+04])
# chain.init_ta = 9.14242352e-02
# chain.init_tb = 9.14242352e-02
# chain.init_ph0 = -2.82743339e+00
# chain.init_ph1 = -0.00000000e+00
#
# a = [ 1.08257415e+01, 2.59075328e+01, 5.13168453e+01, 1.28908112e+04,
# 1.29652744e+04, 1.33736172e+04, 9.14242352e-02, 9.14242352e-02,
# -2.82743339e+00, -0.00000000e+00]
# a = np.array(a)
# Update parameter constraints
#
# Areas constraints
areamax = chain.init_area * (1.0 + set.optimize_limits_range_area/100.0)
areamin = chain.init_area * 1e-8 # no zeros, else derivatives blow up in optimization
# PPM constraints
fredel = set.optimize_limits_range_ppm * 2.0 * np.pi
fremin = chain.init_freq - fredel
fremax = chain.init_freq + fredel
# Tb constraints
lwBmin = [set.optimize_limits_min_linewidth,] * nmet
lwBmax = [set.optimize_limits_max_linewidth,] * nmet
# Phase0 constraints
ph0min = chain.init_ph0 - (set.optimize_limits_range_phase0 * np.pi / 180.0)
ph0max = chain.init_ph0 + (set.optimize_limits_range_phase0 * np.pi / 180.0)
# Phase1 constraints
ph1min = chain.init_ph1 - set.optimize_limits_range_phase1
ph1max = chain.init_ph1 + set.optimize_limits_range_phase1
# Actual constraints
bot = np.hstack([areamin, fremin, lwBmin, ph0min, ph1min])
top = np.hstack([areamax, fremax, lwBmax, ph0max, ph1max])
if chain._block.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
mmol_area = set.macromol_single_basis_dataset_start_area
mmol_fre = chain.init_b0 # in Hz
mmol_area_max = mmol_area * 10.0
mmol_area_min = mmol_area * 0.01
mmol_fre_max = mmol_fre + 50.0 # in Hz
mmol_fre_min = mmol_fre - 50.0
a = np.hstack([a, mmol_area, mmol_fre])
bot = np.hstack([bot, mmol_area_min, mmol_fre_min])
top =
|
np.hstack([top, mmol_area_max, mmol_fre_max])
|
numpy.hstack
|
# -*- coding: utf-8 -*-
"""
Copyright 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
class MarkovNetwork(object):
"""A Markov Network for neural computing."""
max_markov_gate_inputs = 4
max_markov_gate_outputs = 4
def __init__(self, num_input_states, num_memory_states, num_output_states, seed_num_markov_gates=4, probabilistic=True, genome=None):
"""Sets up a Markov Network
Parameters
----------
num_input_states: int
The number of input states in the Markov Network
num_memory_states: int
The number of internal memory states in the Markov Network
num_output_states: int
The number of output states in the Markov Network
seed_num_markov_gates: int (default: 4)
The number of Markov Gates with which to seed the Markov Network
It is important to ensure that randomly-generated Markov Networks have at least a few Markov Gates to begin with
May sometimes result in fewer Markov Gates if the Markov Gates are randomly seeded in the same location
probabilistic: bool (default: True)
Flag indicating whether the Markov Gates are probabilistic or deterministic
genome: array-like (default=None)
An array representation of the Markov Network to construct
All values in the array must be integers in the range [0, 255]
If None, then a random Markov Network will be generated
Returns
-------
None
"""
self.num_input_states = num_input_states
self.num_memory_states = num_memory_states
self.num_output_states = num_output_states
self.states =
|
np.zeros(num_input_states + num_memory_states + num_output_states, dtype=np.bool)
|
numpy.zeros
|
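# A minimal usage sketch for the MarkovNetwork constructor documented in the snippet
# above (hypothetical state counts; the class body shown there is truncated, so this
# only illustrates the documented parameters):
#   mn = MarkovNetwork(num_input_states=8, num_memory_states=4, num_output_states=2)
#   # mn.states then holds 8 + 4 + 2 = 14 boolean states for the gates to read and write.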
# try to import libsoundfile for audio import, alternatively try loading audio via scipy or wave
try:
import soundfile
soundfile_found = True
except ImportError:
from scipy.io.wavfile import read
import wave
soundfile_found = False
import numpy as np
import os
__author__ = '<NAME>'
class Tools:
""" Class provides several tools for audio analysis
"""
def __init__(self):
pass
@staticmethod
def set_missing_values(options,
**default_frame_wise_values):
""" Add default frame_wise_values & keys to dictionary if frame_wise_values are not set
Args:
options (dict): Arbitrary dictionary (e.g. containing processing options)
default_frame_wise_values (dict): Keyword list with default frame_wise_values to be set if corresponding
keys are not set in options dict
Returns:
options (dict): Arbitrary dictionary with added default frame_wise_values if required
"""
for param in default_frame_wise_values.keys():
if param not in options:
options[param] = default_frame_wise_values[param]
return options
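# Usage sketch (hypothetical option names): defaults only fill in the missing keys.
#   opts = Tools.set_missing_values({'hop_size': 512}, hop_size=1024, win_size=2048)
#   # -> {'hop_size': 512, 'win_size': 2048}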
@staticmethod
def load_wav(fn_wav,
mono=False):
""" Function loads samples from WAV file. Both implementations (wave / scipy package) fail for some WAV files
hence we combine them.
Args:
fn_wav (string): WAV file name
mono (bool): Switch if samples shall be converted to mono
Returns:
samples (np array): Audio samples (between [-1, 1])
> if stereo: (2D ndarray with DIM numSamples x numChannels),
> if mono: (1D ndarray with DIM numSamples)
sample_rate (float): Sampling frequency [Hz]
"""
if soundfile_found:
samples, sample_rate = soundfile.read(fn_wav)
else:
try:
samples, sample_rate = Tools._load_wav_file_via_scipy(fn_wav)
except:
try:
samples, sample_rate = Tools._load_wav_file_via_wave(fn_wav)
except:
raise Exception("WAV file could neither be opened using Scipy nor Wave!")
# mono conversion
if mono:
if samples.ndim == 2:
if samples.shape[1] > 1:
samples = np.mean(samples, axis=1)
else:
samples = np.squeeze(samples)
# scaling
if np.max(np.abs(samples)) > 1:
samples = samples.astype(float) / 32768.0
return samples, sample_rate
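# Usage sketch (the file name is hypothetical):
#   samples, sample_rate = Tools.load_wav('some_recording.wav', mono=True)
#   # samples is a 1D float array in [-1, 1]; sample_rate is the sampling frequency in Hz.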
@staticmethod
def _load_wav_file_via_wave(fn_wav):
""" Load samples & sample rate from WAV file """
fp = wave.open(fn_wav)
num_channels = fp.getnchannels()
num_frames = fp.getnframes()
frame_string = fp.readframes(num_frames*num_channels)
data = np.frombuffer(frame_string, dtype=np.int16)
samples = np.reshape(data, (-1, num_channels))
sample_rate = float(fp.getframerate())
return samples, sample_rate
@staticmethod
def _load_wav_file_via_scipy(fn_wav):
""" Load samples & sample rate from WAV file """
inputData = read(fn_wav)
samples = inputData[1]
sample_rate = inputData[0]
return samples, sample_rate
@staticmethod
def aggregate_framewise_function_over_notes(frame_wise_values,
time_sec,
onset,
duration):
""" Aggregate a frame-wise function (e.g. loudness) over note durations to obtain note-wise features
:param frame_wise_values: (ndarray) Frame-wise values
:param time_sec: (ndarray) Time frame frame_wise_values in seconds
:param onset: (ndarray) Note onset times in seconds
:param duration: (ndarray) Note durations in seconds
:return: result: (dict of ndarrays) Note-wise aggregation results with keys
'max': Maximum over note duration
'median': Median over note duration
'std': Standard deviation over note duration
'temp_centroid': Temporal centroid over note duration [0,1]
'rel_peak_pos': Position of global maximum over note duration relative to note duration [0,1]
"""
dt = time_sec[1]-time_sec[0]
num_notes = len(onset)
onset_frame = (onset/dt).astype(int)
offset_frame = ((onset+duration)/dt).astype(int)
# initialize
result = dict()
result['max'] = np.zeros(num_notes)
result['median'] = np.zeros(num_notes)
result['std'] = np.zeros(num_notes)
result['temp_centroid'] =
|
np.zeros(num_notes)
|
numpy.zeros
|
'''
@language: python
@Description:
@version: beta
@Author: xiaoshuyui
@Date: 2020-06-17 17:07:23
@LastEditors: xiaoshuyui
@LastEditTime: 2020-07-16 16:05:51
'''
from keras.models import load_model
import numpy as np
import random
with open('D:\\testALg\\Diy-musics\\novel\\static\\nietzsche.txt', 'r') as f:
text = f.read()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
char_num = len(chars)
print('total chars:', char_num)
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40 # must match the value used in kears_lstm_train.py
# build the model: a single LSTM
print('load model...')
# model = load_model('shakes_model.h5')
model = load_model('D:\\testALg\\Diy-musics\\novel\\nz_model.h5')
def sample(preds, temperature=1.0):
"""
helper function to sample an index from a probability array
:param preds: probability vector a from the model's forward pass, shape (char_num, 1)
:param temperature: diversity control; larger values give more random sampling
:return:
"""
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
# the next two lines apply a softmax
exp_preds =
|
np.exp(preds)
|
numpy.exp
|
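# For reference, a complete temperature-sampling helper in the conventional Keras
# text-generation style (a sketch: the snippet above is truncated after the np.exp
# step, so the remaining lines follow the usual pattern, not necessarily the author's
# exact code).
def sample_full(preds, temperature=1.0):
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)        # softmax over the character distribution
    probas = np.random.multinomial(1, preds, 1)  # draw one character index
    return np.argmax(probas)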
import numpy as np
import astropy.constants as ac
import astropy.units as au
from ..util.spline import GlobalSpline2D
# Original C version implemented in Athena-TIGRESS
# See also Gong, Ostriker, & Wolfire (2017) and https://github.com/munan/tigress_cooling
def get_xe_mol(nH, xH2, xe, T=20.0, xi_cr=1e-16, Zg=1.0, Zd=1.0):
phi_s = (1.0 - xe/1.2)*0.67/(1.0 + xe/0.05)
k1619 = 5.0e-8*(T*1e-2)**(-0.48)
k1621 = 1e-9
k1620 = 1e-14*Zd
k1622 = 1e-14*Zd
xS = 5.3e-6*Zg # From Draine's Table 9.5 (Diffuse H2)
A = k1619*(1.0 + k1621/k1622*xS)
B = k1620 + k1621*xS
return 2.0*xH2*((B**2 + 4.0*A*xi_cr*(1.0 + phi_s)/nH)**0.5 - B)/(2.0*k1619)
def get_xCII(nH, xe, xH2, T, Z_d, Z_g, xi_CR, G_PE, G_CI, xCstd=1.6e-4, gr_rec=True):
xCtot = xCstd*Z_g
small_ = 1e-50
k_C_cr = 3.85*xi_CR
k_C_photo = 3.5e-10*G_CI
lnT = np.log(T)
k_Cplus_e = np.where(T < 10.0,
9.982641225129824e-11,
np.exp(-0.7529152*lnT - 21.293937))
if gr_rec:
psi_gr = 1.7*G_PE*np.sqrt(T)/(nH*xe + small_) + small_
cCp_ = np.array([45.58, 6.089e-3, 1.128, 4.331e2, 4.845e-2,0.8120, 1.333e-4])
k_Cplus_gr = 1.0e-14*cCp_[0]/(1.0 + cCp_[1]*np.power(psi_gr, cCp_[2]) *
(1.0 + cCp_[3] * np.power(T, cCp_[4])
* np.power( psi_gr, -cCp_[5]-cCp_[6]*lnT ))) * Z_d
else:
k_Cplus_gr = 0.0
k_Cplus_H2 = 3.3e-13 * np.power(T, -1.3) * np.exp(-23./T)
c = (k_C_cr + k_C_photo) / nH
al = k_Cplus_e*xe + k_Cplus_gr + k_Cplus_H2*xH2 + c
ar = xCtot * c
return ar / al
def get_xCO(nH, xH2, xCII, Z_d, Z_g, xi_CR, chi_CO, xCstd=1.6e-4):
xCtot = xCstd*Z_g
kcr16 = xi_CR*1e16
term1 = np.maximum(4e3*Z_d/kcr16**2,1.0)
ncrit = np.power(term1, chi_CO**(1.0/3.0))*(50*kcr16/np.power(Z_d,1.4))
#xCO = np.where(nH > ncrit2, 1.0, nH/ncrit2)
xCO = nH**2/(nH**2 + ncrit**2)
xCO = xCO*(2.0*xH2)
xCO = xCO*(xCtot - xCII)
# xCO = np.minimum(xCO, 2.0*xH2*xCtot)
# xCO = np.minimum(xCO, xCtot - xCII)
# xCO = np.minimum(xCO, 2.0*xH2)
#xCO = np.minimum(xCO, xCtot - xCII)
return xCO,ncrit
def get_charge_param(nH, T, xe, chi_PE, phi=1.0):
# Charging parameter
# (WD01 does not recommend using their eqaution for x < 100)
# return np.maximum(1.7*chi_PE*np.sqrt(T)/(xe*nH*phi), 100.0)
return 1.7*chi_PE*np.sqrt(T)/(xe*nH*phi) + 50.0
def heatPE(nH, T, xe, Z_d, chi_PE):
# Weingartner & Draine (2001) Table 2
# Rv = 3.1, bC=4.0, distribution A, ISRF
CPE_ = np.array([5.22, 2.25, 0.04996, 0.00430, 0.147, 0.431, 0.692])
x = get_charge_param(nH, T, xe, chi_PE)
eps = (CPE_[0] + CPE_[1]*np.power(T, CPE_[4]))/ \
(1. + CPE_[2]*np.power(x, CPE_[5])*(1. + CPE_[3]*np.power(x, CPE_[6])))
return 1.7e-26*chi_PE*Z_d*eps
def heatPE_BT94(nH, T, xe, Z_d, chi_PE):
x = get_charge_param(nH, T, xe, chi_PE)
eps_BT94 = 4.87e-2/(1.0 + 4e-3*x**0.73) + 3.65e-2*(T*1e-4)**0.7/(1.0 + 2e-4*x)
return 1.7e-24*chi_PE*Z_d*eps_BT94
def heatPE_W03(nH, T, xe, Z_d, chi_PE, phi=0.5):
x = get_charge_param(nH, T, xe, chi_PE, phi=phi)
eps_BT94 = 4.87e-2/(1.0 + 4e-3*x**0.73) + 3.65e-2*(T*1e-4)**0.7/(1.0 + 2e-4*x)
# Multiply by 1.3 (due to increased PAH abundance)
return 1.3*1.7e-24*chi_PE*Z_d*eps_BT94
def heatCR(nH, xe, xHI, xH2, xi_CR):
# Heating rate per ionization in atomic region
# See Eq.30.1 in Draine (2011)
eV_cgs = (1.0*au.eV).cgs.value
xHetot = 0.1
# JKIM: Isn't the last term 1.5*xHetot?
ktot = xi_CR*((2.3*xH2 + 1.5*xHI)*(xHI + 2.0*xH2) + 1.1*xHetot)
qHI = (6.5 + 26.4*np.sqrt(xe / (xe + 0.07)))*eV_cgs
# Heating rate per ionization in molecular region
# See Appendix B in Krumholz 2014 (Despotic)
log_nH = np.log10(nH)
qH2 = np.zeros_like(nH)
qH2 = np.where(log_nH < 2.0, 10.0*eV_cgs, 0.0) + \
np.where(np.logical_and(log_nH >= 2.0, log_nH < 4.0),
(10 + 3*(log_nH - 2.0)*0.5)*eV_cgs, 0.0) + \
np.where(np.logical_and(log_nH >= 4.0, log_nH < 7.0),
(13 + 4*(log_nH - 4.0)/3)*eV_cgs, 0.0) + \
np.where(np.logical_and(log_nH >= 7.0, log_nH < 10.0),
(17 + (log_nH - 7.0)/3)*eV_cgs, 0.0) + \
np.where(log_nH >= 10.0, 18.0*eV_cgs, 0.0)
return ktot*(xHI*qHI + 2.0*xH2*qH2)
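# Worked example of the piecewise qH2 above: at nH = 1e3 cm^-3 (log10 nH = 3) the heating
# per cosmic-ray ionization in molecular gas is (10 + 3*(3-2)*0.5) eV = 11.5 eV; it reaches
# 13 eV at log10 nH = 4 and saturates at 18 eV for log10 nH >= 10.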
def heatH2form(nH, T, xHI, xH2, Z_d):
# Hollenbach & McKee (1978)
eV_cgs = (1.0*au.eV).cgs.value
de = 1.6*xHI*np.exp(-(400.0/T)**2) + 1.4*xH2*np.exp(-12000.0/(1200.0 + T))
ncrit = 1e6/np.sqrt(T)/de
f = nH/(nH + ncrit)
return 3.0e-17*Z_d*nH*xHI*(0.2 + 4.2*f)*eV_cgs
def heatH2pump(nH, T, xHI, xH2, xi_diss_H2):
# Hollenbach & McKee (1978)
eV_cgs = (1.0*au.eV).cgs.value
de = 1.6*xHI*np.exp(-(400.0/T)**2) + 1.4*xH2*np.exp(-12000.0/(1200.0 + T))
ncrit = 1e6/np.sqrt(T)/de
f = nH/(nH + ncrit)
return 9.0*2.2*xi_diss_H2*xH2*f*eV_cgs
def heatH2diss(xH2, xi_diss_H2):
eV_cgs = (1.0*au.eV).cgs.value
return 0.4*xi_diss_H2*xH2*eV_cgs
def heatH2pump_Burton90(nH, T, xHI, xH2, xi_diss_H2):
# Burton, Hollenbach, & Tielens (1990)
kpump = 6.94*xi_diss_H2
Cdex = 1e-12*(1.4*np.exp(-18100.0/(T + 1200.0))*xH2 + \
np.exp(-1000.0/T)*xHI)*np.sqrt(T)*nH
Crad = 2e-7
Epump = 2.0*1.602e-12*Cdex/(Cdex + Crad)
return kpump*Epump*xH2
def q10CII_(nH, T, xe, xHI, xH2):
"""Compute collisional de-excitation rate [s^-1]
"""
# Ortho-to-para ratio of H2
fp_ = 0.25
fo_ = 0.75
# Eqs (17.16) and (17.17) in Draine (2011)
T2 = T*1e-2
k10e = 4.53e-8*np.sqrt(1.0e4/T)
# Omega10e = (1.55+1.25*T*1e-4)/(1 + 0.35*(T*1e-4)**1.25)
# k10e = 8.629e-8/np.sqrt(T*1e-4)*Omega10e
k10HI = 7.58e-10*np.power(T2, 0.1281+0.0087*np.log(T2))
k10oH2 = np.zeros_like(T)
k10pH2 = np.zeros_like(T)
# For T< 500K, use fit in Wiesenfeld & Goldsmith (2014)
# For high temperatures, use Glover & Jappsen (2007); scales similarly to HI
tmp = np.power(T, 0.07)
k10oH2 = np.where(T < 500.0,
(5.33 + 0.11*T2)*1.0e-10,
3.74757785025e-10*tmp)
k10pH2 = np.where(T < 500.0,
(4.43 + 0.33*T2)*1.0e-10,
3.88997286356e-10*tmp)
k10H2 = k10oH2*fo_ + k10pH2*fp_
return nH*(k10e*xe + k10HI*xHI + k10H2*xH2)
def coolCII(nH, T, xe, xHI, xH2, xCII):
g0CII_ = 2.
g1CII_ = 4.
A10CII_ = 2.3e-6
E10CII_ = 1.26e-14
kB_cgs = ac.k_B.cgs.value
q10 = q10CII_(nH, T, xe, xHI, xH2)
q01 = (g1CII_/g0CII_)*q10*np.exp(-E10CII_/(kB_cgs*T))
return q01/(q01 + q10 + A10CII_)*A10CII_*E10CII_*xCII
def coolHIion(nH, T, xe, xHI):
eV_cgs = (1.0*au.eV).cgs.value
return 13.6*eV_cgs*coeff_kcoll_H(T)*nH*xe*xHI
def coolCI(nH, T, xe, xHI, xH2, xCI):
kB_cgs = ac.k_B.cgs.value
fp_ = 0.25
fo_ = 0.75
# CI, 3 level system
g0CI_ = 1
g1CI_ = 3
g2CI_ = 5
A10CI_ = 7.880e-08
A20CI_ = 1.810e-14
A21CI_ = 2.650e-07
E10CI_ = 3.261e-15
E20CI_ = 8.624e-15
E21CI_ = 5.363e-15
# e-collisional coefficents (Johnson, Burke, & Kingston 1987; JPhysB, 20, 2553)
T2 = T*1e-2
lnT2 = np.log(T2)
lnT = np.log(T)
# ke(u,l) = fac*gamma(u,l)/g(u)
fac = 8.629e-8*np.sqrt(1.0e4/T)
# Collisional strength (valid for T < 10^4 K)
lngamma10e = np.zeros_like(T)
lngamma20e = np.zeros_like(T)
lngamma21e = np.zeros_like(T)
lngamma10e = np.where(T < 1.0e3,
(((-6.56325e-4*lnT -1.50892e-2)*lnT + 3.61184e-1)*\
lnT -7.73782e-1)*lnT - 9.25141,
(((1.0508e-1*lnT - 3.47620)*lnT + 4.2595e1)*\
lnT- 2.27913e2)*lnT + 4.446e2)
lngamma20e = np.where(T < 1.0e3,
(((0.705277e-2*lnT - 0.111338)*lnT + 0.697638)*
lnT - 1.30743)*lnT -7.69735,
(((9.38138e-2*lnT - 3.03283)*lnT +3.61803e1)*\
lnT - 1.87474e2)*lnT +3.50609e2)
lngamma21e = np.where(T < 1.0e3,
(((2.35272e-3*lnT - 4.18166e-2)*lnT + 0.358264)*\
lnT - 0.57443)*lnT -7.4387,
(((9.78573e-2*lnT - 3.19268)*lnT +3.85049e1)*\
lnT - 2.02193e2)*lnT +3.86186e2)
k10e = fac * np.exp(lngamma10e)/g1CI_
k20e = fac * np.exp(lngamma20e)/g2CI_
k21e = fac * np.exp(lngamma21e)/g2CI_
# Draine's HI/H2 collisional rates (Appendix F Table F.6)
# NOTE: this is more updated than the LAMBDA database.
k10HI = 1.26e-10 * np.power(T2, 0.115+0.057*lnT2)
k20HI = 0.89e-10 * np.power(T2, 0.228+0.046*lnT2)
k21HI = 2.64e-10 * np.power(T2, 0.231+0.046*lnT2)
k10H2p = 0.67e-10 * np.power(T2, -0.085+0.102*lnT2)
k10H2o = 0.71e-10 * np.power(T2, -0.004+0.049*lnT2)
k20H2p = 0.86e-10 * np.power(T2, -0.010+0.048*lnT2)
k20H2o = 0.69e-10 * np.power(T2, 0.169+0.038*lnT2)
k21H2p = 1.75e-10 * np.power(T2, 0.072+0.064*lnT2)
k21H2o = 1.48e-10 * np.power(T2, 0.263+0.031*lnT2)
k10H2 = k10H2p*fp_ + k10H2o*fo_
k20H2 = k20H2p*fp_ + k20H2o*fo_
k21H2 = k21H2p*fp_ + k21H2o*fo_
# The total collisional rates
q10 = nH*(k10HI*xHI + k10H2*xH2 + k10e*xe)
q20 = nH*(k20HI*xHI + k20H2*xH2 + k20e*xe)
q21 = nH*(k21HI*xHI + k21H2*xH2 + k21e*xe)
q01 = (g1CI_/g0CI_) * q10 * np.exp(-E10CI_/(kB_cgs*T))
q02 = (g2CI_/g0CI_) * q20 * np.exp(-E20CI_/(kB_cgs*T))
q12 = (g2CI_/g1CI_) * q21 * np.exp(-E21CI_/(kB_cgs*T))
return cool3Level_(q01,q10,q02,q20,q12,q21,A10CI_,A20CI_,
A21CI_,E10CI_,E20CI_,E21CI_,xCI)
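# cool3Level_ is a helper defined elsewhere in this module. A minimal sketch of what such a
# steady-state three-level solver typically looks like is given below (illustrative only;
# the actual implementation may differ):
#
#   def cool3Level_(q01, q10, q02, q20, q12, q21, A10, A20, A21, E10, E20, E21, xs):
#       R10 = q10 + A10
#       R20 = q20 + A20
#       R21 = q21 + A21
#       # level fractions from d(n1)/dt = d(n2)/dt = 0 with n0 + n1 + n2 = 1
#       a0 = R10*R20 + R10*R21 + q12*R20
#       a1 = q01*R20 + q01*R21 + R21*q02
#       a2 = q02*R10 + q02*q12 + q12*q01
#       de = a0 + a1 + a2
#       f1, f2 = a1/de, a2/de
#       # radiative losses per H nucleus for a species of abundance xs
#       return (f1*A10*E10 + f2*(A20*E20 + A21*E21))*xs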
def coolOII(nH, T, xe, xOII):
T4 = T*1e-4
kB_cgs = ac.k_B.cgs.value
# OII, 3 level system
g0OII_ = 4 # 4S_3/2
g1OII_ = 6 # 2D_5/2
g2OII_ = 4 # 2D_3/2
A10OII_ = 3.6e-5
A20OII_ = 1.6e-4
A21OII_ = 1.3e-7
E10OII_ = (ac.h*ac.c/(3728.8*au.angstrom)).to('erg').value
E20OII_ = (ac.h*ac.c/(3726.0*au.angstrom)).to('erg').value
E21OII_ = (ac.h*ac.c/(497.1*au.micron)).to('erg').value
# Draine (2011)
Omega10e = 0.803*T4**(0.023-0.008*np.log(T4))
Omega20e = 0.550*T4**(0.054-0.004*np.log(T4))
Omega21e = 1.434*T4**(-0.176+0.004*np.log(T4))
prefactor = 8.629e-8/np.sqrt(T4)
k10e = prefactor*Omega10e/g1OII_
k20e = prefactor*Omega20e/g2OII_
k21e = prefactor*Omega21e/g2OII_
# Total collisional rates
q10 = nH*k10e*xe
q20 = nH*k20e*xe
q21 = nH*k21e*xe
q01 = (g1OII_/g0OII_) * q10 * np.exp(-E10OII_/(kB_cgs*T))
q02 = (g2OII_/g0OII_) * q20 * np.exp(-E20OII_/(kB_cgs*T))
q12 = (g2OII_/g1OII_) * q21 * np.exp(-E21OII_/(kB_cgs*T))
return cool3Level_(q01, q10, q02, q20, q12, q21, A10OII_, A20OII_,
A21OII_, E10OII_, E20OII_, E21OII_, xOII)
def coolOI(nH, T, xe, xHI, xH2, xOI):
kB_cgs = ac.k_B.cgs.value
# Ortho-to-para ratio of H2
fp_ = 0.25
fo_ = 0.75
# OI, 3 level system
g0OI_ = 5
g1OI_ = 3
g2OI_ = 1
A10OI_ = 8.910e-05
A20OI_ = 1.340e-10
A21OI_ = 1.750e-05
E10OI_ = 3.144e-14
E20OI_ = 4.509e-14
E21OI_ = 1.365e-14
T2 = T*1e-2
lnT2 = np.log(T2)
# Collisional rates from Draine (2011) (Appendix F Table F.6)
# HI
k10HI = 3.57e-10*np.power(T2, 0.419-0.003*lnT2)
k20HI = 3.19e-10*np.power(T2, 0.369-0.006*lnT2)
k21HI = 4.34e-10*np.power(T2, 0.755-0.160*lnT2)
# H2
k10H2p = 1.49e-10 * np.power(T2, 0.264+0.025*lnT2)
k10H2o = 1.37e-10 * np.power(T2, 0.296+0.043*lnT2)
k20H2p = 1.90e-10 * np.power(T2, 0.203+0.041*lnT2)
k20H2o = 2.23e-10 * np.power(T2, 0.237+0.058*lnT2)
k21H2p = 2.10e-12 * np.power(T2, 0.889+0.043*lnT2)
k21H2o = 3.00e-12 * np.power(T2, 1.198+0.525*lnT2)
k10H2 = k10H2p*fp_ + k10H2o*fo_
k20H2 = k20H2p*fp_ + k20H2o*fo_
k21H2 = k21H2p*fp_ + k21H2o*fo_
# Electrons; fit from Bell+1998
k10e = 5.12e-10 * np.power(T, -0.075)
k20e = 4.86e-10 * np.power(T, -0.026)
k21e = 1.08e-14 * np.power(T, 0.926)
# Total collisional rates
q10 = nH*(k10HI*xHI + k10H2*xH2 + k10e*xe)
q20 = nH*(k20HI*xHI + k20H2*xH2 + k20e*xe)
q21 = nH*(k21HI*xHI + k21H2*xH2 + k21e*xe)
q01 = (g1OI_/g0OI_) * q10 * np.exp(-E10OI_/(kB_cgs*T))
q02 = (g2OI_/g0OI_) * q20 * np.exp(-E20OI_/(kB_cgs*T))
q12 = (g2OI_/g1OI_) * q21 * np.exp(-E21OI_/(kB_cgs*T))
return cool3Level_(q01, q10, q02, q20, q12, q21, A10OI_, A20OI_,
A21OI_, E10OI_, E20OI_, E21OI_, xOI)
def coolLya(nH, T, xe, xHI):
# HI, 2 level system
A10HI_ = 6.265e8
E10HI_ = 1.634e-11
g0HI_ = 1
g1HI_ = 3
ne = xe*nH
T4 = T*1.0e-4
# fac = 6.3803e-9*np.power(T4, 1.17)
fac = 5.30856e-08*np.power(T4,1.4897e-01)/(1.0 + np.power(0.2*T4, 0.64897))
k01e = fac*np.exp(-11.84/T4)
q01 = k01e*ne
q10 = (g0HI_/g1HI_)*fac*ne
return q01/(q01 + q10 + A10HI_)*A10HI_*E10HI_*xHI
def coolHI(nH, T, xHI, xe):
# Neutral Hydrogen cooling (Lya + Lyb + two photon) taken from DESPOTIC
#TLyA = (3.0/4.0*(ac.h*ac.c*ac.Ryd).to('eV')/ac.k_B).to('K').value
#TLyB = (8.0/9.0*(ac.h*ac.c*ac.Ryd).to('eV')/ac.k_B).to('K').value
TLyA = 118415.63430152694
TLyB = 140344.45546847637
kB = ac.k_B.cgs.value
upsilon2s = 0.35
upsilon2p = 0.69
upsilon3s = 0.077
upsilon3p = 0.14
upsilon3d = 0.073
fac = 8.629e-6/(2*np.sqrt(T))
exfacLyA = np.exp(-TLyA/T)
exfacLyB = np.exp(-TLyB/T)
Lambda2p = fac * exfacLyA * upsilon2s * xHI * xe * nH * kB * TLyA
LambdaLyA = fac * exfacLyA * upsilon2p * xHI * xe * nH * kB * TLyA
LambdaLyB = fac * exfacLyB * (upsilon3s + upsilon3p + upsilon3d) * xHI * xe * nH * kB * TLyB
return Lambda2p + LambdaLyA + LambdaLyB
def coolH2G17(nH, T, xHI, xH2, xHII, xe, xHe=0.1):
"""
H2 Cooling from Gong et al. (2017)
"""
Tmax_H2 = 6000. # maximum temperature above which use Tmax
Tmin_H2 = 10. # minimum temperature below which cut off cooling
# Note: limit extended to T< 10K and T>6000K
T = np.where(T > Tmax_H2, Tmax_H2, T)
logT3 = np.log10(T*1.0e-3)
logT3_2 = logT3 * logT3
logT3_3 = logT3_2 * logT3
logT3_4 = logT3_3 * logT3
logT3_5 = logT3_4 * logT3
# HI
LHI = np.where(T < 100.0,
np.power(10, -16.818342e0 +3.7383713e1*logT3 \
+ 5.8145166e1*logT3_2 + 4.8656103e1*logT3_3 \
+ 2.0159831e1*logT3_4 + 3.8479610e0*logT3_5), 0.0)
LHI += np.where(np.logical_and(T >= 100.0, T < 1000.0),
np.power(10, -2.4311209e1 +3.5692468e0*logT3 \
- 1.1332860e1*logT3_2 - 2.7850082e1*logT3_3 \
- 2.1328264e1*logT3_4 - 4.2519023e0*logT3_5), 0.0)
LHI += np.where(T >= 1000.0,
np.power(10, -2.4311209e1 + 4.6450521e0*logT3 \
- 3.7209846e0*logT3_2 + 5.9369081e0*logT3_3 \
- 5.5108049e0*logT3_4 + 1.5538288e0*logT3_5), 0.0)
# H2
LH2 = np.power(10, -2.3962112e1 +2.09433740e0*logT3 \
-0.77151436e0*logT3_2 +0.43693353e0*logT3_3 \
-0.14913216e0*logT3_4 -0.033638326e0*logT3_5)
# He
LHe = np.power(10, -2.3689237e1 +2.1892372e0*logT3 \
-0.81520438e0*logT3_2 +0.29036281e0*logT3_3 \
-0.16596184e0*logT3_4 +0.19191375e0*logT3_5)
# H+
LHplus = np.power(10, -2.1716699e1 +1.3865783e0*logT3 \
-0.37915285e0*logT3_2 +0.11453688e0*logT3_3 \
-0.23214154e0*logT3_4 +0.058538864e0*logT3_5)
# e
Le = np.where(T < 200.0,
np.power(10, -3.4286155e1 -4.8537163e1*logT3 \
-7.7121176e1*logT3_2 -5.1352459e1*logT3_3 \
-1.5169150e1*logT3_4 -0.98120322e0*logT3_5),
np.power(10, -2.2190316e1 +1.5728955e0*logT3 \
-0.213351e0*logT3_2 +0.96149759e0*logT3_3 \
-0.91023195e0*logT3_4 +0.13749749e0*logT3_5)
)
# total cooling in low density limit
Gamma_n0 = LHI*xHI*nH + LH2*xH2*nH + LHe*xHe*nH + LHplus*xHII*nH + Le*xe*nH
# cooling rate at LTE, from Hollenbach + McKee 1979
T3 = T*1.0e-3
Gamma_LTE_HR = (9.5e-22*np.power(T3, 3.76))/(1.+0.12*np.power(T3, 2.1))* \
np.exp(-np.power(0.13/T3, 3))+ 3.e-24*np.exp(-0.51/T3)
Gamma_LTE_HV = 6.7e-19*np.exp(-5.86/T3) + 1.6e-18*np.exp(-11.7/T3)
Gamma_LTE = Gamma_LTE_HR + Gamma_LTE_HV
# Total cooling rate
Gamma_tot = np.where(Gamma_n0 > 1e-100,
Gamma_LTE / (1.0 + Gamma_LTE/Gamma_n0),
0.0)
Gamma_tot = np.where(T >= Tmin_H2, Gamma_tot, 0.0)
return Gamma_tot * xH2
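# Note on the interpolation above: Gamma_tot = Gamma_LTE/(1 + Gamma_LTE/Gamma_n0) reduces to
# the low-density limit Gamma_n0 when collisions are rare (Gamma_n0 << Gamma_LTE) and
# saturates at the LTE rate Gamma_LTE when collisions dominate.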
def coolH2(nH, T, xHI, xH2):
"""
Cooling by rotation-vibration lines of H2
from Moseley et al. (2021)
"""
n1 = 50.0
n2 = 450.0
n3 = 25.0
n4 = 900
T3 = T*1e-3
T3inv = 1.0/T3
nH2 = xH2*nH
nHI = xHI*nH
x1 = nHI + 5.0*nH2
x2 = nHI + 4.5*nH2
x3 = nHI + 0.75*nH2
x4 = nHI + 0.05*nH2
sqrtT3 = np.power(T3,0.5)
f1 = 1.1e-25*sqrtT3*np.exp(-0.51*T3inv)* \
(0.7*x1/(1.0 + x1/n1) + 0.3*x1/(1.0 + x1/(10.0*n1)))
f2 = 2.0e-25*T3*
|
np.exp(-T3inv)
|
numpy.exp
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import math
import unittest
import warnings
import grpc
import numpy as np
import pandas as pd
import mlos.global_values as global_values
from mlos.Grpc.OptimizerMicroserviceServer import OptimizerMicroserviceServer
from mlos.Grpc.OptimizerMonitor import OptimizerMonitor
from mlos.Grpc.OptimizerService_pb2 import Empty
from mlos.Grpc.OptimizerService_pb2_grpc import OptimizerServiceStub
from mlos.Logger import create_logger
from mlos.OptimizerEvaluationTools.ObjectiveFunctionFactory import ObjectiveFunctionFactory, objective_function_config_store
from mlos.Optimizers.BayesianOptimizer import bayesian_optimizer_config_store
from mlos.Optimizers.BayesianOptimizerFactory import BayesianOptimizerFactory
from mlos.Optimizers.OptimizationProblem import OptimizationProblem, Objective
class TestBayesianOptimizerGrpcClient(unittest.TestCase):
""" Tests the E2E Grpc Client-Service workflow.
"""
@classmethod
def setUpClass(cls):
warnings.simplefilter("error")
global_values.declare_singletons()
def setUp(self):
self.logger = create_logger(self.__class__.__name__)
# Start up the gRPC service.
#
self.server = OptimizerMicroserviceServer(port=50051, num_threads=10)
self.server.start()
self.optimizer_service_channel = grpc.insecure_channel('localhost:50051')
self.bayesian_optimizer_factory = BayesianOptimizerFactory(grpc_channel=self.optimizer_service_channel, logger=self.logger)
self.optimizer_monitor = OptimizerMonitor(grpc_channel=self.optimizer_service_channel, logger=self.logger)
objective_function_config = objective_function_config_store.get_config_by_name('2d_quadratic_concave_up')
self.objective_function = ObjectiveFunctionFactory.create_objective_function(objective_function_config)
self.optimization_problem = OptimizationProblem(
parameter_space=self.objective_function.parameter_space,
objective_space=self.objective_function.output_space,
objectives=[Objective(name='y', minimize=True)]
)
def tearDown(self):
""" We need to tear down the gRPC server here.
:return:
"""
self.server.stop(grace=None)
def test_echo(self):
optimizer_service_stub = OptimizerServiceStub(channel=self.optimizer_service_channel)
response = optimizer_service_stub.Echo(Empty())
self.assertTrue(isinstance(response, Empty))
def test_optimizer_with_default_config(self):
pre_existing_optimizers = {optimizer.id: optimizer for optimizer in self.optimizer_monitor.get_existing_optimizers()}
print(bayesian_optimizer_config_store.default)
bayesian_optimizer = self.bayesian_optimizer_factory.create_remote_optimizer(
optimization_problem=self.optimization_problem,
optimizer_config=bayesian_optimizer_config_store.default
)
post_existing_optimizers = {optimizer.id: optimizer for optimizer in self.optimizer_monitor.get_existing_optimizers()}
new_optimizers = {
optimizer_id: optimizer
for optimizer_id, optimizer in post_existing_optimizers.items()
if optimizer_id not in pre_existing_optimizers
}
self.assertTrue(len(new_optimizers) == 1)
new_optimizer_id = list(new_optimizers.keys())[0]
new_optimizer = new_optimizers[new_optimizer_id]
self.assertTrue(new_optimizer_id == bayesian_optimizer.id)
self.assertTrue(new_optimizer.optimizer_config == bayesian_optimizer.optimizer_config)
num_iterations = 100
registered_features_df, registered_objectives_df = self.optimize_quadratic(optimizer=bayesian_optimizer, num_iterations=num_iterations)
# Apparently the to_json/from_json loses precision so we explicitly lose it here so that we can do the comparison.
#
registered_features_json = registered_features_df.to_json(orient='index', double_precision=15)
registered_objectives_json = registered_objectives_df.to_json(orient='index', double_precision=15)
# Apparently the jitter is too good and we actually have to use the json strings or they will be optimized away.
#
assert len(registered_features_json) > 0
assert len(registered_objectives_json) > 0
registered_features_df = pd.read_json(registered_features_json, orient='index')
registered_objectives_df = pd.read_json(registered_objectives_json, orient='index')
observed_features_df, observed_objectives_df = bayesian_optimizer.get_all_observations()
self.assertTrue((np.abs(registered_features_df - observed_features_df) < 0.00000001).all().all())
self.assertTrue((np.abs(registered_objectives_df - observed_objectives_df) < 0.00000001).all().all())
convergence_state = bayesian_optimizer.get_optimizer_convergence_state()
# Now let's make sure the convergence state looks reasonable.
#
random_forest_fit_state = convergence_state.surrogate_model_fit_state
# Let's look at the goodness of fit.
#
random_forest_gof_metrics = random_forest_fit_state.current_train_gof_metrics
# The model might not have used all of the samples, but it should have used a majority of them (I expect about 90%); 70% is a good sanity check
# and should keep this test from being flaky.
self.assertTrue(random_forest_gof_metrics.last_refit_iteration_number > 0.7 * num_iterations)
# The invariants below should be true for all surrogate models: the random forest, and all constituent decision trees. So let's iterate over them all.
models_gof_metrics = [random_forest_gof_metrics]
for decision_tree_fit_state in random_forest_fit_state.decision_trees_fit_states:
models_gof_metrics.append(decision_tree_fit_state.current_train_gof_metrics)
for model_gof_metrics in models_gof_metrics:
self.assertTrue(0 <= model_gof_metrics.relative_absolute_error <= 1) # This could fail if the models are really wrong. Not expected in this unit test though.
self.assertTrue(0 <= model_gof_metrics.relative_squared_error <= 1)
# There is an invariant linking mean absolute error (MAE), root mean squared error (RMSE), and the number of observations (n); let's assert it.
n = model_gof_metrics.last_refit_iteration_number
self.assertTrue(model_gof_metrics.mean_absolute_error <= model_gof_metrics.root_mean_squared_error <= math.sqrt(n) * model_gof_metrics.mean_absolute_error)
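# Why this invariant holds: MAE <= RMSE by the Cauchy-Schwarz (power-mean) inequality, and
# since sum(e_i^2) <= (sum|e_i|)^2, RMSE^2 = sum(e_i^2)/n <= n*MAE^2, i.e. RMSE <= sqrt(n)*MAE.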
# We know that the sample confidence interval is wider than (or equal to) the prediction interval. So hit rates should be ordered accordingly.
self.assertTrue(model_gof_metrics.sample_90_ci_hit_rate >= model_gof_metrics.prediction_90_ci_hit_rate)
self.assertTrue(0 <= model_gof_metrics.coefficient_of_determination <= 1)
def test_optimizer_with_random_config(self):
num_random_restarts = 10
for i in range(num_random_restarts):
optimizer_config = bayesian_optimizer_config_store.parameter_space.random()
optimizer_config.min_samples_required_for_guided_design_of_experiments = min(optimizer_config.min_samples_required_for_guided_design_of_experiments, 100)
if optimizer_config.surrogate_model_implementation == "HomogeneousRandomForestRegressionModel":
rf_config = optimizer_config.homogeneous_random_forest_regression_model_config
rf_config.n_estimators = min(rf_config.n_estimators, 20)
print(f"[{i+1}/{num_random_restarts}] Creating a bayesian optimizer with config: {optimizer_config}")
bayesian_optimizer = self.bayesian_optimizer_factory.create_remote_optimizer(
optimization_problem=self.optimization_problem,
optimizer_config=optimizer_config
)
registered_features_df, registered_objectives_df = self.optimize_quadratic(optimizer=bayesian_optimizer, num_iterations=12)
# Apparently the to_json/from_json loses precision so we explicitly lose it here so that we can do the comparison.
#
registered_features_json = registered_features_df.to_json(orient='index', double_precision=15)
registered_objectives_json = registered_objectives_df.to_json(orient='index', double_precision=15)
# Apparently the jitter is too good and we actually have to use the json strings or they will be optimized away.
#
assert len(registered_features_json) > 0
assert len(registered_objectives_json) > 0
registered_features_df = pd.read_json(registered_features_json, orient='index')
registered_objectives_df = pd.read_json(registered_objectives_json, orient='index')
observed_features_df, observed_objectives_df = bayesian_optimizer.get_all_observations()
self.assertTrue((np.abs(registered_features_df - observed_features_df) < 0.00000001).all().all())
self.assertTrue((
|
np.abs(registered_objectives_df - observed_objectives_df)
|
numpy.abs
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(out1.shape)
out2=np.arange(400,350,-1)/400
#print(out2.shape)
out3=np.zeros(400)
#print(out3.shape)
out4=np.arange(800,850,1)/850
#print(out4.shape)
out5=np.ones(100)
#print(out5.shape)
out6 = np.arange(1100,950,-1)/1100
out7=np.zeros(180)
fin = np.concatenate((out, out1, out2,out3,out4,out5,out6,out7))
fin = np.expand_dims(fin,axis=1)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply threshold
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
if orient=='x':
sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
else:
sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
absolute = np.absolute(sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
grad_binary = np.zeros_like(scaled)
grad_binary[(scaled >= thresh[0])&(scaled <= thresh[1])] = 1
return grad_binary
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
mag_binary = np.zeros_like(scaled)
mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
absx = np.absolute(sobelx)
absy = np.absolute(sobely)
direction = np.arctan2(absy,absx)
dir_binary = np.zeros_like(gray_img)
dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
return dir_binary
def hls_select(image,thresh=(0,255)):
hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
s = hls[:,:,2]
binary_output = np.zeros_like(s)
binary_output[(s>thresh[0])&(s<=thresh[1])]=1
return binary_output
def equalize(image):
image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
#plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
window_height = np.int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
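# The usual next step in such a pipeline (presumably done further down in the full script,
# not shown in this excerpt; sketched here only for context) is to fit second-order
# polynomials x = f(y) to the lane pixels returned by hist(), e.g.:
#   left_fit = np.polyfit(lefty, leftx, 2)
#   right_fit = np.polyfit(righty, rightx, 2)
#   ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])
#   left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]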
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(
|
np.copy(src)
|
numpy.copy
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import *
from keras.models import load_model
import matplotlib.pyplot as plt
# generate training data
x = np.linspace(0.0,2*np.pi,20)
y = np.sin(x)
# save training data to file
data = np.vstack((x,y)).T
np.savetxt('train_data.csv',data,header='x,y',comments='',delimiter=',')
# generate test data
x = np.linspace(0.0,2*np.pi,100)
y = np.sin(x)
# save test data to file
data = np.vstack((x,y)).T
|
np.savetxt('test_data.csv',data,header='x,y',comments='',delimiter=',')
|
numpy.savetxt
|
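A quick round-trip check of the files written above (a minimal sketch; `train_data.csv` and `test_data.csv` are the filenames used in the snippet, and the single header row written by np.savetxt is skipped):
import numpy as np

train = np.loadtxt('train_data.csv', delimiter=',', skiprows=1)
test = np.loadtxt('test_data.csv', delimiter=',', skiprows=1)
assert train.shape == (20, 2)
assert test.shape == (100, 2)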
import logging
import numpy as np
import pytest
from climate_indices import compute
# ------------------------------------------------------------------------------
# disable logging messages
logging.disable(logging.CRITICAL)
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
"precips_mm_monthly",
"precips_mm_daily",
"data_year_start_monthly",
"data_year_end_monthly",
"data_year_start_daily",
"calibration_year_start_monthly",
"calibration_year_end_monthly",
"calibration_year_start_daily",
"calibration_year_end_daily",
"transformed_gamma_monthly",
"transformed_gamma_daily")
def test_transform_fitted_gamma(
precips_mm_monthly,
precips_mm_daily,
data_year_start_monthly,
data_year_end_monthly,
data_year_start_daily,
calibration_year_start_monthly,
calibration_year_end_monthly,
calibration_year_start_daily,
calibration_year_end_daily,
transformed_gamma_monthly,
transformed_gamma_daily):
"""
Test for the compute.transform_fitted_gamma() function
"""
# confirm that an input array of all NaNs results in the same array returned
all_nans = np.full(precips_mm_monthly.shape, np.NaN)
computed_values = compute.transform_fitted_gamma(all_nans,
data_year_start_monthly,
data_year_start_monthly,
data_year_end_monthly,
compute.Periodicity.monthly)
np.testing.assert_allclose(computed_values,
all_nans,
equal_nan=True,
err_msg="Gamma fit/transform not handling "
"all-NaN arrays as expected")
# compute sigmas of transformed (normalized) values fitted to a gamma
# distribution, using the full period of record as the calibration period
computed_values = compute.transform_fitted_gamma(precips_mm_monthly,
data_year_start_monthly,
data_year_start_monthly,
data_year_end_monthly,
compute.Periodicity.monthly)
np.testing.assert_allclose(computed_values,
transformed_gamma_monthly,
err_msg="Transformed gamma fitted monthly "
"values not computed as expected")
# compute sigmas of transformed (normalized) values fitted to a gamma
# distribution, using the full period of record as the calibration period
computed_values = compute.transform_fitted_gamma(precips_mm_daily.flatten(),
data_year_start_daily,
calibration_year_start_daily,
calibration_year_end_daily,
compute.Periodicity.daily)
np.testing.assert_allclose(computed_values,
transformed_gamma_daily,
atol=0.001,
equal_nan=True,
err_msg="Transformed gamma fitted daily "
"values not computed as expected")
# confirm that we can call with a calibration period outside of valid range
# and as a result use the full period of record as the calibration period instead
computed_values = compute.transform_fitted_gamma(precips_mm_monthly,
data_year_start_monthly,
1500,
2500,
compute.Periodicity.monthly)
np.testing.assert_allclose(computed_values,
transformed_gamma_monthly,
atol=0.001,
equal_nan=True,
err_msg="Transformed Pearson Type III "
"fitted values not computed as expected")
# if we provide a 1-D array then we need to provide a corresponding
# time series type, confirm we can't use an invalid type
flat_array = precips_mm_monthly.flatten()
np.testing.assert_raises(ValueError,
compute.transform_fitted_gamma,
flat_array,
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
"invalid_value")
np.testing.assert_raises(ValueError,
compute.transform_fitted_gamma,
flat_array,
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
None)
# confirm that an input array which is not 1-D or 2-D will raise an error
pytest.raises(ValueError,
compute.transform_fitted_gamma,
np.zeros((9, 8, 7, 6), dtype=float),
data_year_start_daily,
calibration_year_start_daily,
calibration_year_end_daily,
compute.Periodicity.monthly)
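# Conceptual sketch (illustrative only, not the climate_indices implementation): the
# gamma fit/transform fits a gamma distribution to each calendar period over the
# calibration years and maps each value to the standard-normal deviate with the same
# cumulative probability, roughly:
#
#   import scipy.stats as st
#   def _gamma_transform_one_period(values):
#       fit_vals = values[~np.isnan(values)]
#       shape, _, scale = st.gamma.fit(fit_vals, floc=0.0)
#       return st.norm.ppf(st.gamma.cdf(values, shape, loc=0.0, scale=scale))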
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
"precips_mm_monthly",
"precips_mm_daily",
"data_year_start_monthly",
"data_year_end_monthly",
"data_year_start_daily",
"calibration_year_start_monthly",
"calibration_year_end_monthly",
"calibration_year_start_daily",
"calibration_year_end_daily",
"gamma_monthly",
"gamma_daily",
)
def test_gamma_parameters(
precips_mm_monthly,
precips_mm_daily,
data_year_start_monthly,
data_year_end_monthly,
data_year_start_daily,
calibration_year_start_monthly,
calibration_year_end_monthly,
calibration_year_start_daily,
calibration_year_end_daily,
gamma_monthly,
gamma_daily,
):
"""
Test for the compute.gamma_parameters() function
"""
# confirm that an input array of all NaNs results in the same array returned
all_nans = np.full(precips_mm_monthly.shape, np.NaN)
nan_alphas = np.full(shape=(12,), fill_value=np.NaN)
nan_betas = np.full(shape=(12,), fill_value=np.NaN)
alphas, betas = compute.gamma_parameters(all_nans,
data_year_start_monthly,
data_year_start_monthly,
data_year_end_monthly,
compute.Periodicity.monthly,
)
assert np.allclose(alphas, nan_alphas, equal_nan=True)
assert np.allclose(betas, nan_betas, equal_nan=True)
computed_values = \
compute.gamma_parameters(
precips_mm_monthly,
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
compute.Periodicity.monthly,
)
np.testing.assert_allclose(
computed_values,
gamma_monthly,
equal_nan=True,
err_msg="Monthly gamma fitting parameters not being computed as expected",
)
computed_values = \
compute.gamma_parameters(
precips_mm_daily,
data_year_start_daily,
calibration_year_start_daily,
calibration_year_end_daily,
compute.Periodicity.daily,
)
np.testing.assert_allclose(
computed_values,
gamma_daily,
equal_nan=True,
err_msg="Daily gamma fitting parameters not being computed as expected",
)
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures("precips_mm_monthly",
"precips_mm_daily",
"data_year_start_monthly",
"data_year_end_monthly",
"data_year_start_daily",
"calibration_year_start_monthly",
"calibration_year_end_monthly",
"calibration_year_start_daily",
"calibration_year_end_daily",
"transformed_pearson3",
"transformed_pearson3_monthly_fullperiod")
def test_transform_fitted_pearson(precips_mm_monthly,
precips_mm_daily,
data_year_start_monthly,
data_year_end_monthly,
data_year_start_daily,
calibration_year_start_monthly,
calibration_year_end_monthly,
calibration_year_start_daily,
calibration_year_end_daily,
transformed_pearson3,
transformed_pearson3_monthly_fullperiod):
"""
Test for the compute.transform_fitted_pearson() function
"""
# confirm that an input array of all NaNs results in the same array returned
all_nans = np.full(precips_mm_monthly.shape, np.NaN)
computed_values = compute.transform_fitted_pearson(all_nans,
data_year_start_monthly,
data_year_start_monthly,
data_year_end_monthly,
compute.Periodicity.monthly)
np.testing.assert_allclose(
computed_values,
all_nans,
equal_nan=True,
err_msg="Pearson fit/transform not handling all-NaN arrays as expected")
# compute sigmas of transformed (normalized) values
# fitted to a Pearson Type III distribution
computed_values = compute.transform_fitted_pearson(precips_mm_monthly,
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
compute.Periodicity.monthly)
expected_values = transformed_pearson3
np.testing.assert_allclose(
computed_values,
expected_values,
atol=0.001,
err_msg="Transformed Pearson Type III fitted "
"values not computed as expected")
# confirm that an input array of all NaNs will return the same array
all_nans = np.full(precips_mm_monthly.shape, np.NaN)
computed_values = compute.transform_fitted_pearson(
all_nans,
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
compute.Periodicity.monthly)
np.testing.assert_allclose(
computed_values,
all_nans,
equal_nan=True,
err_msg="Transformed Pearson Type III fitted "
"values not computed as expected")
# confirm that we can call with a calibration period outside of valid range
# and as a result use the full period of record as the calibration period instead
computed_values = compute.transform_fitted_pearson(precips_mm_monthly,
data_year_start_monthly,
1500,
2500,
compute.Periodicity.monthly)
np.testing.assert_allclose(computed_values.flatten(),
transformed_pearson3_monthly_fullperiod,
atol=0.001,
equal_nan=True,
err_msg="Transformed Pearson Type III fitted "
"values not computed as expected")
# confirm that we can call with daily values and not raise an error
compute.transform_fitted_pearson(precips_mm_daily,
data_year_start_daily,
calibration_year_start_daily,
calibration_year_end_daily,
compute.Periodicity.daily)
# confirm that we get expected errors when
# using invalid time series type arguments
pytest.raises(ValueError,
compute.transform_fitted_pearson,
precips_mm_monthly.flatten(),
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
None)
pytest.raises(ValueError,
compute.transform_fitted_pearson,
precips_mm_monthly.flatten(),
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
"unsupported_type")
# confirm that an input array which is not 1-D or 2-D will raise an error
pytest.raises(ValueError,
compute.transform_fitted_pearson,
np.zeros((9, 8, 7, 6), dtype=float),
data_year_start_daily,
calibration_year_start_daily,
calibration_year_end_daily,
compute.Periodicity.monthly)
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
"precips_mm_monthly",
"data_year_start_monthly",
"calibration_year_start_monthly",
"calibration_year_end_monthly",
)
def test_pearson_parameters(
precips_mm_monthly,
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
):
"""
Test for the compute.pearson_parameters() function
"""
# provide some bogus inputs to make sure these raise expected errors
# np.testing.assert_raises(ValueError,
# compute.pearson_parameters,
# np.array([1.0, 0.0, 0.0]),
# compute.Periodicity.monthly)
# np.testing.assert_raises(ValueError,
# compute.pearson_parameters,
# np.array([1.0, 0.0, 0.0]),
# compute.Periodicity.daily)
# np.testing.assert_raises(ValueError,
# compute.pearson_parameters,
# np.array([1.0, 0.0, 0.0]),
# None)
# np.testing.assert_raises(ValueError,
# compute.pearson_parameters,
# np.array([1.0, 0.0, 0.0, 1.0, 0.0, 0.0]))
np.testing.assert_raises(
ValueError,
compute.pearson_parameters,
np.array(
[[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 5.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 4.7]],
),
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
compute.Periodicity.monthly,
)
np.testing.assert_raises(
ValueError,
compute.pearson_parameters,
None,
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
None)
# try using a subset of the precipitation dataset (1897 - 1915, year indices 2 - 20)
computed_values = \
compute.pearson_parameters(
precips_mm_monthly[2:21, :],
data_year_start_monthly,
calibration_year_start_monthly,
calibration_year_end_monthly,
compute.Periodicity.monthly,
)
expected_probs_of_zero = np.zeros((12,))
expected_locs = np.array([
48.539987664499996,
53.9852487665,
44.284745065842102,
62.583727384894736,
125.72157689160528,
182.03053042784214,
159.00575657926319,
170.92269736865791,
189.8925781252895,
155.13420024692104,
72.953125000026319,
43.31532689144737,
])
expected_scales = np.array([
33.781507724523095,
43.572151699968387,
40.368173442404107,
44.05329691434887,
60.10621716019174,
59.343178125457186,
49.228795303727473,
66.775653341386999,
65.362977393206421,
94.467597091088265,
72.63706898364299,
34.250906049301463,
])
expected_skews = np.array([
0.76530966976335302,
1.2461447518219784,
2.275517179222323,
0.8069305098698194,
-0.6783037020197018,
1.022194696224529,
0.40876120732817578,
1.2372551346168916,
0.73881116931924118,
0.91911763257003465,
2.3846715887263725,
1.4700559294571962,
])
np.testing.assert_allclose(computed_values,
(expected_probs_of_zero, expected_locs, expected_scales, expected_skews),
atol=0.001,
equal_nan=True,
err_msg="Failed to accurately compute Pearson Type III fitting values")
# add some zeros in order to exercise the parts where it gets a percentage of zeros
precips_mm =
|
np.array(precips_mm_monthly, copy=True)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 17:23:20 2019
Functions for smoothing and generating the PDF and CDF.
@author: Yanlong
"""
from __future__ import unicode_literals
import numpy as np
#import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
matplotlib.rcParams['xtick.top'] = True
matplotlib.rcParams['ytick.right'] = True
from scipy.optimize import curve_fit
import sys
import glob
import lmfit
from scipy import signal
from scipy import interpolate
from scipy import special
from scipy import optimize
from scipy import stats
def release_list(a):
del a[:]
del a
def func(x, rho, a, b, rc):
return np.log(np.exp(rho)/(np.exp(x)/np.exp(rc))**a / (1.+np.exp(x)/np.exp(rc))**(b-a))
def func_pw(x, rho, a, b, rc):
return np.log(np.exp(rho)/((np.exp(x)/np.exp(rc))**a + (np.exp(x)/np.exp(rc))**b ) )
def func_cdf(x, rho, a, b, rc):
rc = np.exp(rc)
rho = np.exp(rho)
x = np.exp(x)
x = x/rc
aa = 3-a
bb = 1+a-b
beta = x**aa / aa * special.hyp2f1(aa, 1-bb, aa+1, -x)
try:
res = np.log(4*np.pi*rho*rc**3 * beta)
except:
res = np.nan  # fall back to NaN rather than leaving res undefined
return res
def func_cdf_inv(frac, rho, a, b, c):
m_h = func_cdf(np.log(1e8), rho, a, b, c) + np.log(frac)
rmin = np.log(1e-10)
rmax = np.log(1e10)
alpha = 0.5
while rmax - rmin > 1e-6:
rmid = alpha*rmax + (1.-alpha)*rmin
if func_cdf(rmid, rho, a, b, c)>m_h:
rmax = rmid
else:
rmin = rmid
rmid = alpha*rmax + (1.-alpha)*rmin
return np.exp(rmid)
def func_cdf_pw(x, rho, a, b, rc):
rc = np.exp(rc)
rho = np.exp(rho)
x = np.exp(x)
x = x/rc
#print(rho, x, rc)
ab = (a-3.) /(a-b)
#print(aa, bb)
hgf = x**(3.-a) / (3.-a) * special.hyp2f1(1, ab, ab+1, -x**(b-a))
return np.log(4*np.pi*rho*rc**3 * hgf)
def func_cdf_pw_inv(frac, rho, a, b, c):
m_h = func_cdf_pw(np.log(1e8), rho, a, b, c) + np.log(frac)
rmin = np.log(1e-10)
rmax = np.log(1e10)
alpha = 0.5
while rmax - rmin > 1e-6:
rmid = alpha*rmax + (1.-alpha)*rmin
if func_cdf_pw(rmid, rho, a, b, c)>m_h:
rmax = rmid
else:
rmin = rmid
rmid = alpha*rmax + (1.-alpha)*rmin
return np.exp(rmid)
def cdf_sample(r_m):
rmin = r_m[3,0] *1.01
rmax = r_m[-1,0] *1.01
n_points = 500
r = np.logspace(np.log10(rmin), np.log10(rmax), num=n_points)
mcum = np.zeros(n_points)
mt = 0.
i = j = 0
for i in range(n_points):
if j>=len(r_m):
mcum[i] = mt
continue
while r_m[j, 0]<r[i]:
mt += r_m[j, 1]
j += 1
if j >=len(r_m):
break
mcum[i] = mt
#print(r[i], r_m[j-1,0], mcum[i])
return np.array(list(zip(r, mcum)))
def pdf_sample(r_m):
rmin = r_m[3,0] *1.01
rmax = r_m[-1,0] *1.01
n_points = 20
r = np.logspace(np.log10(rmin), np.log10(rmax), num=n_points-1)
eta = r[2]/r[1]
dmcum = np.zeros(n_points-1)
i = j = 0
for i in range(n_points-1):
while j < len(r_m):
if r_m[j, 0]<r[i+1]:
dmcum[i]+=r_m[j, 1]
j+=1
continue
else:
break
dmcum[i] /= ((r[i]*eta)**3 - r[i]**3)*4.*np.pi/3.
#print(r[i], r_m[j-1,0], mcum[i])
result = np.array(list(zip(r*np.sqrt(eta), dmcum)))
return result[np.all(result > 1e-3, axis=1)]
def cdf_smooth(raw_cdf):
n_points = 500
x = np.log(raw_cdf[:,0])
y = np.log(raw_cdf[:,1])
#tck = interpolate.splrep(x, y, s=0)
#tck, u = interpolate.splprep([x, y], s=0)
f = interpolate.interp1d(x, y, kind='linear')
xnew = np.linspace(x[0], x[-1], n_points)
#ynew = interpolate.splev(xnew, tck, der=0)
ynew = f(xnew)
#spl = interpolate.UnivariateSpline(xnew, ynew)
#spl.set_smoothing_factor(0.9)
#ynew = spl(xnew)
ynew = signal.savgol_filter(ynew, 349, 2)
return np.array(list(zip(np.exp(xnew), np.exp(ynew))))
def cdf_smooth_cheby(raw_cdf):
n_points = 500
x = np.log(raw_cdf[:,0])
y = np.log(raw_cdf[:,1])
#tck = interpolate.splrep(x, y, s=0)
#tck, u = interpolate.splprep([x, y], s=0)
f = interpolate.interp1d(x, y, kind='linear')
xnew = np.linspace(x[0], x[-1], n_points)
#ynew = interpolate.splev(xnew, tck, der=0)
ynew = f(xnew)
#spl = interpolate.UnivariateSpline(xnew, ynew)
#spl.set_smoothing_factor(0.9)
#ynew = spl(xnew)
#ynew = signal.savgol_filter(ynew, 349, 2)
cheby = np.polynomial.Chebyshev.fit(xnew, ynew, 4)
#y = signal.savgol_filter(y, len(x)//8*2+1, 3)
ynew = cheby(xnew)
return np.array(list(zip(np.exp(xnew), np.exp(ynew))))
def cdf_smooth_mono(raw_cdf):
return
def pdf_cal(cdf):
x = np.log(cdf[:,0])
y = np.log(cdf[:,1])
dydx_log = np.diff(y)/np.diff(x)
z = dydx_log * np.exp(y[:-1])/4./np.pi/(np.exp(x[:-1]))**3
return np.array(list(zip(np.exp(x[:-1]), z)))
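# pdf_cal above differentiates the cumulative mass profile on a log-log grid:
# rho(r) = (dM/dr)/(4*pi*r^2) = (M/(4*pi*r^3)) * dlnM/dlnr, which is exactly the
# expression evaluated for z.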
def fit_pdf(pdf):
fmodel = lmfit.Model(func)
#fmodel = lmfit.Model(func_pw)
fmodel.set_param_hint('a', min=0)
x = np.log(pdf[:, 0])
y = np.log(pdf[:, 1])
result = fmodel.fit(y, x=x, rho = 12., a = 0., b=3., rc =-2.)
#print(result.fit_report())
params = list(result.best_values.values())
params[0] = np.exp(params[0])
params[-1] = np.exp(params[-1])
print(params)
return np.array(list(zip(np.exp(x), np.exp(result.best_fit)))), params
def fit_cdf(raw_cdf, r_h, m_tot):
for j in range(len(raw_cdf)):
if raw_cdf[j][1] > raw_cdf[-1, 1]/2.:
break
weights = np.ones(len(raw_cdf))
#weights[[j, -1]] = 50.
#print(raw_cdf[j, 0])
fmodel = lmfit.Model(func_cdf)
#print(m_tot, r_h)
#print((m_tot/2.0/(4*np.pi*np.exp(-2)**3)/((r_h/np.exp(-2))**(3-1)/(3-1) * special.hyp2f1(3-1, 4-1, 4-1, -r_h/np.exp(-2)))))
#fmodel.set_param_hint('rho', expr='log(%f/2.0/(4*pi*exp(rc)**3)/((%f/exp(rc))**(3-a)/(3-a) * special.hyp2f1(3-a, b-a, 4-a, -%f/exp(rc))))'%(m_tot, r_h, r_h), vary=True)
#fmodel.set_param_hint('rc', expr='a+b')
fmodel.set_param_hint('a', value=1, min=0)
#fmodel.set_param_hint('m_tot', expr='2* (4*pi*exp(rc)**3)*exp(rho)*((r_h/exp(rc))**(3-a)/(3-a) * special.hyp2f1(3-a, b-a, 4-a, -r_h/exp(rc)))')
#fmodel.set_param_hint('b',)
x = np.log(raw_cdf[:, 0])
y = np.log(raw_cdf[:, 1])
result = fmodel.fit(y, x=x, rho = 12., a = 1, b=4, rc =-2., method='least_square', weights=weights)
#print(result.fit_report())
params = list(result.best_values.values())
params[0] = np.exp(params[0])
params[-1] = np.exp(params[-1])
print(params)
return np.array(list(zip(np.exp(x), np.exp(result.best_fit)))), params
def fit_cdf_pw(raw_cdf):
for j in range(len(raw_cdf)):
if raw_cdf[j][1] > raw_cdf[-1, 1]/2.:
break
weights = np.ones(len(raw_cdf))
weights[[j, -1]] = 50.
#print(raw_cdf[j, 0])
fmodel = lmfit.Model(func_cdf_pw)
fmodel.set_param_hint('a', value=1, min=0)
#fmodel.set_param_hint('b')
x = np.log(raw_cdf[:, 0])
y = np.log(raw_cdf[:, 1])
result = fmodel.fit(y, x=x, rho = 12., a = 1, b=4, rc =-2., method='least_squares', weights=weights)
#print(result.fit_report())
params = list(result.best_values.values())
params[0] = np.exp(params[0])
params[-1] = np.exp(params[-1])
print(params)
return np.array(list(zip(np.exp(x), np.exp(result.best_fit)))), params
def fit_cdf_chi2(x, r, m):
model = func_cdf(r, *x)
chi_sq = sum((model - m)**2)
return chi_sq
def fit_cdf_scipy(raw_cdf, r_h, m_h, r_max, m_tot):
r = np.log(raw_cdf[:,0])
m = np.log(raw_cdf[:,1])
fun_con = lambda x: func_cdf(np.log(r_h), *x) - np.log(m_h)
fun_con_tot = lambda x: func_cdf(np.log(r_max), *x) - np.log(m_tot)
delta = 0
cons = ({'type': 'eq', 'fun': fun_con},
{'type': 'eq', 'fun': fun_con_tot},
{'type': 'ineq', 'fun': lambda x: x[1]-delta},
{'type': 'ineq', 'fun': lambda x: 3.0-x[1]-delta},
{'type': 'ineq', 'fun': lambda x: x[2]-3.0-delta})
res = optimize.minimize(fit_cdf_chi2, (12, 1, 4, -1), args=(r, m), method='SLSQP', constraints=cons)
params = res.x
fits = np.array(func_cdf(r, *params))
chi_sq_test = stats.chisquare(m, f_exp=fits)
fits = np.exp(fits)
print(fun_con(params), fun_con_tot(params))
if res.success == False or chi_sq_test[1]<0.05 or fun_con(params)>1e-5 or fun_con_tot(params)>1e-5:
params[2] = -1.0
params[0] = np.exp(params[0])
params[1] = np.abs(params[1])
params[-1] = np.exp(params[-1])
r_h_fit = func_cdf_inv(0.5,
|
np.log(params[0])
|
numpy.log
|
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class Test_cutout(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked(self, mock_inplace):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
mock_inplace.return_value = "foo"
rng = iarandom.RNG(0)
image_aug = iaa.cutout(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="gaussian",
cval=1,
fill_per_channel=0.5,
seed=rng)
assert mock_inplace.call_count == 1
assert image_aug == "foo"
args = mock_inplace.call_args_list[0][0]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(args[1], 10)
assert np.isclose(args[2], 20)
assert np.isclose(args[3], 30)
assert np.isclose(args[4], 40)
assert args[5] == "gaussian"
assert args[6] == 1
assert np.isclose(args[7], 0.5)
assert args[8] is rng
class Test_cutout_(unittest.TestCase):
def test_with_simple_image(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
mask = np.zeros(image.shape, dtype=bool)
mask[20:40, 10:30, :] = True
overlap_inside = np.sum(image_aug[mask] == 0) / np.sum(mask)
overlap_outside = np.sum(image_aug[~mask] > 0) / np.sum(~mask)
assert image_aug is image
assert overlap_inside >= 1.0 - 1e-4
assert overlap_outside >= 1.0 - 1e-4
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_constant_")
def test_fill_mode_constant_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("constant", mock_fill)
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_gaussian_")
def test_fill_mode_gaussian_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("gaussian", mock_fill)
@classmethod
def _test_with_fill_mode_mocked(cls, fill_mode, mock_fill):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
mock_fill.return_value = image
seed = iarandom.RNG(0)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode=fill_mode,
cval=0,
fill_per_channel=False,
seed=seed)
assert mock_fill.call_count == 1
args = mock_fill.call_args_list[0][0]
kwargs = mock_fill.call_args_list[0][1]
assert image_aug is image
assert args[0] is image
assert kwargs["x1"] == 10
assert kwargs["y1"] == 20
assert kwargs["x2"] == 30
assert kwargs["y2"] == 40
assert kwargs["cval"] == 0
assert kwargs["per_channel"] is False
assert kwargs["random_state"] is seed
def test_zero_height(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=20,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_zero_height_width(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=10,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_fully_outside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-50,
y1=150,
x2=-1,
y2=200,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_partially_inside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=-25,
y1=-25,
x2=25,
y2=25,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.all(image_aug[0:25, 0:25] == 0)
assert np.all(image_aug[0:25, 25:] > 0)
assert np.all(image_aug[25:, :] > 0)
def test_zero_sized_axes(self):
shapes = [(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(0, 1, 1),
(1, 1, 0),
(1, 0, 1),
(1, 0),
(0, 1),
(0, 0)]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-5,
y1=-5,
x2=5,
y2=5,
fill_mode="constant",
cval=0)
assert np.array_equal(image_aug, image_cp)
class Test_fill_rectangle_gaussian_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=20,
x2=60,
y2=70,
cval=0,
per_channel=False,
random_state=rng)
assert np.array_equal(image_aug[:20, :],
image_cp[:20, :])
assert not np.array_equal(image_aug[20:70, 10:60],
image_cp[20:70, 10:60])
assert np.isclose(np.average(image_aug[20:70, 10:60]), 127.5,
rtol=0, atol=5.0)
assert np.isclose(np.std(image_aug[20:70, 10:60]), 255.0/2.0/3.0,
rtol=0, atol=2.5)
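# The assertions above reflect that the gaussian fill is expected to be centred on the
# middle of the uint8 range (mean ~ 127.5) with a standard deviation of ~255/2/3, so that
# the +-3 sigma interval roughly spans [0, 255].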
def test_per_channel(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=False,
random_state=iarandom.RNG(0))
image_aug_pc = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
diff11 = (image_aug[..., 0] != image_aug[..., 1])
diff12 = (image_aug[..., 0] != image_aug[..., 2])
diff21 = (image_aug_pc[..., 0] != image_aug_pc[..., 1])
diff22 = (image_aug_pc[..., 0] != image_aug_pc[..., 2])
assert not np.any(diff11)
assert not np.any(diff12)
assert np.any(diff21)
assert np.any(diff22)
def test_deterministic_with_same_seed(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug_pc1 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc2 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc3 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(1))
assert np.array_equal(image_aug_pc1, image_aug_pc2)
assert not np.array_equal(image_aug_pc2, image_aug_pc3)
def test_no_channels(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = image.reshape((1, 10))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=per_channel,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 3, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(nb_channels=nb_channels,
per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)),
(1, 1, nb_channels))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=10,
x2=300-10,
y2=300-10,
cval=0,
per_channel=per_channel,
random_state=rng)
rect = image_aug[10:-10, 10:-10]
p_true = np.sum(rect) / rect.size
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect, image_cp[10:-10, 10:-10])
assert np.isclose(p_true, 0.5, rtol=0, atol=0.1)
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_int_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = int(max_value) - int(min_value)
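            # Sanity check (not part of the function under test): drawing from
            # a normal centred on the dtype's extreme values should stay
            # within ~1.0 of those extremes, i.e. the extremes survive the
            # float conversion inside normal() without noticeable precision
            # loss.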
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here; that seems
                    # to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect,
image_cp[10:-10, 10:-10])
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
min_value = 0.0
center_value = 0.5
max_value = 1.0
dynamic_range = np.float128(max_value) - np.float128(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here; that seems
                    # to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert np.any(np.isclose(image, min_value,
rtol=0, atol=1e-4))
assert np.any(np.isclose(image, max_value,
rtol=0, atol=1e-4))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
assert not np.allclose(rect, image_cp[10:-10, 10:-10],
rtol=0, atol=1e-4)
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.allclose(image_aug[..., 0],
image_aug[..., c],
rtol=0, atol=1e-4)
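# Note on the constant-fill counterpart below (summarizing the behaviour
# exercised by its tests, not the implementation): a scalar cval fills all
# channels; with per_channel=True an iterable cval is applied channel-wise and
# tiled if it is shorter than the number of channels; with per_channel=False
# only the first value of an iterable cval is used.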
class Test_fill_rectangle_constant_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_but_per_channel_is_false(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 25)
def test_iterable_cval_with_per_channel_is_true_channel_mismatch(self):
image = np.mod(np.arange(100*100*5), 256).astype(np.uint8).reshape(
(100, 100, 5))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
assert np.all(image_aug[20:70, 10:60, 3] == 21)
assert np.all(image_aug[20:70, 10:60, 4] == 17)
def test_single_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 17)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
def test_no_channels_single_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=per_channel, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_no_channels_iterable_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
                with self.subTest(nb_channels=nb_channels,
                                  per_channel=per_channel):
image = np.mod(
np.arange(100*100*nb_channels), 256
).astype(np.uint8).reshape((100, 100, nb_channels))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
if per_channel:
for c in np.arange(nb_channels):
val = 17 if c % 2 == 0 else 21
assert np.all(image_aug[20:70, 10:60, c] == val)
else:
assert np.all(image_aug[20:70, 10:60, :] == 17)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[0, 1], per_channel=per_channel,
random_state=None)
rect = image_aug[10:-10, 10:-10]
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0] == 0)
assert np.all(image_aug[10:-10, 10:-10, 1] == 1)
assert np.all(image_aug[10:-10, 10:-10, 2] == 0)
else:
                    assert np.all(image_aug[10:-10, 10:-10] == 0)
def test_other_dtypes_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0]
== min_value)
assert np.all(image_aug[10:-10, 10:-10, 1]
== 10)
assert np.all(image_aug[10:-10, 10:-10, 2]
== max_value)
else:
                        assert np.all(image_aug[10:-10, 10:-10] == min_value)
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
# Use this here instead of any(isclose(...)) because
# the latter one leads to overflow warnings.
assert image.flat[0] <= np.float128(min_value) + 1.0
assert image.flat[4] >= np.float128(max_value) - 1.0
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert image_aug.dtype.name == dtype
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
if per_channel:
assert np.allclose(image_aug[10:-10, 10:-10, 0],
np.float128(min_value),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 1],
np.float128(10),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 2],
np.float128(max_value),
rtol=0, atol=1e-4)
else:
                        assert np.allclose(image_aug[10:-10, 10:-10],
np.float128(min_value),
rtol=0, atol=1e-4)
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
        # specific tests with float values; these appear to be rounded to the
        # nearest integer before being added (0.75 -> +1, 0.45 -> +0)
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
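        # With per_channel=0.5 and 400 trials, each branch is expected about
        # 200 times (binomial std ~= sqrt(400*0.5*0.5) = 10), so 150..250 is
        # a generous bound.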
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=10)
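# Note (inferred from the tests above and below, not from the implementation):
# iaa.Add appears to sample one value per image (or per channel when
# per_channel=True) and applies it uniformly, whereas iaa.AddElementwise
# samples a new value per pixel -- compare the
# `len(np.unique(image_aug)) == 1` checks in TestAdd.test_other_dtypes_uint_int
# with TestAddElementwise.test_samples_change_by_spatial_location.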
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
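        # scale=0.2*255 ~= 51, but clipping to the uint8 range [0, 255]
        # distorts the observed statistics, hence the fairly wide 0.1..0.4
        # bounds below.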
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestCutout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Cutout()
assert aug.nb_iterations.value == 1
assert isinstance(aug.position[0], iap.Uniform)
assert isinstance(aug.position[1], iap.Uniform)
assert np.isclose(aug.size.value, 0.2)
assert aug.squared.value == 1
assert aug.fill_mode.value == "constant"
assert aug.cval.value == 128
assert aug.fill_per_channel.value == 0
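    # Minimal usage sketch based on the defaults asserted above (illustrative
    # only, not executed as part of the test suite):
    #
    #   aug = iaa.Cutout()
    #   image = np.full((100, 100, 3), 255, dtype=np.uint8)
    #   image_aug = aug(image=image)
    #   # -> one square region with side length ~0.2 of the image height,
    #   #    placed at a random position and filled with the constant 128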
def test___init___custom(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
assert aug.nb_iterations.value == 1
assert np.isclose(aug.position[0].value, 0.5)
assert np.isclose(aug.position[1].value, 0.5)
assert np.isclose(aug.size.value, 0.1)
assert np.isclose(aug.squared.p.value, 0.6)
assert aug.fill_mode.a == ["gaussian", "constant"]
assert np.isclose(aug.cval.a.value, 0)
assert np.isclose(aug.cval.b.value, 255)
assert np.isclose(aug.fill_per_channel.p.value, 0.5)
def test___init___fill_mode_is_stochastic_param(self):
param = iap.Deterministic("constant")
aug = iaa.Cutout(fill_mode=param)
assert aug.fill_mode is param
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_false(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=False,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
        # don't return the image itself, otherwise the loop below would fail
        # at its second iteration, as the mocked method is expected to operate
        # on an internal copy of the image and not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.2*30))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.2*30))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_true(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=True,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
        # don't return the image itself, otherwise the loop below would fail
        # at its second iteration, as the mocked method is expected to operate
        # on an internal copy of the image and not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.1*10))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.1*10))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
def test_simple_image(self):
aug = iaa.Cutout(nb_iterations=2,
position=(
iap.DeterministicList([0.2, 0.8]),
iap.DeterministicList([0.2, 0.8])
),
size=0.2,
fill_mode="constant",
cval=iap.DeterministicList([0, 0, 0, 1, 1, 1]))
image = np.full((100, 100, 3), 255, dtype=np.uint8)
for _ in np.arange(3):
images_aug = aug(images=[image, image])
for image_aug in images_aug:
values = np.unique(image_aug)
assert len(values) == 3
assert 0 in values
assert 1 in values
assert 255 in values
def test_batch_contains_only_non_image_data(self):
aug = iaa.Cutout()
segmap_arr = np.ones((3, 3, 1), dtype=np.int32)
segmap = ia.SegmentationMapsOnImage(segmap_arr, shape=(3, 3, 3))
segmap_aug = aug.augment_segmentation_maps(segmap)
assert np.array_equal(segmap.get_arr(), segmap_aug.get_arr())
def test_sampling_when_position_is_stochastic_parameter(self):
        # sampling of the position works slightly differently when it is a
        # single parameter instead of a tuple (paramX, paramY), so we have an
        # extra test for that situation here
param = iap.DeterministicList([0.5, 0.6])
aug = iaa.Cutout(position=param)
samples = aug._draw_samples([
np.zeros((3, 3, 3), dtype=np.uint8),
np.zeros((3, 3, 3), dtype=np.uint8)
], iarandom.RNG(0))
assert np.allclose(samples.pos_x, [0.5, 0.5])
assert np.allclose(samples.pos_y, [0.6, 0.6])
def test_by_comparison_to_official_implementation(self):
image = np.ones((10, 8, 2), dtype=np.uint8)
aug = iaa.Cutout(1, position="uniform", size=0.2, squared=True,
cval=0)
aug_official = _CutoutOfficial(n_holes=1, length=int(10*0.2))
dropped = np.zeros((10, 8, 2), dtype=np.int32)
dropped_official = np.copy(dropped)
height = np.zeros((10, 8, 2), dtype=np.int32)
width = np.copy(height)
height_official = np.copy(height)
width_official = np.copy(width)
nb_iterations = 3 * 1000
images_aug = aug(images=[image] * nb_iterations)
for image_aug in images_aug:
image_aug_off = aug_official(image)
mask = (image_aug == 0)
mask_off = (image_aug_off == 0)
dropped += mask
dropped_official += mask_off
ydrop = np.max(mask, axis=(2, 1))
xdrop = np.max(mask, axis=(2, 0))
wx = np.where(xdrop)
wy = np.where(ydrop)
x1 = wx[0][0]
x2 = wx[0][-1]
y1 = wy[0][0]
y2 = wy[0][-1]
ydrop_off = np.max(mask_off, axis=(2, 1))
xdrop_off = np.max(mask_off, axis=(2, 0))
wx_off = np.where(xdrop_off)
wy_off = np.where(ydrop_off)
x1_off = wx_off[0][0]
x2_off = wx_off[0][-1]
y1_off = wy_off[0][0]
y2_off = wy_off[0][-1]
height += (
np.full(height.shape, 1 + (y2 - y1), dtype=np.int32)
* mask)
width += (
np.full(width.shape, 1 + (x2 - x1), dtype=np.int32)
* mask)
height_official += (
np.full(height_official.shape, 1 + (y2_off - y1_off),
dtype=np.int32)
* mask_off)
width_official += (
np.full(width_official.shape, 1 + (x2_off - x1_off),
dtype=np.int32)
* mask_off)
dropped_prob = dropped / nb_iterations
dropped_prob_off = dropped_official / nb_iterations
height_avg = height / (dropped + 1e-4)
height_avg_off = height_official / (dropped_official + 1e-4)
width_avg = width / (dropped + 1e-4)
width_avg_off = width_official / (dropped_official + 1e-4)
prob_max_diff = np.max(np.abs(dropped_prob - dropped_prob_off))
height_avg_max_diff = np.max(np.abs(height_avg - height_avg_off))
width_avg_max_diff = np.max(np.abs(width_avg - width_avg_off))
assert prob_max_diff < 0.04
assert height_avg_max_diff < 0.3
assert width_avg_max_diff < 0.3
def test_determinism(self):
aug = iaa.Cutout(nb_iterations=(1, 3),
size=(0.1, 0.2),
fill_mode=["gaussian", "constant"],
cval=(0, 255))
image = np.mod(
np.arange(100*100*3), 256
).reshape((100, 100, 3)).astype(np.uint8)
sums = []
for _ in np.arange(10):
aug_det = aug.to_deterministic()
image_aug1 = aug_det(image=image)
image_aug2 = aug_det(image=image)
assert np.array_equal(image_aug1, image_aug2)
sums.append(np.sum(image_aug1))
assert len(np.unique(sums)) > 1
def test_get_parameters(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
params = aug.get_parameters()
assert params[0] is aug.nb_iterations
assert params[1] is aug.position
assert params[2] is aug.size
assert params[3] is aug.squared
assert params[4] is aug.fill_mode
assert params[5] is aug.cval
assert params[6] is aug.fill_per_channel
def test_pickleable(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
runtest_pickleable_uint8_img(aug)
# This is mostly copy-pasted Cutout code from
# https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
# We use it to compare our implementation against.
# Some pytorch code was changed to numpy.
class _CutoutOfficial(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of
it.
"""
# h = img.size(1)
# w = img.size(2)
h = img.shape[0]
w = img.shape[1]
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
# note that in the paper they normalize to 0-mean,
# i.e. 0 here is actually not black but grayish pixels
mask[y1: y2, x1: x2] = 0
# mask = torch.from_numpy(mask)
# mask = mask.expand_as(img)
if img.ndim != 2:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, img.shape[-1]))
img = img * mask
return img
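# Illustrative usage of the reference implementation above (the hole position
# is drawn from np.random, so results vary between calls):
#
#   cutout = _CutoutOfficial(n_holes=1, length=4)
#   img = np.ones((10, 8, 2), dtype=np.float32)
#   img_masked = cutout(img)  # one 4x4 patch (clipped at borders) set to 0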
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
        # no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
        # no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
                aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
        # no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
        aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                    assert len(np.unique(image_aug)) > 1
import numpy as np
from PIL import Image
import torch
import os
import matplotlib.patches as mpatches
import logging
import time
import datetime
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import pandas as pd
import torchvision
import torchvision.transforms.functional as TF
from torchvision import transforms
from skimage import filters
from torch.nn.modules.loss import _Loss
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.JPG', '.PNG']
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (iterable of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
def is_image_file(filename):
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
img.load()
return img.convert('RGB')
class tsv_DataLoader(torch.utils.data.Dataset):
"""
Load dataset from tab separated value
This is useful for tensorboard visualization later
This is set up for semantic segmentation
"""
def __init__(self, hypes, tsv_file, img_transform=None, mask_transform=None, normalize=None, return_path=False,
random_crop=False):
"""
Args:
tsv_file (string): Path to csv file with relative image paths and labels.
img_transform (callable, optional): Optional transforms to be applied to the image.
mask_transform (callable, optional): Optional transforms to be applied to the mask.
"""
super(tsv_DataLoader, self).__init__()
self.tsv_path = os.path.abspath(os.path.dirname(tsv_file))
self.series_list = pd.read_csv(tsv_file, sep='\t')
self.img_transform = img_transform
self.mask_transform = mask_transform
self.normalize = normalize
self.imgs = self._make_dataset()
self.colours = hypes['data']['class_colours']
self.img_size = hypes['arch']['image_shape'][1:3]
self.return_path = return_path
        # self.random_crop is either False or the crop transform itself
        self.random_crop = random_crop
        if random_crop:
            self.random_crop = torchvision.transforms.RandomResizedCrop(
                size=self.img_size, scale=(0.5, 1.2), ratio=(3. / 4., 4. / 3.))
def __len__(self):
return len(self.series_list)
def __getitem__(self, idx):
filename = os.path.join(self.tsv_path, self.series_list.iloc[idx, 0])
maskname = os.path.join(self.tsv_path, self.series_list.iloc[idx, 1])
if is_image_file(filename):
image = pil_loader(filename)
if is_image_file(maskname):
mask = pil_loader(maskname)
        if self.random_crop:
            try:
                # sample the crop parameters once and apply them to both image and
                # mask so the two stay spatially aligned
                i, j, h, w = self.random_crop.get_params(
                    image, [*self.random_crop.scale], [*self.random_crop.ratio])
                image = TF.resized_crop(image, i, j, h, w, self.img_size,
                                        interpolation=TF.InterpolationMode.BILINEAR)
                mask = TF.resized_crop(mask, i, j, h, w, self.img_size,
                                       interpolation=TF.InterpolationMode.NEAREST)
            except Exception:
                # fall back to a plain resize if cropping fails
                image = TF.resize(image, self.img_size, Image.BILINEAR)
                mask = TF.resize(mask, self.img_size, Image.NEAREST)
                if idx == 1:
                    print('random_crop failed, resized')
if self.mask_transform is not None:
mask = self.mask_transform(mask)
if self.img_transform is not None:
image = self.img_transform(image)
            mask = np.asarray(mask)
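# A minimal usage sketch (illustrative only): the `hypes` dict below contains just the
# keys that tsv_DataLoader reads, and "data/train.tsv" is a hypothetical tab-separated
# file listing an image path and a mask path per row. It assumes __getitem__ goes on to
# return the transformed (image, mask) pair.
def _demo_tsv_loader():
    hypes = {
        "data": {"class_colours": [(0, 0, 0), (255, 255, 255)]},
        "arch": {"image_shape": [3, 256, 256]},
    }
    dataset = tsv_DataLoader(
        hypes,
        "data/train.tsv",
        img_transform=transforms.ToTensor(),
        random_crop=True,
    )
    # wrap in a standard DataLoader for batched iteration
    return torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)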
import multiprocessing
from multiprocessing import Manager
import itertools
import numpy as np
from tqdm import tqdm
# 26-cell Moore neighbourhood: all offsets in {-1, 0, 1}^3 except the centre (0, 0, 0)
moore_neighbourhood = set(itertools.permutations([1,-1]*3 + [0,0], 3))
# 6-cell von Neumann neighbourhood: face-adjacent offsets only
vonneumann_neighbourhood = [(0,0,1),(0,0,-1),(0,1,0),(0,-1,0),(1,0,0),(-1,0,0)]
neighbourhood = vonneumann_neighbourhood
def parse_results(results):
new_matrix = [[[0 for _ in range(matrix_size)] for _ in range(matrix_size)] for _ in range(matrix_size)]
for result in results:
i, j, k, v = result
new_matrix[i][j][k] = v
return np.array(new_matrix)
matrix_size = 100
interval = 10
matrix = np.zeros((matrix_size, matrix_size, matrix_size)).astype(int)
# seed the central (2*interval)^3 block of the grid with random live/dead cells
lo, hi = int(matrix_size/2 - interval), int(matrix_size/2 + interval)
size = hi - lo
matrix[lo:hi, lo:hi, lo:hi] = np.random.randint(2, size=(size, size, size))
# interior cells only, so neighbour lookups never index out of bounds
cells = [(i, j, k) for i in range(1, matrix_size-1)
         for j in range(1, matrix_size-1) for k in range(1, matrix_size-1)]
def apply_rule(coordinates):
    """Return (i, j, k, new_state) for one cell under the birth/survival rule."""
    i, j, k = coordinates
    alive = (matrix[i, j, k] == 1)
    neighbours = sum(matrix[i+i2, j+j2, k+k2] for i2, j2, k2 in neighbourhood)
    # birth: a dead cell with exactly 1 or 3 live neighbours becomes alive
    if not alive and neighbours in [1, 3]:
        return (i, j, k, 1)
    # survival: a live cell with fewer than 7 live neighbours stays alive
    if alive and neighbours in range(0, 7):
        return (i, j, k, 1)
    else:
        return (i, j, k, 0)
iterations = 20
cpus = multiprocessing.cpu_count()
for _ in tqdm(range(iterations)):
with multiprocessing.Pool(processes=cpus) as pool:
results = pool.map(apply_rule, cells)
matrix = parse_results(results)
with open('data/test.npy', 'wb') as f:
    np.save(f, matrix)
"""
Transfer AdaBoost
"""
import inspect
import warnings
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from adapt.utils import check_arrays, check_one_array, check_estimator
EPS = np.finfo(float).eps
def _get_median_predict(X, predictions, weights):
    """Return the weighted median of ``predictions`` for each sample and output."""
sorted_idx = np.argsort(predictions, axis=-1)
# Find index of median prediction for each sample
weight_cdf = np.cumsum(weights[sorted_idx], axis=-1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[..., -1][..., np.newaxis]
median_idx = median_or_above.argmax(axis=-1)
new_predictions = None
for i in range(median_idx.shape[1]):
median_estimators = sorted_idx[np.arange(len(X)), i, median_idx[:, i]]
if new_predictions is None:
new_predictions = predictions[np.arange(len(X)), i, median_estimators].reshape(-1,1)
else:
new_predictions = np.concatenate((
new_predictions,
predictions[np.arange(len(X)), i, median_estimators].reshape(-1,1)
), axis=1)
return new_predictions
def _binary_search(func, verbose=1):
    """Search an approximate root of ``func`` on the interval [0, 1] by bisection."""
    left = 0
    right = 1
    tol = 1.e-3
    best = 1
    best_score = 1
    for i in range(1000):
        if np.abs(func(left)) < tol:
            best = left
            break
        elif np.abs(func(right)) < tol:
            best = right
            break
        else:
            middle = (left + right) / 2
            if func(middle) < best_score:
                best = middle
                best_score = func(middle)
            # keep the half-interval on which func changes sign
            if func(middle) * func(left) <= 0:
                right = middle
            else:
                left = middle
    if i >= 999 and verbose:
        print("Binary search has not converged."
              " Set value to the current best.")
    return best
class TrAdaBoost:
"""
Transfer AdaBoost for Classification
TrAdaBoost algorithm is a **supervised** instances-based domain
adaptation method suited for **classification** tasks.
The method is based on a "**reverse boosting**" principle where the
weights of source instances poorly predicted decrease at each
boosting iteration whereas the ones of target instances increase.
The algorithm performs the following steps:
- **1.** Normalize weights: :math:`\\sum w_S + \\sum w_T = 1`.
- **2.** Fit an estimator :math:`f` on source and target labeled data
:math:`(X_S, y_S), (X_T, y_T)` with the respective importance
weights: :math:`w_S, w_T`.
- **3.** Compute error vectors of training instances:
- :math:`\\epsilon_S = L_{01}(f(X_S), y_S)`.
- :math:`\\epsilon_T = L_{01}(f(X_T), y_T)`.
- **4.** Compute total weighted error of target instances:
:math:`E_T = \\frac{1}{n_T} w_T^T \\epsilon_T`.
- **5.** Update source and target weights:
- :math:`w_S = w_S \\beta^{\\epsilon_S}`.
- :math:`w_T = w_T \\beta_T^{-\\epsilon_T}`.
Where:
- :math:`\\beta = 1 / (1 + \\sqrt{2 \\ln(n_S) / N})`.
- :math:`\\beta_T = E_T / (1 - E_T)`.
- **6.** Return to step **1** and loop until the number :math:`N`
of boosting iterations is reached.
The predictions are then given by the vote of the :math:`N / 2`
last computed estimators weighted by their respective parameter
:math:`\\beta_T`.
Parameters
----------
estimator : sklearn estimator or tensorflow Model (default=None)
Base estimator used to learn the task.
If estimator is ``None``, a ``LogisticRegression``
instance is used as base estimator.
n_estimators : int (default=10)
Number of boosting iterations.
verbose : int (default=1)
Verbosity level.
random_state : int (default=None)
Seed of random generator.
Attributes
----------
estimators_ : list of object
List of fitted estimators
estimator_errors_ : 1D array of float
Array of weighted estimator errors computed on
labeled target data.
estimator_weights_ : 1D array of float
Array of estimator importance weights.
sample_weights_src_ : list of numpy arrays
List of source sample weight for each iteration.
sample_weights_tgt_ : list of numpy arrays
List of target sample weight for each iteration.
Examples
--------
>>> import numpy as np
>>> from adapt.instance_based import TrAdaBoost
>>> from sklearn.tree import DecisionTreeClassifier
>>> np.random.seed(0)
>>> Xs = np.random.random((100, 2))
>>> Xt = np.random.random((100, 2))
>>> ys = (Xs[:, [0]] < 0.5).astype(int)
>>> yt = (Xt[:, [1]] < 0.5).astype(int)
>>> dtc = DecisionTreeClassifier(max_depth=5)
>>> dtc.fit(np.concatenate((Xs, Xt[:10])),
... np.concatenate((ys, yt[:10])))
>>> dtc.score(Xt, yt)
0.55
>>> tr = TrAdaBoost(DecisionTreeClassifier(max_depth=5),
... n_estimators=20, random_state=1)
>>> tr.fit(Xs, ys, Xt[:10], yt[:10])
Iteration 0 - Error: 0.1000
...
Iteration 19 - Error: 0.0000
>>> (tr.predict(Xt) == yt.ravel()).mean()
0.59
See also
--------
TrAdaBoostR2, TwoStageTrAdaBoostR2
References
----------
.. [1] `[1] <http://www.cs.ust.hk/~qyang/Docs/2007/tradaboost.pdf>`_ <NAME>., \
<NAME>., <NAME>., and <NAME>. "Boosting for transfer learning". In ICML, 2007.
"""
def __init__(self, estimator=None, n_estimators=10,
verbose=1, random_state=None):
np.random.seed(random_state)
tf.random.set_seed(random_state)
self.task_ = "class"
if isinstance(self, TrAdaBoostR2):
self.task_ = "reg"
self.base_estimator_ = check_estimator(estimator, copy=True,
force_copy=True,
task=self.task_)
self.n_estimators = n_estimators
self.verbose = verbose
self.random_state = random_state
def fit(self, Xs, ys, Xt, yt,
sample_weight_src=None,
sample_weight_tgt=None,
**fit_params):
"""
Fit TrAdaBoost
Parameters
----------
Xs : numpy array
Source input data.
ys : numpy array
Source output data.
Xt : numpy array
Target input data.
yt : numpy array
Target output data.
sample_weight_src : numpy array, (default=None)
Initial sample weight of source data
sample_weight_tgt : numpy array, (default=None)
Initial sample weight of target data
fit_params : key, value arguments
Arguments given to the fit method of the
estimator.
Returns
-------
self : returns an instance of self
"""
np.random.seed(self.random_state)
tf.random.set_seed(self.random_state)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
Xs, ys, Xt, yt = check_arrays(Xs, ys, Xt, yt)
n_s = len(Xs)
n_t = len(Xt)
if sample_weight_src is None:
sample_weight_src = np.ones(n_s) / (n_s + n_t)
if sample_weight_tgt is None:
sample_weight_tgt = np.ones(n_t) / (n_s + n_t)
sum_weights = (sample_weight_src.sum() +
sample_weight_tgt.sum())
sample_weight_src = sample_weight_src / sum_weights
sample_weight_tgt = sample_weight_tgt / sum_weights
self.sample_weights_src_ = []
self.sample_weights_tgt_ = []
self.estimators_ = []
self.estimator_errors_ = []
for iboost in range(self.n_estimators):
self.sample_weights_src_.append(
np.copy(sample_weight_src))
self.sample_weights_tgt_.append(
np.copy(sample_weight_tgt))
sample_weight_src, sample_weight_tgt = self._boost(
iboost, Xs, ys, Xt, yt,
sample_weight_src, sample_weight_tgt,
**fit_params
)
if self.verbose >= 1:
print("Iteration %i - Error: %.4f"%
(iboost, self.estimator_errors_[-1]))
if sample_weight_src is None:
break
sum_weights = (sample_weight_src.sum() +
sample_weight_tgt.sum())
sample_weight_src = sample_weight_src / sum_weights
sample_weight_tgt = sample_weight_tgt / sum_weights
self.estimator_errors_ = np.array(self.estimator_errors_)
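# Estimator importance weights: roughly -log(beta_T) = -log(err / (1 - err)),
# so low-error boosting rounds vote with larger weight.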
self.estimator_weights_ = np.array([
-np.log(err / (1-err) + EPS) + 2*EPS
for err in self.estimator_errors_])
return self
def _boost(self, iboost, Xs, ys, Xt, yt,
sample_weight_src, sample_weight_tgt,
**fit_params):
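# One boosting round: fit the base estimator on the weighted union of source and target samples
# (via sample_weight when the estimator supports it, otherwise a weighted bootstrap), compute
# per-sample errors, then down-weight badly predicted source samples and up-weight badly
# predicted target samples.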
X = np.concatenate((Xs, Xt))
y = np.concatenate((ys, yt))
sample_weight = np.concatenate((sample_weight_src,
sample_weight_tgt))
estimator = check_estimator(self.base_estimator_,
copy=True, force_copy=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if "sample_weight" in inspect.signature(estimator.fit).parameters:
estimator.fit(X, y,
sample_weight=sample_weight,
**fit_params)
else:
bootstrap_index = np.random.choice(
len(X), size=len(X), replace=True, p=sample_weight)
estimator.fit(X[bootstrap_index], y[bootstrap_index],
**fit_params)
ys_pred = estimator.predict(Xs)
yt_pred = estimator.predict(Xt)
if ys_pred.ndim == 1:
ys_pred = ys_pred.reshape(-1, 1)
yt_pred = yt_pred.reshape(-1, 1)
if self.task_ == "reg":
error_vect_src = np.abs(ys_pred - ys).mean(tuple(range(1, ys.ndim)))
error_vect_tgt = np.abs(yt_pred - yt).mean(tuple(range(1, yt.ndim)))
error_vect = np.concatenate((error_vect_src, error_vect_tgt))
error_max = error_vect.max() + EPS
if error_max != 0:
error_vect /= error_max
error_vect_src /= error_max
error_vect_tgt /= error_max
else:
if isinstance(estimator, BaseEstimator):
error_vect_src = (ys_pred != ys).astype(float).ravel()
error_vect_tgt = (yt_pred != yt).astype(float).ravel()
error_vect = np.concatenate((error_vect_src, error_vect_tgt))
else:
if ys.shape[1] == 1:
error_vect_src = (np.abs(ys_pred - ys) > 0.5).astype(float).ravel()
error_vect_tgt = (np.abs(yt_pred - yt) > 0.5).astype(float).ravel()
else:
error_vect_src = (ys_pred.argmax(1) != ys.argmax(1)).astype(float).ravel()
error_vect_tgt = (yt_pred.argmax(1) != yt.argmax(1)).astype(float).ravel()
error_vect = np.concatenate((error_vect_src, error_vect_tgt))
if isinstance(self, _AdaBoostR2):
estimator_error = (sample_weight * error_vect).sum()
else:
estimator_error = ((sample_weight_tgt * error_vect_tgt).sum() /
sample_weight_tgt.sum())
if estimator_error > 0.5:
estimator_error = 0.5
beta_t = estimator_error / (1. - estimator_error)
beta_s = 1. / (1. + np.sqrt(
2. * np.log(len(Xs)) / self.n_estimators
))
if not iboost == self.n_estimators - 1:
if isinstance(self, _AdaBoostR2):
sample_weight_tgt = (sample_weight_tgt *
np.power(beta_t, (1 - error_vect_tgt)))
sample_weight_tgt *= ((1. - sample_weight_src.sum()) /
sample_weight_tgt.sum())
else:
# Source updating weights
sample_weight_src *= np.power(
beta_s, error_vect_src)
# Target updating weights
sample_weight_tgt *= np.power(
beta_t, - error_vect_tgt)
self.estimators_.append(estimator)
self.estimator_errors_.append(estimator_error)
return sample_weight_src, sample_weight_tgt
def predict(self, X):
"""
Return weighted vote of estimators.
Parameters
----------
X : array
Input data.
Returns
-------
y_pred : array
Vote results.
"""
X = check_one_array(X)
N = len(self.estimators_)
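# Only the last N/2 boosting rounds take part in the weighted vote, as prescribed by TrAdaBoost.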
weights = self.estimator_weights_[int(N/2):]
predictions = []
for est in self.estimators_[int(N/2):]:
if isinstance(est, BaseEstimator):
y_pred = est.predict_proba(X)
else:
y_pred = est.predict(X)
if y_pred.ndim == 1:
y_pred = y_pred.reshape(-1, 1)
if y_pred.shape[1] == 1:
y_pred = np.concatenate((1-y_pred, y_pred),
axis=1)
predictions.append(y_pred)
predictions = np.stack(predictions, -1)
weighted_vote = predictions.dot(weights).argmax(1)
return weighted_vote
def predict_weights(self, domain="src"):
"""
Return sample weights.
Return the final importance weighting of the selected domain.
Parameters
----------
domain : str (default="src")
Choose between ``"source", "src"`` and
``"target", "tgt"``.
Returns
-------
weights : sample weights of the selected domain
"""
if hasattr(self, "sample_weights_src_"):
if domain in ["src", "source"]:
return self.sample_weights_src_[-1]
elif domain in ["tgt", "target"]:
return self.sample_weights_tgt_[-1]
else:
raise ValueError("`domain `argument "
"should be `tgt` or `src`, "
"got, %s"%domain)
else:
raise NotFittedError("Weights are not fitted yet, please "
"call 'fit' first.")
class TrAdaBoostR2(TrAdaBoost):
"""
Transfer AdaBoost for Regression
TrAdaBoostR2 algorithm is a **supervised** instances-based domain
adaptation method suited for **regression** tasks.
The method is based on a "**reverse boosting**" principle where the
weights of source instances poorly predicted decrease at each
boosting iteration whereas the ones of target instances increase.
The algorithm performs the following steps:
- **1.** Normalize weights: :math:`\\sum w_S + \\sum w_T = 1`.
- **2.** Fit an estimator :math:`f` on source and target labeled data
:math:`(X_S, y_S), (X_T, y_T)` with the respective importance
weights: :math:`w_S, w_T`.
- **3.** Compute error vectors of training instances:
- :math:`\\epsilon_S = L(f(X_S), y_S)`.
- :math:`\\epsilon_T = L(f(X_T), y_T)`.
- **4.** Normalize error vectors:
- :math:`\\epsilon_S = \\epsilon_S /
\\max_{\\epsilon \\in \\epsilon_S \\cup \\epsilon_T} \\epsilon`.
- :math:`\\epsilon_T = \\epsilon_T /
\\max_{\\epsilon \\in \\epsilon_S \\cup \\epsilon_T} \\epsilon`.
- **5.** Compute total weighted error of target instances:
:math:`E_T = \\frac{1}{n_T} w_T^T \\epsilon_T`.
- **6.** Update source and target weights:
- :math:`w_S = w_S \\beta^{\\epsilon_S}`.
- :math:`w_T = w_T \\beta_T^{-\\epsilon_T}`.
Where:
- :math:`\\beta = 1 / (1 + \\sqrt{2 \\ln(n_S) / N})`.
- :math:`\\beta_T = E_T / (1 - E_T)`.
- **7.** Return to step **1** and loop until the number :math:`N`
of boosting iterations is reached.
The predictions are then given by the weighted median of the
:math:`N / 2` last estimators.
Parameters
----------
estimator : sklearn estimator or tensorflow Model (default=None)
Base estimator used to learn the task.
If estimator is ``None``, a ``LinearRegression``
instance is used as base estimator.
n_estimators : int (default=10)
Number of boosting iterations.
verbose : int (default=1)
Verbosity level.
random_state : int (default=None)
Seed of random generator.
Attributes
----------
estimators_ : list of object
List of fitted estimators
estimator_errors_ : 1D array of float
Array of weighted estimator errors computed on
labeled target data.
estimator_weights_ : 1D array of float
Array of estimator importance weights.
sample_weights_src_ : list of numpy arrays
List of source sample weight for each iteration.
sample_weights_tgt_ : list of numpy arrays
List of target sample weight for each iteration.
Examples
--------
>>> import numpy as np
>>> from adapt.instance_based import TrAdaBoostR2
>>> np.random.seed(0)
>>> Xs = np.random.random((100, 2))
>>> Xt = np.random.random((100, 2))
>>> ys = Xs[:, [0]]
>>> yt = Xt[:, [1]]
>>> lr = LinearRegression()
>>> lr.fit(np.concatenate((Xs, Xt[:10])),
... np.concatenate((ys, yt[:10])))
>>> np.abs(lr.predict(Xt) - yt).mean()
0.30631...
>>> tr = TrAdaBoostR2(n_estimators=20)
>>> tr.fit(Xs, ys, Xt[:10], yt[:10])
Iteration 0 - Error: 0.4396
...
Iteration 19 - Error: 0.0675
>>> np.abs(tr.predict(Xt) - yt).mean()
0.05801...
See also
--------
TrAdaBoost, TwoStageTrAdaBoostR2
References
----------
.. [1] `[1] <https://www.cs.utexas.edu/~dpardoe/papers/ICML10.pdf>`_ \
<NAME> and <NAME>. "Boosting for regression transfer". In ICML, 2010.
"""
def predict(self, X):
"""
Return weighted median of estimators.
Parameters
----------
X : array
Input data.
Returns
-------
y_pred : array
Median results.
"""
X = check_one_array(X)
N = len(self.estimators_)
weights = self.estimator_weights_
weights = weights[int(N/2):]
predictions = []
for est in self.estimators_[int(N/2):]:
y_pred = est.predict(X)
if y_pred.ndim == 1:
y_pred = y_pred.reshape(-1, 1)
predictions.append(y_pred)
predictions = np.stack(predictions, -1)
return _get_median_predict(X, predictions, weights)
class _AdaBoostR2(TrAdaBoostR2):
"""
AdaBoostR2 object with fixed sample weights.
"""
pass
class TwoStageTrAdaBoostR2(TrAdaBoostR2):
"""
Two Stage Transfer AdaBoost for Regression
TwoStageTrAdaBoostR2 algorithm is a **supervised** instances-based
domain adaptation method suited for **regression** tasks.
The method is based on a "**reverse boosting**" principle where the
weights of source instances poorly predicted decrease at each
boosting iteration whereas the ones of target instances increase.
This "two stages" version of TrAdaBoostR2 algorithm update separately
the weights of source and target instances.
In a first stage, the weights of source instances are
frozen whereas the ones of target instances are updated according to
the classical AdaBoostR2 algorithm. In a second stage, the weights of
target instances are now frozen whereas the ones of source instances
are updated according to the TrAdaBoost algorithm.
At each first stage, a cross-validation score is computed with the
labeled target data available. The CV scores obtained are used at
the end to select the best estimator within all boosting iterations.
The algorithm performs the following steps:
- **1.** Normalize weights: :math:`\\sum w_S + \\sum w_T = 1`.
- **2.** Fit an AdaBoostR2 estimator :math:`f` on source and target
labeled data :math:`(X_S, y_S), (X_T, y_T)` with the respective
initial importance weights: :math:`w_S, w_T`. During training
of the AdaBoost estimator, the source weights :math:`w_S` are
frozen.
- **3.** Compute a cross-validation score on :math:`(X_T, y_T)`
- **4.** Compute error vectors of training instances:
- :math:`\\epsilon_S = L(f(X_S), y_S)`.
- :math:`\\epsilon_T = L(f(X_T), y_T)`.
- **5.** Normalize error vectors:
- :math:`\\epsilon_S = \\epsilon_S /
\\max_{\\epsilon \\in \\epsilon_S \\cup \\epsilon_T} \\epsilon`.
- :math:`\\epsilon_T = \\epsilon_T /
\\max_{\\epsilon \\in \\epsilon_S \\cup \\epsilon_T} \\epsilon`.
- **6.** Update source and target weights:
- :math:`w_S = w_S \\beta_S^{\\epsilon_S} / Z`.
- :math:`w_T = w_T / Z`.
Where:
- :math:`Z` is a normalizing constant.
- :math:`\\beta_S` is chosen such that the sum of target weights
:math:`w_T` is equal to :math:`\\frac{n_T}{n_T + n_S}
+ \\frac{t}{N - 1}(1 - \\frac{n_T}{n_T + n_S})` with :math:`t`
the current boosting iteration number. :math:`\\beta_S` is found
using binary search.
- **7.** Return to step **1** and loop until the number :math:`N`
of boosting iterations is reached.
The predictions are then given by the best estimator according
to cross-validation scores.
Parameters
----------
estimator : sklearn estimator or tensorflow Model (default=None)
Base estimator used to learn the task.
If estimator is ``None``, a ``LinearRegression``
instance is used as base estimator.
n_estimators : int (default=10)
Number of boosting iterations.
n_estimators_fs : int (default=10)
Number of boosting iterations in the first stage
(given to AdaboostR2 estimators)
cv: int, optional (default=5)
Split cross-validation parameter.
verbose : int (default=1)
Verbosity level.
random_state : int (default=None)
Seed of random generator.
Attributes
----------
estimators_ : list of object
List of fitted AdaboostR2 estimators for each
first stage.
estimator_errors_ : 1D array of float
Array of cross-validation MAE computed on
labeled target data.
sample_weights_src_ : list of numpy arrays
List of source sample weight for each iteration.
sample_weights_tgt_ : list of numpy arrays
List of target sample weight for each iteration.
Examples
--------
>>> import numpy as np
>>> from adapt.instance_based import TwoStageTrAdaBoostR2
>>> np.random.seed(0)
>>> Xs = np.random.random((100, 2))
>>> Xt = np.random.random((100, 2))
>>> ys = Xs[:, [0]]
>>> yt = Xt[:, [1]]
>>> lr = LinearRegression()
>>> lr.fit(np.concatenate((Xs, Xt[:10])),
... np.concatenate((ys, yt[:10])))
>>> np.abs(lr.predict(Xt) - yt).mean()
0.30631...
>>> tr = TwoStageTrAdaBoostR2()
>>> tr.fit(Xs, ys, Xt[:10], yt[:10])
Iteration 0 - Cross-validation score: 0.3154 (0.1813)
...
Iteration 9 - Cross-validation score: 0.0015 (0.0009)
>>> np.abs(tr.predict(Xt) - yt).mean()
0.00126...
See also
--------
TrAdaBoost, TrAdaBoostR2
References
----------
.. [1] `[1] <https://www.cs.utexas.edu/~dpardoe/papers/ICML10.pdf>`_ \
<NAME> and <NAME>. "Boosting for regression transfer". In ICML, 2010.
"""
def __init__(self,
estimator=None,
n_estimators=10,
n_estimators_fs=10,
cv=5,
verbose=1,
random_state=None):
super().__init__(estimator,
n_estimators,
verbose,
random_state)
self.n_estimators_fs = n_estimators_fs
self.cv = cv
def fit(self, Xs, ys, Xt, yt,
sample_weight_src=None,
sample_weight_tgt=None,
**fit_params):
np.random.seed(self.random_state)
tf.random.set_seed(self.random_state)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
Xs, ys, Xt, yt = check_arrays(Xs, ys, Xt, yt)
n_s = len(Xs)
n_t = len(Xt)
if sample_weight_src is None:
sample_weight_src = np.ones(n_s)
import random
import numpy as np
from torchvision.transforms import functional as F
from .bbox_util import *
import torch
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def check_boxes_validity(boxes):
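# A box [x1, y1, x2, y2] is valid when x1 <= x2, y1 <= y2 and every coordinate is non-negative;
# returns 1 if all boxes pass, 0 otherwise.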
for box in boxes:
if box[0]>box[2]:
print('invalid bbox . .')
return 0
if box[1]>box[3]:
print('invalid bbox . .')
return 0
for pos in box:
if pos<0:
print('invalid bbox . .')
return 0
return 1
class RandomHorizontalFlip(object):
def __init__(self, prob):
self.prob = prob
def __call__(self, image, target):
image_orig,target_orig=image.copy(),target.copy()
if random.random() < self.prob:
image = np.array(image)
height, width = image.shape[0:2]
image = np.flip(image,axis=1).copy()
bbox = np.array(target["boxes"])
bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
target["boxes"] = bbox
checked = check_boxes_validity(target["boxes"])
if checked:
return image, target
else:
return image_orig, target_orig
class RandomVerticalFlip(object):
def __init__(self, prob):
self.prob = prob
def __call__(self, image, target):
image_orig,target_orig=image.copy(),target.copy()
if random.random() < self.prob:
image = np.array(image)
height, width = image.shape[0:2]
image = np.flip(image,axis=0).copy()
bbox = np.array(target["boxes"])
bbox[:, [1, 3]] = height - bbox[:, [3, 1]]
target["boxes"] = bbox
checked = check_boxes_validity(target["boxes"])
if checked:
return image, target
else:
return image_orig, target_orig
class RandomRotate(object):
def __init__(self, prob, angle = 10):
self.angle = angle
self.prob = prob
if type(self.angle) == tuple:
assert len(self.angle) == 2, "Invalid range"
else:
self.angle = (-self.angle, self.angle)
def __call__(self, img, target):
image_orig,target_orig=img.copy(),target.copy()
if random.random() < self.prob:
angle = random.uniform(*self.angle)
img = np.array(img)
bboxes = np.array(target["boxes"])
w,h = img.shape[1], img.shape[0]
cx, cy = w//2, h//2
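# Rotate the image about its centre, rotate the box corners with it, take each box's axis-aligned
# enclosing rectangle, rescale back to the original size, and clip boxes that fall mostly outside.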
img = rotate_im(img, angle)
corners = get_corners(bboxes)
corners = np.hstack((corners, bboxes[:,4:]))
corners[:,:8] = rotate_box(corners[:,:8], angle, cx, cy, h, w)
new_bbox = get_enclosing_box(corners)
scale_factor_x = img.shape[1] / w
scale_factor_y = img.shape[0] / h
img = cv2.resize(img, (w,h))
new_bbox[:,:4] /= [scale_factor_x, scale_factor_y, scale_factor_x, scale_factor_y]
bboxes = new_bbox
bboxes = clip_box(bboxes, [0,0,w, h], 0.25)
target["boxes"] = bboxes
checked = check_boxes_validity(target["boxes"])
if checked:
return img, target
else:
return image_orig, target_orig
class RandomScale(object):
def __init__(self, prob,scale = 0.2, diff = False):
self.scale = scale
self.prob = prob
if type(self.scale) == tuple:
assert len(self.scale) == 2, "Invalid range"
assert self.scale[0] > -1, "Scale factor can't be less than -1"
assert self.scale[1] > -1, "Scale factor can't be less than -1"
else:
assert self.scale > 0, "Please input a positive float"
self.scale = (max(-1, -self.scale), self.scale)
self.diff = diff
def __call__(self, img, target):
image_orig,target_orig=img.copy(),target.copy()
if random.random() < self.prob:
img = np.array(img)
bboxes = np.array(target["boxes"])
img_shape = img.shape
if self.diff:
scale_x = random.uniform(*self.scale)
scale_y = random.uniform(*self.scale)
else:
scale_x = random.uniform(*self.scale)
scale_y = scale_x
resize_scale_x = 1 + scale_x
resize_scale_y = 1 + scale_y
img= cv2.resize(img, None, fx = resize_scale_x, fy = resize_scale_y)
bboxes[:,:4] *= [resize_scale_x, resize_scale_y, resize_scale_x, resize_scale_y]
canvas = np.zeros(img_shape, dtype = np.uint8)
y_lim = int(min(resize_scale_y,1)*img_shape[0])
x_lim = int(min(resize_scale_x,1)*img_shape[1])
canvas[:y_lim,:x_lim,:] = img[:y_lim,:x_lim,:]
img = canvas
bboxes = clip_box(bboxes, [0,0,1 + img_shape[1], img_shape[0]], 0.25)
target["boxes"] = bboxes
checked = check_boxes_validity(target["boxes"])
if checked:
return img, target
else:
return image_orig, target_orig
class ToTensor(object):
def __call__(self, image, target):
image = np.array(image)
import pickle5 as pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
#mpl.use('pdf')
import itertools
import numpy as np
from datetime import datetime
import torch
from torch import nn
from torch import optim
import os
import sys
import pandas as pd
from matplotlib import interactive
from matplotlib.patches import Rectangle
from utils import make_histos
from utils.utilities import meter
from utils.utilities import cartesian_converter
sys.path.insert(0,'/mnt/c/Users/rober/Dropbox/Bobby/Linux/classes/GAML/GAMLX/nflows/nflows')
from nflows.transforms.autoregressive import MaskedUMNNAutoregressiveTransform
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.distributions.normal import DiagonalNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation
#data_path = "gendata/4features/" #Just electorn features
#data_path = "gendata/16features/" #All 16 features
#data_path = "gendata/Cond/16features/maaf/"
data_path = "gendata/Cond/16features/UMNN/"
physics_cuts = False
gen_all_emd = False
gen_1d_histos = True
gen_emd_comp = False
dfs = []
filenames = os.listdir(data_path)
for f in filenames:
df0 = pd.read_pickle(data_path+f)
dfs.append(df0)
df_nflow_data = pd.concat(dfs)
nflow_data_len = len(df_nflow_data.index)
print("The Generated dataset has {} events".format(nflow_data_len))
with open('data/pi0.pkl', 'rb') as f:
xz = np.array(pickle.load(f), dtype=np.float32)
x = cartesian_converter(xz,type='x')
z = cartesian_converter(xz,type='z')
df_test_data = pd.DataFrame(x)
df_test_data_z = pd.DataFrame(z)
#df_nflow_data = df_test_data_z
#df_test_data = df_test_data_all.sample(n=nflow_data_len)
if len(df_nflow_data) > len(df_test_data):
df_nflow_data = df_nflow_data.sample(n=len(df_test_data))
else:
df_test_data = df_test_data.sample(n=len(df_nflow_data))
df_test_data_z = df_test_data_z.sample(n=len(df_nflow_data))
#df_nflow_data = df_nflow_data.sample(n=100000)
#df_test_data = df_test_data.sample(n=100000)
if physics_cuts:
df = df_nflow_data
#df = df_test_data
print(len(df.index))
# e = 0
# df['emass2'] = df[e]**2-df[e+1]**2-df[e+2]**2-df[e+3]**2
# e = 4
# df['pmass'] = np.sqrt(df[e]**2-df[e+1]**2-df[e+2]**2-df[e+3]**2)
# e = 8
# df['g1mass2'] = df[8]**2-df[9]**2-df[10]**2-df[11]**2
# e = 12
# df['g2mass2'] = df[12]**2-df[13]**2-df[14]**2-df[15]**2
# df['g1g2'] = 2*(df[8]*df[12]-df[9]*df[13]-df[10]*df[1]-df[11]*df[15])
# df['pimass2'] = df['g1mass2']+df['g1g2']+df['g2mass2']
# # e = 0
# # df['protonE'] = df[4]
# # df['Etot'] = df[e] + df[e+4]+df[e+8]+df[e+12]
# # e = 1
# # df['pxtot'] = df[e] + df[e+4]+df[e+8]+df[e+12]
# # e = 2
# # df['pytot'] = df[e] + df[e+4]+df[e+8]+df[e+12]
# # e = 3
# # df['pztot'] = df[e] + df[e+4]+df[e+8]+df[e+12]
# # df['NetE'] = np.sqrt(df['Etot']**2 - df['pxtot']**2 - df['pytot']**2 - df['pztot']**2)
# e = 0
# df['emass2'] = df[e]**2-df[e+1]**2-df[e+2]**2-df[e+3]**2
# epsilon = .5
# df2 = df.query("pimass2>({0}-{1}) and pimass2<({0}+{1})".format((.135*.135),epsilon))
# #df2 = df.query("NetE>(4.556-{}) and NetE<(4.556+{})".format(epsilon,epsilon))
# #df2 = df.query("protonE<1.475")#.format(epsilon,epsilon))
# print(df)
# df = df.head(len(df2.index))
# print(df2)
# #sys.exit()
if len(df_nflow_data) > len(df_test_data):
df_nflow_data_16 = df_nflow_data.sample(n=len(df_test_data))
else:
df_test_data = df_test_data.sample(n=len(df_nflow_data))
##################################
dvpi0p = df_nflow_data
#dvpi0p = df_test_data
e=4
dvpi0p.loc[:,'pmass'] = np.sqrt(dvpi0p[e]**2-dvpi0p[e+1]**2-dvpi0p[e+2]**2-dvpi0p[e+3]**2)
dvpi0p.loc[:, "Gpx"] = dvpi0p.loc[:, 9]
dvpi0p.loc[:, "Gpy"] = dvpi0p.loc[:, 10]
dvpi0p.loc[:, "Gpz"] = dvpi0p.loc[:, 11]
dvpi0p.loc[:, "Gpx2"] = dvpi0p.loc[:, 13]
dvpi0p.loc[:, "Gpy2"] = dvpi0p.loc[:, 14]
dvpi0p.loc[:, "Gpz2"] = dvpi0p.loc[:, 15]
gam1 = [dvpi0p['Gpx'], dvpi0p['Gpy'], dvpi0p['Gpz']]
gam2 = [dvpi0p['Gpx2'], dvpi0p['Gpy2'], dvpi0p['Gpz2']]
pi0 = [dvpi0p['Gpx']+dvpi0p['Gpx2'], dvpi0p['Gpy']+dvpi0p['Gpy2'], dvpi0p['Gpz']+dvpi0p['Gpz2']]
def dot(vec1, vec2):
# dot product of two 3d vectors
return vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2]
def mag(vec1):
# L2 norm of vector
return np.sqrt(dot(vec1, vec1))
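# Two-photon invariant mass: photons are massless, so E = |p| and
# M_pi0 = sqrt((|p1| + |p2|)**2 - |p1 + p2|**2).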
dvpi0p.loc[:, "Mpi0"] = np.sqrt((mag(gam1)+mag(gam2))**2 - mag(pi0)**2)
dvpi0p16 = dvpi0p
#################################
#dvpi0p = df_nflow_data_16
dvpi0p = df_test_data
e=4
dvpi0p.loc[:,'pmass'] = np.sqrt(dvpi0p[e]**2-dvpi0p[e+1]**2-dvpi0p[e+2]**2-dvpi0p[e+3]**2)
import numpy as np
import cv2
import glob
import os
import skvideo.io
from scipy.special import gamma
from matplotlib import pyplot as plt
import skvideo.utils
import pandas as pd
import math
from joblib import dump
import scipy
from joblib import load
from scipy.fftpack import fft, dct
import scipy.io as sio
from scipy.stats import norm,lognorm,skew,kurtosis
from skvideo.measure import videobliinds,videobliinds_features,niqe
from skimage.util.shape import view_as_windows
win = np.array(skvideo.utils.gen_gauss_window(3, 7.0/6.0))
gamma_range = np.arange(0.2, 10, 0.001)
a = scipy.special.gamma(2.0/gamma_range)
a *= a
b = scipy.special.gamma(1.0/gamma_range)
c = scipy.special.gamma(3.0/gamma_range)
prec_gammas = a/(b*c)
def generate_ggd(x,alphaparam,sigma):
betaparam = sigma*np.sqrt(gamma(1.0/alphaparam)/gamma(3.0/alphaparam))
y = alphaparam/(2*betaparam*gamma(1.0/alphaparam))*np.exp(-(np.abs(x)/betaparam)**alphaparam)
return y
def stat_feats(chroma_mscn):
alpha,sigma = estimateggdparam(chroma_mscn)
skewness = skew(chroma_mscn.flatten())
kurt =kurtosis(chroma_mscn.flatten())
return alpha,sigma,skewness,kurt
def extract_secondord_feats(mscncoefs):
# alpha_m, = extract_ggd_features(mscncoefs)
pps1, pps2, pps3, pps4 = paired_product(mscncoefs)
alpha1, N1, bl1, br1, lsq1, rsq1 = aggd_features(pps1)
alpha2, N2, bl2, br2, lsq2, rsq2 = aggd_features(pps2)
alpha3, N3, bl3, br3, lsq3, rsq3 = aggd_features(pps3)
alpha4, N4, bl4, br4, lsq4, rsq4 = aggd_features(pps4)
return np.array([
alpha1, N1, lsq1**2, rsq1**2, # (V)
alpha2, N2, lsq2**2, rsq2**2, # (H)
alpha3, N3, lsq3**2, rsq3**2, # (D1)
alpha4, N4, lsq4**2, rsq4**2]) # (D2)
def _extract_subband_feats(mscncoefs):
# alpha_m, = extract_ggd_features(mscncoefs)
alpha_m, sigma = estimateggdparam(mscncoefs.copy())
pps1, pps2, pps3, pps4 = paired_product(mscncoefs)
alpha1, N1, bl1, br1, lsq1, rsq1 = aggd_features(pps1)
alpha2, N2, bl2, br2, lsq2, rsq2 = aggd_features(pps2)
alpha3, N3, bl3, br3, lsq3, rsq3 = aggd_features(pps3)
alpha4, N4, bl4, br4, lsq4, rsq4 = aggd_features(pps4)
return np.array([
alpha_m, sigma,
alpha1, N1, lsq1**2, rsq1**2, # (V)
alpha2, N2, lsq2**2, rsq2**2, # (H)
alpha3, N3, lsq3**2, rsq3**2, # (D1)
alpha4, N4, lsq4**2, rsq4**2, # (D2)
])
def estimateggdparam(vec):
gam = np.asarray([x / 1000.0 for x in range(200, 10000, 1)])
r_gam = (gamma(1.0/gam)*gamma(3.0/gam))/((gamma(2.0/gam))**2)
# print(np.mean(vec))
sigma_sq = np.mean(vec**2) #-(np.mean(vec))**2
sigma = np.sqrt(sigma_sq)
E = np.mean(np.abs(vec))
rho = sigma_sq/(E**2+1e-6)
array_position =(np.abs(rho - r_gam)).argmin()
alphaparam = gam[array_position]
return alphaparam,sigma
def all_aggd(y):
falpha1,fN1,fbl1,fbr1,flsq1,frsq1 = aggd_features(y.copy())
pps1, pps2, pps3, pps4 = paired_product(y)
alpha1, N1, bl1, br1, lsq1, rsq1 = aggd_features(pps1)
alpha2, N2, bl2, br2, lsq2, rsq2 = aggd_features(pps2)
alpha3, N3, bl3, br3, lsq3, rsq3 = aggd_features(pps3)
alpha4, N4, bl4, br4, lsq4, rsq4 = aggd_features(pps4)
return np.array([
falpha1, fN1, flsq1**2,frsq1**2,
alpha1, N1, lsq1**2, rsq1**2, # (V)
alpha2, N2, lsq2**2, rsq2**2, # (H)
alpha3, N3, lsq3**2, rsq3**2, # (D1)
alpha4, N4, lsq4**2, rsq4**2, # (D2)
])
def brisque(y_mscn):
# half_scale = cv2.resize(y, dsize=(0,0),fx=0.5,fy=0.5, interpolation=cv2.INTER_LANCZOS4)
feats_full = _extract_subband_feats(y_mscn)
# feats_half = _extract_subband_feats(half_scale)
return feats_full#np.concatenate((feats_full,feats_half))
def aggd_features(imdata):
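# Fit an asymmetric generalized Gaussian (AGGD): estimate left/right scale parameters from the
# negative/non-negative coefficients and pick the shape alpha whose theoretical moment ratio
# best matches the empirical r-hat.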
#flatten imdata
imdata.shape = (len(imdata.flat),)
imdata2 = imdata*imdata
left_data = imdata2[imdata<0]
right_data = imdata2[imdata>=0]
left_mean_sqrt = 0
right_mean_sqrt = 0
if len(left_data) > 0:
left_mean_sqrt = np.sqrt(np.average(left_data))
if len(right_data) > 0:
right_mean_sqrt = np.sqrt(np.average(right_data))
if right_mean_sqrt != 0:
gamma_hat = left_mean_sqrt/right_mean_sqrt
else:
gamma_hat = np.inf
#solve r-hat norm
imdata2_mean = np.mean(imdata2)
if imdata2_mean != 0:
r_hat = (np.average(np.abs(imdata))**2) / (np.average(imdata2))
else:
r_hat = np.inf
rhat_norm = r_hat * (((math.pow(gamma_hat, 3) + 1)*(gamma_hat + 1)) / math.pow(math.pow(gamma_hat, 2) + 1, 2))
#solve alpha by guessing values that minimize ro
pos = np.argmin((prec_gammas - rhat_norm)**2);
alpha = gamma_range[pos]
gam1 = scipy.special.gamma(1.0/alpha)
gam2 = scipy.special.gamma(2.0/alpha)
gam3 = scipy.special.gamma(3.0/alpha)
aggdratio = np.sqrt(gam1) / np.sqrt(gam3)
bl = aggdratio * left_mean_sqrt
br = aggdratio * right_mean_sqrt
#mean parameter
N = (br - bl)*(gam2 / gam1)#*aggdratio
return (alpha, N, bl, br, left_mean_sqrt, right_mean_sqrt)
# def ggd_features(imdata):
# nr_gam = 1/prec_gammas
# sigma_sq = np.var(imdata)
# E = np.mean(np.abs(imdata))
# rho = sigma_sq/E**2
# pos = np.argmin(np.abs(nr_gam - rho));
# return gamma_range[pos], sigma_sq
def sigma_map(image):
im = image.astype(np.float32)
mu = cv2.GaussianBlur(im,(7,7),7.0/6.0,7.0/6.0)
mu_sq = mu*mu
sigma = np.sqrt(np.abs(cv2.GaussianBlur(im**2,(7,7),7.0/6.0,7.0/6.0)-mu_sq))
return sigma
def dog(image):
image = image.astype(np.float32)
gauss1 = cv2.GaussianBlur(image,(7,7),7.0/6.0,7.0/6.0)
gauss2 = cv2.GaussianBlur(image,(7,7),7.0*1.5/6.0,7.0*1.5/6.0)
dog = gauss1-gauss2
return dog
def paired_product(new_im):
shift1 = np.roll(new_im.copy(), 1, axis=1)
shift2 = np.roll(new_im.copy(), 1, axis=0)
shift3 = np.roll(np.roll(new_im.copy(), 1, axis=0), 1, axis=1)
shift4 = np.roll(np.roll(new_im.copy(), 1, axis=0), -1, axis=1)
H_img = shift1 * new_im
V_img = shift2 * new_im
D1_img = shift3 * new_im
D2_img = shift4 * new_im
return (H_img, V_img, D1_img, D2_img)
def gen_gauss_window(lw, sigma):
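# Normalized, symmetric 1-D Gaussian window of length 2*lw + 1, used for local mean/variance filtering.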
sd = np.float32(sigma)
lw = int(lw)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd *= sd
for ii in range(1, lw + 1):
tmp = np.exp(-0.5 * np.float32(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
return weights
def compute_image_mscn_transform(image, C=1, avg_window=None, extend_mode='constant'):
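# MSCN coefficients: subtract a Gaussian-weighted local mean and divide by the local standard
# deviation (plus C), computed with separable 1-D correlations along each axis.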
if avg_window is None:
avg_window = gen_gauss_window(3, 7.0/6.0)
assert len(np.shape(image)) == 2
h, w = np.shape(image)
mu_image = np.zeros((h, w), dtype=np.float32)
var_image = np.zeros((h, w), dtype=np.float32)
image = np.array(image).astype('float32')
scipy.ndimage.correlate1d(image, avg_window, 0, mu_image, mode=extend_mode)
scipy.ndimage.correlate1d(mu_image, avg_window, 1, mu_image, mode=extend_mode)
scipy.ndimage.correlate1d(image**2, avg_window, 0, var_image, mode=extend_mode)
scipy.ndimage.correlate1d(var_image, avg_window, 1, var_image, mode=extend_mode)
var_image = np.sqrt(np.abs(var_image - mu_image**2))
return (image - mu_image)/(var_image + C), var_image, mu_image
def generate_aggd(x1,x2,alpha,sigma_l,sigma_r):
beta_l = sigma_l*np.sqrt(gamma(1/alpha)/gamma(3/alpha))
beta_r= sigma_r*np.sqrt(gamma(1/alpha)/gamma(3/alpha))
f1 = alpha/((beta_l+beta_r)*gamma(1/alpha))*np.exp(-(-x1/beta_l)**alpha)
f2 = alpha/((beta_l+beta_r)*gamma(1/alpha))*np.exp(-(x2/beta_r)**alpha)
f = np.concatenate((f1,f2),axis=0)
return f
def chroma_feats(lab):
# lab = cv2.cvtColor(bgr,cv2.COLOR_BGR2Lab)
a = lab[:,:,1]
b = lab[:,:,2]
chroma = np.sqrt(a**2+b**2)
chroma_mscn,sigma_map,_ = compute_image_mscn_transform(chroma)
sigma_mscn,_,_ =compute_image_mscn_transform(sigma_map)
alpha,sigma,skewness,kurt= stat_feats(chroma_mscn)
salpha,ssigma,sskewness,skurt= stat_feats(sigma_mscn)
half_scale = cv2.resize(chroma.astype(np.uint8), dsize=(0,0),fx=0.5,fy=0.5, interpolation=cv2.INTER_CUBIC)
half_chroma_mscn,half_sigma_map,_ = compute_image_mscn_transform(half_scale)
half_sigma_mscn,_,_ = compute_image_mscn_transform(half_sigma_map)
halpha,hsigma,hskewness,hkurt= stat_feats(half_chroma_mscn)
hsalpha,hssigma,hsskewness,hskurt= stat_feats(half_sigma_mscn)
first_order_feats = np.asarray([alpha,sigma,skewness,kurt,halpha,hsigma,hskewness,hkurt,salpha,ssigma,sskewness,skurt,hsalpha,hssigma,hsskewness,hskurt])
return first_order_feats
def estimate_log_deri_ggd(image):
log_im = np.log(image+0.5)
log_feats = []
shifts= [(0,1),(1,0),(1,1),(1,-1)]
for i in range(len(shifts)):
rolled = np.roll(log_im, shift=shifts[i],axis=(0,1))
log_deri = log_im - rolled
alpha,sigma = estimateggdparam(log_deri)
log_feats.append(np.asarray([alpha,sigma]))
D5 = log_im + np.roll(log_im,shift=(1,1),axis=(0,1))-np.roll(log_im,shift=(0,1),axis=(0,1))-np.roll(log_im,shift=(1,0),axis=(0,1))
D6 = np.roll(log_im,shift=(-1,0),axis=(0,1))+np.roll(log_im,shift=(1,0),axis=(0,1))-np.roll(log_im,shift=(0,-1),axis=(0,1))
import os
import torch
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from torchvision import datasets, transforms
import skimage.io
import skimage.transform
import re
import tqdm
def inf_dataloader(data_loader):
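# Infinite generator over a DataLoader so training loops can draw batches without tracking epochs.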
while True:
for data in data_loader:
yield data
class _GrayscaleDataset(Dataset):
processed_folder = 'processed'
train_file = 'training.pt'
test_file = 'test.pt'
def __init__(self, root, train=True, download=False):
super(_GrayscaleDataset, self).__init__()
assert download == False
if train:
self.data, self.targets = torch.load(os.path.join(root, self.processed_folder, self.train_file))
else:
self.data, self.targets = torch.load(os.path.join(root, self.processed_folder, self.test_file))
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img.numpy(), mode='L')
return img, target
def __len__(self):
return len(self.data)
class _ColorDataset(_GrayscaleDataset):
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img.numpy(), mode='RGB')
return img, target
class _NormalAnomalyBase(Dataset):
def __init__(self, DatasetClass, root, split, normal_class, normal=True, transform=None, download=True,
equal_abnormal_count=False):
super().__init__()
if split == 'train':
dataset = DatasetClass(root=root, train=True, download=download)
else:
dataset = DatasetClass(root=root, train=False, download=download)
self.data = dataset
self.transform = transform
self.normal_class = normal_class
if split == 'train':
self._min_dataset_size = 10000 # to speed up training
else:
self._min_dataset_size = 0
if normal:
self.active_indexes = [i for i in range(len(dataset)) if dataset.targets[i] == self.normal_class]
else:
self.active_indexes = [i for i in range(len(dataset)) if dataset.targets[i] != self.normal_class]
if equal_abnormal_count:
normal_count = (dataset.targets == self.normal_class).sum()
np.random.shuffle(self.active_indexes)
self.active_indexes = self.active_indexes[:normal_count]
def __getitem__(self, index):
index = index % len(self.active_indexes)
image, _ = self.data[self.active_indexes[index]]
if self.transform is not None:
image = self.transform(image)
return image
def __len__(self):
_len = len(self.active_indexes)
if _len >= self._min_dataset_size:
return _len
else:
# if data is too small we just duplicate all samples several times
factor = int(np.ceil(self._min_dataset_size / _len))
return factor * _len
class CIFAR10(_NormalAnomalyBase):
def __init__(self, root, split, normal_class, normal=True, transform=None):
super().__init__(datasets.CIFAR10, root, split, normal_class, normal, transform, download=True)
class MNIST(_NormalAnomalyBase):
def __init__(self, root, split, normal_class, normal=True, transform=None):
super().__init__(datasets.MNIST, root, split, normal_class, normal, transform, download=True)
class FashionMNIST(_NormalAnomalyBase):
def __init__(self, root, split, normal_class, normal=True, transform=None):
preprocessed_root = os.path.join(root, 'ad_protocol')
fashion_mnist_preprocessed(root, preprocessed_root)
super().__init__(_GrayscaleDataset, preprocessed_root, split, normal_class, normal, transform,
download=False,
equal_abnormal_count=True)
class COIL100(_NormalAnomalyBase):
def __init__(self, root, split, normal_class, normal=True, transform=None):
preprocessed_root = os.path.join(root, 'ad_protocol')
coil_100_preprocessing(root, preprocessed_root)
super().__init__(_ColorDataset, preprocessed_root, split, normal_class, normal, transform,
download=False,
equal_abnormal_count=True)
class CelebA(_NormalAnomalyBase):
def __init__(self, root, split, normal_class, normal=True, transform=None, abnormal_class=None,
extended_attribute_list=False):
assert normal_class == 0
self.data = datasets.CelebA(root, split)
self.transform = transform
if extended_attribute_list:
self.attributes = ["Bags_Under_Eyes", "Bald", "Bangs", "Eyeglasses", "Goatee",
"Heavy_Makeup", "Mustache", "Sideburns", "Wearing_Hat"]
else:
self.attributes = ["Bald", "Mustache", "Bangs", "Eyeglasses", "Wearing_Hat"]
if normal:
byte_index = torch.ones(len(self.data), dtype=torch.bool)
for attr_name in self.attributes:
byte_index = byte_index.logical_and(self.data.attr[:, self.data.attr_names.index(attr_name)] == 0)
self.active_indexes = torch.nonzero(byte_index, as_tuple=False).numpy().flatten()
else:
assert abnormal_class in self.attributes
# filter images where this attribute is presented
byte_index = self.data.attr[:, self.data.attr_names.index(abnormal_class)] == 1
# filter images where all other attributes are not presented
for attr_name in self.attributes:
if attr_name != abnormal_class:
byte_index = byte_index.logical_and(self.data.attr[:, self.data.attr_names.index(attr_name)] == 0)
self.active_indexes = torch.nonzero(byte_index, as_tuple=False).numpy().flatten()
if split == 'train':
self._min_dataset_size = 10000 # as required in _NormalAnomalyBase
else:
self._min_dataset_size = 0
class LSUN(_NormalAnomalyBase):
def __init__(self, root, split, normal_class, normal=True, transform=None):
assert normal_class == 0
self.transform = transform
_class = 'bedroom' if normal else "conference_room"
if split == 'test':
split = 'val' # as was done in ADGAN (https://link.springer.com/chapter/10.1007/978-3-030-10925-7_1)
self.data = datasets.LSUN(root, classes=[f"{_class}_{split}"])
self.active_indexes = list(range(len(self.data))) # as required in _NormalAnomalyBase
if split == 'train':
self._min_dataset_size = 10000 # as required in _NormalAnomalyBase
else:
self._min_dataset_size = 0
def fashion_mnist_preprocessed(original_root, preprocessed_root):
train = datasets.FashionMNIST(root=original_root, train=True, download=True)
test = datasets.FashionMNIST(root=original_root, train=False, download=True)
if os.path.exists(preprocessed_root):
print('Preprocessing to fit to the anomaly detection train/test protocol was already done.')
else:
print("Preprocessing to fit to the anomaly detection train/test protocol....")
data = torch.cat((train.data, test.data), dim=0)
targets = torch.cat((train.targets, test.targets), dim=0)
train_data = []
train_labels = []
test_data = []
test_labels = []
indexs = np.array(range(data.shape[0]))
for label in range(10):
label_indexs = indexs[targets.numpy() == label]
np.random.shuffle(label_indexs)
n = int(len(label_indexs) * 0.8)
train_indexs = label_indexs[:n]
test_indexs = label_indexs[n:]
train_data.append(data[train_indexs])
train_labels.append(targets[train_indexs])
test_data.append(data[test_indexs])
test_labels.append(targets[test_indexs])
train_data = torch.cat(train_data, dim=0).detach().clone()
train_labels = torch.cat(train_labels, dim=0).detach().clone()
test_data = torch.cat(test_data, dim=0).detach().clone()
test_labels = torch.cat(test_labels, dim=0).detach().clone()
train = (train_data, train_labels)
test = (test_data, test_labels)
output_root = os.path.join(preprocessed_root, 'processed')
os.makedirs(output_root, exist_ok=True)
torch.save(train, os.path.join(output_root, 'training.pt'))
torch.save(test, os.path.join(output_root, 'test.pt'))
print("Preprocessing is done.")
def coil_100_preprocessing(original_root, preprocessed_root):
if os.path.exists(preprocessed_root):
print('Preprocessing to fit to the anomaly detection train/test protocol was already done.')
else:
print("Preprocessing to fit to the anomaly detection train/test protocol....")
original_root = os.path.join(original_root, 'coil-100')
pat = 'obj(?P<class>\d+)__(?P<numb>\d+)'
imgs = []
labels = []
print("Loading data....")
for filename in tqdm.tqdm(os.listdir(original_root)):
if os.path.splitext(filename)[1] != '.png':
continue
img = skimage.io.imread(os.path.join(original_root, filename))
img = skimage.transform.rescale(img, (0.25, 0.25, 1))
img = (img * 255).astype(np.uint8)
label = int(re.match(pat, filename)['class'])
if label == 100:
label = 0
imgs.append(img)
labels.append(label)
imgs = np.stack(imgs, axis=0)
import argparse
import os
import time
# import subprocess
# proc1 = subprocess.Popen(['scontrol', 'show', 'job', os.environ['SLURM_JOBID'], '-d'], stdout=subprocess.PIPE)
# process = subprocess.run(['grep', '-oP', 'GRES=.*IDX:\K\d'], stdin=proc1.stdout, capture_output=True, text=True)
# os.environ['EGL_DEVICE_ID'] = process.stdout.rstrip()
# proc1.stdout.close()
import imageio
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Agg")
#matplotlib.use("TkAgg")
import numpy as np
import torch
import torchvision
import yaml
from tqdm import tqdm
#from nerf-pytorch import
from nerf import (
CfgNode,
get_ray_bundle,
load_flame_data,
load_llff_data,
models,
get_embedding_function,
run_one_iter_of_nerf,
meshgrid_xy
)
def save_plt_image(im1, outname):
fig = plt.figure()
fig.set_size_inches((6.4,6.4))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
#plt.set_cmap('jet')
ax.imshow(im1, aspect='equal')
plt.savefig(outname, dpi=80)
plt.close(fig)
def normal_map_from_depth_map(depthmap):
h, w = np.shape(depthmap)
normals = np.zeros((h, w, 3))
phong = np.zeros((h, w, 3))
for x in range(1, h - 1):
for y in range(1, w - 1):
dzdx = (float((depthmap[x + 1, y])) - float((depthmap[x - 1, y]))) / 2.0
dzdy = (float((depthmap[x, y + 1])) - float((depthmap[x, y - 1]))) / 2.0
n = np.array([-dzdx, -dzdy, 0.005])
n = n * 1/np.linalg.norm(n)
dir = np.array([x,y,1.0])
dir = dir *1/np.linalg.norm(dir)
normals[x, y] = (n*0.5 + 0.5)
phong[x, y] = np.dot(dir,n)*0.5+0.5
normals *= 255
normals = normals.astype('uint8')
#plt.imshow(depthmap, cmap='gray')
#plt.show()
plt.imshow(normals)
plt.show()
plt.imshow(phong)
plt.show()
print('a')
return normals
def torch_normal_map(depthmap,focal,weights=None,clean=True, central_difference=False):
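# Back-project the depth map to camera-space points, take finite differences along x and y, and
# use their normalized cross product as per-pixel surface normals mapped to [0, 1]; optionally
# blank out pixels with low volumetric-rendering weight.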
W,H = depthmap.shape
#normals = torch.zeros((H,W,3), device=depthmap.device)
cx = focal[2]*W
cy = focal[3]*H
fx = focal[0]
fy = focal[1]
ii, jj = meshgrid_xy(torch.arange(W, device=depthmap.device),
torch.arange(H, device=depthmap.device))
points = torch.stack(
[
((ii - cx) * depthmap) / fx,
-((jj - cy) * depthmap) / fy,
depthmap,
],
dim=-1)
difference = 2 if central_difference else 1
dx = (points[difference:,:,:] - points[:-difference,:,:])
dy = (points[:,difference:,:] - points[:,:-difference,:])
normals = torch.cross(dy[:-difference,:,:],dx[:,:-difference,:],2)
normalize_factor = torch.sqrt(torch.sum(normals*normals,2))
normals[:,:,0] /= normalize_factor
normals[:,:,1] /= normalize_factor
normals[:,:,2] /= normalize_factor
normals = normals * 0.5 +0.5
if clean and weights is not None: # Use volumetric rendering weights to clean up the normal map
mask = weights.repeat(3,1,1).permute(1,2,0)
mask = mask[:-difference,:-difference]
where = torch.where(mask > 0.22)
normals[where] = 1.0
normals = (1-mask)*normals + (mask)*torch.ones_like(normals)
normals *= 255
#plt.imshow(normals.cpu().numpy().astype('uint8'))
#plt.show()
return normals
def vis(tensor):
plt.imshow((tensor*255).cpu().numpy().astype('uint8'))
plt.show()
def normal_map_from_depth_map_backproject(depthmap):
h, w = np.shape(depthmap)
normals = np.zeros((h, w, 3))
phong = np.zeros((h, w, 3))
cx = cy = h//2
fx=fy=500
fx = fy = 1150
for x in range(1, h - 1):
for y in range(1, w - 1):
#dzdx = (float((depthmap[x + 1, y])) - float((depthmap[x - 1, y]))) / 2.0
#dzdy = (float((depthmap[x, y + 1])) - float((depthmap[x, y - 1]))) / 2.0
p = np.array([(x*depthmap[x,y]-cx)/fx, (y*depthmap[x,y]-cy)/fy, depthmap[x,y]])
py = np.array([(x*depthmap[x,y+1]-cx)/fx, ((y+1)*depthmap[x,y+1]-cy)/fy, depthmap[x,y+1]])
px = np.array([((x+1)*depthmap[x+1,y]-cx)/fx, (y*depthmap[x+1,y]-cy)/fy, depthmap[x+1,y]])
#n = np.array([-dzdx, -dzdy, 0.005])
n = np.cross(px-p, py-p)
n = n * 1/np.linalg.norm(n)
dir = p#np.array([x,y,1.0])
dir = dir *1/np.linalg.norm(dir)
normals[x, y] = (n*0.5 + 0.5)
phong[x, y] = np.dot(dir,n)*0.5+0.5
normals *= 255
normals = normals.astype('uint8')
#plt.imshow(depthmap, cmap='gray')
#plt.show()
#plt.imshow(normals)
#plt.show()
#plt.imshow(phong)
#plt.show()
#print('a')
return normals
def error_image(im1, im2):
fig = plt.figure()
diff = (im1 - im2)
#gt_vs_theirs[total_mask, :] = 0
#print("theirs ", np.sqrt(np.sum(np.square(gt_vs_theirs))), np.mean(np.square(gt_vs_theirs)))
ax = plt.axes([0, 0, 1, 1], frameon=False)
# Then we disable our xaxis and yaxis completely. If we just say plt.axis('off'),
# they are still used in the computation of the image padding.
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Even though our axes (plot region) are set to cover the whole image with [0,0,1,1],
# by default they leave padding between the plotted data and the frame. We use tight=True
# to make sure the data gets scaled to the full extents of the axes.
plt.autoscale(tight=True)
plt.imshow(np.linalg.norm(diff, axis=2), cmap='jet')
#ax.plt.axes('off')
#ax = plt.Axes(fig, [0., 0., 1., 1.])
#ax.set_axis_off()
#plt.show()
return fig
def cast_to_image(tensor, dataset_type):
# Input tensor is (H, W, 3). Convert to (3, H, W).
tensor = tensor.permute(2, 0, 1)
tensor = tensor.clamp(0.0,1.0)
# Convert to PIL Image and then np.array (output shape: (H, W, 3))
img = np.array(torchvision.transforms.ToPILImage()(tensor.detach().cpu()))
return img
# # Map back to shape (3, H, W), as tensorboard needs channels first.
# return np.moveaxis(img, [-1], [0])
def cast_to_disparity_image(tensor):
img = (tensor - tensor.min()) / (tensor.max() - tensor.min())
img = img.clamp(0, 1) * 255
return img.detach().cpu().numpy().astype(np.uint8)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--config", type=str, required=True, help="Path to (.yml) config file."
)
parser.add_argument(
"--checkpoint",
type=str,
required=True,
help="Checkpoint / pre-trained model to evaluate.",
)
parser.add_argument(
"--savedir", type=str, default='./renders/', help="Save images to this directory, if specified."
)
parser.add_argument(
"--save-disparity-image", action="store_true", help="Save disparity images too."
)
parser.add_argument(
"--save-error-image", action="store_true", help="Save photometric error visualization"
)
configargs = parser.parse_args()
# Read config file.
cfg = None
with open(configargs.config, "r") as f:
cfg_dict = yaml.load(f, Loader=yaml.FullLoader)
cfg = CfgNode(cfg_dict)
images, poses, render_poses, hwf = None, None, None, None
i_train, i_val, i_test = None, None, None
if cfg.dataset.type.lower() == "blender":
# Load blender dataset
images, poses, render_poses, hwf, i_split, expressions, _, _ = load_flame_data(
cfg.dataset.basedir,
half_res=cfg.dataset.half_res,
testskip=cfg.dataset.testskip,
test=True
)
#i_train, i_val, i_test = i_split
i_test = i_split
H, W, focal = hwf
H, W = int(H), int(W)
elif cfg.dataset.type.lower() == "llff":
# Load LLFF dataset
images, poses, bds, render_poses, i_test = load_llff_data(
cfg.dataset.basedir, factor=cfg.dataset.downsample_factor,
)
hwf = poses[0, :3, -1]
H, W, focal = hwf
hwf = [int(H), int(W), focal]
render_poses = torch.from_numpy(render_poses)
# Device on which to run.
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
encode_position_fn = get_embedding_function(
num_encoding_functions=cfg.models.coarse.num_encoding_fn_xyz,
include_input=cfg.models.coarse.include_input_xyz,
log_sampling=cfg.models.coarse.log_sampling_xyz,
)
encode_direction_fn = None
if cfg.models.coarse.use_viewdirs:
encode_direction_fn = get_embedding_function(
num_encoding_functions=cfg.models.coarse.num_encoding_fn_dir,
include_input=cfg.models.coarse.include_input_dir,
log_sampling=cfg.models.coarse.log_sampling_dir,
)
# Initialize a coarse resolution model.
model_coarse = getattr(models, cfg.models.coarse.type)(
num_encoding_fn_xyz=cfg.models.coarse.num_encoding_fn_xyz,
num_encoding_fn_dir=cfg.models.coarse.num_encoding_fn_dir,
include_input_xyz=cfg.models.coarse.include_input_xyz,
include_input_dir=cfg.models.coarse.include_input_dir,
use_viewdirs=cfg.models.coarse.use_viewdirs,
num_layers=cfg.models.coarse.num_layers,
hidden_size=cfg.models.coarse.hidden_size,
include_expression=True
)
model_coarse.to(device)
# If a fine-resolution model is specified, initialize it.
model_fine = None
if hasattr(cfg.models, "fine"):
model_fine = getattr(models, cfg.models.fine.type)(
num_encoding_fn_xyz=cfg.models.fine.num_encoding_fn_xyz,
num_encoding_fn_dir=cfg.models.fine.num_encoding_fn_dir,
include_input_xyz=cfg.models.fine.include_input_xyz,
include_input_dir=cfg.models.fine.include_input_dir,
use_viewdirs=cfg.models.fine.use_viewdirs,
num_layers=cfg.models.coarse.num_layers,
hidden_size=cfg.models.coarse.hidden_size,
include_expression=True
)
model_fine.to(device)
checkpoint = torch.load(configargs.checkpoint)
model_coarse.load_state_dict(checkpoint["model_coarse_state_dict"])
if checkpoint["model_fine_state_dict"]:
try:
model_fine.load_state_dict(checkpoint["model_fine_state_dict"])
except Exception:
print(
"The checkpoint has a fine-level model, but it could "
"not be loaded (possibly due to a mismatched config file."
)
if "height" in checkpoint.keys():
hwf[0] = checkpoint["height"]
if "width" in checkpoint.keys():
hwf[1] = checkpoint["width"]
if "focal_length" in checkpoint.keys():
hwf[2] = checkpoint["focal_length"]
if "background" in checkpoint.keys():
background = checkpoint["background"]
if background is not None:
print("loaded background with shape ", background.shape)
background.to(device)
if "latent_codes" in checkpoint.keys():
latent_codes = checkpoint["latent_codes"]
use_latent_code = False
if latent_codes is not None:
use_latent_code = True
latent_codes.to(device)
print("loading index map for latent codes...")
idx_map = np.load(cfg.dataset.basedir + "/index_map.npy")
# -*- coding:UTF-8 -*-
from copy import deepcopy
from inspect import stack as INSPECTstack
from math import ceil as MATHceil
from math import floor as MATHfloor
from numpy import arange as NUMPYarange
from numpy import around as NUMPYaround
from numpy import array as NUMPYarray
from numpy import isnan as NUMPYisnan
from numpy import mean as NUMPYmean
from numpy import nan as NUMPYnan
from numpy import sort as NUMPYsort
from numpy import where as NUMPYwhere
from numpy.ma import masked_invalid as NUMPYma__masked_invalid
from numpy.random import randint as NUMPYrandom__randint
from scipy.stats import scoreatpercentile as SCIPYstats__scoreatpercentile
# xarray based functions
from xarray import open_dataset
from xarray import where as XARRAYwhere
# ENSO_metrics functions
from EnsoMetrics.EnsoCollectionsLib import ReferenceObservations
from EnsoMetrics.EnsoPlotLib import plot_param
from EnsoMetrics import EnsoErrorsWarnings
calendar_months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
observations = sorted(ReferenceObservations().keys(), key=lambda v: v.upper())
# metrics order
metrics_background = [
"BiasPrLatRmse", "BiasPrLonRmse", "BiasSshLatRmse", "BiasSshLonRmse", "BiasSstLatRmse", "BiasSstLonRmse",
"BiasTauxLatRmse", "BiasTauxLonRmse", "SeasonalPrLatRmse", "SeasonalPrLonRmse", "SeasonalSshLatRmse",
"SeasonalSshLonRmse", "SeasonalSstLatRmse", "SeasonalSstLonRmse", "SeasonalTauxLatRmse", "SeasonalTauxLonRmse"]
metrics_basic = [
"EnsoSstLonRmse", "EnsoPrTsRmse", "EnsoSstTsRmse", "EnsoTauxTsRmse", "EnsoAmpl", "EnsoSeasonality", "EnsoSstSkew",
"EnsoDuration", "EnsoSstDiversity", "EnsoSstDiversity_1", "EnsoSstDiversity_2", "NinoSstDiversity",
"NinoSstDiversity_1", "NinoSstDiversity_2"]
metrics_teleconnection = [
"EnsoPrMapCorr", "EnsoPrMapRmse", "EnsoPrMapStd", "EnsoPrMapDjfCorr", "EnsoPrMapDjfRmse", "EnsoPrMapDjfStd",
"EnsoPrMapJjaCorr", "EnsoPrMapJjaRmse", "EnsoPrMapJjaStd", "EnsoSlpMapCorr", "EnsoSlpMapRmse", "EnsoSlpMapStd",
"EnsoSlpMapDjfCorr", "EnsoSlpMapDjfRmse", "EnsoSlpMapDjfStd", "EnsoSlpMapJjaCorr", "EnsoSlpMapJjaRmse",
"EnsoSlpMapJjaStd", "EnsoSstMapCorr", "EnsoSstMapRmse", "EnsoSstMapStd", "EnsoSstMapDjfCorr", "EnsoSstMapDjfRmse",
"EnsoSstMapDjfStd", "EnsoSstMapJjaCorr", "EnsoSstMapJjaRmse", "EnsoSstMapJjaStd"]
metrics_process = [
"EnsoFbSstTaux", "EnsoFbTauxSsh", "EnsoFbSshSst", "EnsoFbSstThf", "EnsoFbSstSwr", "EnsoFbSstLhf", "EnsoFbSstLwr",
"EnsoFbSstShf", "EnsodSstOce", "EnsodSstOce_1", "EnsodSstOce_2"]
# models order
models_order = [
"ACCESS1-0", "ACCESS1-3", "ACCESS-CM2", "ACCESS-ESM1-5", "BCC-CSM1-1", "BCC-CSM1-1-M", "BCC-CSM2-MR", "BCC-ESM1",
"BNU-ESM", "CAMS-CSM1-0", "CanCM4", "CanESM2", "CanESM5", "CanESM5-CanOE", "CCSM4", "CESM1-BGC", "CESM1-CAM5",
"CESM2", "CESM2-FV2", "CESM1-FASTCHEM", "CESM1-WACCM", "CESM2-WACCM", "CESM2-WACCM-FV2", "CMCC-CESM", "CMCC-CM",
"CMCC-CMS", "CNRM-CM5", "CNRM-CM5-2", "CNRM-CM6-1", "CNRM-CM6-1-HR", "CNRM-ESM2-1", "CSIRO-Mk3-6-0",
"CSIRO-Mk3L-1-2", "E3SM-1-0", "E3SM-1-1", "EC-EARTH", "EC-Earth3", "EC-Earth3-Veg", "FGOALS-f3-L", "FGOALS-g2",
"FGOALS-s2", "FIO-ESM", "GFDL-CM2p1", "GFDL-CM3", "GFDL-CM4", "GFDL-ESM2G", "GFDL-ESM2M", "GFDL-ESM4",
"GISS-E2-1-G", "GISS-E2-1-G-CC", "GISS-E2-H", "GISS-E2-H-CC", "GISS-E2-1-H", "GISS-E2-R", "GISS-E2-R-CC", "HadCM3",
"HadGEM2-AO", "HadGEM2-CC", "HadGEM2-ES", "HadGEM3-GC31-LL", "INMCM4", "INM-CM4-8", "INM-CM5-0", "IPSL-CM5A-LR",
"IPSL-CM5A-MR", "IPSL-CM5B-LR", "IPSL-CM6A-LR", "KACE-1-0-G", "MIROC4h", "MIROC5", "MIROC6", "MIROC-ESM",
"MIROC-ESM-CHEM", "MIROC-ES2L", "MPI-ESM-LR", "MPI-ESM-MR", "MPI-ESM-P", "MPI-ESM-1-2-HAM", "MPI-ESM1-2-HR",
"MPI-ESM1-2-LR", "MRI-CGCM3", "MRI-ESM1", "MRI-ESM2-0", "NESM3", "NorESM1-M", "NorESM1-ME", "NorCPM1", "NorESM2-LM",
"NorESM2-MM", "SAM0-UNICON", "TaiESM1", "UKESM1-0-LL"]
def bootstrap(tab, num_samples=1000000, alpha=0.05, nech=None, statistic=NUMPYmean):
"""Returns bootstrap estimate of 100.0*(1-alpha) CI for statistic."""
n = len(tab)
if nech is None:
nech = deepcopy(n)
idx = NUMPYrandom__randint(0, n, (num_samples, nech))
samples = tab[idx]
stat = NUMPYsort(statistic(samples, 1))
return [stat[int((alpha/2.0)*num_samples)], stat[int((1-alpha/2.0)*num_samples)]]
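# Minimal usage sketch (not called anywhere in this script): estimate a 95%
# confidence interval of the mean of a small sample; the resample count is kept
# small here purely to keep the sketch cheap.
def _example_bootstrap_ci():
    values = NUMPYarray([0.1, 0.4, 0.35, 0.8, 0.2, 0.55])
    return bootstrap(values, num_samples=1000, alpha=0.05)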
# Build axis tick positions and formatted tick labels for a "months", "latitude" or "longitude" axis.
def create_labels(label_name, label_ticks):
if label_name == "months":
if len(label_ticks) > 40:
mult = 6
elif len(label_ticks) > 10:
mult = 4
else:
mult = 3
label_ticks = [ii for ii in label_ticks if ii % mult == 0]
label = [calendar_months[ii % 12] for ii in label_ticks]
elif label_name == "latitude":
if len(label_ticks) < 40:
mult = 10
else:
mult = 20
label_ticks = [ii for ii in label_ticks if ii % mult == 0]
if min(label_ticks) < 0 and max(label_ticks) > 0 and 0 not in label_ticks:
label_ticks = NUMPYarray(label_ticks)
while 0 not in label_ticks:
label_ticks = label_ticks + 1
label = [str(abs(int(ii))) + r'$^\circ$S' if ii < 0 else (str(abs(int(ii))) + r'$^\circ$N' if ii > 0 else 'eq')
for ii in label_ticks]
elif label_name == "longitude":
if len(label_ticks) < 200:
mult = 40
else:
mult = 90
label_ticks = [ii for ii in label_ticks if ii % mult == 0]
if min(label_ticks) < 180 and max(label_ticks) > 180 and 180 not in label_ticks:
label_ticks = NUMPYarray(label_ticks)
while 180 not in label_ticks:
label_ticks = label_ticks + 10
label = [str(int(ii)) + r"$^\circ$E" if ii < 180 else (
str(abs(int(ii) - 360)) + r"$^\circ$W" if ii > 180 else r"180$^\circ$") for ii in label_ticks]
return label_ticks, label
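# Worked example: for a latitude axis spanning -30..30 degrees, multiples of 20
# are kept and formatted with hemisphere suffixes:
#     create_labels("latitude", list(range(-30, 31)))
#     -> ([-20, 0, 20], ['20$^\circ$S', 'eq', '20$^\circ$N'])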
# Subdivide each interval of the labelbar into evenly spaced contour levels.
def create_levels(labelbar):
diff = round(float(labelbar[1] - labelbar[0]), 2)
if diff in [0.3, 0.6, 0.9] or diff % 3 == 0:
mult = 3
elif diff in [0.1, 0.2, 0.4, 0.8, 1.0, 2, 4, 8, 10, 20, 40, 60, 80, 100]:
mult = 4
elif diff in [0.5, 5, 25]:
mult = 5
else:
mult = 6
delta = float(diff) / mult
return [round(kk + jj * delta, 2) for kk in labelbar[:-1] for jj in range(mult)] + [labelbar[-1]]
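# Worked example: a labelbar step of 0.5 is split into mult=5 levels of width 0.1:
#     create_levels([0, 0.5, 1.0])
#     -> [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]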
def format_metric(metric_type, metric_value, metric_units):
if metric_type in ["CORR", "RMSE"]:
mytext = deepcopy(metric_type)
else:
if metric_type == "difference":
mytext = "model-ref"
elif metric_type == "ratio":
mytext = r"$\frac{model}{ref}$"
elif metric_type == "relative_difference":
mytext = r"$\frac{model-ref}{ref}$"
else:
mytext = r"$abs\left(\frac{model-ref}{ref}\right)$"
if metric_value is not None:
return mytext + ": " + "{0:.2f}".format(metric_value) + " " + metric_units
else:
return None
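# Worked examples:
#     format_metric("difference", 0.25, "C")  ->  'model-ref: 0.25 C'
#     format_metric("CORR", None, "")         ->  None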
def get_reference(metric_collection, metric):
if metric_collection in ["ENSO_tel"] and "Map" in metric:
my_met = metric.replace("Corr", "").replace("Rmse", "").replace("Std", "")
else:
my_met = deepcopy(metric)
return plot_param(metric_collection, my_met)['metric_reference']
def minimaxi(tab):
tmp = [my_mask(tmp, remove_masked=True) for tmp in tab]
tmp = [tt.min() for tt in tmp] + [tt.max() for tt in tmp]
return min(tmp), max(tmp)
# Compute rounded plot bounds and tick labels spanning the data in tab.
def minmax_plot(tab, metric=False):
# define minimum and maximum
mini, maxi = minimaxi(tab)
if mini == maxi or abs(maxi-mini)/float(abs(maxi+mini))<1e-2:
tt = max(abs(mini), abs(maxi))/10.
tmp = int(str("%.e" % tt)[3:])
# All sign cases reduce to the same adjustment, so no branching is needed here.
tmp = 10**-tmp if tt < 1 else 10**tmp
mini = 0 if mini > 0 and (mini-tmp)<0 else mini - tmp
maxi = 0 if maxi < 0 and (maxi+tmp)>0 else maxi + tmp
if mini < 0 and maxi > 0:
locmaxi = max([abs(mini), abs(maxi)])
locmini = -deepcopy(locmaxi)
else:
locmini, locmaxi = deepcopy(mini), deepcopy(maxi)
# find the power of ten to get an interval between 1 and 10
mult = pow(10, int(str("%e" % abs(locmaxi - locmini)).split('e')[1]))
locmini, locmaxi = int(MATHfloor(float(locmini) / mult)), int(MATHceil(float(locmaxi) / mult))
if locmaxi == 2 and maxi < 15 and mult == 10 and abs(locmini) != locmaxi:
locmini, locmaxi = 0, 15
mult = 1.
scalmini, scalemaxi = mini / mult, maxi / mult
interval = locmaxi - locmini
listbase = list(NUMPYaround([ii*10**exp for exp in range(-1, 1) for ii in range(1, 6)], decimals=1))
listbase = listbase + listbase
listmult = [3] * int(len(listbase)/2) + [4] * int(len(listbase)/2)
list1 = list(NUMPYaround([listbase[ii] * listmult[ii] for ii in range(len(listbase))], decimals=1))
list2 = list(NUMPYaround([abs(ii - interval) for ii in list1], decimals=1))
interval = list1[list2.index(min(list2))]
base = listbase[list1.index(interval)]
if base * 4.5 < interval:
ii = 1
tmp = sorted(list2)
while base * 4.5 < interval:
interval = list1[list2.index(tmp[ii])]
base = listbase[list1.index(interval)]
ii += 1
if abs(locmini) == locmaxi:
maxi_out = 2 * base
while maxi_out - base > locmaxi:
maxi_out -= base
if metric is True and maxi_out < scalemaxi + base * 0.4:
maxi_out += base
mini_out = -maxi_out
else:
if locmini < 0 and locmaxi <= 0:
locmini, locmaxi = abs(locmaxi), abs(locmini)
sign = -1
else:
sign = 1
half_int = int(round(interval / 2.))
tmp_middle = locmini + half_int
mini_out = max([0, tmp_middle - half_int])
while mini_out > locmini:
mini_out -= base
while mini_out + base < locmini:
mini_out += base
maxi_out = mini_out + 2 * base
while maxi_out < locmaxi:
maxi_out += base
while maxi_out - base > locmaxi:
maxi_out -= base
minmax = list(NUMPYaround(NUMPYarray([mini_out, maxi_out]) * sign, decimals=0).astype(int))
mini_out, maxi_out = min(minmax), max(minmax)
if metric is True:
if maxi_out < scalemaxi + base * 0.4:
maxi_out += base
tick_labels = NUMPYarange(mini_out, maxi_out + base / 2., base)
tick_labels = list(NUMPYaround(tick_labels * mult, decimals=4))
# MIT License
#
# Copyright (c) 2018-2020 Tskit Developers
# Copyright (c) 2016-2017 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the supported topological variations and operations.
"""
import functools
import io
import itertools
import json
import math
import random
import sys
import unittest
import msprime
import numpy as np
import pytest
import _tskit
import tests as tests
import tests.test_wright_fisher as wf
import tests.tsutil as tsutil
import tskit
import tskit.provenance as provenance
def simple_keep_intervals(tables, intervals, simplify=True, record_provenance=True):
"""
Simple Python implementation of keep_intervals.
"""
ts = tables.tree_sequence()
last_stop = 0
for start, stop in intervals:
if start < 0 or stop > ts.sequence_length:
raise ValueError("Slice bounds must be within the existing tree sequence")
if start >= stop:
raise ValueError("Interval error: start must be < stop")
if start < last_stop:
raise ValueError("Intervals must be disjoint")
last_stop = stop
tables.edges.clear()
tables.sites.clear()
tables.mutations.clear()
for edge in ts.edges():
for interval_left, interval_right in intervals:
if not (edge.right <= interval_left or edge.left >= interval_right):
left = max(interval_left, edge.left)
right = min(interval_right, edge.right)
tables.edges.append(edge.replace(left=left, right=right))
for site in ts.sites():
for interval_left, interval_right in intervals:
if interval_left <= site.position < interval_right:
site_id = tables.sites.append(site)
for m in site.mutations:
tables.mutations.append(m.replace(site=site_id, parent=tskit.NULL))
tables.build_index()
tables.compute_mutation_parents()
tables.sort()
if simplify:
tables.simplify(record_provenance=False)
if record_provenance:
parameters = {"command": "keep_intervals", "TODO": "add parameters"}
tables.provenances.add_row(
record=json.dumps(provenance.get_provenance_dict(parameters))
)
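# Minimal usage sketch (not part of the test suite proper): keep only the left
# half of a simulated sequence and return the reduced tree sequence built from
# the modified tables, using the msprime import above.
def _example_simple_keep_intervals():
    ts = msprime.simulate(5, random_seed=1, length=2)
    tables = ts.dump_tables()
    simple_keep_intervals(tables, [(0, 1)], simplify=True, record_provenance=False)
    return tables.tree_sequence()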
def generate_segments(n, sequence_length=100, seed=None):
rng = random.Random(seed)
segs = []
for j in range(n):
left = rng.randint(0, sequence_length - 1)
right = rng.randint(left + 1, sequence_length)
assert left < right
segs.append(tests.Segment(left, right, j))
return segs
def naive_kc_distance(tree1, tree2, lambda_=0):
"""
Returns the Kendall-Colijn distance between the specified pair of trees.
lambda_ determines weight of topology vs branch lengths in calculating
the distance. Set lambda_ at 0 to only consider topology, set at 1 to
only consider branch lengths. See Kendall & Colijn (2016):
https://academic.oup.com/mbe/article/33/10/2735/2925548
"""
samples = tree1.tree_sequence.samples()
if not np.array_equal(samples, tree2.tree_sequence.samples()):
raise ValueError("Trees must have the same samples")
if not len(tree1.roots) == len(tree2.roots) == 1:
raise ValueError("Trees must have one root")
for tree in [tree1, tree2]:
for u in tree.nodes():
if tree.num_children(u) == 1:
raise ValueError("Unary nodes are not supported")
n = samples.shape[0]
N = (n * (n - 1)) // 2
m = [np.zeros(N + n), np.zeros(N + n)]
M = [np.zeros(N + n), np.zeros(N + n)]
for tree_index, tree in enumerate([tree1, tree2]):
for sample in range(n):
m[tree_index][N + sample] = 1
M[tree_index][N + sample] = tree.branch_length(sample)
for n1, n2 in itertools.combinations(range(n), 2):
mrca = tree.mrca(samples[n1], samples[n2])
depth = 0
u = tree.parent(mrca)
while u != tskit.NULL:
depth += 1
u = tree.parent(u)
pair_index = n1 * (n1 - 2 * n + 1) // -2 + n2 - n1 - 1
m[tree_index][pair_index] = depth
M[tree_index][pair_index] = tree.time(tree.root) - tree.time(mrca)
return np.linalg.norm((1 - lambda_) * (m[0] - m[1]) + lambda_ * (M[0] - M[1]))
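# Minimal usage sketch (not part of the test suite proper): compare two
# independent coalescent trees on the same five samples, on topology only
# (lambda_=0) and on branch lengths only (lambda_=1), using the msprime import above.
def _example_naive_kc_distance():
    tree_a = next(msprime.simulate(5, random_seed=1).trees(sample_lists=True))
    tree_b = next(msprime.simulate(5, random_seed=2).trees(sample_lists=True))
    return naive_kc_distance(tree_a, tree_b, 0), naive_kc_distance(tree_a, tree_b, 1)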
class KCVectors:
"""
Manages the two vectors (m and M) of a tree used to compute the
KC distance between trees. For any two samples, u and v,
m and M capture the distance of mrca(u, v) to the root in
number of edges and time, respectively.
See Kendall & Colijn (2016):
https://academic.oup.com/mbe/article/33/10/2735/2925548
"""
def __init__(self, n):
self.n = n
self.N = (self.n * (self.n - 1)) // 2
self.m = np.zeros(self.N + self.n)
self.M = np.zeros(self.N + self.n)
def fill_kc_vectors(tree, kc_vecs):
sample_index_map = np.zeros(tree.tree_sequence.num_nodes)
for j, u in enumerate(tree.tree_sequence.samples()):
sample_index_map[u] = j
for root in tree.roots:
stack = [(root, 0)]
while len(stack) > 0:
u, depth = stack.pop()
if tree.is_sample(u):
time = tree.branch_length(u)
update_kc_vectors_single_leaf(kc_vecs, u, time, sample_index_map)
c1 = tree.left_child(u)
while c1 != tskit.NULL:
stack.append((c1, depth + 1))
c2 = tree.right_sib(c1)
while c2 != tskit.NULL:
update_kc_vectors_all_pairs(
tree, kc_vecs, c1, c2, depth, tree.time(root) - tree.time(u)
)
c2 = tree.right_sib(c2)
c1 = tree.right_sib(c1)
def update_kc_vectors_single_leaf(kc_vecs, u, time, sample_index_map):
u_index = int(sample_index_map[u])
kc_vecs.m[kc_vecs.N + u_index] = 1
kc_vecs.M[kc_vecs.N + u_index] = time
def update_kc_vectors_all_pairs(tree, kc_vecs, c1, c2, depth, time):
s1_index = tree.left_sample(c1)
while True:
s2_index = tree.left_sample(c2)
while True:
update_kc_vectors_pair(kc_vecs, s1_index, s2_index, depth, time)
if s2_index == tree.right_sample(c2):
break
s2_index = tree.next_sample(s2_index)
if s1_index == tree.right_sample(c1):
break
s1_index = tree.next_sample(s1_index)
def update_kc_vectors_pair(kc_vecs, n1, n2, depth, time):
if n1 > n2:
n1, n2 = n2, n1
pair_index = n2 - n1 - 1 + (-1 * n1 * (n1 - 2 * kc_vecs.n + 1)) // 2
kc_vecs.m[pair_index] = depth
kc_vecs.M[pair_index] = time
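# Sketch of the pair_index layout used above (not part of the test suite
# proper): each sample pair (n1 < n2) out of n samples maps to a unique slot in
# [0, n*(n-1)/2), in the same order as itertools.combinations(range(n), 2).
def _example_pair_index_order(n=4):
    # For n=4 this returns [0, 1, 2, 3, 4, 5], one consecutive slot per pair.
    return [
        n2 - n1 - 1 + (-1 * n1 * (n1 - 2 * n + 1)) // 2
        for n1, n2 in itertools.combinations(range(n), 2)
    ]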
def norm_kc_vectors(kc_vecs1, kc_vecs2, lambda_):
vT1 = 0
vT2 = 0
distance_sum = 0
for i in range(kc_vecs1.n + kc_vecs1.N):
vT1 = (kc_vecs1.m[i] * (1 - lambda_)) + (lambda_ * kc_vecs1.M[i])
vT2 = (kc_vecs2.m[i] * (1 - lambda_)) + (lambda_ * kc_vecs2.M[i])
distance_sum += (vT1 - vT2) ** 2
return math.sqrt(distance_sum)
def c_kc_distance(tree1, tree2, lambda_=0):
"""
Simplified version of the naive_kc_distance() function above.
Written without higher-level Python features, to aid writing the C implementation.
"""
samples = tree1.tree_sequence.samples()
if tree1.tree_sequence.num_samples != tree2.tree_sequence.num_samples:
raise ValueError("Trees must have the same samples")
for sample1, sample2 in zip(samples, tree2.tree_sequence.samples()):
if sample1 != sample2:
raise ValueError("Trees must have the same samples")
if not len(tree1.roots) == len(tree2.roots) == 1:
raise ValueError("Trees must have one root")
for tree in [tree1, tree2]:
for u in range(tree.tree_sequence.num_nodes):
left_child = tree.left_child(u)
if left_child != tskit.NULL and left_child == tree.right_child(u):
raise ValueError("Unary nodes are not supported")
n = tree1.tree_sequence.num_samples
vecs1 = KCVectors(n)
fill_kc_vectors(tree1, vecs1)
vecs2 = KCVectors(n)
fill_kc_vectors(tree2, vecs2)
return norm_kc_vectors(vecs1, vecs2, lambda_)
class ExampleTopologyMixin:
"""
Some example topologies for tests cases.
"""
def test_single_coalescent_tree(self):
ts = msprime.simulate(10, random_seed=1, length=10)
self.verify(ts)
def test_coalescent_trees(self):
ts = msprime.simulate(8, recombination_rate=5, random_seed=1, length=2)
assert ts.num_trees > 2
self.verify(ts)
def test_coalescent_trees_internal_samples(self):
ts = msprime.simulate(8, recombination_rate=5, random_seed=10, length=2)
assert ts.num_trees > 2
self.verify(tsutil.jiggle_samples(ts))
def test_coalescent_trees_all_samples(self):
ts = msprime.simulate(8, recombination_rate=5, random_seed=10, length=2)
assert ts.num_trees > 2
tables = ts.dump_tables()
flags = np.zeros_like(tables.nodes.flags) + tskit.NODE_IS_SAMPLE
tables.nodes.flags = flags
self.verify(tables.tree_sequence())
def test_wright_fisher_trees_unsimplified(self):
tables = wf.wf_sim(10, 5, deep_history=False, seed=2)
tables.sort()
ts = tables.tree_sequence()
self.verify(ts)
def test_wright_fisher_trees_simplified(self):
tables = wf.wf_sim(10, 5, deep_history=False, seed=1)
tables.sort()
ts = tables.tree_sequence()
ts = ts.simplify()
self.verify(ts)
def test_wright_fisher_trees_simplified_one_gen(self):
tables = wf.wf_sim(10, 1, deep_history=False, seed=1)
tables.sort()
ts = tables.tree_sequence()
ts = ts.simplify()
self.verify(ts)
def test_nonbinary_trees(self):
demographic_events = [
msprime.SimpleBottleneck(time=1.0, population=0, proportion=0.95)
]
ts = msprime.simulate(
20,
recombination_rate=10,
mutation_rate=5,
demographic_events=demographic_events,
random_seed=7,
)
found = False
for e in ts.edgesets():
if len(e.children) > 2:
found = True
assert found
self.verify(ts)
def test_many_multiroot_trees(self):
ts = msprime.simulate(7, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.decapitate(ts, ts.num_edges // 2)
self.verify(ts)
def test_multiroot_tree(self):
ts = msprime.simulate(15, random_seed=10)
ts = tsutil.decapitate(ts, ts.num_edges // 2)
self.verify(ts)
class TestKCMetric(unittest.TestCase):
"""
Tests on the KC metric distances.
"""
def test_same_tree_zero_distance(self):
for n in range(2, 10):
for seed in range(1, 10):
ts = msprime.simulate(n, random_seed=seed)
tree = next(ts.trees(sample_lists=True))
assert naive_kc_distance(tree, tree) == 0
assert c_kc_distance(tree, tree) == 0
assert tree.kc_distance(tree) == 0
ts = msprime.simulate(n, random_seed=seed)
tree2 = next(ts.trees(sample_lists=True))
assert naive_kc_distance(tree, tree2) == 0
assert c_kc_distance(tree, tree2) == 0
assert tree.kc_distance(tree2) == 0
def test_sample_2_zero_distance(self):
# All trees with 2 leaves must be equal distance from each other.
for seed in range(1, 10):
ts1 = msprime.simulate(2, random_seed=seed)
tree1 = next(ts1.trees(sample_lists=True))
ts2 = msprime.simulate(2, random_seed=seed + 1)
tree2 = next(ts2.trees(sample_lists=True))
assert naive_kc_distance(tree1, tree2, 0) == 0
assert c_kc_distance(tree1, tree2, 0) == 0
assert tree1.kc_distance(tree2, 0) == 0
def test_different_samples_error(self):
tree1 = next(msprime.simulate(10, random_seed=1).trees(sample_lists=True))
tree2 = next(msprime.simulate(2, random_seed=1).trees(sample_lists=True))
with pytest.raises(ValueError):
naive_kc_distance(tree1, tree2)
with pytest.raises(ValueError):
c_kc_distance(tree1, tree2)
with pytest.raises(_tskit.LibraryError):
tree1.kc_distance(tree2)
ts1 = msprime.simulate(10, random_seed=1)
nmap = np.arange(0, ts1.num_nodes)[::-1]
ts2 = tsutil.permute_nodes(ts1, nmap)
tree1 = next(ts1.trees(sample_lists=True))
tree2 = next(ts2.trees(sample_lists=True))
with pytest.raises(ValueError):
naive_kc_distance(tree1, tree2)
with pytest.raises(ValueError):
c_kc_distance(tree1, tree2)
with pytest.raises(_tskit.LibraryError):
tree1.kc_distance(tree2)
unsimplified_ts = msprime.simulate(
10, random_seed=1, recombination_rate=10, record_full_arg=True
)
trees = unsimplified_ts.trees(sample_lists=True)
tree1 = next(trees)
tree2 = next(trees)
with pytest.raises(ValueError):
naive_kc_distance(tree1, tree2)
with pytest.raises(ValueError):
c_kc_distance(tree1, tree2)
with pytest.raises(_tskit.LibraryError):
tree1.kc_distance(tree2)
def validate_trees(self, n):
for seed in range(1, 10):
ts1 = msprime.simulate(n, random_seed=seed)
ts2 = msprime.simulate(n, random_seed=seed + 1)
tree1 = next(ts1.trees(sample_lists=True))
tree2 = next(ts2.trees(sample_lists=True))
kc1 = naive_kc_distance(tree1, tree2)
kc2 = c_kc_distance(tree1, tree2)
kc3 = tree1.kc_distance(tree2)
self.assertAlmostEqual(kc1, kc2)
self.assertAlmostEqual(kc1, kc3)
self.assertAlmostEqual(kc1, naive_kc_distance(tree2, tree1))
self.assertAlmostEqual(kc2, c_kc_distance(tree2, tree1))
self.assertAlmostEqual(kc3, tree2.kc_distance(tree1))
def test_sample_3(self):
self.validate_trees(3)
def test_sample_4(self):
self.validate_trees(4)
def test_sample_10(self):
self.validate_trees(10)
def test_sample_20(self):
self.validate_trees(20)
def validate_nonbinary_trees(self, n):
demographic_events = [
msprime.SimpleBottleneck(0.02, 0, proportion=0.25),
msprime.SimpleBottleneck(0.2, 0, proportion=1),
]
for seed in range(1, 10):
ts = msprime.simulate(
n, random_seed=seed, demographic_events=demographic_events
)
# Check if this is really nonbinary
found = False
for edgeset in ts.edgesets():
if len(edgeset.children) > 2:
found = True
break
assert found
tree1 = next(ts.trees(sample_lists=True))
ts = msprime.simulate(
n, random_seed=seed + 1, demographic_events=demographic_events
)
tree2 = next(ts.trees(sample_lists=True))
self.do_kc_distance(tree1, tree2)
# compare to a binary tree also
ts = msprime.simulate(n, random_seed=seed + 1)
tree2 = next(ts.trees(sample_lists=True))
self.do_kc_distance(tree1, tree2)
def test_non_binary_sample_10(self):
self.validate_nonbinary_trees(10)
def test_non_binary_sample_20(self):
self.validate_nonbinary_trees(20)
def test_non_binary_sample_30(self):
self.validate_nonbinary_trees(30)
def verify_result(self, tree1, tree2, lambda_, result, places=None):
kc1 = naive_kc_distance(tree1, tree2, lambda_)
kc2 = c_kc_distance(tree1, tree2, lambda_)
kc3 = tree1.kc_distance(tree2, lambda_)
self.assertAlmostEqual(kc1, result, places=places)
self.assertAlmostEqual(kc2, result, places=places)
self.assertAlmostEqual(kc3, result, places=places)
kc1 = naive_kc_distance(tree2, tree1, lambda_)
kc2 = c_kc_distance(tree2, tree1, lambda_)
kc3 = tree2.kc_distance(tree1, lambda_)
self.assertAlmostEqual(kc1, result, places=places)
self.assertAlmostEqual(kc2, result, places=places)
self.assertAlmostEqual(kc3, result, places=places)
def test_known_kc_sample_3(self):
# Test with hardcoded known values
tables_1 = tskit.TableCollection(sequence_length=1.0)
tables_2 = tskit.TableCollection(sequence_length=1.0)
# Nodes
sv = [True, True, True, False, False]
tv_1 = [0.0, 0.0, 0.0, 2.0, 3.0]
tv_2 = [0.0, 0.0, 0.0, 4.0, 6.0]
for is_sample, t1, t2 in zip(sv, tv_1, tv_2):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_1.nodes.add_row(flags=flags, time=t1)
tables_2.nodes.add_row(flags=flags, time=t2)
# Edges
lv = [0.0, 0.0, 0.0, 0.0]
rv = [1.0, 1.0, 1.0, 1.0]
pv = [3, 3, 4, 4]
cv = [0, 1, 2, 3]
for left, right, p, c in zip(lv, rv, pv, cv):
tables_1.edges.add_row(left=left, right=right, parent=p, child=c)
tables_2.edges.add_row(left=left, right=right, parent=p, child=c)
tree_1 = next(tables_1.tree_sequence().trees(sample_lists=True))
tree_2 = next(tables_2.tree_sequence().trees(sample_lists=True))
self.verify_result(tree_1, tree_2, 0, 0)
self.verify_result(tree_1, tree_2, 1, 4.243, places=3)
def test_10_samples(self):
nodes_1 = io.StringIO(
"""\
id is_sample time population individual metadata
0 1 0.000000 0 -1 b''
1 1 0.000000 0 -1 b''
2 1 0.000000 0 -1 b''
3 1 0.000000 0 -1 b''
4 1 0.000000 0 -1 b''
5 1 0.000000 0 -1 b''
6 1 0.000000 0 -1 b''
7 1 0.000000 0 -1 b''
8 1 0.000000 0 -1 b''
9 1 0.000000 0 -1 b''
10 0 0.047734 0 -1 b''
11 0 0.061603 0 -1 b''
12 0 0.189503 0 -1 b''
13 0 0.275885 0 -1 b''
14 0 0.518301 0 -1 b''
15 0 0.543143 0 -1 b''
16 0 0.865193 0 -1 b''
17 0 1.643658 0 -1 b''
18 0 2.942350 0 -1 b''
"""
)
edges_1 = io.StringIO(
"""\
left right parent child
0.000000 10000.000000 10 0
0.000000 10000.000000 10 2
0.000000 10000.000000 11 9
0.000000 10000.000000 11 10
0.000000 10000.000000 12 3
0.000000 10000.000000 12 7
0.000000 10000.000000 13 5
0.000000 10000.000000 13 11
0.000000 10000.000000 14 1
0.000000 10000.000000 14 8
0.000000 10000.000000 15 4
0.000000 10000.000000 15 14
0.000000 10000.000000 16 13
0.000000 10000.000000 16 15
0.000000 10000.000000 17 6
0.000000 10000.000000 17 12
0.000000 10000.000000 18 16
0.000000 10000.000000 18 17
"""
)
ts_1 = tskit.load_text(
nodes_1, edges_1, sequence_length=10000, strict=False, base64_metadata=False
)
nodes_2 = io.StringIO(
"""\
id is_sample time population individual metadata
0 1 0.000000 0 -1 b''
1 1 0.000000 0 -1 b''
2 1 0.000000 0 -1 b''
3 1 0.000000 0 -1 b''
4 1 0.000000 0 -1 b''
5 1 0.000000 0 -1 b''
6 1 0.000000 0 -1 b''
7 1 0.000000 0 -1 b''
8 1 0.000000 0 -1 b''
9 1 0.000000 0 -1 b''
10 0 0.210194 0 -1 b''
11 0 0.212217 0 -1 b''
12 0 0.223341 0 -1 b''
13 0 0.272703 0 -1 b''
14 0 0.443553 0 -1 b''
15 0 0.491653 0 -1 b''
16 0 0.729369 0 -1 b''
17 0 1.604113 0 -1 b''
18 0 1.896332 0 -1 b''
"""
)
edges_2 = io.StringIO(
"""\
left right parent child
0.000000 10000.000000 10 5
0.000000 10000.000000 10 7
0.000000 10000.000000 11 3
0.000000 10000.000000 11 4
0.000000 10000.000000 12 6
0.000000 10000.000000 12 9
0.000000 10000.000000 13 10
0.000000 10000.000000 13 12
0.000000 10000.000000 14 8
0.000000 10000.000000 14 11
0.000000 10000.000000 15 1
0.000000 10000.000000 15 2
0.000000 10000.000000 16 13
0.000000 10000.000000 16 14
0.000000 10000.000000 17 0
0.000000 10000.000000 17 16
0.000000 10000.000000 18 15
0.000000 10000.000000 18 17
"""
)
ts_2 = tskit.load_text(
nodes_2, edges_2, sequence_length=10000, strict=False, base64_metadata=False
)
tree_1 = next(ts_1.trees(sample_lists=True))
tree_2 = next(ts_2.trees(sample_lists=True))
self.verify_result(tree_1, tree_2, 0, 12.85, places=2)
self.verify_result(tree_1, tree_2, 1, 10.64, places=2)
def test_15_samples(self):
nodes_1 = io.StringIO(
"""\
id is_sample time population individual metadata
0 1 0.000000 0 -1
1 1 0.000000 0 -1
2 1 0.000000 0 -1
3 1 0.000000 0 -1
4 1 0.000000 0 -1
5 1 0.000000 0 -1
6 1 0.000000 0 -1
7 1 0.000000 0 -1
8 1 0.000000 0 -1
9 1 0.000000 0 -1
10 1 0.000000 0 -1
11 1 0.000000 0 -1
12 1 0.000000 0 -1
13 1 0.000000 0 -1
14 1 0.000000 0 -1
15 0 0.026043 0 -1
16 0 0.032662 0 -1
17 0 0.072032 0 -1
18 0 0.086792 0 -1
19 0 0.130699 0 -1
20 0 0.177640 0 -1
21 0 0.199800 0 -1
22 0 0.236391 0 -1
23 0 0.342445 0 -1
24 0 0.380356 0 -1
25 0 0.438502 0 -1
26 0 0.525632 0 -1
27 0 1.180078 0 -1
28 0 2.548099 0 -1
"""
)
edges_1 = io.StringIO(
"""\
left right parent child
0.000000 10000.000000 15 6
0.000000 10000.000000 15 13
0.000000 10000.000000 16 1
0.000000 10000.000000 16 4
0.000000 10000.000000 17 0
0.000000 10000.000000 17 7
0.000000 10000.000000 18 2
0.000000 10000.000000 18 17
0.000000 10000.000000 19 5
0.000000 10000.000000 19 9
0.000000 10000.000000 20 12
0.000000 10000.000000 20 15
0.000000 10000.000000 21 8
0.000000 10000.000000 21 20
0.000000 10000.000000 22 11
0.000000 10000.000000 22 21
0.000000 10000.000000 23 10
0.000000 10000.000000 23 22
0.000000 10000.000000 24 14
0.000000 10000.000000 24 16
0.000000 10000.000000 25 18
0.000000 10000.000000 25 19
0.000000 10000.000000 26 23
0.000000 10000.000000 26 24
0.000000 10000.000000 27 25
0.000000 10000.000000 27 26
0.000000 10000.000000 28 3
0.000000 10000.000000 28 27
"""
)
ts_1 = tskit.load_text(
nodes_1, edges_1, sequence_length=10000, strict=False, base64_metadata=False
)
nodes_2 = io.StringIO(
"""\
id is_sample time population individual metadata
0 1 0.000000 0 -1
1 1 0.000000 0 -1
2 1 0.000000 0 -1
3 1 0.000000 0 -1
4 1 0.000000 0 -1
5 1 0.000000 0 -1
6 1 0.000000 0 -1
7 1 0.000000 0 -1
8 1 0.000000 0 -1
9 1 0.000000 0 -1
10 1 0.000000 0 -1
11 1 0.000000 0 -1
12 1 0.000000 0 -1
13 1 0.000000 0 -1
14 1 0.000000 0 -1
15 0 0.011443 0 -1
16 0 0.055694 0 -1
17 0 0.061677 0 -1
18 0 0.063416 0 -1
19 0 0.163014 0 -1
20 0 0.223445 0 -1
21 0 0.251724 0 -1
22 0 0.268749 0 -1
23 0 0.352039 0 -1
24 0 0.356134 0 -1
25 0 0.399454 0 -1
26 0 0.409174 0 -1
27 0 2.090839 0 -1
28 0 3.772716 0 -1
"""
)
edges_2 = io.StringIO(
"""\
left right parent child
0.000000 10000.000000 15 6
0.000000 10000.000000 15 8
0.000000 10000.000000 16 9
0.000000 10000.000000 16 12
0.000000 10000.000000 17 3
0.000000 10000.000000 17 4
0.000000 10000.000000 18 13
0.000000 10000.000000 18 16
0.000000 10000.000000 19 2
0.000000 10000.000000 19 11
0.000000 10000.000000 20 1
0.000000 10000.000000 20 17
0.000000 10000.000000 21 0
0.000000 10000.000000 21 18
0.000000 10000.000000 22 10
0.000000 10000.000000 22 15
0.000000 10000.000000 23 14
0.000000 10000.000000 23 21
0.000000 10000.000000 24 5
0.000000 10000.000000 24 7
0.000000 10000.000000 25 19
0.000000 10000.000000 25 22
0.000000 10000.000000 26 24
0.000000 10000.000000 26 25
0.000000 10000.000000 27 20
0.000000 10000.000000 27 23
0.000000 10000.000000 28 26
0.000000 10000.000000 28 27
"""
)
ts_2 = tskit.load_text(
nodes_2, edges_2, sequence_length=10000, strict=False, base64_metadata=False
)
tree_1 = next(ts_1.trees(sample_lists=True))
tree_2 = next(ts_2.trees(sample_lists=True))
self.verify_result(tree_1, tree_2, 0, 19.95, places=2)
self.verify_result(tree_1, tree_2, 1, 17.74, places=2)
def test_nonbinary_trees(self):
nodes_1 = io.StringIO(
"""\
id is_sample time population individual metadata
0 1 0.000000 -1 -1 e30=
1 1 0.000000 -1 -1 e30=
2 1 0.000000 -1 -1 e30=
3 1 0.000000 -1 -1 e30=
4 1 0.000000 -1 -1 e30=
5 1 0.000000 -1 -1 e30=
6 1 0.000000 -1 -1 e30=
7 1 0.000000 -1 -1 e30=
8 1 0.000000 -1 -1 e30=
9 1 0.000000 -1 -1
10 1 0.000000 -1 -1
11 1 0.000000 -1 -1
12 1 0.000000 -1 -1
13 1 0.000000 -1 -1
14 1 0.000000 -1 -1
15 0 2.000000 -1 -1
16 0 4.000000 -1 -1
17 0 11.000000 -1 -1
18 0 12.000000 -1 -1
"""
)
edges_1 = io.StringIO(
"""\
left right parent child
0.000000 10000.000000 15 8
0.000000 10000.000000 15 10
0.000000 10000.000000 16 6
0.000000 10000.000000 16 12
0.000000 10000.000000 16 15
0.000000 10000.000000 17 0
0.000000 10000.000000 17 1
0.000000 10000.000000 17 2
0.000000 10000.000000 17 3
0.000000 10000.000000 17 4
0.000000 10000.000000 17 5
0.000000 10000.000000 17 7
0.000000 10000.000000 17 9
0.000000 10000.000000 17 11
0.000000 10000.000000 17 13
0.000000 10000.000000 17 14
0.000000 10000.000000 18 16
0.000000 10000.000000 18 17
"""
)
ts_1 = tskit.load_text(
nodes_1, edges_1, sequence_length=10000, strict=False, base64_metadata=False
)
nodes_2 = io.StringIO(
"""\
id is_sample time population individual metadata
0 1 0.000000 -1 -1 e30=
1 1 0.000000 -1 -1 e30=
2 1 0.000000 -1 -1 e30=
3 1 0.000000 -1 -1 e30=
4 1 0.000000 -1 -1 e30=
5 1 0.000000 -1 -1 e30=
6 1 0.000000 -1 -1 e30=
7 1 0.000000 -1 -1 e30=
8 1 0.000000 -1 -1 e30=
9 1 0.000000 -1 -1 e30=
10 1 0.000000 -1 -1 e30=
11 1 0.000000 -1 -1 e30=
12 1 0.000000 -1 -1 e30=
13 1 0.000000 -1 -1 e30=
14 1 0.000000 -1 -1 e30=
15 0 2.000000 -1 -1
16 0 2.000000 -1 -1
17 0 3.000000 -1 -1
18 0 3.000000 -1 -1
19 0 4.000000 -1 -1
20 0 4.000000 -1 -1
21 0 11.000000 -1 -1
22 0 12.000000 -1 -1
"""
)
edges_2 = io.StringIO(
"""\
left right parent child
0.000000 10000.000000 15 12
0.000000 10000.000000 15 14
0.000000 10000.000000 16 0
0.000000 10000.000000 16 7
0.000000 10000.000000 17 6
0.000000 10000.000000 17 15
0.000000 10000.000000 18 4
0.000000 10000.000000 18 8
0.000000 10000.000000 18 13
0.000000 10000.000000 19 11
0.000000 10000.000000 19 18
0.000000 10000.000000 20 1
0.000000 10000.000000 20 5
0.000000 10000.000000 20 9
0.000000 10000.000000 20 10
0.000000 10000.000000 21 2
0.000000 10000.000000 21 3
0.000000 10000.000000 21 16
0.000000 10000.000000 21 17
0.000000 10000.000000 21 20
0.000000 10000.000000 22 19
0.000000 10000.000000 22 21
"""
)
ts_2 = tskit.load_text(
nodes_2, edges_2, sequence_length=10000, strict=False, base64_metadata=False
)
tree_1 = next(ts_1.trees(sample_lists=True))
tree_2 = next(ts_2.trees(sample_lists=True))
self.verify_result(tree_1, tree_2, 0, 9.434, places=3)
self.verify_result(tree_1, tree_2, 1, 44, places=1)
def test_multiple_roots(self):
tables = tskit.TableCollection(sequence_length=1.0)
# Nodes
sv = [True, True]
tv = [0.0, 0.0]
for is_sample, t in zip(sv, tv):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables.nodes.add_row(flags=flags, time=t)
ts = tables.tree_sequence()
with pytest.raises(ValueError):
naive_kc_distance(ts.first(), ts.first(), 0)
with pytest.raises(ValueError):
c_kc_distance(ts.first(), ts.first(), 0)
with pytest.raises(_tskit.LibraryError):
ts.first().kc_distance(ts.first(), 0)
def do_kc_distance(self, t1, t2, lambda_=0):
kc1 = naive_kc_distance(t1, t2, lambda_)
kc2 = c_kc_distance(t1, t2, lambda_)
kc3 = t1.kc_distance(t2, lambda_)
self.assertAlmostEqual(kc1, kc2)
self.assertAlmostEqual(kc1, kc3)
kc1 = naive_kc_distance(t2, t1, lambda_)
kc2 = c_kc_distance(t2, t1, lambda_)
kc3 = t2.kc_distance(t1, lambda_)
self.assertAlmostEqual(kc1, kc2)
self.assertAlmostEqual(kc1, kc3)
def test_non_initial_samples(self):
ts1 = msprime.simulate(10, random_seed=1)
nmap = np.arange(0, ts1.num_nodes)[::-1]
ts2 = tsutil.permute_nodes(ts1, nmap)
t1 = next(ts2.trees(sample_lists=True))
t2 = next(ts2.trees(sample_lists=True))
self.do_kc_distance(t1, t2)
def test_internal_samples(self):
ts1 = msprime.simulate(10, random_seed=1)
ts2 = tsutil.jiggle_samples(ts1)
t1 = next(ts2.trees(sample_lists=True))
t2 = next(ts2.trees(sample_lists=True))
naive_kc_distance(t1, t2)
c_kc_distance(t1, t2)
t1.kc_distance(t2)
def test_root_sample(self):
tables1 = tskit.TableCollection(sequence_length=1.0)
tables1.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
only_root = next(tables1.tree_sequence().trees(sample_lists=True))
assert only_root.kc_distance(only_root) == 0
assert only_root.kc_distance(only_root, lambda_=1) == 0
def test_non_sample_leaf(self):
tables = tskit.TableCollection(sequence_length=1.0)
c1 = tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
c2 = tables.nodes.add_row(time=0)
p = tables.nodes.add_row(time=1)
tables.edges.add_row(left=0, right=1, parent=p, child=c1)
tables.edges.add_row(left=0, right=1, parent=p, child=c2)
ts = tables.tree_sequence()
tree = next(ts.trees(sample_lists=True))
assert ts.kc_distance(ts) == 0
assert tree.kc_distance(tree) == 0
# mirrored
tables = tskit.TableCollection(sequence_length=1.0)
c1 = tables.nodes.add_row(time=0)
c2 = tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
p = tables.nodes.add_row(time=1)
tables.edges.add_row(left=0, right=1, parent=p, child=c1)
tables.edges.add_row(left=0, right=1, parent=p, child=c2)
ts = tables.tree_sequence()
tree = next(ts.trees(sample_lists=True))
assert ts.kc_distance(ts) == 0
assert tree.kc_distance(tree) == 0
def test_ignores_subtrees_with_no_samples(self):
nodes_1 = io.StringIO(
"""\
id is_sample time population individual metadata
0 0 0.000000 0 -1
1 0 0.000000 0 -1
2 0 0.000000 0 -1
3 1 0.000000 0 -1
4 0 0.000000 0 -1
5 0 0.000000 0 -1
6 1 1.000000 0 -1
7 1 2.000000 0 -1
8 0 2.000000 0 -1
9 0 3.000000 0 -1
"""
)
edges_1 = io.StringIO(
"""\
left right parent child
0.000000 1.000000 6 0
0.000000 1.000000 6 1
0.000000 1.000000 7 2
0.000000 1.000000 7 6
0.000000 1.000000 8 4
0.000000 1.000000 8 5
0.000000 1.000000 9 3
0.000000 1.000000 9 7
0.000000 1.000000 9 8
"""
)
redundant = tskit.load_text(
nodes_1, edges_1, sequence_length=1, strict=False, base64_metadata=False
)
nodes_2 = io.StringIO(
"""\
id is_sample time population individual metadata
0 0 0.000000 0 -1
1 0 0.000000 0 -1
2 0 0.000000 0 -1
3 1 0.000000 0 -1
4 0 0.000000 0 -1
5 0 0.000000 0 -1
6 1 1.000000 0 -1
7 1 2.000000 0 -1
8 0 2.000000 0 -1
9 0 3.000000 0 -1
"""
)
edges_2 = io.StringIO(
"""\
left right parent child
0.000000 1.000000 7 2
0.000000 1.000000 7 6
0.000000 1.000000 9 3
0.000000 1.000000 9 7
"""
)
simplified = tskit.load_text(
nodes_2, edges_2, sequence_length=1, strict=False, base64_metadata=False
)
assert redundant.kc_distance(simplified, 0) == 0
assert redundant.kc_distance(simplified, 1) == 0
def ts_kc_distance(ts1, ts2, lambda_=0):
check_kc_tree_sequence_inputs(ts1, ts2)
total = 0
left = 0
tree1_iter = ts1.trees(sample_lists=True)
tree1 = next(tree1_iter)
for tree2 in ts2.trees(sample_lists=True):
while tree1.interval.right < tree2.interval.right:
span = tree1.interval.right - left
total += tree1.kc_distance(tree2, lambda_) * span
left = tree1.interval.right
tree1 = next(tree1_iter)
span = tree2.interval.right - left
left = tree2.interval.right
total += tree1.kc_distance(tree2, lambda_) * span
return total / ts1.sequence_length
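# Minimal usage sketch (not part of the test suite proper): the tree-sequence KC
# distance averages the per-tree KC distances of the overlapping trees, weighted
# by the span of sequence each pair covers, using the msprime import above.
def _example_ts_kc_distance():
    ts_a = msprime.simulate(5, recombination_rate=1, random_seed=1)
    ts_b = msprime.simulate(5, recombination_rate=1, random_seed=2)
    return ts_kc_distance(ts_a, ts_b, lambda_=0)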
def ts_kc_distance_incremental(ts1, ts2, lambda_=0):
check_kc_tree_sequence_inputs(ts1, ts2)
sample_maps = [dict(), dict()]
for i, ts in enumerate([ts1, ts2]):
for j, u in enumerate(ts.samples()):
sample_maps[i][u] = j
total = 0
left = 0
t1_vecs = KCVectors(ts1.num_samples)
t2_vecs = KCVectors(ts2.num_samples)
t1_depths = np.zeros(ts1.num_nodes)
t2_depths = np.zeros(ts2.num_nodes)
edge_diffs_iter_1 = ts1.edge_diffs()
tree_iter_1 = ts1.trees(sample_lists=True)
t1, t1_diffs = next(tree_iter_1), next(edge_diffs_iter_1)
update_kc_incremental(t1, t1_vecs, t1_diffs, sample_maps[0], t1_depths)
for t2, t2_diffs in zip(ts2.trees(sample_lists=True), ts2.edge_diffs()):
update_kc_incremental(t2, t2_vecs, t2_diffs, sample_maps[1], t2_depths)
while t1_diffs[0][1] < t2_diffs[0][1]:
span = t1_diffs[0][1] - left
total += norm_kc_vectors(t1_vecs, t2_vecs, lambda_) * span
left = t1_diffs[0][1]
t1, t1_diffs = next(tree_iter_1), next(edge_diffs_iter_1)
update_kc_incremental(t1, t1_vecs, t1_diffs, sample_maps[0], t1_depths)
span = t2_diffs[0][1] - left
left = t2_diffs[0][1]
total += norm_kc_vectors(t1_vecs, t2_vecs, lambda_) * span
return total / ts1.sequence_length
# tree is the result of removing/inserting the edges in edge_diffs
def update_kc_incremental(tree, kc, edge_diffs, sample_index_map, depths):
_, edges_out, edges_in = edge_diffs
# Update state of detached subtrees.
for e in reversed(edges_out):
u = e.child
depths[u] = 0
# Only update detached subtrees that remain detached. Otherwise,
# they must be reattached by an incoming edge and will be
# updated below. We're looking into the future here by seeing
# that u remains detached after all the incoming edges are
# inserted into `tree`.
if tree.parent(u) == tskit.NULL:
update_kc_subtree_state(tree, kc, u, sample_index_map, depths)
# Propagate state change down into reattached subtrees.
for e in reversed(edges_in):
u = e.child
assert depths[u] == 0
depths[u] = depths[e.parent] + 1
update_kc_subtree_state(tree, kc, u, sample_index_map, depths)
# The per-leaf elements of KC only change when the edge directly
# above the leaf changes, so are handled separately from the
# propagated state used for leaf-pair elements.
if tree.is_leaf(u):
time = tree.branch_length(u)
update_kc_vectors_single_leaf(kc, u, time, sample_index_map)
def update_kc_subtree_state(tree, kc, u, sample_index_map, depths):
"""
Update the depths of the nodes in this subtree. When a leaf is hit,
update the KC vector elements associated with that leaf.
"""
stack = [u]
while len(stack) > 0:
v = stack.pop()
if tree.is_leaf(v):
update_kc_pairs_with_leaf(tree, kc, v, sample_index_map, depths)
else:
c = tree.left_child(v)
while c != -1:
# Terminate iteration at nodes that are currently considered
# roots by the edge diffs. Nodes with a depth of 0 are
# temporary root nodes made by breaking an outgoing edge
# that have yet to be inserted by a later incoming edge.
if depths[c] != 0:
depths[c] = depths[v] + 1
stack.append(c)
c = tree.right_sib(c)
def update_kc_pairs_with_leaf(tree, kc, leaf, sample_index_map, depths):
"""
Perform an upward traversal from `leaf` to the root, updating the KC
vector elements for pairs of `leaf` with every other leaf in the tree.
"""
root_time = tree.time(tree.root)
p = tree.parent(leaf)
c = leaf
while p != -1:
time = root_time - tree.time(p)
depth = depths[p]
for sibling in tree.children(p):
if sibling != c:
update_kc_vectors_all_pairs(tree, kc, leaf, sibling, depth, time)
c, p = p, tree.parent(p)
def check_kc_tree_sequence_inputs(ts1, ts2):
if not np.array_equal(ts1.samples(), ts2.samples()):
raise ValueError("Trees must have the same samples")
if ts1.sequence_length != ts2.sequence_length:
raise ValueError("Can't compare with sequences of different lengths")
tree1_iter = ts1.trees(sample_lists=True)
tree1 = next(tree1_iter)
for tree2 in ts2.trees(sample_lists=True):
while tree1.interval.right < tree2.interval.right:
check_kc_tree_inputs(tree1, tree2)
tree1 = next(tree1_iter)
check_kc_tree_inputs(tree1, tree2)
def check_kc_tree_inputs(tree1, tree2):
if not len(tree1.roots) == len(tree2.roots) == 1:
raise ValueError("Trees must have one root")
for tree in [tree1, tree2]:
for u in tree.nodes():
if tree.num_children(u) == 1:
raise ValueError("Unary nodes are not supported")
class TestKCSequenceMetric(unittest.TestCase):
"""
Tests the KC Metric on a tree sequence.
"""
def test_0_distance_from_self(self):
ts = msprime.simulate(10)
assert ts_kc_distance(ts, ts) == 0
def verify_errors(self, ts1, ts2):
with pytest.raises(ValueError):
ts_kc_distance(ts1, ts2)
with pytest.raises(ValueError):
ts_kc_distance_incremental(ts1, ts2)
with pytest.raises(_tskit.LibraryError):
ts1.kc_distance(ts2)
def test_errors_diff_seq_length(self):
ts1 = msprime.simulate(10, length=1)
ts2 = msprime.simulate(10, length=2)
self.verify_errors(ts1, ts2)
def test_errors_diff_num_samples(self):
ts1 = msprime.simulate(10, length=1)
ts2 = msprime.simulate(12, length=2)
self.verify_errors(ts1, ts2)
def test_errors_different_sample_lists(self):
tables_1 = tskit.TableCollection(sequence_length=2.0)
tables_2 = tskit.TableCollection(sequence_length=2.0)
sv1 = [True, True, True, False, False]
tv1 = [0.0, 0.0, 0.0, 1.0, 2.0]
sv2 = [True, True, False, False, True]
tv2 = [0.0, 0.0, 1.0, 2.0, 0.0]
for is_sample, t in zip(sv1, tv1):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_1.nodes.add_row(flags=flags, time=t)
for is_sample, t in zip(sv2, tv2):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_2.nodes.add_row(flags=flags, time=t)
lv = [0.0, 0.0, 0.0, 0.0]
rv = [1.0, 1.0, 1.0, 1.0]
pv1 = [3, 3, 4, 4]
cv1 = [0, 1, 2, 3]
for left, right, p, c in zip(lv, rv, pv1, cv1):
tables_1.edges.add_row(left=left, right=right, parent=p, child=c)
pv2 = [2, 2, 3, 3]
cv2 = [0, 1, 2, 4]
for left, right, p, c in zip(lv, rv, pv2, cv2):
tables_2.edges.add_row(left=left, right=right, parent=p, child=c)
ts1 = tables_1.tree_sequence()
ts2 = tables_2.tree_sequence()
self.verify_errors(ts1, ts2)
unsimplified_ts = msprime.simulate(
10, random_seed=1, recombination_rate=10, record_full_arg=True
)
self.verify_errors(unsimplified_ts, unsimplified_ts)
def test_errors_unary_nodes(self):
tables = tskit.TableCollection(sequence_length=2.0)
sv = [True, False, False]
tv = [0.0, 1.0, 2.0]
for is_sample, t in zip(sv, tv):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables.nodes.add_row(flags=flags, time=t)
lv = [0.0, 0.0, 0.0]
rv = [1.0, 1.0, 1.0]
pv = [1, 2]
cv = [0, 1]
for left, right, p, c in zip(lv, rv, pv, cv):
tables.edges.add_row(left=left, right=right, parent=p, child=c)
ts = tables.tree_sequence()
self.verify_errors(ts, ts)
def test_errors_different_samples(self):
ts1 = msprime.simulate(10, random_seed=1)
ts2 = tsutil.jiggle_samples(ts1)
self.verify_errors(ts1, ts2)
def verify_result(self, ts1, ts2, lambda_, result, places=None):
kc1 = ts_kc_distance(ts1, ts2, lambda_)
kc2 = ts_kc_distance_incremental(ts1, ts2, lambda_)
kc3 = ts1.kc_distance(ts2, lambda_)
self.assertAlmostEqual(kc1, result, places=places)
self.assertAlmostEqual(kc2, result, places=places)
self.assertAlmostEqual(kc3, result, places=places)
kc1 = ts_kc_distance(ts2, ts1, lambda_)
kc2 = ts_kc_distance_incremental(ts2, ts1, lambda_)
kc3 = ts2.kc_distance(ts1, lambda_)
self.assertAlmostEqual(kc1, result, places=places)
self.assertAlmostEqual(kc2, result, places=places)
self.assertAlmostEqual(kc3, result, places=places)
def verify_same_kc(self, ts1, ts2, lambda_=0):
kc1 = ts_kc_distance(ts1, ts2, lambda_)
kc2 = ts_kc_distance_incremental(ts1, ts2, lambda_)
kc3 = ts1.kc_distance(ts2, lambda_)
self.assertAlmostEqual(kc1, kc2)
self.assertAlmostEqual(kc2, kc3)
kc1 = ts_kc_distance(ts2, ts1, lambda_)
kc2 = ts_kc_distance_incremental(ts2, ts1, lambda_)
kc3 = ts2.kc_distance(ts1, lambda_)
self.assertAlmostEqual(kc1, kc2)
self.assertAlmostEqual(kc2, kc3)
def validate_trees(self, n):
for seed in range(1, 10):
ts1 = msprime.simulate(n, random_seed=seed, recombination_rate=1)
ts2 = msprime.simulate(n, random_seed=seed + 1, recombination_rate=1)
self.verify_same_kc(ts2, ts1)
self.verify_same_kc(ts1, ts2)
self.verify_same_kc(ts1, ts1) # Test sequences with equal breakpoints
def test_sample_5(self):
self.validate_trees(5)
def test_sample_10(self):
self.validate_trees(10)
def test_sample_20(self):
self.validate_trees(20)
def validate_nonbinary_trees(self, n):
demographic_events = [
msprime.SimpleBottleneck(0.02, 0, proportion=0.25),
msprime.SimpleBottleneck(0.2, 0, proportion=1),
]
for seed in range(1, 10):
ts1 = msprime.simulate(
n,
random_seed=seed,
demographic_events=demographic_events,
recombination_rate=1,
)
# Check if this is really nonbinary
found = False
for edgeset in ts1.edgesets():
if len(edgeset.children) > 2:
found = True
break
assert found
ts2 = msprime.simulate(
n,
random_seed=seed + 1,
demographic_events=demographic_events,
recombination_rate=1,
)
self.verify_same_kc(ts1, ts2)
# compare to a binary tree also
ts2 = msprime.simulate(n, recombination_rate=1, random_seed=seed + 1)
self.verify_same_kc(ts1, ts2)
def test_non_binary_sample_10(self):
self.validate_nonbinary_trees(10)
def test_non_binary_sample_20(self):
self.validate_nonbinary_trees(20)
def test_permit_internal_samples(self):
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(flags=1)
tables.nodes.add_row(flags=1)
tables.nodes.add_row(flags=1, time=1)
tables.edges.add_row(0, 1, 2, 0)
tables.edges.add_row(0, 1, 2, 1)
ts = tables.tree_sequence()
assert ts.kc_distance(ts) == 0
assert ts_kc_distance_incremental(ts, ts) == 0
def test_known_kc_sample_trees_different_shapes(self):
tables_1 = tskit.TableCollection(sequence_length=2.0)
tables_2 = tskit.TableCollection(sequence_length=2.0)
# Nodes
sv = [True, True, True, True, False, False, False]
tv = [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
for is_sample, t in zip(sv, tv):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_1.nodes.add_row(flags=flags, time=t)
tables_2.nodes.add_row(flags=flags, time=t)
# First tree edges
pv1 = [4, 4, 5, 5, 6, 6, 5, 6]
cv1 = [2, 3, 1, 4, 0, 5, 0, 4]
lv1 = [0, 0, 0, 0, 0, 0, 1, 1]
rv1 = [2, 2, 2, 1, 1, 2, 2, 2]
# Second tree edges
pv2 = [4, 4, 5, 5, 6, 6, 5, 6]
cv2 = [2, 3, 0, 1, 4, 5, 4, 0]
lv2 = [0, 0, 0, 0, 0, 0, 1, 1]
rv2 = [2, 2, 1, 2, 1, 2, 2, 2]
for left, right, p, c in zip(lv1, rv1, pv1, cv1):
tables_1.edges.add_row(left=left, right=right, parent=p, child=c)
for left, right, p, c in zip(lv2, rv2, pv2, cv2):
tables_2.edges.add_row(left=left, right=right, parent=p, child=c)
tables_1.sort()
tables_2.sort()
ts_1 = tables_1.tree_sequence()
ts_2 = tables_2.tree_sequence()
self.verify_result(ts_1, ts_2, 0, 2.0)
def test_known_kc_sample_trees_same_shape_different_times(self):
tables_1 = tskit.TableCollection(sequence_length=1.0)
tables_2 = tskit.TableCollection(sequence_length=1.0)
# Nodes
sv = [True, True, True, False, False]
tv_1 = [0.0, 0.0, 0.0, 2.0, 3.0]
tv_2 = [0.0, 0.0, 0.0, 4.0, 6.0]
for is_sample, t1, t2 in zip(sv, tv_1, tv_2):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_1.nodes.add_row(flags=flags, time=t1)
tables_2.nodes.add_row(flags=flags, time=t2)
# Edges
lv = [0.0, 0.0, 0.0, 0.0]
rv = [1.0, 1.0, 1.0, 1.0]
pv = [3, 3, 4, 4]
cv = [0, 1, 2, 3]
for left, right, p, c in zip(lv, rv, pv, cv):
tables_1.edges.add_row(left=left, right=right, parent=p, child=c)
tables_2.edges.add_row(left=left, right=right, parent=p, child=c)
ts_1 = tables_1.tree_sequence()
ts_2 = tables_2.tree_sequence()
self.verify_result(ts_1, ts_2, 0, 0)
self.verify_result(ts_1, ts_2, 1, 4.243, places=3)
def test_known_kc_same_tree_twice_same_metric(self):
tables_1 = tskit.TableCollection(sequence_length=2.0)
tables_2 = tskit.TableCollection(sequence_length=2.0)
# Nodes
sv = [True, True, True, False, False]
tv_1 = [0.0, 0.0, 0.0, 2.0, 3.0]
tv_2 = [0.0, 0.0, 0.0, 4.0, 6.0]
for is_sample, t1, t2 in zip(sv, tv_1, tv_2):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_1.nodes.add_row(flags=flags, time=t1)
tables_2.nodes.add_row(flags=flags, time=t2)
# Edges
pv = [3, 3, 4, 4]
cv = [0, 1, 2, 3]
for p, c in zip(pv, cv):
tables_1.edges.add_row(left=0, right=1, parent=p, child=c)
tables_1.edges.add_row(left=1, right=2, parent=p, child=c)
tables_2.edges.add_row(left=0, right=0.5, parent=p, child=c)
tables_2.edges.add_row(left=0.5, right=2, parent=p, child=c)
ts_1 = tables_1.tree_sequence()
ts_2 = tables_2.tree_sequence()
self.verify_result(ts_1, ts_2, 0, 0)
self.verify_result(ts_1, ts_2, 1, 4.243, places=3)
def test_remove_root(self):
tables_1 = tskit.TableCollection(sequence_length=10.0)
tables_2 = tskit.TableCollection(sequence_length=10.0)
# Nodes
sv1 = [True, True, True, True, True, False, False, False, False, False]
tv1 = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
sv2 = [True, True, True, True, True, False, False, False, False]
tv2 = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0]
for is_sample, t in zip(sv1, tv1):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_1.nodes.add_row(flags=flags, time=t)
for is_sample, t in zip(sv2, tv2):
flags = tskit.NODE_IS_SAMPLE if is_sample else 0
tables_2.nodes.add_row(flags=flags, time=t)
# Edges
pv1 = [5, 5, 6, 6, 7, 7, 8, 8, 8, 9, 9]
cv1 = [0, 1, 3, 4, 2, 5, 2, 6, 7, 5, 8]
lv1 = [0, 0, 0, 0, 5, 5, 0, 0, 5, 0, 0]
rv1 = [10, 10, 10, 10, 10, 10, 5, 10, 10, 5, 5]
pv2 = [5, 5, 6, 6, 7, 7, 8, 8]
cv2 = [0, 1, 2, 3, 4, 5, 6, 7]
lv2 = [0, 0, 0, 0, 0, 0, 0, 0]
rv2 = [10, 10, 10, 10, 10, 10, 10, 10]
for p, c, l, r in zip(pv1, cv1, lv1, rv1):
tables_1.edges.add_row(left=l, right=r, parent=p, child=c)
for p, c, l, r in zip(pv2, cv2, lv2, rv2):
tables_2.edges.add_row(left=l, right=r, parent=p, child=c)
ts_1 = tables_1.tree_sequence()
ts_2 = tables_2.tree_sequence()
distance = (math.sqrt(8) * 5 + math.sqrt(6) * 5) / 10
self.verify_result(ts_1, ts_2, 0, distance)
def test_ignores_subtrees_with_no_samples(self):
nodes_1 = io.StringIO(
"""\
id is_sample time population individual metadata
0 0 0.000000 0 -1
1 0 0.000000 0 -1
2 0 0.000000 0 -1
3 1 0.000000 0 -1
4 0 0.000000 0 -1
5 0 0.000000 0 -1
6 1 1.000000 0 -1
7 1 2.000000 0 -1
8 0 2.000000 0 -1
9 0 3.000000 0 -1
"""
)
edges_1 = io.StringIO(
"""\
left right parent child
0.000000 1.000000 6 0
0.000000 1.000000 6 1
0.000000 1.000000 7 2
0.000000 1.000000 7 6
0.000000 1.000000 8 4
0.000000 1.000000 8 5
0.000000 1.000000 9 3
0.000000 1.000000 9 7
0.000000 1.000000 9 8
"""
)
redundant = tskit.load_text(
nodes_1, edges_1, sequence_length=1, strict=False, base64_metadata=False
)
nodes_2 = io.StringIO(
"""\
id is_sample time population individual metadata
0 0 0.000000 0 -1
1 0 0.000000 0 -1
2 0 0.000000 0 -1
3 1 0.000000 0 -1
4 0 0.000000 0 -1
5 0 0.000000 0 -1
6 1 1.000000 0 -1
7 1 2.000000 0 -1
8 0 2.000000 0 -1
9 0 3.000000 0 -1
"""
)
edges_2 = io.StringIO(
"""\
left right parent child
0.000000 1.000000 7 2
0.000000 1.000000 7 6
0.000000 1.000000 9 3
0.000000 1.000000 9 7
"""
)
simplified = tskit.load_text(
nodes_2, edges_2, sequence_length=1, strict=False, base64_metadata=False
)
t1 = next(redundant.trees(sample_lists=True))
t2 = next(simplified.trees(sample_lists=True))
assert t1.kc_distance(t2, 0) == 0
assert t1.kc_distance(t2, 1) == 0
class TestOverlappingSegments:
"""
Tests for the overlapping segments algorithm required for simplify.
This test probably belongs somewhere else.
"""
def test_random(self):
segs = generate_segments(10, 20, 1)
for left, right, X in tests.overlapping_segments(segs):
assert right > left
assert len(X) > 0
def test_empty(self):
ret = list(tests.overlapping_segments([]))
assert len(ret) == 0
def test_single_interval(self):
for j in range(1, 10):
segs = [tests.Segment(0, 1, j) for _ in range(j)]
ret = list(tests.overlapping_segments(segs))
assert len(ret) == 1
left, right, X = ret[0]
assert left == 0
assert right == 1
assert sorted(segs) == sorted(X)
def test_stairs_down(self):
segs = [tests.Segment(0, 1, 0), tests.Segment(0, 2, 1), tests.Segment(0, 3, 2)]
ret = list(tests.overlapping_segments(segs))
assert len(ret) == 3
left, right, X = ret[0]
assert left == 0
assert right == 1
assert sorted(X) == sorted(segs)
left, right, X = ret[1]
assert left == 1
assert right == 2
assert sorted(X) == sorted(segs[1:])
left, right, X = ret[2]
assert left == 2
assert right == 3
assert sorted(X) == sorted(segs[2:])
def test_stairs_up(self):
segs = [tests.Segment(0, 3, 0), tests.Segment(1, 3, 1), tests.Segment(2, 3, 2)]
ret = list(tests.overlapping_segments(segs))
assert len(ret) == 3
left, right, X = ret[0]
assert left == 0
assert right == 1
assert X == segs[:1]
left, right, X = ret[1]
assert left == 1
assert right == 2
assert sorted(X) == sorted(segs[:2])
left, right, X = ret[2]
assert left == 2
assert right == 3
assert sorted(X) == sorted(segs)
def test_pyramid(self):
segs = [tests.Segment(0, 5, 0), tests.Segment(1, 4, 1), tests.Segment(2, 3, 2)]
ret = list(tests.overlapping_segments(segs))
assert len(ret) == 5
left, right, X = ret[0]
assert left == 0
assert right == 1
assert X == segs[:1]
left, right, X = ret[1]
assert left == 1
assert right == 2
assert sorted(X) == sorted(segs[:2])
left, right, X = ret[2]
assert left == 2
assert right == 3
assert sorted(X) == sorted(segs)
left, right, X = ret[3]
assert left == 3
assert right == 4
assert sorted(X) == sorted(segs[:2])
left, right, X = ret[4]
assert left == 4
assert right == 5
assert sorted(X) == sorted(segs[:1])
def test_gap(self):
segs = [tests.Segment(0, 2, 0), tests.Segment(3, 4, 1)]
ret = list(tests.overlapping_segments(segs))
assert len(ret) == 2
left, right, X = ret[0]
assert left == 0
assert right == 2
assert X == segs[:1]
left, right, X = ret[1]
assert left == 3
assert right == 4
assert X == segs[1:]
class TopologyTestCase:
"""
Superclass of test cases containing common utilities.
"""
random_seed = 123456
def assert_haplotypes_equal(self, ts1, ts2):
h1 = list(ts1.haplotypes())
h2 = list(ts2.haplotypes())
assert h1 == h2
def assert_variants_equal(self, ts1, ts2):
v1 = list(ts1.variants(as_bytes=True))
v2 = list(ts2.variants(as_bytes=True))
assert v1 == v2
def check_num_samples(self, ts, x):
"""
Compare against x, a list of tuples of the form
`(tree number, parent, number of samples)`.
"""
k = 0
tss = ts.trees()
t = next(tss)
for j, node, nl in x:
while k < j:
t = next(tss)
k += 1
assert nl == t.num_samples(node)
def check_num_tracked_samples(self, ts, tracked_samples, x):
k = 0
tss = ts.trees(tracked_samples=tracked_samples)
t = next(tss)
for j, node, nl in x:
while k < j:
t = next(tss)
k += 1
assert nl == t.num_tracked_samples(node)
def check_sample_iterator(self, ts, x):
"""
Compare against x, a list of tuples of the form
`(tree number, node, sample ID list)`.
"""
k = 0
tss = ts.trees(sample_lists=True)
t = next(tss)
for j, node, samples in x:
while k < j:
t = next(tss)
k += 1
for u, v in zip(samples, t.samples(node)):
assert u == v
class TestZeroRoots:
"""
Tests for the case in which we have zero samples and therefore
zero roots in our trees.
"""
def remove_samples(self, ts):
tables = ts.dump_tables()
tables.nodes.flags = np.zeros_like(tables.nodes.flags)
return tables.tree_sequence()
def verify(self, ts, no_root_ts):
assert ts.num_trees == no_root_ts.num_trees
for tree, no_root in zip(ts.trees(), no_root_ts.trees()):
assert no_root.num_roots == 0
assert no_root.left_root == tskit.NULL
assert no_root.roots == []
assert tree.parent_dict == no_root.parent_dict
def test_single_tree(self):
ts = msprime.simulate(10, random_seed=1)
no_root_ts = self.remove_samples(ts)
assert ts.num_trees == 1
self.verify(ts, no_root_ts)
def test_multiple_trees(self):
ts = msprime.simulate(10, recombination_rate=2, random_seed=1)
no_root_ts = self.remove_samples(ts)
assert ts.num_trees > 1
self.verify(ts, no_root_ts)
class TestEmptyTreeSequences(TopologyTestCase):
"""
Tests covering tree sequences that have zero edges.
"""
def test_zero_nodes(self):
tables = tskit.TableCollection(1)
ts = tables.tree_sequence()
assert ts.sequence_length == 1
assert ts.num_trees == 1
assert ts.num_nodes == 0
assert ts.num_edges == 0
t = next(ts.trees())
assert t.index == 0
assert t.left_root == tskit.NULL
assert t.interval == (0, 1)
assert t.roots == []
assert t.root == tskit.NULL
assert t.parent_dict == {}
assert list(t.nodes()) == []
assert list(ts.haplotypes()) == []
assert list(ts.variants()) == []
methods = [t.parent, t.left_child, t.right_child, t.left_sib, t.right_sib]
for method in methods:
for u in [-1, 0, 1, 100]:
with pytest.raises(ValueError):
method(u)
tsp = ts.simplify()
assert tsp.num_nodes == 0
assert tsp.num_edges == 0
def test_one_node_zero_samples(self):
tables = tskit.TableCollection(sequence_length=1)
tables.nodes.add_row(time=0, flags=0)
# A sequence length is required here; without one, tree_sequence() would fail.
ts = tables.tree_sequence()
assert ts.sequence_length == 1
assert ts.num_trees == 1
assert ts.num_nodes == 1
assert ts.sample_size == 0
assert ts.num_edges == 0
assert ts.num_sites == 0
assert ts.num_mutations == 0
t = next(ts.trees())
assert t.index == 0
assert t.left_root == tskit.NULL
assert t.interval == (0, 1)
assert t.roots == []
assert t.root == tskit.NULL
assert t.parent_dict == {}
assert list(t.nodes()) == []
assert list(ts.haplotypes()) == []
assert list(ts.variants()) == []
methods = [t.parent, t.left_child, t.right_child, t.left_sib, t.right_sib]
for method in methods:
assert method(0) == tskit.NULL
for u in [-1, 1, 100]:
with pytest.raises(ValueError):
method(u)
def test_one_node_zero_samples_sites(self):
tables = tskit.TableCollection(sequence_length=1)
tables.nodes.add_row(time=0, flags=0)
tables.sites.add_row(position=0.5, ancestral_state="0")
tables.mutations.add_row(site=0, derived_state="1", node=0)
ts = tables.tree_sequence()
assert ts.sequence_length == 1
assert ts.num_trees == 1
assert ts.num_nodes == 1
assert ts.sample_size == 0
assert ts.num_edges == 0
assert ts.num_sites == 1
assert ts.num_mutations == 1
t = next(ts.trees())
assert t.index == 0
assert t.left_root == tskit.NULL
assert t.interval == (0, 1)
assert t.roots == []
assert t.root == tskit.NULL
assert t.parent_dict == {}
assert len(list(t.sites())) == 1
assert list(t.nodes()) == []
assert list(ts.haplotypes()) == []
assert len(list(ts.variants())) == 1
tsp = ts.simplify()
assert tsp.num_nodes == 0
assert tsp.num_edges == 0
def test_one_node_one_sample(self):
tables = tskit.TableCollection(sequence_length=1)
tables.nodes.add_row(time=0, flags=tskit.NODE_IS_SAMPLE)
ts = tables.tree_sequence()
assert ts.sequence_length == 1
assert ts.num_trees == 1
assert ts.num_nodes == 1
assert ts.sample_size == 1
assert ts.num_edges == 0
t = next(ts.trees())
assert t.index == 0
assert t.left_root == 0
assert t.interval == (0, 1)
assert t.roots == [0]
assert t.root == 0
assert t.parent_dict == {}
assert list(t.nodes()) == [0]
assert list(ts.haplotypes(isolated_as_missing=False)) == [""]
assert list(ts.variants()) == []
methods = [t.parent, t.left_child, t.right_child, t.left_sib, t.right_sib]
for method in methods:
assert method(0) == tskit.NULL
for u in [-1, 1, 100]:
with pytest.raises(ValueError):
method(u)
tsp = ts.simplify()
assert tsp.num_nodes == 1
assert tsp.num_edges == 0
def test_one_node_one_sample_sites(self):
tables = tskit.TableCollection(sequence_length=1)
tables.nodes.add_row(time=0, flags=tskit.NODE_IS_SAMPLE)
tables.sites.add_row(position=0.5, ancestral_state="0")
tables.mutations.add_row(site=0, derived_state="1", node=0)
ts = tables.tree_sequence()
assert ts.sequence_length == 1
assert ts.num_trees == 1
assert ts.num_nodes == 1
assert ts.sample_size == 1
assert ts.num_edges == 0
assert ts.num_sites == 1
assert ts.num_mutations == 1
t = next(ts.trees())
assert t.index == 0
assert t.left_root == 0
assert t.interval == (0, 1)
assert t.roots == [0]
assert t.root == 0
assert t.parent_dict == {}
assert list(t.nodes()) == [0]
assert list(ts.haplotypes(isolated_as_missing=False)) == ["1"]
assert len(list(ts.variants())) == 1
methods = [t.parent, t.left_child, t.right_child, t.left_sib, t.right_sib]
for method in methods:
assert method(0) == tskit.NULL
for u in [-1, 1, 100]:
with pytest.raises(ValueError):
method(u)
tsp = ts.simplify(filter_sites=False)
assert tsp.num_nodes == 1
assert tsp.num_edges == 0
assert tsp.num_sites == 1
class TestHoleyTreeSequences(TopologyTestCase):
"""
Tests for tree sequences in which we have partial (or no) trees defined
over some of the sequence.
"""
def verify_trees(self, ts, expected):
observed = []
for t in ts.trees():
observed.append((t.interval, t.parent_dict))
assert expected == observed
# Also check against the simple algorithm_T implementation.
observed = []
for interval, parent in tsutil.algorithm_T(ts):
parent_dict = {j: parent[j] for j in range(ts.num_nodes) if parent[j] >= 0}
observed.append((interval, parent_dict))
assert expected == observed
def verify_zero_roots(self, ts):
for tree in ts.trees():
assert tree.num_roots == 0
assert tree.left_root == tskit.NULL
assert tree.roots == []
def test_simple_hole(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
2 3 2 0
0 1 2 1
2 3 2 1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
expected = [((0, 1), {0: 2, 1: 2}), ((1, 2), {}), ((2, 3), {0: 2, 1: 2})]
self.verify_trees(ts, expected)
def test_simple_hole_zero_roots(self):
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 0 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
2 3 2 0
0 1 2 1
2 3 2 1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
expected = [((0, 1), {0: 2, 1: 2}), ((1, 2), {}), ((2, 3), {0: 2, 1: 2})]
self.verify_trees(ts, expected)
self.verify_zero_roots(ts)
def test_initial_gap(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
1 2 2 0,1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
expected = [((0, 1), {}), ((1, 2), {0: 2, 1: 2})]
self.verify_trees(ts, expected)
def test_initial_gap_zero_roots(self):
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 0 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
1 2 2 0,1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
expected = [((0, 1), {}), ((1, 2), {0: 2, 1: 2})]
self.verify_trees(ts, expected)
self.verify_zero_roots(ts)
def test_final_gap(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 2 2 0,1
"""
)
ts = tskit.load_text(nodes, edges, sequence_length=3, strict=False)
expected = [((0, 2), {0: 2, 1: 2}), ((2, 3), {})]
self.verify_trees(ts, expected)
def test_final_gap_zero_roots(self):
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 0 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 2 2 0,1
"""
)
ts = tskit.load_text(nodes, edges, sequence_length=3, strict=False)
expected = [((0, 2), {0: 2, 1: 2}), ((2, 3), {})]
self.verify_trees(ts, expected)
self.verify_zero_roots(ts)
def test_initial_and_final_gap(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
1 2 2 0,1
"""
)
ts = tskit.load_text(nodes, edges, sequence_length=3, strict=False)
expected = [((0, 1), {}), ((1, 2), {0: 2, 1: 2}), ((2, 3), {})]
self.verify_trees(ts, expected)
def test_initial_and_final_gap_zero_roots(self):
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 0 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
1 2 2 0,1
"""
)
ts = tskit.load_text(nodes, edges, sequence_length=3, strict=False)
expected = [((0, 1), {}), ((1, 2), {0: 2, 1: 2}), ((2, 3), {})]
self.verify_trees(ts, expected)
self.verify_zero_roots(ts)
class TestTsinferExamples(TopologyTestCase):
"""
Test cases on troublesome topology examples that arose from tsinfer.
"""
def test_no_last_tree(self):
# The last tree was not being generated here because of a bug in
# the low-level tree generation code.
nodes = io.StringIO(
"""\
id is_sample population time
0 1 -1 3.00000000000000
1 1 -1 2.00000000000000
2 1 -1 2.00000000000000
3 1 -1 2.00000000000000
4 1 -1 2.00000000000000
5 1 -1 1.00000000000000
6 1 -1 1.00000000000000
7 1 -1 1.00000000000000
8 1 -1 1.00000000000000
9 1 -1 1.00000000000000
10 1 -1 1.00000000000000
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 62291.41659631 79679.17408763 1 5
1 62291.41659631 62374.60889677 1 6
2 122179.36037089 138345.43104411 1 7
3 67608.32330402 79679.17408763 1 8
4 122179.36037089 138345.43104411 1 8
5 62291.41659631 79679.17408763 1 9
6 126684.47550333 138345.43104411 1 10
7 23972.05905068 62291.41659631 2 5
8 79679.17408763 82278.53390076 2 5
9 23972.05905068 62291.41659631 2 6
10 79679.17408763 110914.43816806 2 7
11 145458.28890561 189765.31932273 2 7
12 79679.17408763 110914.43816806 2 8
13 145458.28890561 200000.00000000 2 8
14 23972.05905068 62291.41659631 2 9
15 79679.17408763 110914.43816806 2 9
16 145458.28890561 145581.18329797 2 10
17 4331.62138785 23972.05905068 3 6
18 4331.62138785 23972.05905068 3 9
19 110914.43816806 122179.36037089 4 7
20 138345.43104411 145458.28890561 4 7
21 110914.43816806 122179.36037089 4 8
22 138345.43104411 145458.28890561 4 8
23 110914.43816806 112039.30503475 4 9
24 138345.43104411 145458.28890561 4 10
25 0.00000000 200000.00000000 0 1
26 0.00000000 200000.00000000 0 2
27 0.00000000 200000.00000000 0 3
28 0.00000000 200000.00000000 0 4
"""
)
ts = tskit.load_text(nodes, edges, sequence_length=200000, strict=False)
pts = tests.PythonTreeSequence(ts)
num_trees = 0
for _ in pts.trees():
num_trees += 1
assert num_trees == ts.num_trees
n = 0
for pt, t in zip(pts.trees(), ts.trees()):
assert (pt.left, pt.right) == t.interval
for j in range(ts.num_nodes):
assert pt.parent[j] == t.parent(j)
assert pt.left_child[j] == t.left_child(j)
assert pt.right_child[j] == t.right_child(j)
assert pt.left_sib[j] == t.left_sib(j)
assert pt.right_sib[j] == t.right_sib(j)
n += 1
assert n == num_trees
intervals = [t.interval for t in ts.trees()]
assert intervals[0][0] == 0
assert intervals[-1][-1] == ts.sequence_length
class TestRecordSquashing(TopologyTestCase):
"""
Tests that we correctly squash adjacent equal records together.
"""
def test_single_record(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 1 0
1 2 1 0
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
tss, node_map = ts.simplify(map_nodes=True)
assert list(node_map) == [0, 1]
assert tss.dump_tables().nodes == ts.dump_tables().nodes
simplified_edges = list(tss.edges())
assert len(simplified_edges) == 1
e = simplified_edges[0]
assert e.left == 0
assert e.right == 2
def test_single_tree(self):
ts = msprime.simulate(10, random_seed=self.random_seed)
ts_redundant = tsutil.insert_redundant_breakpoints(ts)
tss = ts_redundant.simplify()
assert tss.dump_tables().nodes == ts.dump_tables().nodes
assert tss.dump_tables().edges == ts.dump_tables().edges
def test_many_trees(self):
ts = msprime.simulate(20, recombination_rate=5, random_seed=self.random_seed)
assert ts.num_trees > 2
ts_redundant = tsutil.insert_redundant_breakpoints(ts)
tss = ts_redundant.simplify()
assert tss.dump_tables().nodes == ts.dump_tables().nodes
assert tss.dump_tables().edges == ts.dump_tables().edges
class TestRedundantBreakpoints(TopologyTestCase):
"""
Tests for dealing with redundant breakpoints within the tree sequence.
These are records that may be squashed together into a single record.
"""
def test_single_tree(self):
ts = msprime.simulate(10, random_seed=self.random_seed)
ts_redundant = tsutil.insert_redundant_breakpoints(ts)
assert ts.sample_size == ts_redundant.sample_size
assert ts.sequence_length == ts_redundant.sequence_length
assert ts_redundant.num_trees == 2
trees = [t.parent_dict for t in ts_redundant.trees()]
assert len(trees) == 2
assert trees[0] == trees[1]
assert [t.parent_dict for t in ts.trees()][0] == trees[0]
def test_many_trees(self):
ts = msprime.simulate(20, recombination_rate=5, random_seed=self.random_seed)
assert ts.num_trees > 2
ts_redundant = tsutil.insert_redundant_breakpoints(ts)
assert ts.sample_size == ts_redundant.sample_size
assert ts.sequence_length == ts_redundant.sequence_length
assert ts_redundant.num_trees > ts.num_trees
assert ts_redundant.num_edges > ts.num_edges
redundant_trees = ts_redundant.trees()
redundant_t = next(redundant_trees)
comparisons = 0
for t in ts.trees():
while (
redundant_t is not None
and redundant_t.interval.right <= t.interval.right
):
assert t.parent_dict == redundant_t.parent_dict
comparisons += 1
redundant_t = next(redundant_trees, None)
assert comparisons == ts_redundant.num_trees
class TestUnaryNodes(TopologyTestCase):
"""
Tests for situations in which we have unary nodes in the tree sequence.
"""
def test_simple_case(self):
# Simple case where we have n = 2 and some unary nodes.
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 1
4 0 2
5 0 3
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
0 1 3 1
0 1 4 2,3
0 1 5 4
"""
)
sites = "position ancestral_state\n"
mutations = "site node derived_state\n"
for j in range(5):
position = j * 1 / 5
sites += f"{position} 0\n"
mutations += f"{j} {j} 1\n"
ts = tskit.load_text(
nodes=nodes,
edges=edges,
sites=io.StringIO(sites),
mutations=io.StringIO(mutations),
strict=False,
)
assert ts.sample_size == 2
assert ts.num_nodes == 6
assert ts.num_trees == 1
assert ts.num_sites == 5
assert ts.num_mutations == 5
assert len(list(ts.edge_diffs())) == ts.num_trees
t = next(ts.trees())
assert t.parent_dict == {0: 2, 1: 3, 2: 4, 3: 4, 4: 5}
assert t.mrca(0, 1) == 4
assert t.mrca(0, 2) == 2
assert t.mrca(0, 4) == 4
assert t.mrca(0, 5) == 5
assert t.mrca(0, 3) == 4
H = list(ts.haplotypes())
assert H[0] == "10101"
assert H[1] == "01011"
def test_ladder_tree(self):
# We have a single tree with a long ladder of unary nodes along a path
num_unary_nodes = 30
n = 2
nodes = """\
is_sample time
1 0
1 0
"""
edges = """\
left right parent child
0 1 2 0
"""
for j in range(num_unary_nodes + 2):
nodes += "0 {}\n".format(j + 2)
for j in range(num_unary_nodes):
edges += "0 1 {} {}\n".format(n + j + 1, n + j)
root = num_unary_nodes + 3
root_time = num_unary_nodes + 3
edges += "0 1 {} 1,{}\n".format(root, num_unary_nodes + 2)
ts = tskit.load_text(io.StringIO(nodes), io.StringIO(edges), strict=False)
t = ts.first()
assert t.mrca(0, 1) == root
assert t.tmrca(0, 1) == root_time
ts_simplified, node_map = ts.simplify(map_nodes=True)
test_map = [tskit.NULL for _ in range(ts.num_nodes)]
test_map[0] = 0
test_map[1] = 1
test_map[root] = 2
assert list(node_map) == test_map
assert ts_simplified.num_edges == 2
t = ts_simplified.first()
assert t.mrca(0, 1) == 2
assert t.tmrca(0, 1) == root_time
ts_simplified = ts.simplify(keep_unary=True, record_provenance=False)
assert ts_simplified.tables == ts.tables
def verify_unary_tree_sequence(self, ts):
"""
Take the specified tree sequence and produce an equivalent one in which
unary records have been interspersed; every other inserted node is
associated with an individual.
"""
assert ts.num_trees > 2
assert ts.num_mutations > 2
tables = ts.dump_tables()
next_node = ts.num_nodes
node_times = {j: node.time for j, node in enumerate(ts.nodes())}
edges = []
for i, e in enumerate(ts.edges()):
node = ts.node(e.parent)
t = node.time - 1e-14 # Arbitrary small value.
next_node = len(tables.nodes)
indiv = tables.individuals.add_row() if i % 2 == 0 else tskit.NULL
tables.nodes.add_row(time=t, population=node.population, individual=indiv)
edges.append(
tskit.Edge(left=e.left, right=e.right, parent=next_node, child=e.child)
)
node_times[next_node] = t
edges.append(
tskit.Edge(left=e.left, right=e.right, parent=e.parent, child=next_node)
)
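# tskit requires edges to be sorted so that parent times are nondecreasing,
# so sort the new edges by parent time before rebuilding the edge table.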
edges.sort(key=lambda e: node_times[e.parent])
tables.edges.reset()
for e in edges:
tables.edges.append(e)
ts_new = tables.tree_sequence()
assert ts_new.num_edges > ts.num_edges
self.assert_haplotypes_equal(ts, ts_new)
self.assert_variants_equal(ts, ts_new)
ts_simplified = ts_new.simplify()
assert list(ts_simplified.records()) == list(ts.records())
self.assert_haplotypes_equal(ts, ts_simplified)
self.assert_variants_equal(ts, ts_simplified)
assert len(list(ts.edge_diffs())) == ts.num_trees
assert 0 < ts_new.num_individuals < ts_new.num_nodes
for params in [
{"keep_unary": False, "keep_unary_in_individuals": False},
{"keep_unary": True, "keep_unary_in_individuals": False},
{"keep_unary": False, "keep_unary_in_individuals": True},
]:
s = tests.Simplifier(ts_new, ts_new.samples(), **params)
py_ts, py_node_map = s.simplify()
lib_ts, lib_node_map = ts_new.simplify(map_nodes=True, **params)
py_tables = py_ts.dump_tables()
py_tables.provenances.clear()
lib_tables = lib_ts.dump_tables()
lib_tables.provenances.clear()
assert lib_tables == py_tables
assert np.all(lib_node_map == py_node_map)
def test_binary_tree_sequence_unary_nodes(self):
ts = msprime.simulate(
20, recombination_rate=5, mutation_rate=5, random_seed=self.random_seed
)
self.verify_unary_tree_sequence(ts)
def test_nonbinary_tree_sequence_unary_nodes(self):
demographic_events = [
msprime.SimpleBottleneck(time=1.0, population=0, proportion=0.95)
]
ts = msprime.simulate(
20,
recombination_rate=10,
mutation_rate=5,
demographic_events=demographic_events,
random_seed=self.random_seed,
)
found = False
for r in ts.edgesets():
if len(r.children) > 2:
found = True
assert found
self.verify_unary_tree_sequence(ts)
class TestGeneralSamples(TopologyTestCase):
"""
Test cases in which we have samples at arbitrary nodes (i.e., not at
{0,...,n - 1}).
"""
def test_simple_case(self):
# Simple case where we have n = 3 and samples starting at n.
nodes = io.StringIO(
"""\
id is_sample time
0 0 2
1 0 1
2 1 0
3 1 0
4 1 0
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 1 2,3
0 1 0 1,4
"""
)
sites = io.StringIO(
"""\
position ancestral_state
0.1 0
0.2 0
0.3 0
0.4 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state
0 2 1
1 3 1
2 4 1
3 1 1
"""
)
ts = tskit.load_text(
nodes=nodes, edges=edges, sites=sites, mutations=mutations, strict=False
)
assert ts.sample_size == 3
assert list(ts.samples()) == [2, 3, 4]
assert ts.num_nodes == 5
assert ts.num_trees == 1
assert ts.num_sites == 4
assert ts.num_mutations == 4
assert len(list(ts.edge_diffs())) == ts.num_trees
t = next(ts.trees())
assert t.root == 0
assert t.parent_dict == {1: 0, 2: 1, 3: 1, 4: 0}
H = list(ts.haplotypes())
assert H[0] == "1001"
assert H[1] == "0101"
assert H[2] == "0010"
tss, node_map = ts.simplify(map_nodes=True)
assert list(node_map) == [4, 3, 0, 1, 2]
# We should have the same tree sequence just with canonicalised nodes.
assert tss.sample_size == 3
assert list(tss.samples()) == [0, 1, 2]
assert tss.num_nodes == 5
assert tss.num_trees == 1
assert tss.num_sites == 4
assert tss.num_mutations == 4
assert len(list(ts.edge_diffs())) == ts.num_trees
t = next(tss.trees())
assert t.root == 4
assert t.parent_dict == {0: 3, 1: 3, 2: 4, 3: 4}
H = list(tss.haplotypes())
assert H[0] == "1001"
assert H[1] == "0101"
assert H[2] == "0010"
def verify_permuted_nodes(self, ts):
"""
Take the specified tree sequence and permute the nodes, verifying that we
get back a tree sequence with the correct properties.
"""
# Mapping from the original nodes into nodes in the new tree sequence.
node_map = list(range(ts.num_nodes))
random.shuffle(node_map)
# Change the permutation so that the relative order of samples is maintained.
# Then, after simplify, we should get back exactly the same tree sequence,
# and the haplotypes and variants should also be equal.
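# For example (hypothetical values), with sample_size == 3 a shuffled map
# beginning [7, 2, 5, ...] becomes [2, 5, 7, ...]: the first three entries
# are sorted so the samples keep their relative order, and the rest of the
# permutation is left untouched.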
samples = sorted(node_map[: ts.sample_size])
node_map = samples + node_map[ts.sample_size :]
permuted = tsutil.permute_nodes(ts, node_map)
assert ts.sequence_length == permuted.sequence_length
assert list(permuted.samples()) == samples
assert list(permuted.haplotypes()) == list(ts.haplotypes())
assert [v.genotypes for v in permuted.variants(as_bytes=True)] == [
v.genotypes for v in ts.variants(as_bytes=True)
]
assert ts.num_trees == permuted.num_trees
j = 0
for t1, t2 in zip(ts.trees(), permuted.trees()):
t1_dict = {node_map[k]: node_map[v] for k, v in t1.parent_dict.items()}
assert node_map[t1.root] == t2.root
assert t1_dict == t2.parent_dict
for u1 in t1.nodes():
u2 = node_map[u1]
assert sorted([node_map[v] for v in t1.samples(u1)]) == sorted(
list(t2.samples(u2))
)
j += 1
assert j == ts.num_trees
# The simplified version of the permuted tree sequence should be in canonical
# form, and identical to the original.
simplified, s_node_map = permuted.simplify(map_nodes=True)
original_tables = ts.dump_tables()
simplified_tables = simplified.dump_tables()
original_tables.provenances.clear()
simplified_tables.provenances.clear()
assert original_tables.sequence_length == simplified_tables.sequence_length
assert original_tables.nodes == simplified_tables.nodes
assert original_tables.edges == simplified_tables.edges
assert original_tables.sites == simplified_tables.sites
assert original_tables.mutations == simplified_tables.mutations
assert original_tables.individuals == simplified_tables.individuals
assert original_tables.populations == simplified_tables.populations
assert original_tables == simplified_tables
assert ts.sequence_length == simplified.sequence_length
for _ in simplified.trees():
pass
for u, v in enumerate(node_map):
assert s_node_map[v] == u
assert np.array_equal(simplified.samples(), ts.samples())
assert list(simplified.nodes()) == list(ts.nodes())
assert list(simplified.edges()) == list(ts.edges())
assert list(simplified.sites()) == list(ts.sites())
assert list(simplified.haplotypes()) == list(ts.haplotypes())
assert list(simplified.variants(as_bytes=True)) == list(
ts.variants(as_bytes=True)
)
def test_single_tree_permuted_nodes(self):
ts = msprime.simulate(10, mutation_rate=5, random_seed=self.random_seed)
self.verify_permuted_nodes(ts)
def test_binary_tree_sequence_permuted_nodes(self):
ts = msprime.simulate(
20, recombination_rate=5, mutation_rate=5, random_seed=self.random_seed
)
self.verify_permuted_nodes(ts)
def test_nonbinary_tree_sequence_permuted_nodes(self):
demographic_events = [
msprime.SimpleBottleneck(time=1.0, population=0, proportion=0.95)
]
ts = msprime.simulate(
20,
recombination_rate=10,
mutation_rate=5,
demographic_events=demographic_events,
random_seed=self.random_seed,
)
found = False
for e in ts.edgesets():
if len(e.children) > 2:
found = True
assert found
self.verify_permuted_nodes(ts)
class TestTraversalOrder:
"""
Tests node traversal orders.
"""
#
# 9 10
# / \ / \
# / \ / 8
# / \ / / \
# 7 \ / / \
# / \ 6 / / 6
# / 5 / \ / 5 / \
# / / \ / \ / / \ / \
# 4 0 1 2 3 4 0 1 2 3
#
# 0 ------------------ 0.5 ------------------ 1.0
nodes = """\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 1 0 0.00000000000000
5 0 0 0.14567111023387
6 0 0 0.21385545626353
7 0 0 0.43508024345063
8 0 0 0.60156352971203
9 0 0 0.90000000000000
10 0 0 1.20000000000000
"""
edges = """\
id left right parent child
0 0.00000000 1.00000000 5 0,1
1 0.00000000 1.00000000 6 2,3
2 0.00000000 0.50000000 7 4,5
3 0.50000000 1.00000000 8 5,6
4 0.00000000 0.50000000 9 6,7
5 0.50000000 1.00000000 10 4,8
"""
node_order_results = {
"preorder": [[9, 6, 2, 3, 7, 4, 5, 0, 1], [10, 4, 8, 5, 0, 1, 6, 2, 3]],
"inorder": [[2, 6, 3, 9, 4, 7, 0, 5, 1], [4, 10, 0, 5, 1, 8, 2, 6, 3]],
"postorder": [[2, 3, 6, 4, 0, 1, 5, 7, 9], [4, 0, 1, 5, 2, 3, 6, 8, 10]],
"levelorder": [[9, 6, 7, 2, 3, 4, 5, 0, 1], [10, 4, 8, 5, 6, 0, 1, 2, 3]],
"breadthfirst": [[9, 6, 7, 2, 3, 4, 5, 0, 1], [10, 4, 8, 5, 6, 0, 1, 2, 3]],
"timeasc": [[0, 1, 2, 3, 4, 5, 6, 7, 9], [0, 1, 2, 3, 4, 5, 6, 8, 10]],
"timedesc": [[9, 7, 6, 5, 4, 3, 2, 1, 0], [10, 8, 6, 5, 4, 3, 2, 1, 0]],
"minlex_postorder": [[0, 1, 5, 4, 7, 2, 3, 6, 9], [0, 1, 5, 2, 3, 6, 8, 4, 10]],
}
def test_traversal_order(self):
ts = tskit.load_text(
nodes=io.StringIO(self.nodes), edges=io.StringIO(self.edges), strict=False
)
for test_order, expected_result in self.node_order_results.items():
tree_orders = []
for tree in ts.trees():
tree_orders.append(list(tree.nodes(order=test_order)))
assert tree_orders == expected_result
def test_polytomy_inorder(self):
"""
If there are N children, the current inorder traversal first visits
floor(N/2) children, then the parent, then the remaining children.
Here we explicitly test that behaviour.
"""
#
# __4__
# / / \ \
# 0 1 2 3
#
nodes_polytomy_4 = """\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 0 0 1.00000000000000
"""
edges_polytomy_4 = """\
id left right parent child
0 0.00000000 1.00000000 4 0,1,2,3
"""
#
# __5__
# / /|\ \
# 0 1 2 3 4
#
nodes_polytomy_5 = """\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 1 0 0.00000000000000
5 0 0 1.00000000000000
"""
edges_polytomy_5 = """\
id left right parent child
0 0.00000000 1.00000000 5 0,1,2,3,4
"""
for nodes_string, edges_string, expected_result in [
[nodes_polytomy_4, edges_polytomy_4, [[0, 1, 4, 2, 3]]],
[nodes_polytomy_5, edges_polytomy_5, [[0, 1, 5, 2, 3, 4]]],
]:
ts = tskit.load_text(
nodes=io.StringIO(nodes_string),
edges=io.StringIO(edges_string),
strict=False,
)
tree_orders = []
for tree in ts.trees():
tree_orders.append(list(tree.nodes(order="inorder")))
assert tree_orders == expected_result
def test_minlex_postorder_multiple_roots(self):
#
# 10 8 9 11
# / \ / \ / \ / \
# 5 3 2 4 6 7 1 0
#
nodes_string = """\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 1 0 0.00000000000000
5 1 0 0.00000000000000
6 1 0 0.00000000000000
7 1 0 0.00000000000000
8 0 0 1.00000000000000
9 0 0 1.00000000000000
10 0 0 1.00000000000000
11 0 0 1.00000000000000
"""
edges_string = """\
id left right parent child
0 0.00000000 1.00000000 8 2,4
1 0.00000000 1.00000000 9 6,7
2 0.00000000 1.00000000 10 5,3
3 0.00000000 1.00000000 11 1,0
"""
expected_result = [[0, 1, 11, 2, 4, 8, 3, 5, 10, 6, 7, 9]]
ts = tskit.load_text(
nodes=io.StringIO(nodes_string),
edges=io.StringIO(edges_string),
strict=False,
)
tree_orders = []
for tree in ts.trees():
tree_orders.append(list(tree.nodes(order="minlex_postorder")))
assert tree_orders == expected_result
class TestSimplifyExamples(TopologyTestCase):
"""
Tests for simplify in which we write out the input and expected output,
or in which we detect expected errors.
"""
def verify_simplify(
self,
samples,
filter_sites=True,
keep_input_roots=False,
nodes_before=None,
edges_before=None,
sites_before=None,
mutations_before=None,
nodes_after=None,
edges_after=None,
sites_after=None,
mutations_after=None,
debug=False,
):
"""
Verifies that if we run simplify on the specified input we get the
required output.
"""
ts = tskit.load_text(
nodes=io.StringIO(nodes_before),
edges=io.StringIO(edges_before),
sites=io.StringIO(sites_before) if sites_before is not None else None,
mutations=(
io.StringIO(mutations_before) if mutations_before is not None else None
),
strict=False,
)
before = ts.dump_tables()
ts = tskit.load_text(
nodes=io.StringIO(nodes_after),
edges=io.StringIO(edges_after),
sites=io.StringIO(sites_after) if sites_after is not None else None,
mutations=(
io.StringIO(mutations_after) if mutations_after is not None else None
),
strict=False,
sequence_length=before.sequence_length,
)
after = ts.dump_tables()
# Make sure it's a valid tree sequence
ts = before.tree_sequence()
before.simplify(
samples=samples,
filter_sites=filter_sites,
keep_input_roots=keep_input_roots,
record_provenance=False,
)
if debug:
print("before")
print(before)
print(before.tree_sequence().draw_text())
print("after")
print(after)
print(after.tree_sequence().draw_text())
assert before == after
def test_unsorted_edges(self):
# We have two nodes at the same time and interleave edges for
# these nodes together. This is an error because all edges for
# a given parent must be contiguous.
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 1
"""
edges_before = """\
left right parent child
0 1 2 0,1
0 1 3 0,1
1 2 2 0,1
1 2 3 0,1
"""
nodes = tskit.parse_nodes(io.StringIO(nodes_before), strict=False)
edges = tskit.parse_edges(io.StringIO(edges_before), strict=False)
# Cannot use load_text here because it calls sort()
tables = tskit.TableCollection(sequence_length=2)
tables.nodes.set_columns(**nodes.asdict())
tables.edges.set_columns(**edges.asdict())
with pytest.raises(_tskit.LibraryError):
tables.simplify(samples=[0, 1])
def test_single_binary_tree(self):
#
# 2 4
# / \
# 1 3 \
# / \ \
# 0 (0)(1) (2)
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 2
"""
edges_before = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
"""
# We sample 0 and 2, so we get
nodes_after = """\
id is_sample time
0 1 0
1 1 0
2 0 2
"""
edges_after = """\
left right parent child
0 1 2 0,1
"""
self.verify_simplify(
samples=[0, 2],
nodes_before=nodes_before,
edges_before=edges_before,
nodes_after=nodes_after,
edges_after=edges_after,
)
def test_single_binary_tree_no_sample_nodes(self):
#
# 2 4
# / \
# 1 3 \
# / \ \
# 0 (0)(1) (2)
nodes_before = """\
id is_sample time
0 0 0
1 0 0
2 0 0
3 0 1
4 0 2
"""
edges_before = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
"""
# We sample 0 and 2, so we get
nodes_after = """\
id is_sample time
0 1 0
1 1 0
2 0 2
"""
edges_after = """\
left right parent child
0 1 2 0,1
"""
self.verify_simplify(
samples=[0, 2],
nodes_before=nodes_before,
edges_before=edges_before,
nodes_after=nodes_after,
edges_after=edges_after,
)
def test_single_binary_tree_keep_input_root(self):
#
# 2 4
# / \
# 1 3 \
# / \ \
# 0 (0)(1) (2)
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 2
"""
edges_before = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
"""
nodes_after = """\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 2
"""
edges_after = """\
left right parent child
0 1 2 0,1
0 1 3 2
"""
self.verify_simplify(
samples=[0, 1],
nodes_before=nodes_before,
edges_before=edges_before,
nodes_after=nodes_after,
edges_after=edges_after,
keep_input_roots=True,
)
def test_single_binary_tree_internal_sample(self):
#
# 2 4
# / \
# 1 (3) \
# / \ \
# 0 (0) 1 (2)
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 0 0
3 1 1
4 0 2
"""
edges_before = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
"""
# We sample 0 and 3, so we get
nodes_after = """\
id is_sample time
0 1 0
1 1 1
"""
edges_after = """\
left right parent child
0 1 1 0
"""
self.verify_simplify(
samples=[0, 3],
nodes_before=nodes_before,
edges_before=edges_before,
nodes_after=nodes_after,
edges_after=edges_after,
)
def test_single_binary_tree_internal_sample_meet_at_root(self):
# 3 5
# / \
# 2 4 (6)
# / \
# 1 (3) \
# / \ \
# 0 (0) 1 2
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 0 0
3 1 1
4 0 2
5 0 3
6 1 2
"""
edges_before = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
0 1 5 4,6
"""
# We sample 0 and 3 and 6, so we get
nodes_after = """\
id is_sample time
0 1 0
1 1 1
2 1 2
3 0 3
"""
edges_after = """\
left right parent child
0 1 1 0
0 1 3 1,2
"""
self.verify_simplify(
samples=[0, 3, 6],
nodes_before=nodes_before,
edges_before=edges_before,
nodes_after=nodes_after,
edges_after=edges_after,
)
def test_single_binary_tree_simple_mutations(self):
# 3 5
# / \
# 2 4 \
# / \ s0
# 1 3 s1 \
# / \ \ \
# 0 (0) (1) 2 (6)
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 0 0
3 0 1
4 0 2
5 0 3
6 1 0
"""
edges_before = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
0 1 5 4,6
"""
sites_before = """\
id position ancestral_state
0 0.1 0
1 0.2 0
"""
mutations_before = """\
site node derived_state
0 6 1
1 2 1
"""
# We sample 0, 1 and 6, so we get
nodes_after = """\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 3
"""
edges_after = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
"""
sites_after = """\
id position ancestral_state
0 0.1 0
"""
mutations_after = """\
site node derived_state
0 2 1
"""
self.verify_simplify(
samples=[0, 1, 6],
nodes_before=nodes_before,
edges_before=edges_before,
sites_before=sites_before,
mutations_before=mutations_before,
nodes_after=nodes_after,
edges_after=edges_after,
sites_after=sites_after,
mutations_after=mutations_after,
)
# If we don't filter the fixed sites, we should get the same
# mutations and the original sites table back.
self.verify_simplify(
samples=[0, 1, 6],
filter_sites=False,
nodes_before=nodes_before,
edges_before=edges_before,
sites_before=sites_before,
mutations_before=mutations_before,
nodes_after=nodes_after,
edges_after=edges_after,
sites_after=sites_before,
mutations_after=mutations_after,
)
def test_single_binary_tree_keep_roots_mutations(self):
# 3 5
# m0 / \
# 2 4 \
# m1 / \ \
# 1 3 \ \
# / \ \ \
# 0 (0) (1) 2 6
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 0 0
3 0 1
4 0 2
5 0 3
6 0 0
"""
edges_before = """\
left right parent child
0 1 3 0,1
0 1 4 2,3
0 1 5 4,6
"""
sites_before = """\
id position ancestral_state
0 0.1 0
"""
mutations_before = """\
site node derived_state
0 3 2
0 4 1
"""
# We sample 0 and 1
nodes_after = """\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 3
"""
edges_after = """\
left right parent child
0 1 2 0,1
0 1 3 2
"""
sites_after = """\
id position ancestral_state
0 0.1 0
"""
mutations_after = """\
site node derived_state
0 2 2
0 2 1
"""
self.verify_simplify(
samples=[0, 1],
nodes_before=nodes_before,
edges_before=edges_before,
sites_before=sites_before,
mutations_before=mutations_before,
nodes_after=nodes_after,
edges_after=edges_after,
sites_after=sites_after,
mutations_after=mutations_after,
keep_input_roots=True,
)
def test_place_mutations_with_and_without_roots(self):
nodes_before = """\
id is_sample time
0 1 0
1 0 1
2 0 2
"""
edges_before = """\
left right parent child
0 2 1 0
0 2 2 1
"""
sites = """\
id position ancestral_state
0 1.0 0
"""
mutations_before = """\
site node derived_state time
0 0 2 0
0 1 1 1
0 2 3 2
"""
# expected result without keep_input_roots
nodes_after = """\
id is_sample time
0 1 0
"""
edges_after = """\
left right parent child
"""
mutations_after = """\
site node derived_state time
0 0 2 0
0 0 1 1
0 0 3 2
"""
# expected result with keep_input_roots
nodes_after_keep = """\
id is_sample time
0 1 0
1 0 2
"""
edges_after_keep = """\
left right parent child
0 2 1 0
"""
mutations_after_keep = """\
site node derived_state time
0 0 2 0
0 0 1 1
0 1 3 2
"""
self.verify_simplify(
samples=[0],
nodes_before=nodes_before,
edges_before=edges_before,
sites_before=sites,
mutations_before=mutations_before,
nodes_after=nodes_after,
edges_after=edges_after,
sites_after=sites,
mutations_after=mutations_after,
keep_input_roots=False,
)
self.verify_simplify(
samples=[0],
nodes_before=nodes_before,
edges_before=edges_before,
sites_before=sites,
mutations_before=mutations_before,
nodes_after=nodes_after_keep,
edges_after=edges_after_keep,
sites_after=sites,
mutations_after=mutations_after_keep,
keep_input_roots=True,
)
def test_overlapping_edges(self):
nodes = """\
id is_sample time
0 1 0
1 1 0
2 0 1
"""
edges_before = """\
left right parent child
0 2 2 0
1 3 2 1
"""
# We resolve the overlapping edges here. Since the flanking regions
# have no interesting edges, these are left out of the output.
edges_after = """\
left right parent child
1 2 2 0,1
"""
self.verify_simplify(
samples=[0, 1],
nodes_before=nodes,
edges_before=edges_before,
nodes_after=nodes,
edges_after=edges_after,
)
def test_overlapping_edges_internal_samples(self):
nodes = """\
id is_sample time
0 1 0
1 1 0
2 1 1
"""
edges = """\
left right parent child
0 2 2 0
1 3 2 1
"""
self.verify_simplify(
samples=[0, 1, 2],
nodes_before=nodes,
edges_before=edges,
nodes_after=nodes,
edges_after=edges,
)
def test_unary_edges_no_overlap(self):
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 0 1
"""
edges_before = """\
left right parent child
0 2 2 0
2 3 2 1
"""
# Because there is no overlap between the samples, we just get an
# empty set of output edges.
nodes_after = """\
id is_sample time
0 1 0
1 1 0
"""
edges_after = """\
left right parent child
"""
self.verify_simplify(
samples=[0, 1],
nodes_before=nodes_before,
edges_before=edges_before,
nodes_after=nodes_after,
edges_after=edges_after,
)
def test_unary_edges_no_overlap_internal_sample(self):
nodes_before = """\
id is_sample time
0 1 0
1 1 0
2 1 1
"""
edges_before = """\
left right parent child
0 1 2 0
1 2 2 1
"""
self.verify_simplify(
samples=[0, 1, 2],
nodes_before=nodes_before,
edges_before=edges_before,
nodes_after=nodes_before,
edges_after=edges_before,
)
class TestNonSampleExternalNodes(TopologyTestCase):
"""
Tests for situations in which we have tips that are not samples.
"""
def test_simple_case(self):
# Simplest case where we have n = 2 and external non-sample nodes.
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 0
4 0 0
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0,1,3,4
"""
)
sites = io.StringIO(
"""\
id position ancestral_state
0 0.1 0
1 0.2 0
2 0.3 0
3 0.4 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state
0 0 1
1 1 1
2 3 1
3 4 1
"""
)
ts = tskit.load_text(
nodes=nodes, edges=edges, sites=sites, mutations=mutations, strict=False
)
assert ts.sample_size == 2
assert ts.num_trees == 1
assert ts.num_nodes == 5
assert ts.num_sites == 4
assert ts.num_mutations == 4
t = next(ts.trees())
assert t.parent_dict == {0: 2, 1: 2, 3: 2, 4: 2}
assert t.root == 2
ts_simplified, node_map = ts.simplify(map_nodes=True)
assert list(node_map) == [0, 1, 2, -1, -1]
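# Nodes 3 and 4 (the non-sample tips) are removed by simplify, so they
# map to tskit.NULL (-1) in the node map.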
assert ts_simplified.num_nodes == 3
assert ts_simplified.num_trees == 1
t = next(ts_simplified.trees())
assert t.parent_dict == {0: 2, 1: 2}
assert t.root == 2
# We should have removed the two non-sample mutations.
assert [s.position for s in t.sites()] == [0.1, 0.2]
def test_unary_non_sample_external_nodes(self):
# Take an ordinary tree sequence and put a bunch of external non-sample
# nodes on it.
ts = msprime.simulate(
15, recombination_rate=5, random_seed=self.random_seed, mutation_rate=5
)
assert ts.num_trees > 2
assert ts.num_mutations > 2
tables = ts.dump_tables()
next_node = ts.num_nodes
tables.edges.reset()
for e in ts.edges():
tables.edges.append(e)
tables.edges.append(e.replace(child=next_node))
tables.nodes.add_row(time=0)
next_node += 1
tables.sort()
ts_new = tables.tree_sequence()
assert ts_new.num_nodes == next_node
assert ts_new.sample_size == ts.sample_size
self.assert_haplotypes_equal(ts, ts_new)
self.assert_variants_equal(ts, ts_new)
ts_simplified = ts_new.simplify()
assert ts_simplified.num_nodes == ts.num_nodes
assert ts_simplified.sample_size == ts.sample_size
assert list(ts_simplified.records()) == list(ts.records())
self.assert_haplotypes_equal(ts, ts_simplified)
self.assert_variants_equal(ts, ts_simplified)
class TestMultipleRoots(TopologyTestCase):
"""
Tests for situations where we have multiple roots for the samples.
"""
def test_simplest_degenerate_case(self):
# Simplest case where we have n = 2 and no edges.
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
"""
)
edges = io.StringIO(
"""\
left right parent child
"""
)
sites = io.StringIO(
"""\
id position ancestral_state
0 0.1 0
1 0.2 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state
0 0 1
1 1 1
"""
)
ts = tskit.load_text(
nodes=nodes,
edges=edges,
sites=sites,
mutations=mutations,
sequence_length=1,
strict=False,
)
assert ts.num_nodes == 2
assert ts.num_trees == 1
assert ts.num_sites == 2
assert ts.num_mutations == 2
t = next(ts.trees())
assert t.parent_dict == {}
assert sorted(t.roots) == [0, 1]
assert list(ts.haplotypes(isolated_as_missing=False)) == ["10", "01"]
assert [
v.genotypes for v in ts.variants(as_bytes=True, isolated_as_missing=False)
] == [b"10", b"01"]
simplified = ts.simplify()
t1 = ts.dump_tables()
t2 = simplified.dump_tables()
assert t1.nodes == t2.nodes
assert t1.edges == t2.edges
def test_simplest_non_degenerate_case(self):
# Simplest case where we have n = 4 and two root subtrees.
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 0
4 0 1
5 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 4 0,1
0 1 5 2,3
"""
)
sites = io.StringIO(
"""\
id position ancestral_state
0 0.1 0
1 0.2 0
2 0.3 0
3 0.4 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state
0 0 1
1 1 1
2 2 1
3 3 1
"""
)
ts = tskit.load_text(
nodes=nodes, edges=edges, sites=sites, mutations=mutations, strict=False
)
assert ts.num_nodes == 6
assert ts.num_trees == 1
assert ts.num_sites == 4
assert ts.num_mutations == 4
t = next(ts.trees())
assert t.parent_dict == {0: 4, 1: 4, 2: 5, 3: 5}
assert list(ts.haplotypes()) == ["1000", "0100", "0010", "0001"]
assert [v.genotypes for v in ts.variants(as_bytes=True)] == [
b"1000",
b"0100",
b"0010",
b"0001",
]
assert t.mrca(0, 1) == 4
assert t.mrca(0, 4) == 4
assert t.mrca(2, 3) == 5
assert t.mrca(0, 2) == tskit.NULL
assert t.mrca(0, 3) == tskit.NULL
assert t.mrca(2, 4) == tskit.NULL
ts_simplified, node_map = ts.simplify(map_nodes=True)
for j in range(4):
assert node_map[j] == j
assert ts_simplified.num_nodes == 6
assert ts_simplified.num_trees == 1
assert ts_simplified.num_sites == 4
assert ts_simplified.num_mutations == 4
t = next(ts_simplified.trees())
assert t.parent_dict == {0: 4, 1: 4, 2: 5, 3: 5}
def test_two_reducible_trees(self):
# We have n = 4 and two root subtrees, with some unary nodes and non-sample leaves
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 0
4 0 1
5 0 1
6 0 2
7 0 3
8 0 0 # Non sample leaf
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 4 0
0 1 5 1
0 1 6 4,5
0 1 7 2,3,8
"""
)
sites = io.StringIO(
"""\
id position ancestral_state
0 0.1 0
1 0.2 0
2 0.3 0
3 0.4 0
4 0.5 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state
0 0 1
1 1 1
2 2 1
3 3 1
4 8 1
"""
)
ts = tskit.load_text(
nodes=nodes, edges=edges, sites=sites, mutations=mutations, strict=False
)
assert ts.num_nodes == 9
assert ts.num_trees == 1
assert ts.num_sites == 5
assert ts.num_mutations == 5
t = next(ts.trees())
assert t.parent_dict == {0: 4, 1: 5, 2: 7, 3: 7, 4: 6, 5: 6, 8: 7}
assert list(ts.haplotypes()) == ["10000", "01000", "00100", "00010"]
assert [v.genotypes for v in ts.variants(as_bytes=True)] == [
b"1000",
b"0100",
b"0010",
b"0001",
b"0000",
]
assert t.mrca(0, 1) == 6
assert t.mrca(2, 3) == 7
assert t.mrca(2, 8) == 7
assert t.mrca(0, 2) == tskit.NULL
assert t.mrca(0, 3) == tskit.NULL
assert t.mrca(0, 8) == tskit.NULL
ts_simplified, node_map = ts.simplify(map_nodes=True)
for j in range(4):
assert node_map[j] == j
assert ts_simplified.num_nodes == 6
assert ts_simplified.num_trees == 1
t = next(ts_simplified.trees())
# print(ts_simplified.tables)
assert list(ts_simplified.haplotypes()) == ["1000", "0100", "0010", "0001"]
assert [v.genotypes for v in ts_simplified.variants(as_bytes=True)] == [
b"1000",
b"0100",
b"0010",
b"0001",
]
# The site over the non-sample external node should have been discarded.
sites = list(t.sites())
assert sites[-1].position == 0.4
assert t.parent_dict == {0: 4, 1: 4, 2: 5, 3: 5}
def test_one_reducible_tree(self):
# We have n = 4 and two root subtrees. One subtree is reducible and the other isn't.
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 0
4 0 1
5 0 1
6 0 2
7 0 3
8 0 0 # Non sample leaf
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 4 0
0 1 5 1
0 1 6 4,5
0 1 7 2,3,8
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
assert ts.num_nodes == 9
assert ts.num_trees == 1
t = next(ts.trees())
assert t.parent_dict == {0: 4, 1: 5, 2: 7, 3: 7, 4: 6, 5: 6, 8: 7}
assert t.mrca(0, 1) == 6
assert t.mrca(2, 3) == 7
assert t.mrca(2, 8) == 7
assert t.mrca(0, 2) == tskit.NULL
assert t.mrca(0, 3) == tskit.NULL
assert t.mrca(0, 8) == tskit.NULL
ts_simplified = ts.simplify()
assert ts_simplified.num_nodes == 6
assert ts_simplified.num_trees == 1
t = next(ts_simplified.trees())
assert t.parent_dict == {0: 4, 1: 4, 2: 5, 3: 5}
# NOTE: This test has not been checked since updating to the text representation,
# so there might be other problems with it.
def test_mutations_over_roots(self):
# Mutations over root nodes should be ok when we have multiple roots.
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 2
5 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 3 0,1
0 1 4 3
0 1 5 2
"""
)
sites = io.StringIO(
"""\
id position ancestral_state
0 0.1 0
1 0.2 0
2 0.3 0
3 0.4 0
4 0.5 0
5 0.6 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state
0 0 1
1 1 1
2 3 1
3 4 1
4 2 1
5 5 1
"""
)
ts = tskit.load_text(
nodes=nodes, edges=edges, sites=sites, mutations=mutations, strict=False
)
assert ts.num_nodes == 6
assert ts.num_trees == 1
assert ts.num_sites == 6
assert ts.num_mutations == 6
t = next(ts.trees())
assert len(list(t.sites())) == 6
haplotypes = ["101100", "011100", "000011"]
variants = [b"100", b"010", b"110", b"110", b"001", b"001"]
assert list(ts.haplotypes()) == haplotypes
assert [v.genotypes for v in ts.variants(as_bytes=True)] == variants
ts_simplified = ts.simplify(filter_sites=False)
assert list(ts_simplified.haplotypes(isolated_as_missing=False)) == haplotypes
assert variants == [
v.genotypes
for v in ts_simplified.variants(as_bytes=True, isolated_as_missing=False)
]
def test_break_single_tree(self):
# Take a single largish tree simulated with msprime, and remove the oldest record.
# This breaks it into two subtrees.
ts = msprime.simulate(20, random_seed=self.random_seed, mutation_rate=4)
assert ts.num_mutations > 5
tables = ts.dump_tables()
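# Edges are sorted with parent times nondecreasing, so the last edge is one
# of the root's child edges; dropping it disconnects that child's subtree
# and leaves two roots.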
tables.edges.set_columns(
left=tables.edges.left[:-1],
right=tables.edges.right[:-1],
parent=tables.edges.parent[:-1],
child=tables.edges.child[:-1],
)
ts_new = tables.tree_sequence()
assert ts.sample_size == ts_new.sample_size
assert ts.num_edges == ts_new.num_edges + 1
assert ts.num_trees == ts_new.num_trees
self.assert_haplotypes_equal(ts, ts_new)
self.assert_variants_equal(ts, ts_new)
roots = set()
t_new = next(ts_new.trees())
for u in ts_new.samples():
while t_new.parent(u) != tskit.NULL:
u = t_new.parent(u)
roots.add(u)
assert len(roots) == 2
assert sorted(roots) == sorted(t_new.roots)
class TestWithVisuals(TopologyTestCase):
"""
Some pedantic tests with ASCII depictions of what's supposed to happen.
"""
def verify_simplify_topology(self, ts, sample, haplotypes=False):
# copies from test_highlevel.py
new_ts, node_map = ts.simplify(sample, map_nodes=True)
old_trees = ts.trees()
old_tree = next(old_trees)
assert ts.get_num_trees() >= new_ts.get_num_trees()
for new_tree in new_ts.trees():
new_left, new_right = new_tree.get_interval()
old_left, old_right = old_tree.get_interval()
# Skip ahead on the old tree until new_left is within its interval
while old_right <= new_left:
old_tree = next(old_trees)
old_left, old_right = old_tree.get_interval()
# If the TMRCA of all pairs of samples is the same, then we have the
# same information. We limit this to at most 500 pairs.
pairs = itertools.islice(itertools.combinations(sample, 2), 500)
for pair in pairs:
mapped_pair = [node_map[u] for u in pair]
mrca1 = old_tree.get_mrca(*pair)
mrca2 = new_tree.get_mrca(*mapped_pair)
assert mrca2 == node_map[mrca1]
if haplotypes:
orig_haps = list(ts.haplotypes())
simp_haps = list(new_ts.haplotypes())
for i, j in enumerate(sample):
assert orig_haps[j] == simp_haps[i]
def test_partial_non_sample_external_nodes(self):
# A somewhat more complicated test case with a partially specified,
# non-sampled tip.
#
# Here is the situation:
#
# 1.0 7
# 0.7 / \ 6
# / \ / \
# 0.5 / 5 5 / 5
# / / \ / \ / / \
# 0.4 / / 4 / 4 / / 4
# / / / \ / / \ / / / \
# / / 3 \ / / \ / / 3 \
# / / \ / / \ / / \
# 0.0 0 1 2 1 0 2 0 1 2
#
# (0.0, 0.2), (0.2, 0.8), (0.8, 1.0)
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 0.2 # Non sample leaf
4 0 0.4
5 0 0.5
6 0 0.7
7 0 1.0
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.2 4 2,3
0.2 0.8 4 0,2
0.8 1.0 4 2,3
0.0 1.0 5 1,4
0.8 1.0 6 0,5
0.0 0.2 7 0,5
"""
)
true_trees = [
{0: 7, 1: 5, 2: 4, 3: 4, 4: 5, 5: 7, 6: -1, 7: -1},
{0: 4, 1: 5, 2: 4, 3: -1, 4: 5, 5: -1, 6: -1, 7: -1},
{0: 6, 1: 5, 2: 4, 3: 4, 4: 5, 5: 6, 6: -1, 7: -1},
]
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tree_dicts = [t.parent_dict for t in ts.trees()]
assert ts.sample_size == 3
assert ts.num_trees == 3
assert ts.num_nodes == 8
# check topologies agree:
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
# check .simplify() works here
self.verify_simplify_topology(ts, [0, 1, 2])
def test_partial_non_sample_external_nodes_2(self):
# The same situation as above, but the partial tip is labeled '7' not '3':
#
# 1.0 6
# 0.7 / \ 5
# / \ / \
# 0.5 / 4 4 / 4
# / / \ / \ / / \
# 0.4 / / 3 / 3 / / 3
# / / / \ / / \ / / / \
# / / 7 \ / / \ / / 7 \
# / / \ / / \ / / \
# 0.0 0 1 2 1 0 2 0 1 2
#
# (0.0, 0.2), (0.2, 0.8), (0.8, 1.0)
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 0.4
4 0 0.5
5 0 0.7
6 0 1.0
7 0 0 # Non sample leaf
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.2 3 2,7
0.2 0.8 3 0,2
0.8 1.0 3 2,7
0.0 0.2 4 1,3
0.2 0.8 4 1,3
0.8 1.0 4 1,3
0.8 1.0 5 0,4
0.0 0.2 6 0,4
"""
)
true_trees = [
{0: 6, 1: 4, 2: 3, 3: 4, 4: 6, 5: -1, 6: -1, 7: 3},
{0: 3, 1: 4, 2: 3, 3: 4, 4: -1, 5: -1, 6: -1, 7: -1},
{0: 5, 1: 4, 2: 3, 3: 4, 4: 5, 5: -1, 6: -1, 7: 3},
]
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tree_dicts = [t.parent_dict for t in ts.trees()]
# the sample size check works here since the non-sample tip id 7 > 3
assert ts.sample_size == 3
assert ts.num_trees == 3
assert ts.num_nodes == 8
# check topologies agree:
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
self.verify_simplify_topology(ts, [0, 1, 2])
def test_single_offspring_records(self):
# Here we have inserted a single-offspring record
# (for 6 on the left segment):
#
# 1.0 7
# 0.7 / 6 6
# / \ / \
# 0.5 / 5 5 / 5
# / / \ / \ / / \
# 0.4 / / 4 / 4 / / 4
# 0.3 / / / \ / / \ / / / \
# / / 3 \ / / \ / / 3 \
# / / \ / / \ / / \
# 0.0 0 1 2 1 0 2 0 1 2
#
# (0.0, 0.2), (0.2, 0.8), (0.8, 1.0)
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 0 # Non sample leaf
4 0 0.4
5 0 0.5
6 0 0.7
7 0 1.0
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.2 4 2,3
0.2 0.8 4 0,2
0.8 1.0 4 2,3
0.0 1.0 5 1,4
0.8 1.0 6 0,5
0.0 0.2 6 5
0.0 0.2 7 0,6
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
true_trees = [
{0: 7, 1: 5, 2: 4, 3: 4, 4: 5, 5: 6, 6: 7, 7: -1},
{0: 4, 1: 5, 2: 4, 3: -1, 4: 5, 5: -1, 6: -1, 7: -1},
{0: 6, 1: 5, 2: 4, 3: 4, 4: 5, 5: 6, 6: -1, 7: -1},
]
tree_dicts = [t.parent_dict for t in ts.trees()]
assert ts.sample_size == 3
assert ts.num_trees == 3
assert ts.num_nodes == 8
# check topologies agree:
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
self.verify_simplify_topology(ts, [0, 1, 2])
def test_many_single_offspring(self):
# a more complex test with single offspring
# With `(i,j,x)->k` denoting that individual `k` inherits from `i` on `[0,x)`
# and from `j` on `[x,1)`:
# 1. Begin with an individual `3` (and another anonymous one) at `t=0`.
# 2. `(3,?,1.0)->4` and `(3,?,1.0)->5` at `t=1`
# 3. `(4,3,0.9)->6` and `(3,5,0.1)->7` and then `3` dies at `t=2`
# 4. `(6,7,0.7)->8` at `t=3`
# 5. `(8,6,0.8)->9` and `(7,8,0.2)->10` at `t=4`.
# 6. `(3,9,0.6)->0` and `(9,10,0.5)->1` and `(10,4,0.4)->2` at `t=5`.
# 7. We sample `0`, `1`, and `2`.
# Here are the trees:
# t | | | |
#
# 0 --3-- | --3-- | --3-- | --3-- | --3--
# / | \ | / | \ | / \ | / \ | / \
# 1 4 | 5 | 4 * 5 | 4 5 | 4 5 | 4 5
# |\ / \ /| | |\ \ | |\ / | |\ / | |\ /|
# 2 | 6 7 | | | 6 7 | | 6 7 | | 6 7 | | 6 7 |
# | |\ /| | | | \ * | | \ | | | * | | * | ...
# 3 | | 8 | | | | 8 | | * 8 * | | 8 | | 8 |
# | |/ \| | | | / | | | / | | | * * | | / \ |
# 4 | 9 10 | | | 9 10 | | 9 10 | | 9 10 | | 9 10 |
# |/ \ / \| | | \ * | | \ \ | | \ * | | \ |
# 5 0 1 2 | 0 1 2 | 0 1 2 | 0 1 2 | 0 1 2
#
# | 0.0 - 0.1 | 0.1 - 0.2 | 0.2 - 0.4 | 0.4 - 0.5
# ... continued:
# t | | | |
#
# 0 --3-- | --3-- | --3-- | --3-- | --3--
# / \ | / \ | / \ | / \ | / | \
# 1 4 5 | 4 5 | 4 5 | 4 5 | 4 | 5
# |\ /| | \ /| | \ /| | \ /| | / /|
# 2 | 6 7 | | 6 7 | | 6 7 | | 6 7 | | 6 7 |
# | \ | | \ | | / | | | / | | | / |
# 3 ... | 8 | | 8 | | 8 | | | 8 | | | 8 |
# | / \ | | / \ | | / \ | | | \ | | | \ |
# 4 | 9 10 | | 9 10 | | 9 10 | | 9 10 | | 9 10 |
# | / | | / / | | / / | | / / | | / / |
# 5 0 1 2 | 0 1 2 | 0 1 2 | 0 1 2 | 0 1 2
#
# 0.5 - 0.6 | 0.6 - 0.7 | 0.7 - 0.8 | 0.8 - 0.9 | 0.9 - 1.0
true_trees = [
{0: 4, 1: 9, 2: 10, 3: -1, 4: 3, 5: 3, 6: 4, 7: 3, 8: 6, 9: 8, 10: 7},
{0: 4, 1: 9, 2: 10, 3: -1, 4: 3, 5: 3, 6: 4, 7: 5, 8: 6, 9: 8, 10: 7},
{0: 4, 1: 9, 2: 10, 3: -1, 4: 3, 5: 3, 6: 4, 7: 5, 8: 6, 9: 8, 10: 8},
{0: 4, 1: 9, 2: 5, 3: -1, 4: 3, 5: 3, 6: 4, 7: 5, 8: 6, 9: 8, 10: 8},
{0: 4, 1: 10, 2: 5, 3: -1, 4: 3, 5: 3, 6: 4, 7: 5, 8: 6, 9: 8, 10: 8},
{0: 9, 1: 10, 2: 5, 3: -1, 4: 3, 5: 3, 6: 4, 7: 5, 8: 6, 9: 8, 10: 8},
{0: 9, 1: 10, 2: 5, 3: -1, 4: 3, 5: 3, 6: 4, 7: 5, 8: 7, 9: 8, 10: 8},
{0: 9, 1: 10, 2: 5, 3: -1, 4: 3, 5: 3, 6: 4, 7: 5, 8: 7, 9: 6, 10: 8},
{0: 9, 1: 10, 2: 5, 3: -1, 4: 3, 5: 3, 6: 3, 7: 5, 8: 7, 9: 6, 10: 8},
]
true_haplotypes = ["0100", "0001", "1110"]
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 5
4 0 4
5 0 4
6 0 3
7 0 3
8 0 2
9 0 1
10 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0.5 1.0 10 1
0.0 0.4 10 2
0.6 1.0 9 0
0.0 0.5 9 1
0.8 1.0 8 10
0.2 0.8 8 9,10
0.0 0.2 8 9
0.7 1.0 7 8
0.0 0.2 7 10
0.8 1.0 6 9
0.0 0.7 6 8
0.4 1.0 5 2,7
0.1 0.4 5 7
0.6 0.9 4 6
0.0 0.6 4 0,6
0.9 1.0 3 4,5,6
0.1 0.9 3 4,5
0.0 0.1 3 4,5,7
"""
)
sites = io.StringIO(
"""\
position ancestral_state
0.05 0
0.15 0
0.25 0
0.4 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state parent
0 7 1 -1
0 10 0 0
0 2 1 1
1 0 1 -1
1 10 1 -1
2 8 1 -1
2 9 0 5
2 10 0 5
2 2 1 7
3 8 1 -1
"""
)
ts = tskit.load_text(nodes, edges, sites, mutations, strict=False)
tree_dicts = [t.parent_dict for t in ts.trees()]
assert ts.sample_size == 3
assert ts.num_trees == len(true_trees)
assert ts.num_nodes == 11
assert len(list(ts.edge_diffs())) == ts.num_trees
# check topologies agree:
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
for j, x in enumerate(ts.haplotypes()):
assert x == true_haplotypes[j]
self.verify_simplify_topology(ts, [0, 1, 2], haplotypes=True)
self.verify_simplify_topology(ts, [1, 0, 2], haplotypes=True)
self.verify_simplify_topology(ts, [0, 1], haplotypes=False)
self.verify_simplify_topology(ts, [1, 2], haplotypes=False)
self.verify_simplify_topology(ts, [2, 0], haplotypes=False)
def test_tricky_switches(self):
# suppose the topology has:
# left right parent child
# 0.0 0.5 6 0,1
# 0.5 1.0 6 4,5
# 0.0 0.4 7 2,3
#
# --------------------------
#
# 12 . 12 . 12 .
# / \ . / \ . / \ .
# 11 \ . / \ . / \ .
# / \ \ . / 10 . / 10 .
# / \ \ . / / \ . / / \ .
# 6 7 8 . 6 9 8 . 6 9 8 .
# / \ / \ /\ . / \ / \ /\ . / \ / \ /\ .
# 0 1 2 3 4 5 . 0 1 2 3 4 5 . 4 5 2 3 0 1 .
# . . .
# 0.0 0.4 0.5 1.0
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 0
4 1 0
5 1 0
6 0 1
7 0 1
8 0 1
9 0 1
10 0 2
11 0 3
12 0 4
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.5 6 0
0.0 0.5 6 1
0.5 1.0 6 4
0.5 1.0 6 5
0.0 0.4 7 2,3
0.5 1.0 8 0
0.5 1.0 8 1
0.0 0.5 8 4
0.0 0.5 8 5
0.4 1.0 9 2,3
0.4 1.0 10 8,9
0.0 0.4 11 6,7
0.4 1.0 12 6
0.0 0.4 12 8
0.4 1.0 12 10
0.0 0.4 12 11
"""
)
true_trees = [
{
0: 6,
1: 6,
2: 7,
3: 7,
4: 8,
5: 8,
6: 11,
7: 11,
8: 12,
9: -1,
10: -1,
11: 12,
12: -1,
},
{
0: 6,
1: 6,
2: 9,
3: 9,
4: 8,
5: 8,
6: 12,
7: -1,
8: 10,
9: 10,
10: 12,
11: -1,
12: -1,
},
{
0: 8,
1: 8,
2: 9,
3: 9,
4: 6,
5: 6,
6: 12,
7: -1,
8: 10,
9: 10,
10: 12,
11: -1,
12: -1,
},
]
ts = tskit.load_text(nodes, edges, strict=False)
tree_dicts = [t.parent_dict for t in ts.trees()]
assert ts.sample_size == 6
assert ts.num_trees == len(true_trees)
assert ts.num_nodes == 13
assert len(list(ts.edge_diffs())) == ts.num_trees
# check topologies agree:
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
self.verify_simplify_topology(ts, [0, 2])
self.verify_simplify_topology(ts, [0, 4])
self.verify_simplify_topology(ts, [2, 4])
def test_tricky_simplify(self):
# Continue as above but invoke simplify:
#
# 12 . 12 .
# / \ . / \ .
# 11 \ . 11 \ .
# / \ \ . / \ \ .
# 13 \ \ . / 15 \ .
# / \ \ \ . / / \ \ .
# 6 14 7 8 . 6 14 7 8 .
# / \ / \ /\ . / \ / \ /\ .
# 0 1 2 3 4 5 . 0 1 2 3 4 5 .
# . .
# 0.0 0.1 0.4
#
# . 12 . 12 .
# . / \ . / \ .
# . / \ . / \ .
# . / 10 . / 10 .
# . / / \ . / / \ .
# . 6 9 8 . 6 9 8 .
# . / \ / \ /\ . / \ / \ /\ .
# . 0 1 2 3 4 5 . 4 5 2 3 0 1 .
# . . .
# 0.4 0.5 1.0
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 0
4 1 0
5 1 0
6 0 1
7 0 1
8 0 1
9 0 1
10 0 2
11 0 3
12 0 4
13 0 2
14 0 1
15 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.5 6 0,1
0.5 1.0 6 4,5
0.0 0.4 7 2,3
0.0 0.5 8 4,5
0.5 1.0 8 0,1
0.4 1.0 9 2,3
0.4 1.0 10 8,9
0.0 0.1 13 6,14
0.1 0.4 15 7,14
0.0 0.1 11 7,13
0.1 0.4 11 6,15
0.0 0.4 12 8,11
0.4 1.0 12 6,10
"""
)
true_trees = [
{
0: 6,
1: 6,
2: 7,
3: 7,
4: 8,
5: 8,
6: 11,
7: 11,
8: 12,
9: -1,
10: -1,
11: 12,
12: -1,
},
{
0: 6,
1: 6,
2: 9,
3: 9,
4: 8,
5: 8,
6: 12,
7: -1,
8: 10,
9: 10,
10: 12,
11: -1,
12: -1,
},
{
0: 8,
1: 8,
2: 9,
3: 9,
4: 6,
5: 6,
6: 12,
7: -1,
8: 10,
9: 10,
10: 12,
11: -1,
12: -1,
},
]
big_ts = tskit.load_text(nodes, edges, strict=False)
assert big_ts.num_trees == 1 + len(true_trees)
assert big_ts.num_nodes == 16
ts, node_map = big_ts.simplify(map_nodes=True)
assert list(node_map[:6]) == list(range(6))
assert ts.sample_size == 6
assert ts.num_nodes == 13
def test_ancestral_samples(self):
# Check that specifying samples that are not at time 0.0 works.
#
# 1.0 7
# 0.7 / \ 8 6
# / \ / \ / \
# 0.5 / 5 / 5 / 5
# / / \ / / \ / / \
# 0.4 / / 4 / / 4 / / 4
# / / / \ / / / \ / / / \
# 0.2 / / 3 \ 3 / / \ / / 3 \
# / / * \ * / / \ / / * \
# 0.0 0 1 2 1 0 2 0 1 2
# * * * * * *
# (0.0, 0.2), (0.2, 0.8), (0.8, 1.0)
#
# Simplified, keeping [1,2,3]
#
# 1.0
# 0.7 5
# / \
# 0.5 4 / 4 4
# / \ / / \ / \
# 0.4 / 3 / / 3 / 3
# / / \ / / \ / / \
# 0.2 / 2 \ 2 / \ / 2 \
# / * \ * / \ / * \
# 0.0 0 1 0 1 0 1
# * * * * * *
# (0.0, 0.2), (0.2, 0.8), (0.8, 1.0)
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 1 0
2 1 0
3 1 0.2
4 0 0.4
5 0 0.5
6 0 0.7
7 0 1.0
8 0 0.8
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.2 4 2,3
0.2 0.8 4 0,2
0.8 1.0 4 2,3
0.0 1.0 5 1,4
0.8 1.0 6 0,5
0.2 0.8 8 3,5
0.0 0.2 7 0,5
"""
)
first_ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
ts, node_map = first_ts.simplify(map_nodes=True)
true_trees = [
{0: 7, 1: 5, 2: 4, 3: 4, 4: 5, 5: 7, 6: -1, 7: -1},
{0: 4, 1: 5, 2: 4, 3: 8, 4: 5, 5: 8, 6: -1, 7: -1},
{0: 6, 1: 5, 2: 4, 3: 4, 4: 5, 5: 6, 6: -1, 7: -1},
]
# maps [1,2,3] -> [0,1,2]
assert node_map[1] == 0
assert node_map[2] == 1
assert node_map[3] == 2
true_simplified_trees = [
{0: 4, 1: 3, 2: 3, 3: 4},
{0: 4, 1: 4, 2: 5, 4: 5},
{0: 4, 1: 3, 2: 3, 3: 4},
]
assert first_ts.sample_size == 3
assert ts.sample_size == 3
assert first_ts.num_trees == 3
assert ts.num_trees == 3
assert first_ts.num_nodes == 9
assert ts.num_nodes == 6
assert first_ts.node(3).time == 0.2
assert ts.node(2).time == 0.2
# check topologies agree:
tree_dicts = [t.parent_dict for t in first_ts.trees()]
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
tree_simplified_dicts = [t.parent_dict for t in ts.trees()]
for a, t in zip(true_simplified_trees, tree_simplified_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
# check .simplify() works here
self.verify_simplify_topology(first_ts, [1, 2, 3])
def test_all_ancestral_samples(self):
        # Check that specifying samples that are all not at time 0.0 works.
#
# 1.0 7
# 0.7 / \ 8 6
# / \ / \ / \
# 0.5 / 5 / 5 / 5
# / / \ / / \ / / \
# 0.4 / / 4 / / 4 / / 4
# / / / \ / / / \ / / / \
# 0.2 / / 3 \ 3 / / \ / / 3 \
# / 1 * 2 * 1 / 2 / 1 * 2
# 0.0 0 * * * 0 * 0 * *
#
# (0.0, 0.2), (0.2, 0.8), (0.8, 1.0)
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 1 0.1
2 1 0.1
3 1 0.2
4 0 0.4
5 0 0.5
6 0 0.7
7 0 1.0
8 0 0.8
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.2 4 2,3
0.2 0.8 4 0,2
0.8 1.0 4 2,3
0.0 1.0 5 1,4
0.8 1.0 6 0,5
0.2 0.8 8 3,5
0.0 0.2 7 0,5
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
true_trees = [
{0: 7, 1: 5, 2: 4, 3: 4, 4: 5, 5: 7, 6: -1, 7: -1},
{0: 4, 1: 5, 2: 4, 3: 8, 4: 5, 5: 8, 6: -1, 7: -1},
{0: 6, 1: 5, 2: 4, 3: 4, 4: 5, 5: 6, 6: -1, 7: -1},
]
assert ts.sample_size == 3
assert ts.num_trees == 3
assert ts.num_nodes == 9
assert ts.node(0).time == 0.0
assert ts.node(1).time == 0.1
assert ts.node(2).time == 0.1
assert ts.node(3).time == 0.2
# check topologies agree:
tree_dicts = [t.parent_dict for t in ts.trees()]
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
# check .simplify() works here
self.verify_simplify_topology(ts, [1, 2, 3])
def test_internal_sampled_node(self):
# 1.0 7
# 0.7 / \ 8 6
# / \ / \ / \
# 0.5 / 5 / 5 / 5
# / /*\ / /*\ / /*\
# 0.4 / / 4 / / 4 / / 4
# / / / \ / / / \ / / / \
# 0.2 / / 3 \ 3 / / \ / / 3 \
# / 1 * 2 * 1 / 2 / 1 * 2
# 0.0 0 * * * 0 * 0 * *
#
# (0.0, 0.2), (0.2, 0.8), (0.8, 1.0)
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 1 0.1
2 1 0.1
3 1 0.2
4 0 0.4
5 1 0.5
6 0 0.7
7 0 1.0
8 0 0.8
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.2 4 2,3
0.2 0.8 4 0,2
0.8 1.0 4 2,3
0.0 1.0 5 1,4
0.8 1.0 6 0,5
0.2 0.8 8 3,5
0.0 0.2 7 0,5
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
true_trees = [
{0: 7, 1: 5, 2: 4, 3: 4, 4: 5, 5: 7, 6: -1, 7: -1},
{0: 4, 1: 5, 2: 4, 3: 8, 4: 5, 5: 8, 6: -1, 7: -1},
{0: 6, 1: 5, 2: 4, 3: 4, 4: 5, 5: 6, 6: -1, 7: -1},
]
assert ts.sample_size == 4
assert ts.num_trees == 3
assert ts.num_nodes == 9
assert ts.node(0).time == 0.0
assert ts.node(1).time == 0.1
assert ts.node(2).time == 0.1
assert ts.node(3).time == 0.2
# check topologies agree:
tree_dicts = [t.parent_dict for t in ts.trees()]
for a, t in zip(true_trees, tree_dicts):
for k in a.keys():
if k in t.keys():
assert t[k] == a[k]
else:
assert a[k] == tskit.NULL
# check .simplify() works here
self.verify_simplify_topology(ts, [1, 2, 3])
self.check_num_samples(
ts,
[
(0, 5, 4),
(0, 2, 1),
(0, 7, 4),
(0, 4, 2),
(1, 4, 1),
(1, 5, 3),
(1, 8, 4),
(1, 0, 0),
(2, 5, 4),
(2, 1, 1),
],
)
self.check_num_tracked_samples(
ts,
[1, 2, 5],
[
(0, 5, 3),
(0, 2, 1),
(0, 7, 3),
(0, 4, 1),
(1, 4, 1),
(1, 5, 3),
(1, 8, 3),
(1, 0, 0),
(2, 5, 3),
(2, 1, 1),
],
)
self.check_sample_iterator(
ts,
[
(0, 0, []),
(0, 5, [5, 1, 2, 3]),
(0, 4, [2, 3]),
(1, 5, [5, 1, 2]),
(2, 4, [2, 3]),
],
)
# pedantically check the Tree methods on the second tree
tst = ts.trees()
t = next(tst)
t = next(tst)
assert t.branch_length(1) == 0.4
assert not t.is_internal(0)
assert t.is_leaf(0)
assert not t.is_sample(0)
assert not t.is_internal(1)
assert t.is_leaf(1)
assert t.is_sample(1)
assert t.is_internal(5)
assert not t.is_leaf(5)
assert t.is_sample(5)
assert t.is_internal(4)
assert not t.is_leaf(4)
assert not t.is_sample(4)
assert t.root == 8
assert t.mrca(0, 1) == 5
assert t.sample_size == 4
class TestBadTrees:
"""
Tests for bad tree sequence topologies that can only be detected when we
try to create trees.
"""
def test_simplest_contradictory_children(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 1.0 2 0
0.0 1.0 3 0
"""
)
with pytest.raises(_tskit.LibraryError):
tskit.load_text(nodes=nodes, edges=edges, strict=False)
def test_partial_overlap_contradictory_children(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 1.0 2 0,1
0.5 1.0 3 0
"""
)
with pytest.raises(_tskit.LibraryError):
tskit.load_text(nodes=nodes, edges=edges, strict=False)
class TestCoiteration:
"""
Test ability to iterate over multiple (currently 2) tree sequences simultaneously
"""
def test_identical_ts(self):
ts = msprime.simulate(4, recombination_rate=1, random_seed=123)
assert ts.num_trees > 1
total_iterations = 0
for tree, (_, t1, t2) in zip(ts.trees(), ts.coiterate(ts)):
total_iterations += 1
assert tree == t1 == t2
assert ts.num_trees == total_iterations
def test_intervals(self):
ts1 = msprime.simulate(4, recombination_rate=1, random_seed=1)
assert ts1.num_trees > 1
one_tree_ts = msprime.simulate(5, random_seed=2)
multi_tree_ts = msprime.simulate(5, recombination_rate=1, random_seed=2)
assert multi_tree_ts.num_trees > 1
for ts2 in (one_tree_ts, multi_tree_ts):
bp1 = set(ts1.breakpoints())
bp2 = set(ts2.breakpoints())
assert bp1 != bp2
breaks = set()
for interval, t1, t2 in ts1.coiterate(ts2):
assert set(interval) <= set(t1.interval) | set(t2.interval)
breaks.add(interval.left)
breaks.add(interval.right)
assert t1.tree_sequence == ts1
assert t2.tree_sequence == ts2
assert breaks == bp1 | bp2
def test_simple_ts(self):
nodes = """\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 2
"""
edges1 = """\
left right parent child
0 0.2 3 0,1
0 0.2 4 2,3
0.2 1 3 2,1
0.2 1 4 0,3
"""
edges2 = """\
left right parent child
0 0.8 3 2,1
0 0.8 4 0,3
0.8 1 3 0,1
0.8 1 4 2,3
"""
ts1 = tskit.load_text(io.StringIO(nodes), io.StringIO(edges1), strict=False)
ts2 = tskit.load_text(io.StringIO(nodes), io.StringIO(edges2), strict=False)
coiterator = ts1.coiterate(ts2)
interval, tree1, tree2 = next(coiterator)
assert interval.left == 0
assert interval.right == 0.2
assert tree1 == ts1.at_index(0)
assert tree2 == ts2.at_index(0)
interval, tree1, tree2 = next(coiterator)
assert interval.left == 0.2
assert interval.right == 0.8
assert tree1 == ts1.at_index(1)
assert tree2 == ts2.at_index(0)
interval, tree1, tree2 = next(coiterator)
assert interval.left == 0.8
assert interval.right == 1
assert tree1 == ts1.at_index(1)
assert tree2 == ts2.at_index(1)
def test_nonequal_lengths(self):
ts1 = msprime.simulate(4, random_seed=1, length=2)
ts2 = msprime.simulate(4, random_seed=1)
with pytest.raises(ValueError, match="equal sequence length"):
next(ts1.coiterate(ts2))
def test_kwargs(self):
ts = msprime.simulate(4, recombination_rate=1, random_seed=123)
for _, t1, t2 in ts.coiterate(ts):
assert t1.num_tracked_samples() == t2.num_tracked_samples() == 0
for _, t1, t2 in ts.coiterate(ts, tracked_samples=ts.samples()):
assert t1.num_tracked_samples() == t2.num_tracked_samples() == 4
class SimplifyTestBase:
"""
Base class for simplify tests.
"""
def do_simplify(
self,
ts,
samples=None,
compare_lib=True,
filter_sites=True,
filter_populations=True,
filter_individuals=True,
keep_unary=False,
keep_input_roots=False,
):
"""
Runs the Python test implementation of simplify.
"""
if samples is None:
samples = ts.samples()
s = tests.Simplifier(
ts,
samples,
filter_sites=filter_sites,
filter_populations=filter_populations,
filter_individuals=filter_individuals,
keep_unary=keep_unary,
keep_input_roots=keep_input_roots,
)
new_ts, node_map = s.simplify()
if compare_lib:
sts, lib_node_map1 = ts.simplify(
samples,
filter_sites=filter_sites,
filter_individuals=filter_individuals,
filter_populations=filter_populations,
keep_unary=keep_unary,
keep_input_roots=keep_input_roots,
map_nodes=True,
)
lib_tables1 = sts.dump_tables()
lib_tables2 = ts.dump_tables()
lib_node_map2 = lib_tables2.simplify(
samples,
filter_sites=filter_sites,
keep_unary=keep_unary,
keep_input_roots=keep_input_roots,
filter_individuals=filter_individuals,
filter_populations=filter_populations,
)
py_tables = new_ts.dump_tables()
for lib_tables, lib_node_map in [
(lib_tables1, lib_node_map1),
(lib_tables2, lib_node_map2),
]:
assert lib_tables.nodes == py_tables.nodes
assert lib_tables.edges == py_tables.edges
assert lib_tables.migrations == py_tables.migrations
assert lib_tables.sites == py_tables.sites
assert lib_tables.mutations == py_tables.mutations
assert lib_tables.individuals == py_tables.individuals
assert lib_tables.populations == py_tables.populations
assert all(node_map == lib_node_map)
return new_ts, node_map
class TestSimplify(SimplifyTestBase):
"""
Tests that the implementations of simplify() do what they are supposed to.
"""
random_seed = 23
#
# 8
# / \
# / \
# / \
# 7 \
# / \ 6
# / 5 / \
# / / \ / \
# 4 0 1 2 3
small_tree_ex_nodes = """\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 1 0 0.00000000000000
5 0 0 0.14567111023387
6 0 0 0.21385545626353
7 0 0 0.43508024345063
8 0 0 1.60156352971203
"""
small_tree_ex_edges = """\
id left right parent child
0 0.00000000 1.00000000 5 0,1
1 0.00000000 1.00000000 6 2,3
2 0.00000000 1.00000000 7 4,5
3 0.00000000 1.00000000 8 6,7
"""
def verify_no_samples(self, ts, keep_unary=False):
"""
Zero out the flags column and verify that we get back the correct
tree sequence when we run simplify.
"""
t1 = ts.dump_tables()
t1.nodes.flags = np.zeros_like(t1.nodes.flags)
        # Simplify the zeroed-flag tree sequence with an explicit samples list
        # and check that we get the same result as simplifying the original.
        ts1, node_map1 = self.do_simplify(
            t1.tree_sequence(), samples=ts.samples(), keep_unary=keep_unary
        )
t1 = ts1.dump_tables()
ts2, node_map2 = self.do_simplify(ts, keep_unary=keep_unary)
t2 = ts2.dump_tables()
t1.assert_equals(t2)
def verify_single_childified(self, ts, keep_unary=False):
"""
Modify the specified tree sequence so that it has lots of unary
nodes. Run simplify and verify we get the same tree sequence back
        if keep_unary is False. If keep_unary is True, the simplification
        should leave the original tree sequence unchanged.
"""
ts_single = tsutil.single_childify(ts)
tss, node_map = self.do_simplify(ts_single, keep_unary=keep_unary)
# All original nodes should still be present.
for u in range(ts.num_samples):
assert u == node_map[u]
# All introduced nodes should be mapped to null.
for u in range(ts.num_samples, ts_single.num_samples):
assert node_map[u] == tskit.NULL
t1 = ts.dump_tables()
t2 = tss.dump_tables()
t3 = ts_single.dump_tables()
if keep_unary:
assert set(t3.nodes.time) == set(t2.nodes.time)
assert len(t3.edges) == len(t2.edges)
assert t3.sites == t2.sites
assert len(t3.mutations) == len(t2.mutations)
else:
assert t1.nodes == t2.nodes
assert t1.edges == t2.edges
assert t1.sites == t2.sites
assert t1.mutations == t2.mutations
def verify_multiroot_internal_samples(self, ts, keep_unary=False):
ts_multiroot = tsutil.decapitate(ts, ts.num_edges // 2)
ts1 = tsutil.jiggle_samples(ts_multiroot)
ts2, node_map = self.do_simplify(ts1, keep_unary=keep_unary)
assert ts1.num_trees >= ts2.num_trees
trees2 = ts2.trees()
t2 = next(trees2)
for t1 in ts1.trees():
assert t2.interval.left <= t1.interval.left
assert t2.interval.right >= t1.interval.right
pairs = itertools.combinations(ts1.samples(), 2)
for pair in pairs:
mapped_pair = [node_map[u] for u in pair]
mrca1 = t1.get_mrca(*pair)
mrca2 = t2.get_mrca(*mapped_pair)
if mrca1 == tskit.NULL:
assert mrca2 == tskit.NULL
else:
assert node_map[mrca1] == mrca2
if t2.interval.right == t1.interval.right:
t2 = next(trees2, None)
def test_single_tree(self):
ts = msprime.simulate(10, random_seed=self.random_seed)
self.verify_no_samples(ts)
self.verify_single_childified(ts)
self.verify_multiroot_internal_samples(ts)
# Now with keep_unary=True.
self.verify_no_samples(ts, keep_unary=True)
self.verify_single_childified(ts, keep_unary=True)
self.verify_multiroot_internal_samples(ts, keep_unary=True)
def test_single_tree_mutations(self):
ts = msprime.simulate(10, mutation_rate=1, random_seed=self.random_seed)
assert ts.num_sites > 1
self.do_simplify(ts)
self.verify_single_childified(ts)
# Also with keep_unary == True.
self.do_simplify(ts, keep_unary=True)
self.verify_single_childified(ts, keep_unary=True)
def test_many_trees_mutations(self):
ts = msprime.simulate(
10, recombination_rate=1, mutation_rate=10, random_seed=self.random_seed
)
assert ts.num_trees > 2
assert ts.num_sites > 2
self.verify_no_samples(ts)
self.do_simplify(ts)
self.verify_single_childified(ts)
# Also with keep_unary == True.
self.do_simplify(ts, keep_unary=True)
self.verify_single_childified(ts, keep_unary=True)
def test_many_trees(self):
ts = msprime.simulate(5, recombination_rate=4, random_seed=self.random_seed)
assert ts.num_trees > 2
self.verify_no_samples(ts)
self.verify_single_childified(ts)
self.verify_multiroot_internal_samples(ts)
# Also with keep_unary == True.
self.verify_no_samples(ts, keep_unary=True)
self.verify_single_childified(ts, keep_unary=True)
self.verify_multiroot_internal_samples(ts, keep_unary=True)
def test_small_tree_internal_samples(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
nodes = tables.nodes
flags = nodes.flags
        # The parent of samples 0 and 1 is node 5. Make node 5 an internal
        # sample and mark node 0 as unsampled, so the sample size stays at 5.
        flags[0] = 0
flags[5] = tskit.NODE_IS_SAMPLE
nodes.flags = flags
ts = tables.tree_sequence()
assert ts.sample_size == 5
tss, node_map = self.do_simplify(ts, [3, 5])
assert node_map[3] == 0
assert node_map[5] == 1
assert tss.num_nodes == 3
assert tss.num_edges == 2
self.verify_no_samples(ts)
# with keep_unary == True
tss, node_map = self.do_simplify(ts, [3, 5], keep_unary=True)
assert node_map[3] == 0
assert node_map[5] == 1
assert tss.num_nodes == 5
assert tss.num_edges == 4
self.verify_no_samples(ts, keep_unary=True)
def test_small_tree_linear_samples(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
nodes = tables.nodes
flags = nodes.flags
# 7 is above 0. These are the only two samples
flags[:] = 0
flags[0] = tskit.NODE_IS_SAMPLE
flags[7] = tskit.NODE_IS_SAMPLE
nodes.flags = flags
ts = tables.tree_sequence()
assert ts.sample_size == 2
tss, node_map = self.do_simplify(ts, [0, 7])
assert node_map[0] == 0
assert node_map[7] == 1
assert tss.num_nodes == 2
assert tss.num_edges == 1
t = next(tss.trees())
assert t.parent_dict == {0: 1}
# with keep_unary == True
tss, node_map = self.do_simplify(ts, [0, 7], keep_unary=True)
assert node_map[0] == 0
assert node_map[7] == 1
assert tss.num_nodes == 4
assert tss.num_edges == 3
t = next(tss.trees())
def test_small_tree_internal_and_external_samples(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
nodes = tables.nodes
flags = nodes.flags
# 7 is above 0 and 1.
flags[:] = 0
flags[0] = tskit.NODE_IS_SAMPLE
flags[1] = tskit.NODE_IS_SAMPLE
flags[7] = tskit.NODE_IS_SAMPLE
nodes.flags = flags
ts = tables.tree_sequence()
assert ts.sample_size == 3
tss, node_map = self.do_simplify(ts, [0, 1, 7])
assert node_map[0] == 0
assert node_map[1] == 1
assert node_map[7] == 2
assert tss.num_nodes == 4
assert tss.num_edges == 3
t = next(tss.trees())
assert t.parent_dict == {0: 3, 1: 3, 3: 2}
# with keep_unary == True
tss, node_map = self.do_simplify(ts, [0, 1, 7], keep_unary=True)
assert node_map[0] == 0
assert node_map[1] == 1
assert node_map[7] == 2
assert tss.num_nodes == 5
assert tss.num_edges == 4
t = next(tss.trees())
assert t.parent_dict == {0: 3, 1: 3, 3: 2, 2: 4}
def test_small_tree_mutations(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
# Add some simple mutations here above the nodes we're keeping.
tables.sites.add_row(position=0.25, ancestral_state="0")
tables.sites.add_row(position=0.5, ancestral_state="0")
tables.sites.add_row(position=0.75, ancestral_state="0")
tables.sites.add_row(position=0.8, ancestral_state="0")
tables.mutations.add_row(site=0, node=0, derived_state="1")
tables.mutations.add_row(site=1, node=2, derived_state="1")
tables.mutations.add_row(site=2, node=7, derived_state="1")
tables.mutations.add_row(site=3, node=0, derived_state="1")
ts = tables.tree_sequence()
assert ts.num_sites == 4
assert ts.num_mutations == 4
for keep in [True, False]:
tss = self.do_simplify(ts, [0, 2], keep_unary=keep)[0]
assert tss.sample_size == 2
assert tss.num_mutations == 4
assert list(tss.haplotypes()) == ["1011", "0100"]
def test_small_tree_filter_zero_mutations(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
ts = tsutil.insert_branch_sites(ts)
assert ts.num_sites == 8
assert ts.num_mutations == 8
for keep in [True, False]:
tss, _ = self.do_simplify(ts, [4, 0, 1], filter_sites=True, keep_unary=keep)
assert tss.num_sites == 5
assert tss.num_mutations == 5
tss, _ = self.do_simplify(
ts, [4, 0, 1], filter_sites=False, keep_unary=keep
)
assert tss.num_sites == 8
assert tss.num_mutations == 5
def test_small_tree_fixed_sites(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
# Add some simple mutations that will be fixed after simplify
tables.sites.add_row(position=0.25, ancestral_state="0")
tables.sites.add_row(position=0.5, ancestral_state="0")
tables.sites.add_row(position=0.75, ancestral_state="0")
tables.mutations.add_row(site=0, node=2, derived_state="1")
tables.mutations.add_row(site=1, node=3, derived_state="1")
tables.mutations.add_row(site=2, node=6, derived_state="1")
ts = tables.tree_sequence()
assert ts.num_sites == 3
assert ts.num_mutations == 3
for keep in [True, False]:
tss, _ = self.do_simplify(ts, [4, 1], keep_unary=keep)
assert tss.sample_size == 2
assert tss.num_mutations == 0
assert list(tss.haplotypes()) == ["", ""]
def test_small_tree_mutations_over_root(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
tables.sites.add_row(position=0.25, ancestral_state="0")
tables.mutations.add_row(site=0, node=8, derived_state="1")
ts = tables.tree_sequence()
assert ts.num_sites == 1
assert ts.num_mutations == 1
for keep_unary, filter_sites in itertools.product([True, False], repeat=2):
tss, _ = self.do_simplify(
ts, [0, 1], filter_sites=filter_sites, keep_unary=keep_unary
)
assert tss.num_sites == 1
assert tss.num_mutations == 1
def test_small_tree_recurrent_mutations(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
# Add recurrent mutation on the root branches
tables.sites.add_row(position=0.25, ancestral_state="0")
tables.mutations.add_row(site=0, node=6, derived_state="1")
tables.mutations.add_row(site=0, node=7, derived_state="1")
ts = tables.tree_sequence()
assert ts.num_sites == 1
assert ts.num_mutations == 2
for keep in [True, False]:
tss = self.do_simplify(ts, [4, 3], keep_unary=keep)[0]
assert tss.sample_size == 2
assert tss.num_sites == 1
assert tss.num_mutations == 2
assert list(tss.haplotypes()) == ["1", "1"]
def test_small_tree_back_mutations(self):
ts = tskit.load_text(
nodes=io.StringIO(self.small_tree_ex_nodes),
edges=io.StringIO(self.small_tree_ex_edges),
strict=False,
)
tables = ts.dump_tables()
# Add a chain of mutations
tables.sites.add_row(position=0.25, ancestral_state="0")
tables.mutations.add_row(site=0, node=7, derived_state="1")
tables.mutations.add_row(site=0, node=5, derived_state="0")
tables.mutations.add_row(site=0, node=1, derived_state="1")
ts = tables.tree_sequence()
assert ts.num_sites == 1
assert ts.num_mutations == 3
assert list(ts.haplotypes()) == ["0", "1", "0", "0", "1"]
        # First check that simplifying for all samples keeps the original state.
for keep in [True, False]:
tss = self.do_simplify(ts, [0, 1, 2, 3, 4], keep_unary=keep)[0]
assert tss.sample_size == 5
assert tss.num_sites == 1
assert tss.num_mutations == 3
assert list(tss.haplotypes()) == ["0", "1", "0", "0", "1"]
# The ancestral state above 5 should be 0.
for keep in [True, False]:
tss = self.do_simplify(ts, [0, 1], keep_unary=keep)[0]
assert tss.sample_size == 2
assert tss.num_sites == 1
assert tss.num_mutations == 3
assert list(tss.haplotypes()) == ["0", "1"]
# The ancestral state above 7 should be 1.
for keep in [True, False]:
tss = self.do_simplify(ts, [4, 0, 1], keep_unary=keep)[0]
assert tss.sample_size == 3
assert tss.num_sites == 1
assert tss.num_mutations == 3
assert list(tss.haplotypes()) == ["1", "0", "1"]
def test_overlapping_unary_edges(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 2 2 0
1 3 2 1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
assert ts.sample_size == 2
assert ts.num_trees == 3
assert ts.sequence_length == 3
for keep in [True, False]:
tss, node_map = self.do_simplify(ts, samples=[0, 1, 2], keep_unary=keep)
assert list(node_map) == [0, 1, 2]
trees = [{0: 2}, {0: 2, 1: 2}, {1: 2}]
for t in tss.trees():
assert t.parent_dict == trees[t.index]
def test_overlapping_unary_edges_internal_samples(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 2 2 0
1 3 2 1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
assert ts.sample_size == 3
assert ts.num_trees == 3
trees = [{0: 2}, {0: 2, 1: 2}, {1: 2}]
for t in ts.trees():
assert t.parent_dict == trees[t.index]
tss, node_map = self.do_simplify(ts)
assert list(node_map) == [0, 1, 2]
def test_isolated_samples(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 1
2 1 2
"""
)
edges = io.StringIO(
"""\
left right parent child
"""
)
ts = tskit.load_text(nodes, edges, sequence_length=1, strict=False)
assert ts.num_samples == 3
assert ts.num_trees == 1
assert ts.num_nodes == 3
for keep in [True, False]:
tss, node_map = self.do_simplify(ts, keep_unary=keep)
assert ts.tables.nodes == tss.tables.nodes
assert ts.tables.edges == tss.tables.edges
assert list(node_map) == [0, 1, 2]
def test_internal_samples(self):
nodes = io.StringIO(
"""\
id is_sample population time
0 1 -1 1.00000000000000
1 0 -1 1.00000000000000
2 1 -1 1.00000000000000
3 0 -1 1.31203521181726
4 0 -1 2.26776380586006
5 1 -1 0.00000000000000
6 0 -1 0.50000000000000
7 0 -1 1.50000000000000
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 0.62185118 1.00000000 1 6
1 0.00000000 0.62185118 2 6
2 0.00000000 1.00000000 3 0,2
3 0.00000000 1.00000000 4 7,3
4 0.00000000 1.00000000 6 5
5 0.00000000 1.00000000 7 1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
tss, node_map = self.do_simplify(ts, [5, 2, 0])
assert node_map[0] == 2
assert node_map[1] == -1
assert node_map[2] == 1
assert node_map[3] == 3
assert node_map[4] == 4
assert node_map[5] == 0
assert node_map[6] == -1
assert node_map[7] == -1
assert tss.sample_size == 3
assert tss.num_trees == 2
trees = [{0: 1, 1: 3, 2: 3}, {0: 4, 1: 3, 2: 3, 3: 4}]
for t in tss.trees():
assert t.parent_dict == trees[t.index]
# with keep_unary == True
tss, node_map = self.do_simplify(ts, [5, 2, 0], keep_unary=True)
assert node_map[0] == 2
assert node_map[1] == 4
assert node_map[2] == 1
assert node_map[3] == 5
assert node_map[4] == 7
assert node_map[5] == 0
assert node_map[6] == 3
assert node_map[7] == 6
assert tss.sample_size == 3
assert tss.num_trees == 2
trees = [
{0: 3, 1: 5, 2: 5, 3: 1, 5: 7},
{0: 3, 1: 5, 2: 5, 3: 4, 4: 6, 5: 7, 6: 7},
]
for t in tss.trees():
assert t.parent_dict == trees[t.index]
def test_many_mutations_over_single_sample_ancestral_state(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 1 0
"""
)
sites = io.StringIO(
"""\
position ancestral_state
0 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state parent
0 0 1 -1
0 0 0 0
"""
)
ts = tskit.load_text(
nodes, edges, sites=sites, mutations=mutations, strict=False
)
assert ts.sample_size == 1
assert ts.num_trees == 1
assert ts.num_sites == 1
assert ts.num_mutations == 2
for keep in [True, False]:
tss, node_map = self.do_simplify(ts, keep_unary=keep)
assert tss.num_sites == 1
assert tss.num_mutations == 2
assert list(tss.haplotypes(isolated_as_missing=False)) == ["0"]
def test_many_mutations_over_single_sample_derived_state(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 1 0
"""
)
sites = io.StringIO(
"""\
position ancestral_state
0 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state parent
0 0 1 -1
0 0 0 0
0 0 1 1
"""
)
ts = tskit.load_text(
nodes, edges, sites=sites, mutations=mutations, strict=False
)
assert ts.sample_size == 1
assert ts.num_trees == 1
assert ts.num_sites == 1
assert ts.num_mutations == 3
for keep in [True, False]:
tss, node_map = self.do_simplify(ts, keep_unary=keep)
assert tss.num_sites == 1
assert tss.num_mutations == 3
assert list(tss.haplotypes(isolated_as_missing=False)) == ["1"]
def test_many_trees_filter_zero_mutations(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.insert_branch_sites(ts)
assert ts.num_sites == ts.num_mutations
assert ts.num_sites > ts.num_trees
for keep in [True, False]:
for filter_sites in [True, False]:
tss, _ = self.do_simplify(
ts, samples=None, filter_sites=filter_sites, keep_unary=keep
)
assert ts.num_sites == tss.num_sites
assert ts.num_mutations == tss.num_mutations
def test_many_trees_filter_zero_multichar_mutations(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.insert_multichar_mutations(ts)
assert ts.num_sites == ts.num_trees
assert ts.num_mutations == ts.num_trees
for keep in [True, False]:
for filter_sites in [True, False]:
tss, _ = self.do_simplify(
ts, samples=None, filter_sites=filter_sites, keep_unary=keep
)
assert ts.num_sites == tss.num_sites
assert ts.num_mutations == tss.num_mutations
def test_simple_population_filter(self):
ts = msprime.simulate(10, random_seed=2)
tables = ts.dump_tables()
tables.populations.add_row(metadata=b"unreferenced")
assert len(tables.populations) == 2
for keep in [True, False]:
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_populations=True, keep_unary=keep
)
assert tss.num_populations == 1
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_populations=False, keep_unary=keep
)
assert tss.num_populations == 2
def test_interleaved_populations_filter(self):
ts = msprime.simulate(
population_configurations=[
msprime.PopulationConfiguration(),
msprime.PopulationConfiguration(10),
msprime.PopulationConfiguration(),
msprime.PopulationConfiguration(),
],
random_seed=2,
)
assert ts.num_populations == 4
tables = ts.dump_tables()
# Edit the populations so we can identify the rows.
tables.populations.clear()
for j in range(4):
tables.populations.add_row(metadata=bytes([j]))
ts = tables.tree_sequence()
id_map = np.array([-1, 0, -1, -1], dtype=np.int32)
for keep in [True, False]:
tss, _ = self.do_simplify(ts, filter_populations=True, keep_unary=keep)
assert tss.num_populations == 1
population = tss.population(0)
assert population.metadata == bytes([1])
assert np.array_equal(
id_map[ts.tables.nodes.population], tss.tables.nodes.population
)
tss, _ = self.do_simplify(ts, filter_populations=False, keep_unary=keep)
assert tss.num_populations == 4
def test_removed_node_population_filter(self):
tables = tskit.TableCollection(1)
tables.populations.add_row(metadata=bytes(0))
tables.populations.add_row(metadata=bytes(1))
tables.populations.add_row(metadata=bytes(2))
tables.nodes.add_row(flags=1, population=0)
        # Because flags=0 here, this node will be simplified out, leaving
        # population 1 unreferenced.
tables.nodes.add_row(flags=0, population=1)
tables.nodes.add_row(flags=1, population=2)
for keep in [True, False]:
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_populations=True, keep_unary=keep
)
assert tss.num_nodes == 2
assert tss.num_populations == 2
assert tss.population(0).metadata == bytes(0)
assert tss.population(1).metadata == bytes(2)
assert tss.node(0).population == 0
assert tss.node(1).population == 1
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_populations=False, keep_unary=keep
)
assert tss.tables.populations == tables.populations
def test_simple_individual_filter(self):
tables = tskit.TableCollection(1)
tables.individuals.add_row(flags=0)
tables.individuals.add_row(flags=1)
tables.nodes.add_row(flags=1, individual=0)
tables.nodes.add_row(flags=1, individual=0)
for keep in [True, False]:
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_individuals=True, keep_unary=keep
)
assert tss.num_nodes == 2
assert tss.num_individuals == 1
assert tss.individual(0).flags == 0
            tss, _ = self.do_simplify(
                tables.tree_sequence(), filter_individuals=False, keep_unary=keep
            )
assert tss.tables.individuals == tables.individuals
def test_interleaved_individual_filter(self):
tables = tskit.TableCollection(1)
tables.individuals.add_row(flags=0)
tables.individuals.add_row(flags=1)
tables.individuals.add_row(flags=2)
tables.nodes.add_row(flags=1, individual=1)
tables.nodes.add_row(flags=1, individual=-1)
tables.nodes.add_row(flags=1, individual=1)
for keep in [True, False]:
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_individuals=True, keep_unary=keep
)
assert tss.num_nodes == 3
assert tss.num_individuals == 1
assert tss.individual(0).flags == 1
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_individuals=False, keep_unary=keep
)
assert tss.tables.individuals == tables.individuals
def test_removed_node_individual_filter(self):
tables = tskit.TableCollection(1)
tables.individuals.add_row(flags=0)
tables.individuals.add_row(flags=1)
tables.individuals.add_row(flags=2)
tables.nodes.add_row(flags=1, individual=0)
        # Because flags=0 here, this node will be simplified out, leaving
        # individual 1 unreferenced.
tables.nodes.add_row(flags=0, individual=1)
tables.nodes.add_row(flags=1, individual=2)
for keep in [True, False]:
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_individuals=True, keep_unary=keep
)
assert tss.num_nodes == 2
assert tss.num_individuals == 2
assert tss.individual(0).flags == 0
assert tss.individual(1).flags == 2
assert tss.node(0).individual == 0
assert tss.node(1).individual == 1
tss, _ = self.do_simplify(
tables.tree_sequence(), filter_individuals=False, keep_unary=keep
)
assert tss.tables.individuals == tables.individuals
def verify_simplify_haplotypes(self, ts, samples, keep_unary=False):
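        # With filter_sites=False, simplification keeps every site, so each
        # retained sample's haplotype should be identical to its haplotype in
        # the original tree sequence; that is what is checked below.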
sub_ts, node_map = self.do_simplify(
ts, samples, filter_sites=False, keep_unary=keep_unary
)
assert ts.num_sites == sub_ts.num_sites
sub_haplotypes = list(sub_ts.haplotypes(isolated_as_missing=False))
all_samples = list(ts.samples())
k = 0
for j, h in enumerate(ts.haplotypes(isolated_as_missing=False)):
if k == len(samples):
break
if samples[k] == all_samples[j]:
assert h == sub_haplotypes[k]
k += 1
def test_single_tree_recurrent_mutations(self):
ts = msprime.simulate(6, random_seed=10)
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
for keep in [True, False]:
self.verify_simplify_haplotypes(ts, samples, keep_unary=keep)
def test_many_trees_recurrent_mutations(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
for keep in [True, False]:
self.verify_simplify_haplotypes(ts, samples, keep_unary=keep)
def test_single_multiroot_tree_recurrent_mutations(self):
ts = msprime.simulate(6, random_seed=10)
ts = tsutil.decapitate(ts, ts.num_edges // 2)
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
for keep in [True, False]:
self.verify_simplify_haplotypes(ts, samples, keep_unary=keep)
def test_many_multiroot_trees_recurrent_mutations(self):
ts = msprime.simulate(7, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.decapitate(ts, ts.num_edges // 2)
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
for keep in [True, False]:
self.verify_simplify_haplotypes(ts, samples, keep_unary=keep)
def test_single_tree_recurrent_mutations_internal_samples(self):
ts = msprime.simulate(6, random_seed=10)
ts = tsutil.jiggle_samples(ts)
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
for keep in [True, False]:
self.verify_simplify_haplotypes(ts, samples, keep_unary=keep)
def test_many_trees_recurrent_mutations_internal_samples(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=10)
ts = tsutil.jiggle_samples(ts)
assert ts.num_trees > 3
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
for keep in [True, False]:
self.verify_simplify_haplotypes(ts, samples, keep_unary=keep)
class TestSimplifyKeepInputRoots(SimplifyTestBase, ExampleTopologyMixin):
"""
Tests for the keep_input_roots option to simplify.
"""
def verify(self, ts):
# Called by the examples in ExampleTopologyMixin
samples = ts.samples()
self.verify_keep_input_roots(ts, samples[:2])
self.verify_keep_input_roots(ts, samples[:3])
self.verify_keep_input_roots(ts, samples[:-1])
self.verify_keep_input_roots(ts, samples)
def verify_keep_input_roots(self, ts, samples):
ts = tsutil.insert_unique_metadata(ts, ["individuals"])
ts_with_roots, node_map = self.do_simplify(
ts, samples, keep_input_roots=True, filter_sites=False, compare_lib=True
)
new_to_input_map = {
value: key for key, value in enumerate(node_map) if value != tskit.NULL
}
for (left, right), input_tree, tree_with_roots in ts.coiterate(ts_with_roots):
input_roots = input_tree.roots
assert len(tree_with_roots.roots) > 0
for root in tree_with_roots.roots:
                # Check that each root in the current tree maps back to a root
                # of the corresponding input tree.
input_root = new_to_input_map[root]
assert input_root in input_roots
input_node = ts.node(input_root)
new_node = ts_with_roots.node(root)
assert new_node.time == input_node.time
assert new_node.population == input_node.population
if new_node.individual == tskit.NULL:
assert new_node.individual == input_node.individual
else:
assert (
ts_with_roots.individual(new_node.individual).metadata
== ts.individual(input_node.individual).metadata
)
assert new_node.metadata == input_node.metadata
# This should only be marked as a sample if it's an
# element of the samples list.
assert new_node.is_sample() == (input_root in samples)
# Find the MRCA of the samples below this root.
root_samples = list(tree_with_roots.samples(root))
mrca = functools.reduce(tree_with_roots.mrca, root_samples)
if mrca != root:
# If the MRCA is not equal to the root, then there should
# be a unary branch joining them.
assert tree_with_roots.parent(mrca) == root
assert tree_with_roots.children(root) == (mrca,)
# Any mutations that were on the path from the old MRCA
# to the root should be mapped to this node, and any mutations
# above the root should still be there.
u = new_to_input_map[mrca]
root_path = []
while u != tskit.NULL:
root_path.append(u)
u = input_tree.parent(u)
input_sites = {
site.position: site
for site in input_tree.sites()
if site.position >= left and site.position < right
}
new_sites = {
site.position: site
for site in tree_with_roots.sites()
if site.position >= left and site.position < right
}
assert set(input_sites.keys()) == set(new_sites.keys())
positions = input_sites.keys()
for position in positions:
assert left <= position < right
new_site = new_sites[position]
# We assume the metadata contains a unique key for each mutation.
new_mutations = {
mut.metadata: mut for mut in new_site.mutations
}
# Just make sure the metadata is actually unique.
assert len(new_mutations) == len(new_site.mutations)
input_site = input_sites[position]
for input_mutation in input_site.mutations:
if input_mutation.node in root_path:
new_node = (
mrca if input_mutation.node != input_root else root
)
# The same mutation should exist and be mapped to
# new_node
new_mutation = new_mutations[input_mutation.metadata]
# We have turned filter sites off, so sites should
# be comparable
assert new_mutation.site == input_mutation.site
assert (
new_mutation.derived_state
== input_mutation.derived_state
)
assert new_mutation.node == new_node
return ts_with_roots
def test_many_trees(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
self.verify_keep_input_roots(ts, samples)
def test_many_trees_internal_samples(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=10)
ts = tsutil.jiggle_samples(ts)
assert ts.num_trees > 3
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
self.verify_keep_input_roots(ts, samples)
def test_many_multiroot_trees(self):
ts = msprime.simulate(7, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.decapitate(ts, ts.num_edges // 2)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
self.verify_keep_input_roots(ts, samples)
def test_wright_fisher_unsimplified(self):
num_generations = 10
tables = wf.wf_sim(10, num_generations, deep_history=False, seed=2)
tables.sort()
ts = tables.tree_sequence()
simplified = self.verify_keep_input_roots(ts, ts.samples())
roots = set()
for tree in simplified.trees():
for root in tree.roots:
roots.add(root)
assert tree.time(root) == num_generations
init_nodes = np.where(simplified.tables.nodes.time == num_generations)[0]
assert set(init_nodes) == roots
def test_single_tree_recurrent_mutations(self):
ts = msprime.simulate(6, random_seed=10)
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
self.verify_keep_input_roots(ts, samples)
def test_many_trees_recurrent_mutations(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=8)
assert ts.num_trees > 2
for mutations_per_branch in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
for num_samples in range(1, ts.num_samples):
for samples in itertools.combinations(ts.samples(), num_samples):
self.verify_keep_input_roots(ts, samples)
class TestMapToAncestors:
"""
Tests the AncestorMap class.
"""
random_seed = 13
#
# 8
# / \
# / \
# / \
# 7 \
# / \ 6
# / 5 / \
# / / \ / \
# 4 0 1 2 3
nodes = """\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 1 0 0.00000000000000
5 0 0 0.14567111023387
6 0 0 0.21385545626353
7 0 0 0.43508024345063
8 0 0 1.60156352971203
"""
edges = """\
id left right parent child
0 0.00000000 1.00000000 5 0,1
1 0.00000000 1.00000000 6 2,3
2 0.00000000 1.00000000 7 4,5
3 0.00000000 1.00000000 8 6,7
"""
#
# 9 10
# / \ / \
# / \ / 8
# / \ / / \
# 7 \ / / \
# / \ 6 / / 6
# / 5 / \ / 5 / \
# / / \ / \ / / \ / \
# 4 0 1 2 3 4 0 1 2 3
#
# 0 ------------------ 0.5 ------------------ 1.0
nodes0 = """\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 1 0 0.00000000000000
5 0 0 0.14567111023387
6 0 0 0.21385545626353
7 0 0 0.43508024345063
8 0 0 0.60156352971203
9 0 0 0.90000000000000
10 0 0 1.20000000000000
"""
edges0 = """\
id left right parent child
0 0.00000000 1.00000000 5 0,1
1 0.00000000 1.00000000 6 2,3
2 0.00000000 0.50000000 7 4,5
3 0.50000000 1.00000000 8 5,6
4 0.00000000 0.50000000 9 6,7
5 0.50000000 1.00000000 10 4,8
"""
nodes1 = """\
id is_sample population time
0 0 0 1.0
1 1 0 0.0
2 1 0 0.0
"""
edges1 = """\
id left right parent child
0 0.00000000 1.00000000 0 1,2
"""
def do_map(self, ts, ancestors, samples=None, compare_lib=True):
"""
Runs the Python test implementation of link_ancestors.
"""
if samples is None:
samples = ts.samples()
s = tests.AncestorMap(ts, samples, ancestors)
ancestor_table = s.link_ancestors()
if compare_lib:
lib_result = ts.tables.link_ancestors(samples, ancestors)
assert ancestor_table == lib_result
return ancestor_table
def test_deprecated_name(self):
# copied from test_single_tree_one_ancestor below
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
samples = ts.samples()
ancestors = [8]
s = tests.AncestorMap(ts, samples, ancestors)
tss = s.link_ancestors()
lib_result = ts.tables.map_ancestors(samples, ancestors)
assert tss == lib_result
assert list(tss.parent) == [8, 8, 8, 8, 8]
assert list(tss.child) == [0, 1, 2, 3, 4]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_one_ancestor(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, ancestors=[8])
assert list(tss.parent) == [8, 8, 8, 8, 8]
assert list(tss.child) == [0, 1, 2, 3, 4]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_unordered_nodes(self):
nodes = io.StringIO(self.nodes1)
edges = io.StringIO(self.edges1)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, ancestors=[0])
assert list(tss.parent) == [0, 0]
assert list(tss.child) == [1, 2]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_two_ancestors(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, ancestors=[6, 7])
assert list(tss.parent) == [6, 6, 7, 7, 7]
assert list(tss.child) == [2, 3, 0, 1, 4]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_no_ancestors(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, samples=[2, 3], ancestors=[7])
assert tss.num_rows == 0
def test_single_tree_samples_or_ancestors_not_in_tree(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
with pytest.raises(AssertionError):
self.do_map(ts, samples=[-1, 3], ancestors=[5])
with pytest.raises(AssertionError):
self.do_map(ts, samples=[2, 3], ancestors=[10])
def test_single_tree_ancestors_descend_from_other_ancestors(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, ancestors=[7, 8])
assert list(tss.parent) == [7, 7, 7, 8, 8, 8]
assert list(tss.child) == [0, 1, 4, 2, 3, 7]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_internal_samples(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, samples=[2, 3, 4, 5], ancestors=[7, 8])
assert list(tss.parent) == [7, 7, 8, 8, 8]
assert list(tss.child) == [4, 5, 2, 3, 7]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_samples_and_ancestors_overlap(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, samples=[1, 2, 3, 5], ancestors=[5, 6, 7])
assert list(tss.parent) == [5, 6, 6, 7]
assert list(tss.child) == [1, 2, 3, 5]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_unary_ancestor(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, samples=[1, 2, 4], ancestors=[5, 7, 8])
assert list(tss.parent) == [5, 7, 7, 8, 8]
assert list(tss.child) == [1, 4, 5, 2, 7]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_ancestors_descend_from_samples(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, samples=[1, 7], ancestors=[5, 8])
assert list(tss.parent) == [5, 7, 8]
assert list(tss.child) == [1, 5, 7]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_single_tree_samples_descend_from_samples(self):
nodes = io.StringIO(self.nodes)
edges = io.StringIO(self.edges)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, samples=[3, 6], ancestors=[8])
assert list(tss.parent) == [6, 8]
assert list(tss.child) == [3, 6]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_multiple_trees_to_single_tree(self):
nodes = io.StringIO(self.nodes0)
edges = io.StringIO(self.edges0)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, ancestors=[5, 6])
assert list(tss.parent) == [5, 5, 6, 6]
assert list(tss.child) == [0, 1, 2, 3]
        assert np.all(tss.left == 0)
        assert np.all(tss.right == 1)
def test_multiple_trees_one_ancestor(self):
nodes = io.StringIO(self.nodes0)
edges = io.StringIO(self.edges0)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
tss = self.do_map(ts, ancestors=[9, 10])
assert list(tss.parent) == [9, 9, 9, 9, 9, 10, 10, 10, 10, 10]
assert list(tss.child) == [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
        assert list(tss.left) == [0] * 5 + [0.5] * 5
        assert list(tss.right) == [0.5] * 5 + [1] * 5
def verify(self, ts, sample_nodes, ancestral_nodes):
tss = self.do_map(ts, ancestors=ancestral_nodes, samples=sample_nodes)
# ancestors = list(set(tss.parent))
# Loop through the rows of the ancestral branch table.
current_ancestor = tss.parent[0]
current_descendants = [tss.child[0]]
current_left = tss.left[0]
current_right = tss.right[0]
for _, row in enumerate(tss):
if (
row.parent != current_ancestor
or row.left != current_left
or row.right != current_right
):
# Loop through trees.
for tree in ts.trees():
if tree.interval.left >= current_right:
break
while tree.interval.right <= current_left:
tree.next()
# Check that the most recent ancestor of the descendants is the
# current_ancestor.
current_descendants = list(set(current_descendants))
for des in current_descendants:
par = tree.get_parent(des)
while par not in ancestral_nodes and par not in sample_nodes:
par = tree.get_parent(par)
assert par == current_ancestor
# Reset the current ancestor and descendants, left and right coords.
current_ancestor = row.parent
current_descendants = [row.child]
current_left = row.left
current_right = row.right
else:
# Collate a list of children corresponding to each ancestral node.
current_descendants.append(row.child)
def test_sim_single_coalescent_tree(self):
ts = msprime.simulate(30, random_seed=1, length=10)
ancestors = [3 * n for n in np.arange(0, ts.num_nodes // 3)]
self.verify(ts, ts.samples(), ancestors)
random_samples = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(ts, random_samples, ancestors)
def test_sim_coalescent_trees(self):
ts = msprime.simulate(8, recombination_rate=5, random_seed=1, length=2)
ancestors = [3 * n for n in np.arange(0, ts.num_nodes // 3)]
self.verify(ts, ts.samples(), ancestors)
random_samples = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(ts, random_samples, ancestors)
def test_sim_coalescent_trees_internal_samples(self):
ts = msprime.simulate(8, recombination_rate=5, random_seed=10, length=2)
assert ts.num_trees > 2
ancestors = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(tsutil.jiggle_samples(ts), ts.samples(), ancestors)
random_samples = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(tsutil.jiggle_samples(ts), random_samples, ancestors)
def test_sim_many_multiroot_trees(self):
ts = msprime.simulate(7, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.decapitate(ts, ts.num_edges // 2)
ancestors = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(ts, ts.samples(), ancestors)
random_samples = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(ts, random_samples, ancestors)
def test_sim_wright_fisher_generations(self):
number_of_gens = 5
tables = wf.wf_sim(10, number_of_gens, deep_history=False, seed=2)
tables.sort()
ts = tables.tree_sequence()
ancestors = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(ts, ts.samples(), ancestors)
for gen in range(1, number_of_gens):
ancestors = [u.id for u in ts.nodes() if u.time == gen]
self.verify(ts, ts.samples(), ancestors)
random_samples = [4 * n for n in np.arange(0, ts.num_nodes // 4)]
self.verify(ts, random_samples, ancestors)
for gen in range(1, number_of_gens):
ancestors = [u.id for u in ts.nodes() if u.time == gen]
self.verify(ts, random_samples, ancestors)
class TestMutationParent:
"""
Tests that mutation parent is correctly specified, and that we correctly
    recompute it with compute_mutation_parents.
"""
seed = 42
def verify_parents(self, ts):
parent = tsutil.compute_mutation_parent(ts)
tables = ts.tables
assert np.array_equal(parent, tables.mutations.parent)
tables.mutations.parent = np.zeros_like(tables.mutations.parent) - 1
assert np.all(tables.mutations.parent == tskit.NULL)
tables.compute_mutation_parents()
assert np.array_equal(parent, tables.mutations.parent)
def test_example(self):
nodes = io.StringIO(
"""\
id is_sample time
0 0 2.0
1 0 1.0
2 0 1.0
3 1 0
4 1 0
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.5 2 3
0.0 0.8 2 4
0.5 1.0 1 3
0.0 1.0 0 1
0.0 1.0 0 2
0.8 1.0 0 4
"""
)
sites = io.StringIO(
"""\
position ancestral_state
0.1 0
0.5 0
0.9 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state parent
0 1 1 -1
0 2 1 -1
0 3 2 1
1 0 1 -1
1 1 1 3
1 3 2 4
1 2 1 3
1 4 2 6
2 0 1 -1
2 1 1 8
2 2 1 8
2 4 1 8
"""
)
ts = tskit.load_text(
nodes=nodes, edges=edges, sites=sites, mutations=mutations, strict=False
)
self.verify_parents(ts)
def test_single_muts(self):
ts = msprime.simulate(
10, random_seed=self.seed, mutation_rate=3.0, recombination_rate=1.0
)
self.verify_parents(ts)
def test_with_jukes_cantor(self):
ts = msprime.simulate(
10, random_seed=self.seed, mutation_rate=0.0, recombination_rate=1.0
)
# make *lots* of recurrent mutations
mut_ts = tsutil.jukes_cantor(
ts, num_sites=10, mu=1, multiple_per_node=False, seed=self.seed
)
self.verify_parents(mut_ts)
def test_with_jukes_cantor_multiple_per_node(self):
ts = msprime.simulate(
10, random_seed=self.seed, mutation_rate=0.0, recombination_rate=1.0
)
# make *lots* of recurrent mutations
mut_ts = tsutil.jukes_cantor(
ts, num_sites=10, mu=1, multiple_per_node=True, seed=self.seed
)
self.verify_parents(mut_ts)
def verify_branch_mutations(self, ts, mutations_per_branch):
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
assert ts.num_mutations > 1
self.verify_parents(ts)
def test_single_tree_one_mutation_per_branch(self):
ts = msprime.simulate(6, random_seed=10)
self.verify_branch_mutations(ts, 1)
def test_single_tree_two_mutations_per_branch(self):
ts = msprime.simulate(10, random_seed=9)
self.verify_branch_mutations(ts, 2)
def test_single_tree_three_mutations_per_branch(self):
ts = msprime.simulate(8, random_seed=9)
self.verify_branch_mutations(ts, 3)
def test_single_multiroot_tree_recurrent_mutations(self):
ts = msprime.simulate(6, random_seed=10)
ts = tsutil.decapitate(ts, ts.num_edges // 2)
for mutations_per_branch in [1, 2, 3]:
self.verify_branch_mutations(ts, mutations_per_branch)
def test_many_multiroot_trees_recurrent_mutations(self):
ts = msprime.simulate(7, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.decapitate(ts, ts.num_edges // 2)
for mutations_per_branch in [1, 2, 3]:
self.verify_branch_mutations(ts, mutations_per_branch)
class TestMutationTime:
"""
Tests that mutation time is correctly specified, and that we correctly
recompute it with compute_mutation_times.
"""
seed = 42
def verify_times(self, ts):
tables = ts.tables
# Clear out the existing mutations as they come from msprime
tables.mutations.time = np.full(
tables.mutations.time.shape, -1, dtype=np.float64
)
assert np.all(tables.mutations.time == -1)
# Compute times with C method and dumb python method
tables.compute_mutation_times()
python_time = tsutil.compute_mutation_times(ts)
assert np.allclose(python_time, tables.mutations.time, rtol=1e-15, atol=1e-15)
def test_example(self):
nodes = io.StringIO(
"""\
id is_sample time
0 0 2.0
1 0 1.0
2 0 1.0
3 1 0
4 1 0
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 0.5 2 3
0.0 0.8 2 4
0.5 1.0 1 3
0.0 1.0 0 1
0.0 1.0 0 2
0.8 1.0 0 4
"""
)
sites = io.StringIO(
"""\
position ancestral_state
0.1 0
0.5 0
0.9 0
"""
)
mutations = io.StringIO(
"""\
site node time derived_state parent
0 1 1.5 1 -1
0 2 1.5 1 -1
0 3 0.5 2 1
1 0 2.0 1 -1
1 1 1.5 1 3
1 3 0.5 2 4
1 2 1.5 1 3
1 4 0.5 2 6
2 0 2.0 1 -1
2 1 1.5 1 8
2 2 1.5 1 8
2 4 1.0 1 8
"""
)
ts = tskit.load_text(
nodes=nodes, edges=edges, sites=sites, mutations=mutations, strict=False
)
tables = ts.tables
python_time = tsutil.compute_mutation_times(ts)
assert np.allclose(python_time, tables.mutations.time, rtol=1e-15, atol=1e-15)
tables.mutations.time = np.full(
tables.mutations.time.shape, -1, dtype=np.float64
)
assert np.all(tables.mutations.time == -1)
tables.compute_mutation_times()
assert np.allclose(python_time, tables.mutations.time, rtol=1e-15, atol=1e-15)
def test_single_muts(self):
ts = msprime.simulate(
10, random_seed=self.seed, mutation_rate=3.0, recombination_rate=1.0
)
self.verify_times(ts)
def test_with_jukes_cantor(self):
ts = msprime.simulate(
10, random_seed=self.seed, mutation_rate=0.0, recombination_rate=1.0
)
# make *lots* of recurrent mutations
mut_ts = tsutil.jukes_cantor(
ts, num_sites=10, mu=1, multiple_per_node=False, seed=self.seed
)
self.verify_times(mut_ts)
def test_with_jukes_cantor_multiple_per_node(self):
ts = msprime.simulate(
10, random_seed=self.seed, mutation_rate=0.0, recombination_rate=1.0
)
# make *lots* of recurrent mutations
mut_ts = tsutil.jukes_cantor(
ts, num_sites=10, mu=1, multiple_per_node=True, seed=self.seed
)
self.verify_times(mut_ts)
def verify_branch_mutations(self, ts, mutations_per_branch):
ts = tsutil.insert_branch_mutations(ts, mutations_per_branch)
assert ts.num_mutations > 1
self.verify_times(ts)
def test_single_tree_one_mutation_per_branch(self):
ts = msprime.simulate(6, random_seed=10)
self.verify_branch_mutations(ts, 1)
def test_single_tree_two_mutations_per_branch(self):
ts = msprime.simulate(10, random_seed=9)
self.verify_branch_mutations(ts, 2)
def test_single_tree_three_mutations_per_branch(self):
ts = msprime.simulate(8, random_seed=9)
self.verify_branch_mutations(ts, 3)
def test_single_multiroot_tree_recurrent_mutations(self):
ts = msprime.simulate(6, random_seed=10)
ts = tsutil.decapitate(ts, ts.num_edges // 2)
for mutations_per_branch in [1, 2, 3]:
self.verify_branch_mutations(ts, mutations_per_branch)
def test_many_multiroot_trees_recurrent_mutations(self):
ts = msprime.simulate(7, recombination_rate=1, random_seed=10)
assert ts.num_trees > 3
ts = tsutil.decapitate(ts, ts.num_edges // 2)
for mutations_per_branch in [1, 2, 3]:
self.verify_branch_mutations(ts, mutations_per_branch)
class TestSimpleTreeAlgorithm:
"""
Tests for the direct implementation of Algorithm T in tsutil.py.
See TestHoleyTreeSequences above for further tests on wacky topologies.
"""
def test_zero_nodes(self):
tables = tskit.TableCollection(1)
ts = tables.tree_sequence()
assert ts.sequence_length == 1
assert ts.num_trees == 1
# Test the simple tree iterator.
trees = list(tsutil.algorithm_T(ts))
assert len(trees) == 1
(left, right), parent = trees[0]
assert left == 0
assert right == 1
assert parent == []
def test_one_node(self):
tables = tskit.TableCollection(1)
tables.nodes.add_row()
ts = tables.tree_sequence()
assert ts.sequence_length == 1
assert ts.num_trees == 1
# Test the simple tree iterator.
trees = list(tsutil.algorithm_T(ts))
assert len(trees) == 1
(left, right), parent = trees[0]
assert left == 0
assert right == 1
assert parent == [-1]
def test_single_coalescent_tree(self):
ts = msprime.simulate(10, random_seed=1, length=10)
tree = ts.first()
p1 = [tree.parent(j) for j in range(ts.num_nodes)]
interval, p2 = next(tsutil.algorithm_T(ts))
assert interval == tree.interval
assert p1 == p2
def test_coalescent_trees(self):
ts = msprime.simulate(8, recombination_rate=5, random_seed=1, length=2)
assert ts.num_trees > 2
new_trees = tsutil.algorithm_T(ts)
for tree in ts.trees():
interval, p2 = next(new_trees)
p1 = [tree.parent(j) for j in range(ts.num_nodes)]
assert interval == tree.interval
assert p1 == p2
with pytest.raises(StopIteration):
next(new_trees)
class TestSampleLists(ExampleTopologyMixin):
"""
Tests for the sample lists algorithm.
"""
def verify(self, ts):
tree1 = tsutil.SampleListTree(ts)
s = str(tree1)
assert s is not None
trees = ts.trees(sample_lists=True)
for left, right in tree1.sample_lists():
tree2 = next(trees)
assert (left, right) == tree2.interval
for u in tree2.nodes():
assert tree1.left_sample[u] == tree2.left_sample(u)
assert tree1.right_sample[u] == tree2.right_sample(u)
for j in range(ts.num_samples):
assert tree1.next_sample[j] == tree2.next_sample(j)
assert right == ts.sequence_length
tree1 = tsutil.SampleListTree(ts)
trees = ts.trees(sample_lists=False)
sample_index_map = ts.samples()
for _, _ in tree1.sample_lists():
tree2 = next(trees)
for u in range(ts.num_nodes):
samples2 = list(tree2.samples(u))
samples1 = []
index = tree1.left_sample[u]
if index != tskit.NULL:
assert sample_index_map[tree1.left_sample[u]] == samples2[0]
assert sample_index_map[tree1.right_sample[u]] == samples2[-1]
stop = tree1.right_sample[u]
while True:
assert index != -1
samples1.append(sample_index_map[index])
if index == stop:
break
index = tree1.next_sample[index]
assert samples1 == samples2
assert right == ts.sequence_length
class TestOneSampleRoot(ExampleTopologyMixin):
"""
Tests for the standard root threshold of subtending at least
one sample.
"""
def verify(self, ts):
tree1 = tsutil.RootThresholdTree(ts, root_threshold=1)
tree2 = tskit.Tree(ts)
tree2.first()
for interval in tree1.iterate():
assert interval == tree2.interval
assert tree1.roots() == tree2.roots
            # Definition here is the set of unique path ends from samples
roots = set()
for u in ts.samples():
while u != tskit.NULL:
path_end = u
u = tree2.parent(u)
roots.add(path_end)
assert set(tree1.roots()) == roots
tree2.next()
assert tree2.index == -1
class TestKSamplesRoot(ExampleTopologyMixin):
"""
Tests for the root criteria of subtending at least k samples.
"""
def verify(self, ts):
for k in range(1, 5):
tree1 = tsutil.RootThresholdTree(ts, root_threshold=k)
tree2 = tskit.Tree(ts, root_threshold=k)
tree2.first()
for interval in tree1.iterate():
assert interval == tree2.interval
                # Definition here is the set of unique path ends from samples
                # that subtend at least k samples
roots = set()
for u in ts.samples():
while u != tskit.NULL:
path_end = u
u = tree2.parent(u)
if tree2.num_samples(path_end) >= k:
roots.add(path_end)
assert set(tree1.roots()) == roots
assert tree1.roots() == tree2.roots
tree2.next()
assert tree2.index == -1
class TestSquashEdges:
"""
Tests of the squash_edges function.
"""
def do_squash(self, ts, compare_lib=True):
squashed = ts.tables.edges
squashed.squash()
if compare_lib:
squashed_list = squash_edges(ts)
squashed_py = tskit.EdgeTable()
for e in squashed_list:
squashed_py.append(e)
# Check the Python and C implementations produce the same output.
assert squashed_py == squashed
return squashed
def test_simple_case(self):
# 2
# / \
# 0 1
nodes = io.StringIO(
"""\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 0 0 1.00000000000000
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 0.00000000 0.50000000 2 0
1 0.00000000 0.50000000 2 1
2 0.50000000 1.00000000 2 0
3 0.50000000 1.00000000 2 1
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
edges = self.do_squash(ts)
        assert np.all(edges.left == 0)
        assert np.all(edges.right == 1)
assert list(edges.parent) == [2, 2]
assert list(edges.child) == [0, 1]
def test_simple_case_unordered_intervals(self):
# 1
# |
# 0
nodes = io.StringIO(
"""\
id is_sample population time
0 1 0 0.0
1 0 0 1.0
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 0.40 1.0 1 0
0 0.00 0.40 1 0
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
edges = self.do_squash(ts)
assert edges.left[0] == 0
assert edges.right[0] == 1
assert edges.parent[0] == 1
assert edges.child[0] == 0
def test_simple_case_unordered_children(self):
# 2
# / \
# 0 1
nodes = io.StringIO(
"""\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 0 0 1.00000000000000
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 0.50000000 1.00000000 2 1
1 0.50000000 1.00000000 2 0
2 0.00000000 0.50000000 2 1
3 0.00000000 0.50000000 2 0
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
edges = self.do_squash(ts)
        assert np.all(edges.left == 0)
        assert np.all(edges.right == 1)
assert list(edges.parent) == [2, 2]
assert list(edges.child) == [0, 1]
def test_simple_case_unordered_children_and_intervals(self):
# 2
# / \
# 0 1
nodes = io.StringIO(
"""\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 0 0 1.00000000000000
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 0.50000000 1.00000000 2 1
2 0.00000000 0.50000000 2 1
3 0.00000000 0.50000000 2 0
1 0.50000000 1.00000000 2 0
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
edges = self.do_squash(ts)
        assert np.all(edges.left == 0)
        assert np.all(edges.right == 1)
assert list(edges.parent) == [2, 2]
assert list(edges.child) == [0, 1]
def test_squash_multiple_parents_and_children(self):
# 4 5
# / \ / \
# 0 1 2 3
nodes = io.StringIO(
"""\
id is_sample population time
0 1 0 0.00000000000000
1 1 0 0.00000000000000
2 1 0 0.00000000000000
3 1 0 0.00000000000000
4 0 0 1.00000000000000
5 0 0 1.00000000000000
"""
)
edges = io.StringIO(
"""\
id left right parent child
5 0.50000000 1.00000000 5 3
6 0.50000000 1.00000000 5 2
7 0.00000000 0.50000000 5 3
8 0.00000000 0.50000000 5 2
9 0.40000000 1.00000000 4 1
10 0.00000000 0.40000000 4 1
11 0.40000000 1.00000000 4 0
12 0.00000000 0.40000000 4 0
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
edges = self.do_squash(ts)
        assert np.all(edges.left == 0)
        assert np.all(edges.right == 1)
assert list(edges.parent) == [4, 4, 5, 5]
assert list(edges.child) == [0, 1, 2, 3]
def test_squash_overlapping_intervals(self):
nodes = io.StringIO(
"""\
id is_sample population time
0 1 0 0.0
1 0 0 1.0
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 0.00 0.50 1 0
1 0.40 0.80 1 0
2 0.60 1.00 1 0
"""
)
with pytest.raises(tskit.LibraryError):
tskit.load_text(nodes=nodes, edges=edges, strict=False)
def verify_slice_and_squash(self, ts):
"""
Slices a tree sequence so that there are edge endpoints at
all integer locations, then squashes these edges and verifies
that the resulting edge table is the same as the input edge table.
"""
sliced_edges = []
# Create new sliced edge table.
for e in ts.edges():
left = e.left
right = e.right
if left == np.floor(left):
r_left = np.ceil(left) + 1
else:
r_left = np.ceil(left)
if right == np.floor(right):
r_right = np.floor(right)
else:
r_right = np.floor(right) + 1
new_range = [left]
for r in np.arange(r_left, r_right):
new_range.append(r)
new_range.append(right)
assert len(new_range) > 1
# Add new edges to the list.
for r in range(1, len(new_range)):
new = tskit.Edge(new_range[r - 1], new_range[r], e.parent, e.child)
sliced_edges.append(new)
# Shuffle the edges and create a new edge table.
random.shuffle(sliced_edges)
sliced_table = tskit.EdgeTable()
for e in sliced_edges:
sliced_table.append(e)
# Squash the edges and check against input table.
sliced_table.squash()
assert sliced_table == ts.tables.edges
def test_sim_single_coalescent_tree(self):
ts = msprime.simulate(20, random_seed=4, length=10)
assert ts.num_trees == 1
self.verify_slice_and_squash(ts)
def test_sim_big_coalescent_trees(self):
ts = msprime.simulate(20, recombination_rate=5, random_seed=4, length=10)
assert ts.num_trees > 2
self.verify_slice_and_squash(ts)
def squash_edges(ts):
"""
Returns the edges in the tree sequence squashed.
"""
t = ts.tables.nodes.time
edges = list(ts.edges())
edges.sort(key=lambda e: (t[e.parent], e.parent, e.child, e.left))
if len(edges) == 0:
return []
squashed = []
last_e = edges[0]
for e in edges[1:]:
condition = (
e.parent != last_e.parent
or e.child != last_e.child
or e.left != last_e.right
)
if condition:
squashed.append(last_e)
last_e = e
last_e.right = e.right
squashed.append(last_e)
return squashed
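# Worked example of the squash condition above (illustrative values only): two edges with
# the same parent/child whose intervals abut are merged into one; a gap or a different
# parent/child keeps them separate.
def _squash_condition_example():
    a = tskit.Edge(0.0, 0.5, 2, 0)
    b = tskit.Edge(0.5, 1.0, 2, 0)
    # same parent, same child, and a.right == b.left -> squashes to (0.0, 1.0, 2, 0)
    return a.parent == b.parent and a.child == b.child and a.right == b.left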
def reduce_topology(ts):
"""
Returns a tree sequence with the minimal information required to represent
the tree topologies at its sites. Uses a left-to-right algorithm.
"""
tables = ts.dump_tables()
edge_map = {}
def add_edge(left, right, parent, child):
new_edge = tskit.Edge(left, right, parent, child)
if child not in edge_map:
edge_map[child] = new_edge
else:
edge = edge_map[child]
if edge.right == left and edge.parent == parent:
# Squash
edge.right = right
else:
tables.edges.append(edge)
edge_map[child] = new_edge
tables.edges.clear()
edge_buffer = []
first_site = True
for tree in ts.trees():
# print(tree.interval)
# print(tree.draw(format="unicode"))
if tree.num_sites > 0:
sites = list(tree.sites())
if first_site:
x = 0
# print("First site", sites)
first_site = False
else:
x = sites[0].position
# Flush the edge buffer.
for left, parent, child in edge_buffer:
add_edge(left, x, parent, child)
# Add edges for each node in the tree.
edge_buffer = []
for root in tree.roots:
for u in tree.nodes(root):
if u != root:
edge_buffer.append((x, tree.parent(u), u))
# Add the final edges.
for left, parent, child in edge_buffer:
add_edge(left, tables.sequence_length, parent, child)
# Flush the remaining edges to the table
for edge in edge_map.values():
tables.edges.append(edge)
tables.sort()
ts = tables.tree_sequence()
# Now simplify to remove redundant nodes.
return ts.simplify(map_nodes=True, filter_sites=False)
class TestReduceTopology:
"""
Tests to ensure that reduce topology in simplify is equivalent to the
reduce_topology function above.
"""
def verify(self, ts):
source_tables = ts.tables
X = source_tables.sites.position
position_count = {x: 0 for x in X}
position_count[0] = 0
position_count[ts.sequence_length] = 0
mts, node_map = reduce_topology(ts)
for edge in mts.edges():
assert edge.left in position_count
assert edge.right in position_count
position_count[edge.left] += 1
position_count[edge.right] += 1
if ts.num_sites == 0:
# We should have zero edges output.
assert mts.num_edges == 0
elif X[0] != 0:
# The first site (if it's not zero) should be mapped to zero so
# this never occurs in edges.
assert position_count[X[0]] == 0
minimised_trees = mts.trees()
minimised_tree = next(minimised_trees)
minimised_tree_sites = minimised_tree.sites()
for tree in ts.trees():
for site in tree.sites():
minimised_site = next(minimised_tree_sites, None)
if minimised_site is None:
minimised_tree = next(minimised_trees)
minimised_tree_sites = minimised_tree.sites()
minimised_site = next(minimised_tree_sites)
assert site.position == minimised_site.position
assert site.ancestral_state == minimised_site.ancestral_state
assert site.metadata == minimised_site.metadata
assert len(site.mutations) == len(minimised_site.mutations)
for mutation, minimised_mutation in zip(
site.mutations, minimised_site.mutations
):
assert mutation.derived_state == minimised_mutation.derived_state
assert mutation.metadata == minimised_mutation.metadata
assert mutation.parent == minimised_mutation.parent
assert node_map[mutation.node] == minimised_mutation.node
if tree.num_sites > 0:
mapped_dict = {
node_map[u]: node_map[v] for u, v in tree.parent_dict.items()
}
assert mapped_dict == minimised_tree.parent_dict
assert np.array_equal(ts.genotype_matrix(), mts.genotype_matrix())
edges = list(mts.edges())
squashed = squash_edges(mts)
assert len(edges) == len(squashed)
assert edges == squashed
# Verify against simplify implementations.
s = tests.Simplifier(
ts, ts.samples(), reduce_to_site_topology=True, filter_sites=False
)
sts1, _ = s.simplify()
sts2 = ts.simplify(reduce_to_site_topology=True, filter_sites=False)
t1 = mts.tables
        for sts in [sts1, sts2]:
t2 = sts.tables
assert t1.nodes == t2.nodes
assert t1.edges == t2.edges
assert t1.sites == t2.sites
assert t1.mutations == t2.mutations
assert t1.populations == t2.populations
assert t1.individuals == t2.individuals
return mts
def test_no_recombination_one_site(self):
ts = msprime.simulate(15, random_seed=1)
tables = ts.dump_tables()
tables.sites.add_row(position=0.25, ancestral_state="0")
mts = self.verify(tables.tree_sequence())
assert mts.num_trees == 1
def test_simple_recombination_one_site(self):
ts = msprime.simulate(15, random_seed=1, recombination_rate=2)
tables = ts.dump_tables()
tables.sites.add_row(position=0.25, ancestral_state="0")
mts = self.verify(tables.tree_sequence())
assert mts.num_trees == 1
def test_simple_recombination_fixed_sites(self):
ts = msprime.simulate(5, random_seed=1, recombination_rate=2)
tables = ts.dump_tables()
for x in [0.25, 0.5, 0.75]:
tables.sites.add_row(position=x, ancestral_state="0")
self.verify(tables.tree_sequence())
def get_integer_edge_ts(self, n, m):
recombination_map = msprime.RecombinationMap.uniform_map(m, 1, num_loci=m)
ts = msprime.simulate(n, random_seed=1, recombination_map=recombination_map)
assert ts.num_trees > 1
for edge in ts.edges():
assert int(edge.left) == edge.left
assert int(edge.right) == edge.right
return ts
def test_integer_edges_one_site(self):
ts = self.get_integer_edge_ts(5, 10)
tables = ts.dump_tables()
tables.sites.add_row(position=1, ancestral_state="0")
mts = self.verify(tables.tree_sequence())
assert mts.num_trees == 1
def test_integer_edges_all_sites(self):
ts = self.get_integer_edge_ts(5, 10)
tables = ts.dump_tables()
for x in range(10):
tables.sites.add_row(position=x, ancestral_state="0")
mts = self.verify(tables.tree_sequence())
assert mts.num_trees == ts.num_trees
def test_simple_recombination_site_at_zero(self):
ts = msprime.simulate(5, random_seed=1, recombination_rate=2)
tables = ts.dump_tables()
tables.sites.add_row(position=0, ancestral_state="0")
mts = self.verify(tables.tree_sequence())
assert mts.num_trees == 1
def test_simple_recombination(self):
ts = msprime.simulate(5, random_seed=1, recombination_rate=2, mutation_rate=2)
self.verify(ts)
def test_large_recombination(self):
ts = msprime.simulate(
25, random_seed=12, recombination_rate=5, mutation_rate=15
)
self.verify(ts)
def test_no_recombination(self):
ts = msprime.simulate(5, random_seed=1, mutation_rate=2)
self.verify(ts)
def test_no_mutation(self):
ts = msprime.simulate(5, random_seed=1)
self.verify(ts)
def test_zero_sites(self):
ts = msprime.simulate(5, random_seed=2)
assert ts.num_sites == 0
mts = ts.simplify(reduce_to_site_topology=True)
assert mts.num_trees == 1
assert mts.num_edges == 0
def test_many_roots(self):
ts = msprime.simulate(25, random_seed=12, recombination_rate=2, length=10)
tables = tsutil.decapitate(ts, ts.num_edges // 2).dump_tables()
for x in range(10):
tables.sites.add_row(x, "0")
self.verify(tables.tree_sequence())
def test_branch_sites(self):
ts = msprime.simulate(15, random_seed=12, recombination_rate=2, length=10)
ts = tsutil.insert_branch_sites(ts)
self.verify(ts)
def test_jiggled_samples(self):
ts = msprime.simulate(8, random_seed=13, recombination_rate=2, length=10)
ts = tsutil.jiggle_samples(ts)
self.verify(ts)
def search_sorted(a, v):
"""
Implementation of searchsorted based on binary search with the same
semantics as numpy's searchsorted. Used as the basis of the C
implementation which we use in the simplify algorithm.
"""
upper = len(a)
if upper == 0:
return 0
lower = 0
while upper - lower > 1:
mid = (upper + lower) // 2
if v >= a[mid]:
lower = mid
else:
upper = mid
offset = 0
if a[lower] < v:
offset = 1
return lower + offset
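# Quick illustrative check (not part of the test classes below; values are arbitrary):
# for a strictly increasing array, search_sorted should agree with np.searchsorted.
def _search_sorted_example():
    a = np.array([1.0, 2.0, 4.0, 7.0])
    return [(search_sorted(a, v), int(np.searchsorted(a, v))) for v in (0.5, 2.0, 3.0, 9.0)]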
class TestSearchSorted:
"""
Tests for the basic implementation of search_sorted.
"""
def verify(self, a):
a = np.array(a)
start, end = a[0], a[-1]
# Check random values.
np.random.seed(43)
for v in np.random.uniform(start, end, 10):
assert search_sorted(a, v) ==
|
np.searchsorted(a, v)
|
numpy.searchsorted
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from data.util import imresize
from scipy.io import loadmat
from torch.autograd import Variable
def DUF_downsample(x, scale=4):
"""Downsamping with Gaussian kernel used in the DUF official code
Args:
x (Tensor, [B, T, C, H, W]): frames to be downsampled.
scale (int): downsampling factor: 2 | 3 | 4.
"""
assert scale in [2, 3, 4], "Scale [{}] is not supported".format(scale)
def gkern(kernlen=13, nsig=1.6):
import scipy.ndimage.filters as fi
inp = np.zeros((kernlen, kernlen))
# set element at the middle to one, a dirac delta
inp[kernlen // 2, kernlen // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter mask
return fi.gaussian_filter(inp, nsig)
B, T, C, H, W = x.size()
x = x.view(-1, 1, H, W)
pad_w, pad_h = 6 + scale * 2, 6 + scale * 2 # 6 is the pad of the gaussian filter
r_h, r_w = 0, 0
if scale == 3:
r_h = 3 - (H % 3)
r_w = 3 - (W % 3)
x = F.pad(x, [pad_w, pad_w + r_w, pad_h, pad_h + r_h], "reflect")
gaussian_filter = (
torch.from_numpy(gkern(13, 0.4 * scale)).type_as(x).unsqueeze(0).unsqueeze(0)
)
x = F.conv2d(x, gaussian_filter, stride=scale)
x = x[:, :, 2:-2, 2:-2]
x = x.view(B, T, C, x.size(2), x.size(3))
return x
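# Minimal usage sketch (shapes are illustrative; assumes CPU tensors and scale=4):
def _duf_downsample_example():
    x = torch.rand(2, 5, 3, 64, 64)      # hypothetical [B, T, C, H, W] batch
    y = DUF_downsample(x, scale=4)
    return y.shape                        # expected: torch.Size([2, 5, 3, 16, 16])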
def PCA(data, k=2):
X = torch.from_numpy(data)
X_mean = torch.mean(X, 0)
X = X - X_mean.expand_as(X)
U, S, V = torch.svd(torch.t(X))
return U[:, :k] # PCA matrix
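# Illustrative call (random data; k is the number of principal directions kept):
def _pca_example():
    data = np.random.randn(100, 6).astype(np.float32)
    return PCA(data, k=2).shape           # expected: torch.Size([6, 2])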
def random_batch_kernel(
batch,
l=21,
sig_min=0.2,
sig_max=4.0,
rate_iso=1.0,
tensor=True,
random_disturb=False,
):
if rate_iso == 1:
sigma = np.random.uniform(sig_min, sig_max, (batch, 1, 1))
ax = np.arange(-l // 2 + 1.0, l // 2 + 1.0)
xx, yy = np.meshgrid(ax, ax)
xx = xx[None].repeat(batch, 0)
yy = yy[None].repeat(batch, 0)
kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
kernel = kernel / np.sum(kernel, (1, 2), keepdims=True)
return torch.FloatTensor(kernel) if tensor else kernel
else:
sigma_x = np.random.uniform(sig_min, sig_max, (batch, 1, 1))
sigma_y = np.random.uniform(sig_min, sig_max, (batch, 1, 1))
D = np.zeros((batch, 2, 2))
D[:, 0, 0] = sigma_x.squeeze() ** 2
D[:, 1, 1] = sigma_y.squeeze() ** 2
radians = np.random.uniform(-np.pi, np.pi, (batch))
mask_iso = np.random.uniform(0, 1, (batch)) < rate_iso
radians[mask_iso] = 0
sigma_y[mask_iso] = sigma_x[mask_iso]
U = np.zeros((batch, 2, 2))
U[:, 0, 0] = np.cos(radians)
U[:, 0, 1] = -np.sin(radians)
U[:, 1, 0] = np.sin(radians)
U[:, 1, 1] = np.cos(radians)
sigma = np.matmul(U, np.matmul(D, U.transpose(0, 2, 1)))
ax = np.arange(-l // 2 + 1.0, l // 2 + 1.0)
xx, yy = np.meshgrid(ax, ax)
xy = np.hstack((xx.reshape((l * l, 1)), yy.reshape(l * l, 1))).reshape(l, l, 2)
xy = xy[None].repeat(batch, 0)
inverse_sigma = np.linalg.inv(sigma)[:, None, None]
kernel = np.exp(
-0.5
* np.matmul(
np.matmul(xy[:, :, :, None], inverse_sigma), xy[:, :, :, :, None]
)
)
kernel = kernel.reshape(batch, l, l)
if random_disturb:
kernel = kernel + np.random.uniform(0, 0.25, (batch, l, l)) * kernel
kernel = kernel / np.sum(kernel, (1, 2), keepdims=True)
return torch.FloatTensor(kernel) if tensor else kernel
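# Sketch of the anisotropic construction above for a single kernel (illustrative values):
# Sigma = U diag(sig_x**2, sig_y**2) U^T with U a rotation by theta, and the kernel is
# exp(-0.5 * p^T Sigma^{-1} p) evaluated on the (l x l) grid of offsets p, then normalised
# to sum to 1.
def _single_aniso_kernel_example(l=21, sig_x=1.0, sig_y=3.0, theta=0.5):
    U = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    Sigma = U @ np.diag([sig_x ** 2, sig_y ** 2]) @ U.T
    ax = np.arange(-l // 2 + 1.0, l // 2 + 1.0)
    xx, yy = np.meshgrid(ax, ax)
    p = np.stack([xx, yy], axis=-1)                        # (l, l, 2) grid of offsets
    k = np.exp(-0.5 * np.einsum("ijk,kl,ijl->ij", p, np.linalg.inv(Sigma), p))
    return k / k.sum()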
def stable_batch_kernel(batch, l=21, sig=2.6, tensor=True):
sigma = sig
ax = np.arange(-l // 2 + 1.0, l // 2 + 1.0)
xx, yy = np.meshgrid(ax, ax)
xx = xx[None].repeat(batch, 0)
yy = yy[None].repeat(batch, 0)
kernel =
|
np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
|
numpy.exp
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 10d_viseme_tabular_model.ipynb (unless otherwise specified).
__all__ = ['save_state_dict', 'load_state_dict', 'NpBatchNorm1d', 'NpLinear', 'NpReLU', 'NpModel', 'get_class_count',
'load_tabular_model', 'create_confusion_matrix', 'plot_confusion_matrix', 'get_idxs_of_interest']
# Cell
from ..core import *
from pathlib import Path
import numpy as np
import json
# Cell
def save_state_dict(path, state_dict, **training_info):
"Saves `state_dict` and `training_info` to a new model directory"
path, model_id = Path(path), now()
output_path = path/f'model_{model_id}'
output_path.mkdir()
file_name = output_path/'state_dict.npz'
metadata = dict(path=path_to_str(path), output_path=path_to_str(output_path),
file_name=path_to_str(file_name), training_info=training_info)
with open(output_path/'metadata.json', 'w') as f: json.dump(metadata, f, indent=2)
np.savez(file_name, **{k:state_dict[k].detach().cpu().numpy() for k in state_dict})
with np.load(file_name) as _: pass # check that we didn't need to pickle
return output_path
# Cell
def load_state_dict(path):
"Load `state_dict.npz` from `path` (a model directory)"
return np.load(Path(path)/'state_dict.npz')
# Cell
class NpBatchNorm1d:
"Applies Batch Normalization"
# https://github.com/pytorch/pytorch/blob/420b37f3c67950ed93cd8aa7a12e673fcfc5567b/aten/src/ATen/native/Normalization.cpp#L61-L126
def __init__(self, weight, bias, running_mean, running_var, num_batches_tracked=None):
self.weight, self.bias = weight, bias
self.running_mean, self.running_std = running_mean, np.sqrt(running_var + 1e-5)
def __call__(self, x):
x = x - self.running_mean
x = x / self.running_std
x = x * self.weight
x = x + self.bias
return x
# Cell
class NpLinear:
"Applies a linear transformation"
def __init__(self, weight, bias=None):
self.weight, self.bias = weight.T, bias
def __call__(self, x):
x = x @ self.weight
if self.bias is not None:
x = x + self.bias
return x
# Cell
class NpReLU:
"Applies element wise max of x and zero"
def __call__(self, x):
return np.maximum(x, 0)
# Cell
class NpModel:
"A sequential module container"
def __init__(self, *modules):
self.modules = modules
def __call__(self, x):
for module in self.modules:
x = module(x)
return x
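# Minimal usage sketch of the numpy modules above (hypothetical weights, not from a saved
# state_dict): a two-layer network mapping 4 features to 3 classes.
def _np_model_example():
    rng = np.random.default_rng(0)
    model = NpModel(
        NpLinear(rng.normal(size=(8, 4)), np.zeros(8)),    # weight is (out, in), as in PyTorch
        NpReLU(),
        NpLinear(rng.normal(size=(3, 8)), np.zeros(3)),
    )
    return model(rng.normal(size=(2, 4))).shape            # -> (2, 3)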
# Cell
def get_class_count(model):
return model.modules[-1].weight.shape[1]
# Cell
def load_tabular_model(path):
"Load `state_dict.npz` from `path` (a model directory) and create a tabular model"
state_dict = load_state_dict(path)
# TODO: this only works for the model config we used ... TODO: make it a bit more generic
return NpModel(NpBatchNorm1d(**get_dict_subset(state_dict, 'bn_cont')),
NpLinear(**get_dict_subset(state_dict, 'layers.0.0')),
NpReLU(),
NpBatchNorm1d(**get_dict_subset(state_dict, 'layers.0.2')),
NpLinear(**get_dict_subset(state_dict, 'layers.1.0')),
NpReLU(),
NpBatchNorm1d(**get_dict_subset(state_dict, 'layers.1.2')),
NpLinear(**get_dict_subset(state_dict, 'layers.2.0')))
# Cell
def create_confusion_matrix(model, df, cont_names, y_name):
"Confusion matrix as a numpy array"
class_count = get_class_count(model)
confusion_matrix = np.zeros([class_count,class_count], dtype=int)
output = model(df[cont_names].to_numpy())
preds =
|
np.argmax(output, axis=1)
|
numpy.argmax
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import numpy as np
from astroquery.vizier import Vizier
def massTorres(teff, erteff, logg, erlogg, feh, erfeh):
"""
    Calculate stellar mass using the Torres et al. (2010) calibration.
Parameters
----------
teff, erteff : floats
Effective temperature and associated uncertainty.
logg, erlogg : floats
Surface gravity and associated uncertainty.
feh, erfeh : floats
Metallicity [Fe/H] and associated uncertainty.
Returns
-------
meanMass, sigMass : floats
Estimate for the stellar mass and associated uncertainty.
"""
# Number of Monte Carlo trials for the uncertainty calculation.
ntrials = 10000
randomteff = teff + erteff * np.random.randn(ntrials)
randomlogg = logg + erlogg * np.random.randn(ntrials)
randomfeh = feh + erfeh * np.random.randn(ntrials)
# Parameters for the Torres calibration
a1 = 1.5689
a2 = 1.3787
a3 = 0.4243
a4 = 1.139
a5 = -0.1425
a6 = 0.01969
a7 = 0.1010
X = np.log10(randomteff) - 4.1
logMass = a1 + a2*X + a3*X**2 + a4*X**3 + a5*randomlogg**2 + a6*randomlogg**3 + a7*randomfeh
meanlogMass = np.mean(logMass)
siglogMass = np.sum((logMass - meanlogMass)**2) / (ntrials - 1)
# Add (quadratically) the intrinsic error of the calibration (0.027 in log mass).
siglogMass = np.sqrt(0.027**2 + siglogMass)
meanMass = 10**meanlogMass
sigMass = 10**(meanlogMass + siglogMass) - meanMass
# Correct the mass for the offset relative to isochrone-derived masses.
if .7<=meanMass<=1.3:
# correction comes from Santos+(2013), the SWEET-Cat paper
randomMass = meanMass + sigMass * np.random.randn(ntrials)
corrected_Mass = 0.791 * randomMass**2 - 0.575 * randomMass + 0.701
meanMassCor =
|
np.mean(corrected_Mass)
|
numpy.mean
|
import os
import os.path as osp
import numpy as np
from glob import glob
from tqdm import tqdm
import mmcv
def disp_modulate(disp_map, max_value=1):
""" Transfer the value of disp maps to the [img] range -1 ~ 1
"""
EPS = 1e-3
Gamma = 0.3
EXPAND = 10
disp_map = (disp_map * EXPAND).astype(np.float32)
zero_part1 = disp_map < EPS
zero_part2 = disp_map > -EPS
zero_part = zero_part1 * zero_part2
sign = np.sign(disp_map)
disp_map = np.abs(disp_map)
disp_img = np.power(disp_map, Gamma).clip(0, 1)
disp_img = disp_img * sign
disp_img[zero_part] = 0 # range: [-1, 1]
if max_value == 255:
disp_img = (disp_img + 1) / 2 * 255
disp_img = np.rint(disp_img).clip(0, 255).astype(np.uint8)
return disp_img
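# Illustrative mapping (tiny hypothetical array): a disparity of 0.05 is expanded to 0.5,
# gamma-compressed to ~0.81, and with max_value=255 lands near 231; zero disparity maps to ~128.
def _disp_modulate_example():
    d = np.array([[0.0, 0.05, -0.05]], dtype=np.float32)
    return disp_modulate(d, max_value=255)                 # roughly [[128, 231, 24]]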
def disp_demodulate(disp_img):
""" Transfer the values of visualized disp images
from [0, 255] to the normal disp values
"""
EXPAND = 10
iGamma = 10 / 3
assert disp_img.dtype == np.uint8
zero_part1 = disp_img == 127
zero_part2 = disp_img == 128
zero_part = zero_part1 & zero_part2
disp_img = disp_img / 127.5 - 1
sign = np.sign(disp_img)
disp_img[zero_part] = 0
disp_img = np.abs(disp_img).clip(0, 1)
disp_map =
|
np.power(disp_img, iGamma)
|
numpy.power
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from dataclasses import dataclass, asdict
from typing import Optional, Literal, Tuple
from pathlib import Path
from tqdm.auto import tqdm
import json
import numpy as np
import random
import pytorch_lightning as pl
from gluonts.dataset.field_names import FieldName # type: ignore
from gluonts.dataset.repository.datasets import get_dataset, materialize_dataset # type: ignore
from torch.utils.data import DataLoader
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
import warnings
warnings.filterwarnings("ignore")
from meta.data.batch import TripletBatch
from meta.datasets.splits import DatasetSplits
from meta.data.sampling import TripletDataset
from meta.datasets.registry import register_data_module
@dataclass
class MetaData:
"""
Meta data for artificial datasets
"""
context_length_multiple: int
support_set_size: int
prediction_length: int
freq: str
support_length_multiple: int = 4
num_queries: int = 1
@classmethod
def parse_file(cls, file: Path):
with open(file) as json_file:
return MetaData(**json.load(json_file))
def save(self, file: Path):
with open(file, "w") as fp:
json.dump(asdict(self), fp)
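# Round-trip sketch (hypothetical values; `tmp` is a directory supplied by the caller):
def _metadata_roundtrip_example(tmp: Path) -> bool:
    meta = MetaData(context_length_multiple=2, support_set_size=5,
                    prediction_length=24, freq="1H")
    meta.save(tmp / "metadata.json")
    return MetaData.parse_file(tmp / "metadata.json") == meta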
@register_data_module
class ArtificialDataModule(pl.LightningDataModule):
"""
A data module which provides an artificial dataset.
"""
def __init__(
self,
dataset_name: str,
context_length_multiple: int,
support_length_multiple: int,
support_set_size: int,
num_queries: int = 1,
data_dir: Path = Path.home() / ".mxnet" / "gluont-ts",
prediction_length: Optional[int] = None,
test_set_size: int = 200,
val_set_size: int = 200,
train_set_size: int = 10000,
standardize: bool = False,
batch_size_train: int = 128,
batch_size_val: int = 1000,
batch_size_test: int = 1000,
num_workers: int = 0,
seed: Optional[int] = None,
):
super().__init__()
self.dataset_name = dataset_name
self.data_dir = data_dir
self.context_length_multiple = context_length_multiple
self.support_length_multiple = support_length_multiple
self._prediction_length = prediction_length
self.test_set_size = test_set_size
self.val_set_size = val_set_size
self.train_set_size = train_set_size
self.support_set_size = support_set_size
self.num_queries = num_queries
self.standardize = standardize
self.splits: DatasetSplits
self.batch_size_train = batch_size_train
self.batch_size_val = batch_size_val
self.batch_size_test = batch_size_test
self.num_workers = num_workers
self.seed = seed
@property
def context_length(self) -> int:
return self.context_length_multiple * self.meta.prediction_length
@property
def support_length(self) -> int:
return self.support_length_multiple * self.prediction_length
@property
def prediction_length(self) -> int:
return self._prediction_length or self.meta.prediction_length
@property
def root(self) -> Path:
"""
Returns the directory where all the data pertaining to this dataset is stored.
"""
return self.data_dir / "artificial" / self.dataset_name
@property
def meta(self) -> MetaData:
"""
Returns the dataset's metadata.
"""
return MetaData.parse_file(self.root / "metadata.json") if self.root.exists() else None
def setup(self, stage: Optional[str] = None) -> None:
self.generate()
self.splits = DatasetSplits(
self.meta, self.root, self.dataset_name, self.prediction_length, self.standardize
)
self.splits.train().prepare()
self.splits.val().prepare()
self.splits.test().prepare()
def train_dataloader(self) -> DataLoader[TripletBatch]:
split = self.splits.train()
return DataLoader(
TripletDataset(queries=split.data(), support_sets=split.support_set()),
collate_fn=TripletBatch.collate, # type: ignore
batch_size=self.batch_size_train,
num_workers=self.num_workers,
pin_memory=True,
)
def val_dataloader(self) -> DataLoader[TripletBatch]:
split = self.splits.val()
return DataLoader(
TripletDataset(queries=split.data(), support_sets=split.support_set()),
collate_fn=TripletBatch.collate, # type: ignore
batch_size=self.batch_size_val,
num_workers=self.num_workers,
pin_memory=True,
)
def test_dataloader(self) -> DataLoader[TripletBatch]:
        split = self.splits.test()
return DataLoader(
TripletDataset(queries=split.data(), support_sets=split.support_set()),
collate_fn=TripletBatch.collate, # type: ignore
batch_size=self.batch_size_test,
num_workers=self.num_workers,
pin_memory=True,
)
def generate(self) -> None:
meta = MetaData(
context_length_multiple=self.context_length_multiple,
support_length_multiple=self.support_length_multiple,
support_set_size=self.support_set_size,
prediction_length=self.prediction_length,
num_queries=self.num_queries,
freq="1H",
)
if self.root.exists():
# check if the meta data of the dataset fits the requirements
if self.meta == meta:
return
else:
raise ValueError(
"Meta data of artificial dataset not compatible with requirements"
)
self.root.mkdir(parents=True, exist_ok=True)
meta.save(self.root / "metadata.json")
for split, n_samples in zip(
["train", "val", "test"], [self.train_set_size, self.val_set_size, self.test_set_size]
):
self.generate_split(split, n_samples)
def generate_split(self, split: Literal["train", "val", "test"], n_samples: int) -> None:
queries, support_sets = zip(
*[
generate_artificial_tuplets(
dataset_name=self.dataset_name,
context_length=self.context_length,
support_length=self.support_length,
prediction_length=self.prediction_length,
support_set_size=self.support_set_size,
item_id=i,
)
for i in tqdm(range(n_samples), desc="generating artificial data")
]
)
_write_data_to_file(self.root / split / "data.json", queries)
_write_data_to_file(self.root / split / ".support_set.json", support_sets)
@classmethod
def name(cls) -> str:
return "dm_artificial"
def evaluate_model(self, **kwargs):
pass
def _write_data_to_file(file: Path, data: Tuple) -> None:
file.parent.mkdir(parents=True, exist_ok=True)
with file.open("w") as f:
content = "\n".join([json.dumps(d) for d in data])
f.write(content + "\n")
def generate_artificial_tuplets(
dataset_name: str,
context_length: int,
support_length: int,
prediction_length: int,
support_set_size: int,
item_id: int,
) -> Tuple:
if dataset_name == "marker":
noise_level = 0.01
context = np.random.normal(0, noise_level, context_length)
signal_type = random.choice([-1, 1])
target = signal_type * np.arange(0, 1, 1 / prediction_length)
query = np.concatenate((context, target))
# support time series without the marker
support_set = list(
np.random.normal(0, noise_level, context_length) for _ in range(support_set_size - 1)
)
# build support time series which contains the marker
marker = (
(-1)
* signal_type
* np.concatenate((np.arange(0, 1, 1 / 6), np.flip(np.arange(0, 1, 1 / 6))))
)
marker_start = np.random.choice(np.arange(0, context_length - len(marker)))
support_ts = np.random.normal(0, noise_level, context_length)
support_ts[marker_start : marker_start + len(marker)] += marker
# insert at random position in support set
support_set.insert(np.random.choice(support_set_size), support_ts)
q = {"target": query.tolist(), "item_id": item_id, "start": "2012-01-01 00:00:00"}
s = [
{"target": s.tolist(), "start": "2012-01-01 00:00:00", "item_id": item_id}
for s in support_set
]
return q, s
if dataset_name == "marker_v2":
noise_level = 0.01
context = np.random.normal(0, noise_level, context_length)
signal_type = random.choice([-1, 1])
target = signal_type * np.arange(0, 1, 1 / prediction_length)
query = np.concatenate((context, target))
# support time series without the marker
support_set = list(
np.random.normal(0, noise_level, context_length) for _ in range(support_set_size - 1)
)
# build support time series which contains the marker
ramp = np.arange(0, 1, 1 / 6)
if signal_type == -1:
marker = np.concatenate((ramp, np.flip(ramp)))
else:
marker = np.concatenate((ramp, np.array([1, 0.8, 1]), np.flip(ramp)))
marker_start = np.random.choice(np.arange(0, context_length - len(marker)))
support_ts = np.random.normal(0, noise_level, context_length)
support_ts[marker_start : marker_start + len(marker)] += marker
# insert at random position in support set
support_set.insert(np.random.choice(support_set_size), support_ts)
q = {"target": query.tolist(), "item_id": item_id, "start": "2012-01-01 00:00:00"}
s = [
{"target": s.tolist(), "start": "2012-01-01 00:00:00", "item_id": item_id}
for s in support_set
]
return q, s
if dataset_name == "marker_v3":
noise_level = 0.01
context = np.random.normal(0, noise_level, context_length)
signal_type = random.choice([-1, 0, 1])
target = signal_type * np.arange(0, 1, 1 / prediction_length)
query = np.concatenate((context, target))
# support time series without the marker
support_set = list(
np.random.normal(0, noise_level, context_length) for _ in range(support_set_size - 1)
)
# build support time series which contains the marker
ramp = np.arange(0, 1, 1 / 6)
if signal_type == -1:
marker = np.concatenate((ramp,
|
np.flip(ramp)
|
numpy.flip
|
import numpy as np
def transform_coord(y_coordMat, x_coordMat, rotationCenter, transformVect):
""" Transform x-y coordinate (y_mat & x_mat) by transformVect | round to int | return rotated y & x coord as vector"""
""" y_mat and x_mat are the coord to be rotated | rotationCenter [y;x] or [y;x;phi] are the centre of rotation by theta
transformVect [y;x;theta]: y & x are relative to rotationCenter if center [y;x], or relative to world ref frame if center [y;x;phi],
theta is the angle in rad which the coord to be rotated """
y_rc= rotationCenter[0]
x_rc= rotationCenter[1]
y_translate= transformVect[0]
x_translate= transformVect[1]
# change transform to be relative to rotationCenter frame if in form of [y;x;phi]
if rotationCenter.shape[0]>2:
y_translate= y_translate*np.cos(rotationCenter[2]) + x_translate*np.sin(rotationCenter[2])
x_translate= x_translate*np.cos(rotationCenter[2]) - y_translate*
|
np.sin(rotationCenter[2])
|
numpy.sin
|
"""
Tests for stochastic fictitious learning
"""
import numpy as np
from hypothesis import given
from hypothesis.extra.numpy import arrays
from nashpy.learning.stochastic_fictitious_play import (
get_distribution_response_to_play_count,
stochastic_fictitious_play,
)
@given(M=arrays(np.int8, (2, 2)))
def test_property_get_distribution_response_to_play_count(M):
etha = 2
epsilon_bar = 2
play_count = np.zeros(M.shape[0])
distribution_response = get_distribution_response_to_play_count(
A=M, play_count=play_count, epsilon_bar=epsilon_bar, etha=etha
)
assert len(distribution_response) == len(play_count)
    assert np.all(distribution_response >= 0)
assert np.isclose(np.sum(distribution_response), 1)
def test_get_distribution_response_to_play_count_2():
np.random.seed(0)
M = np.array([[3, 2], [7, 6]])
etha = 2
epsilon_bar = 2
play_count = [40, 30]
expected_distribution = np.array([0.1028108461, 0.8971891539])
distribution_response = get_distribution_response_to_play_count(
A=M, play_count=play_count, epsilon_bar=epsilon_bar, etha=etha
)
assert np.allclose(distribution_response, expected_distribution)
assert np.sum(distribution_response) == 1
def test_get_distribution_response_to_play_count_3():
np.random.seed(0)
M = np.array([[3, 2], [7, 6]])
etha = 0.2
epsilon_bar = 0.05
play_count = np.array([0, 0])
r_dist = get_distribution_response_to_play_count(
A=M, play_count=play_count, epsilon_bar=epsilon_bar, etha=etha
)
assert np.allclose(r_dist, np.array([1.97718056e-09, 9.99999998e-01]))
c_dist = get_distribution_response_to_play_count(
A=M.transpose(),
play_count=play_count,
epsilon_bar=epsilon_bar,
etha=etha,
)
assert np.allclose(c_dist, np.array([0.99340266, 0.00659734]))
def test_stochastic_fictitious_play_given_etha_epsilon():
np.random.seed(0)
iterations = 1
M = np.array([[3, 2], [7, 6]])
etha = 0.2
epsilon_bar = 0.05
counts_and_distributions = tuple(
stochastic_fictitious_play(
A=M, B=-M, etha=etha, epsilon_bar=epsilon_bar, iterations=iterations
)
)
playcounts, dist = counts_and_distributions[1]
r_playcounts, c_playcounts = playcounts
r_dist, c_dist = dist
assert np.array_equal(playcounts, [np.array([0, 1.0]),
|
np.array([0, 1.0])
|
numpy.array
|
from api import *
import matplotlib.pyplot as plt
import numpy as np
def plot_thirtydegree_graph():
x = np.array([200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
y = np.array([0.072,0.300,0.679,1.216,1.963,2.718,3.778,4.840,6.136,7.864])
y1 =
|
np.polyfit(x, y, 4)
|
numpy.polyfit
|
import numpy as np
from collections import deque
class Retrospective:
"""
    Hierarchical Sequential D-MDL algorithm with SCAW2 (Retrospective)
"""
def __init__(
self,
encoding_func,
d,
M=5,
min_datapoints=1,
delta_0=0.05,
delta_1=0.05,
delta_2=0.05,
how_to_drop='all',
order=-1,
reliability=True
):
"""
Args:
encoding_func: encoding function
d: dimension of the parametric class
M: maximum number of buckets which have the same data points
min_datapoints: minimum number of data points to calculate the NML code length
delta_0: upper bound on the Type-I error probability of 0th D-MDL
delta_1: upper bound on the Type-I error probability of 1st D-MDL
delta_2: upper bound on the Type-I error probability of 2nd D-MDL
            how_to_drop: 'cutpoint': drop the data before the optimal cut point, 'all': drop all the data
            order: determine which order of D-MDL will be calculated. Note that all the D-MDLs
                are calculated if order==-1.
reliability: use asymptotic reliability or not
"""
self.__encoding_func = encoding_func
self.__d = d
self.__M = M
self.__min_datapoints = min_datapoints
self.__delta_0 = delta_0
self.__delta_1 = delta_1
self.__delta_2 = delta_2
self.__how_to_drop = how_to_drop
self.__buckets = deque([])
self.__order = order
self.__reliability = reliability
assert how_to_drop in ('cutpoint', 'all')
def calc_all_stats(self, X):
"""
Calculate alarms, scores, cutpoints, and window sizes.
Args:
X: input data
Returns:
list, list, list, ndarray: alarms, scores, cutpoints, and window sizes
"""
detector = Prospective(
encoding_func=self.__encoding_func,
d=self.__d,
M=self.__M,
min_datapoints=self.__min_datapoints,
delta_0=self.__delta_0,
delta_1=self.__delta_1,
delta_2=self.__delta_2,
how_to_drop=self.__how_to_drop,
order=self.__order,
reliability=self.__reliability
)
# alarms
alarms_0 = []
alarms_1 = []
alarms_2 = []
# maximum scores
max_scores_0 = []
max_scores_1 = []
max_scores_2 = []
# optimal cut points
cutpoints_0 = []
cutpoints_1 = []
cutpoints_2 = []
# window sizes
window_sizes = []
for i, X_i in enumerate(X):
alarms, max_scores, cutpoints, window_size = detector.update(
X_i)
alarms_0.append(alarms[0])
alarms_1.append(alarms[1])
alarms_2.append(alarms[2])
max_scores_0.append(max_scores[0])
max_scores_1.append(max_scores[1])
max_scores_2.append(max_scores[2])
cutpoints_0.append(cutpoints[0])
cutpoints_1.append(cutpoints[1])
cutpoints_2.append(cutpoints[2])
window_sizes.append(window_size)
cutpoints_0 = np.array(cutpoints_0)
cutpoints_1 = np.array(cutpoints_1)
cutpoints_2 = np.array(cutpoints_2)
alarms_0 = np.array(alarms_0)
alarms_1 = np.array(alarms_1)
alarms_2 = np.array(alarms_2)
cutpoints_0 = cutpoints_0[~np.isnan(cutpoints_0)]
cutpoints_1 = cutpoints_1[~np.isnan(cutpoints_1)]
cutpoints_2 = cutpoints_2[~np.isnan(cutpoints_2)]
alarms_0 = np.where(alarms_0 > 0.5)[0]
alarms_1 = np.where(alarms_1 > 0.5)[0]
alarms_2 = np.where(alarms_2 > 0.5)[0]
for i in range(len(alarms_0)):
if i != 0:
cutpoints_0[i] += alarms_0[i - 1]
for i in range(len(alarms_1)):
if i != 0:
cutpoints_1[i] += alarms_1[i - 1]
for i in range(len(alarms_2)):
if i != 0:
cutpoints_2[i] += alarms_2[i - 1]
return [alarms_0, alarms_1, alarms_2], [np.array(max_scores_0), np.array(max_scores_1), np.array(max_scores_2)], [cutpoints_0, cutpoints_1, cutpoints_2], np.array(window_sizes)
def calc_scores(self, X):
"""
        Calculate scores. Return the D-MDL scores of order self.__order.
        If self.__order==-1, return all orders of D-MDL as a list.
Args:
X: input data
Returns:
Union[ndarray, list]: scores of the input data
"""
_, max_scores, _, _ = self.calc_all_stats(X)
if self.__order != -1:
return max_scores[self.__order]
else:
return max_scores
def make_alarms(self, X):
"""
        Make alarms with the threshold. Return the alarms for D-MDL of order self.__order.
        If self.__order==-1, return all orders of D-MDL as a list.
Args:
X: input data
Returns:
            Union[ndarray, list]: indices of alarms
"""
alarms, _, _, _ = self.calc_all_stats(X)
if self.__order != -1:
return alarms[self.__order]
else:
return alarms
class Prospective:
"""
Hierarchical Sequential D-MDL algorithm with SCAW2 (Prospective)
"""
def __init__(
self,
encoding_func,
d,
M=5,
min_datapoints=1,
delta_0=0.05,
delta_1=0.05,
delta_2=0.05,
how_to_drop='all',
order=-1,
reliability=True
):
"""
Args:
encoding_func: encoding function
d: dimension of the parametric class
M: maximum number of buckets which have the same data points
min_datapoints: minimum number of data points to calculate the NML code length
delta_0: upper bound on the Type-I error probability of 0th D-MDL
delta_1: upper bound on the Type-I error probability of 1st D-MDL
delta_2: upper bound on the Type-I error probability of 2nd D-MDL
            how_to_drop: 'cutpoint': drop the data before the optimal cut point, 'all': drop all the data
            order: determine which order of D-MDL will be calculated. Note that all the D-MDLs
                are calculated if order==-1.
reliability: use asymptotic reliability or not
"""
self.__encoding_func = encoding_func
self.__d = d
self.__M = M
self.__min_datapoints = min_datapoints
self.__delta_0 = delta_0
self.__delta_1 = delta_1
self.__delta_2 = delta_2
self.__how_to_drop = how_to_drop
self.__buckets = deque([])
self.__order = order
self.__reliability = reliability
assert how_to_drop in ('cutpoint', 'all')
def update(self, x):
"""
calculate the score of the input datum
Args:
x: input datum
Returns:
list, list, list, ndarray: alarms, scores, cutpoints, window size
"""
# compute the sufficient statistics of the input datum
newT = self._suffi_stats(x)
self._combine(newT) # combine buckets
# the number of data contained in buckets
n = sum(map((lambda x: 2 ** x[3]), self.__buckets))
# alarms for change and change signs
alarm_0 = 0
alarm_1 = 0
alarm_2 = 0
# cut points for change and change signs
cutpoint_0 = np.nan
cutpoint_1 = np.nan
cutpoint_2 = np.nan
# calculate all the order of D-MDL
scores_0, scores_1, scores_2 = self._calc_stats()
# calculate maximum scores of each order of D-MDL
if np.isnan(scores_0).all():
max_score_0 = np.nan
else:
max_score_0 = np.nanmax(scores_0)
if np.isnan(scores_1).all():
max_score_1 = np.nan
else:
max_score_1 = np.nanmax(scores_1)
if np.isnan(scores_2).all():
max_score_2 = np.nan
else:
max_score_2 = np.nanmax(scores_2)
        # max_score_0 being np.nan means the number of data points is not
        # sufficient to compute the 0th D-MDL
if np.isnan(max_score_0):
ret_alarms = [alarm_0, alarm_1, alarm_2]
ret_scores = [max_score_0 / n, max_score_1 / n, max_score_2 / n]
ret_cutpoints = [cutpoint_0, cutpoint_1, cutpoint_2]
return ret_alarms, ret_scores, ret_cutpoints, n
# 0th alarm
if max_score_0 >= self._calculate_threshold(nn=len(self.__buckets) - 1, n=n, order=0):
alarm_0 = 1
# 1st alarm
if np.isnan(max_score_1) == False and max_score_1 >= self._calculate_threshold(nn=len(self.__buckets) - 1, n=n, order=1):
cut_1_bucket = np.nanargmax(scores_1)
cutpoint_1 = sum(map(
(lambda x: 2 ** x[3]), [self.__buckets[i] for i in range(0, cut_1_bucket + 1)]))
alarm_1 = 1
# 2nd alarm
if np.isnan(max_score_2) == False and max_score_2 >= self._calculate_threshold(nn=len(self.__buckets) - 1, n=n, order=2):
cut_2_bucket = np.nanargmax(scores_2)
cutpoint_2 = sum(map(
(lambda x: 2 ** x[3]), [self.__buckets[i] for i in range(0, cut_2_bucket + 1)]))
alarm_2 = 1
if alarm_0 == 1: # if 0th alarm was raised
cut_0_bucket = np.nanargmax(scores_0)
cutpoint_0 = sum(map(
(lambda x: 2 ** x[3]), [self.__buckets[i] for i in range(0, cut_0_bucket + 1)]))
for j in range(0, cut_0_bucket + 1):
self.__buckets.popleft()
capacity_sum = sum(map((lambda x: 2 ** x[3]), self.__buckets))
ret_alarms = [alarm_0, alarm_1, alarm_2]
ret_scores = [max_score_0 / n, max_score_1 / n, max_score_2 / n]
ret_cutpoints = [cutpoint_0, cutpoint_1, cutpoint_2]
if self.__how_to_drop == 'cutpoint':
return ret_alarms, ret_scores, ret_cutpoints, capacity_sum
if self.__how_to_drop == 'all':
self.__buckets = deque([])
return ret_alarms, ret_scores, ret_cutpoints, 0
else:
ret_alarms = [alarm_0, alarm_1, alarm_2]
ret_scores = [max_score_0 / n, max_score_1 / n, max_score_2 / n]
ret_cutpoints = [cutpoint_0, cutpoint_1, cutpoint_2]
return ret_alarms, ret_scores, ret_cutpoints, n
def _combine(self, newT):
"""
        Combine a new datum into the buckets
        according to the "bucket rule".
Args:
newT: input datum
"""
# make a new datum into a bucket: form of
        # (suffi_stats, suffi_stats_right, suffi_stats_left, log(length))
# suffi_stats contains the sufficient statistics of the bucket
        # suffi_stats_right contains those of the bucket without the rightmost missing datum
        # suffi_stats_left contains those of the bucket without the leftmost
        # missing datum
# append a datum
empty =
|
np.zeros(newT.shape)
|
numpy.zeros
|
import networkx as nx
from random import random, choice
import numpy as np
from matplotlib.collections import LineCollection
lobby_color = np.array([0, 1.0, 0])
party_color = np.array([0, 1.0, 0])
default_color = np.array([0, 0, 0])
high_charisma_color = np.array([0, 0, 1.0])
high_firmness_color = np.array([1.0, 0, 0])
high_charisma_and_firmness_color = np.array([1.0, 0, 1.0])
within_region_edge_color =
|
np.array([0.0, 0.0, 0.0, 1.0])
|
numpy.array
|
"""
This example script does the following:
-Loads individual files from the location defined in datasets.tsv file
-Parses individual files in memory (inMemory=True in the "for onename in in_files" loop)
-If your system does not have enough memory, you might need to switch to hdf5 here.
-Merges files corresponding to the same experiment together, on the HDD.
-Filters datasets, builds heatmaps
-Combines multiple replicas of the same experiment together, builds heatmaps
--Datasets are defined in the datasets.tsv file
--genome is defined by the genomeFolder function and the workingGenome identifier
--output files are arranged to folders named by their workingGenome IDs
Warnings:
Running this over NFS might cause unexpected slow-downs because NFS is
unhappy with repeated read/write cycles to the same file
You could do entire thing in memory, if you have RAM or your datasets are small.
Actually, using HDF5 is then equivalent to storing compressed data in RAM,
and might be in fact pretty fast.
General recommendation: if you have 16+GB of RAM, and .sra (.fastq.gz) files were less than 30GB, then you should be fine with parsing things in memory.
"""
from multiprocessing import Pool
from hiclib.fragmentHiC import HiCdataset as HiCdatasetorig
from mirnylib.systemutils import fmap, setExceptionHook
import numpy as np
import os
import sys
from mirnylib.numutils import uniqueIndex
import pyximport; pyximport.install()
setExceptionHook()
def ensure(f):
"creates directory for the file if doesn't exist, and returns filename"
d = os.path.dirname(f)
if os.path.isdir(d):
return f
else:
try:
os.makedirs(d)
except:
raise ValueError("Cannot create directory")
return f
def genomeFolder(name):
return os.path.join("/home/magus/HiC2011/data", name) # Fetch genome folder by genome name
class HiCdataset(HiCdatasetorig):
"Modification of HiCDataset to include all filters"
def filterLessThanDistance(self):
        # This is the old function used to filter "duplicates".
        # After the final submission of the manuscript, it was replaced by a better function that does the same,
        # but at bp resolution, not 100 bp.
M = self.N
for i in range(5):
for j in range(5):
chrStrandID = 10000000 * 10000000 * (np.array(self.chrms1 * (self.strands1 + 1), dtype = np.int64) * 100 + self.chrms2 * (self.strands2 + 1))
print(len(np.unique(chrStrandID)))
posid = np.array((self.cuts1 + i * 100) // 500, dtype = np.int64) * 10000000 + (self.cuts2 + i * 100) // 500
N = self.N
self.maskFilter(uniqueIndex(posid + chrStrandID))
print(N, "filtered to", self.N)
self.metadata["321_quasiDuplicatesRemoved"] = M - self.N
def filterLessThanDistanceUpgraded(self, distance=500):
# This is a faster and more efficient way to remove reads that start/end within 500bp
# It is written in Cython, so it operates at 1bp resolution.
import maskRemover
c1 = np.array(self.chrms1, dtype = np.int16)
c2 = np.array(self.chrms2, dtype = np.int16)
p1 = np.array(self.cuts1, dtype = np.int32)
p2 = np.array(self.cuts2, dtype = np.int32)
s1 = np.array(self.strands1, dtype = np.int8)
s2 = np.array(self.strands2, dtype = np.int8)
removeMask = maskRemover.duplicateRemoveMask(c1, c2, p1, p2, s1, s2, offset=distance, method="max")
M = self.N
self.maskFilter(removeMask == 0)
self.metadata["321_quasiDuplicatesRemoved"] = M - self.N
coolResolutions = [10000000, 5000000, 2000000, 1000000, 500000, 200000, 100000, 40000, 20000, 10000, 5000, 2000, 1000]
skip = 1 # how many to skip for single replica datasets
def refineDataset(filenames):
"""
Parameters
----------
filenames[0] is a list of filenames of incoming files
    filenames[1] is a folder for the outgoing file
    filenames[2] is a working genome, that is, the output directory
filenames[3] is an enzyme for a given experiment
create : bool, optional
If True, parse each file.
If False, assume that files were already parsed
(e.g. if you are just playing around with filtering parameters)
delete : bool, optional
If True, delete parsed files after merging.
Man, these files may be huge... if you don't have a 10TB RAID, this may be useful.
parseInMemory : bool, optional
Perform parsing input files in memory.
"""
in_files = filenames[0]
out_file = filenames[1]
workingGenome = filenames[2]
enzyme = 500
def parseMappedFile(onename):
np.random.seed()
# create dataset in memory, parse and then save to destination
TR = HiCdataset("bla" + str(np.random.randint(100000000000)), genome=genomeFolder(workingGenome),
maximumMoleculeLength=2000, enzymeName=enzyme, tmpFolder="tmp",dictToStoreIDs="dict",
inMemory=True, compression=None) # remove inMemory if you don't have enough RAM
TR.parseInputData(dictLike=onename)
TR.save(ensure(onename + "_parsed.frag"))
list(map(parseMappedFile, in_files))
"Merging files alltogether, applying filters"
TR = HiCdataset("bla" + str(
|
np.random.randint(100000000000)
|
numpy.random.randint
|
"""
Basic utilities for working with images.
Any function here that takes arrays can take either numpy or cupy arrays.
All functions are typically faster with GPU arrays except trim_zeros().
"""
from functools import lru_cache
import numpy
import scipy.ndimage
EPS = numpy.spacing(1)
EPS_SQRT = numpy.sqrt(EPS)
FLOAT64_NMANT = numpy.finfo(float).nmant
BIT_TYPES = [numpy.bool_, bool] # bool8?
INT_TYPES = numpy.sctypes['int'] + [int]
UINT_TYPES = numpy.sctypes['uint']
FLOAT_TYPES = numpy.sctypes['float'] + [float]
BASIC_TYPES = BIT_TYPES + INT_TYPES + UINT_TYPES + FLOAT_TYPES
try:
import cupy
import cupyx.scipy.ndimage
HAS_CUPY = True
except ImportError:
HAS_CUPY = False
##### Image Verification #####
def is_on_gpu(arr):
"""Checks if an array is on the GPU (i.e. it uses cupy)."""
return HAS_CUPY and isinstance(arr, cupy.ndarray)
def get_array_module(*args):
"""
Returns either the numpy or cupy module, cupy module is returned if any argument is on the GPU.
"""
return cupy if any(is_on_gpu(arg) for arg in args) else numpy
def get_ndimage_module(*args):
"""
Returns either the scipy.ndimage or cupyx.scipy.ndimage module, cupy module is returned if any
argument is on the GPU.
"""
return cupyx.scipy.ndimage if any(is_on_gpu(arg) for arg in args) else scipy.ndimage
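# Usage sketch: dispatch to numpy/scipy or cupy/cupyx depending on where `arr` lives.
def _gaussian_blur_example(arr):
    ndi = get_ndimage_module(arr)          # scipy.ndimage or cupyx.scipy.ndimage
    return ndi.gaussian_filter(arr, sigma=1.0)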
def as_numpy(arr):
"""Get the given array as a numpy array."""
return arr.get() if is_on_gpu(arr) else
|
numpy.asanyarray(arr)
|
numpy.asanyarray
|
"""Run a model simulation."""
# Default climate data is ERA-Interim; to use CMIP5, specify a filename via the argument:
# (Command line) python run_simulation_list_multiprocess.py -gcm_list_fn=C:\...\gcm_rcpXX_filenames.txt
# - Default is running ERA-Interim in parallel with five processors.
# (Spyder) %run run_simulation_list_multiprocess.py C:\...\gcm_rcpXX_filenames.txt -option_parallels=0
# - Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Built-in libraries
import argparse
import collections
import inspect
import multiprocessing
import os
import time
# External libraries
import pandas as pd
import pickle
import pymc
from pymc import deterministic
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
from scipy import stats
import xarray as xr
import salem
# Local libraries
import class_climate
#import class_mbdata
import pygem.pygem_input as pygem_prms
from pygem.massbalance import PyGEMMassBalance
#from pygem.glacierdynamics import MassRedistributionCurveModel
from pygem.oggm_compat import single_flowline_glacier_directory, single_flowline_glacier_directory_with_calving
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygem.pygem_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
#from oggm import cfg
from oggm import graphics
from oggm import tasks
from oggm import utils
from oggm.core import climate
from oggm.core.flowline import FluxBasedModel
from oggm.core.inversion import calving_flux_from_depth
import math
import torch
import gpytorch
import sklearn.model_selection
#%% FUNCTIONS
def getparser():
"""
Use argparse to add arguments from the command line
Parameters
----------
ref_gcm_name (optional) : str
reference gcm name
num_simultaneous_processes (optional) : int
number of cores to use in parallels
option_parallels (optional) : int
switch to use parallels or not
rgi_glac_number_fn : str
filename of .pkl file containing a list of glacier numbers which is used to run batches on the supercomputer
rgi_glac_number : str
rgi glacier number to run for supercomputer
option_ordered : int
option to keep glaciers ordered or to grab every n value for the batch
(the latter helps make sure run times on each core are similar as it removes any timing differences caused by
regional variations)
progress_bar : int
Switch for turning the progress bar on or off (default = 0 (off))
debug : int
Switch for turning debug printing on or off (default = 0 (off))
Returns
-------
Object containing arguments and their respective values.
"""
parser = argparse.ArgumentParser(description="run calibration in parallel")
# add arguments
parser.add_argument('-ref_gcm_name', action='store', type=str, default=pygem_prms.ref_gcm_name,
help='reference gcm name')
parser.add_argument('-num_simultaneous_processes', action='store', type=int, default=4,
help='number of simultaneous processes (cores) to use')
parser.add_argument('-option_parallels', action='store', type=int, default=1,
help='Switch to use or not use parallels (1 - use parallels, 0 - do not)')
parser.add_argument('-rgi_glac_number_fn', action='store', type=str, default=None,
help='Filename containing list of rgi_glac_number, helpful for running batches on spc')
parser.add_argument('-option_ordered', action='store', type=int, default=1,
help='switch to keep lists ordered or not')
parser.add_argument('-progress_bar', action='store', type=int, default=0,
help='Boolean for the progress bar to turn it on or off (default 0 is off)')
parser.add_argument('-debug', action='store', type=int, default=0,
help='Boolean for debugging to turn it on or off (default 0 is off)')
parser.add_argument('-rgi_glac_number', action='store', type=str, default=None,
help='rgi glacier number for supercomputer')
return parser
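# Usage sketch (values are illustrative): build the parser and parse arguments, overriding defaults as needed.
#     args = getparser().parse_args(['-option_parallels', '0', '-debug', '1'])
#     print(args.num_simultaneous_processes)   # 4 unless overridden on the command line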
#%%
def mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=None, t1=None, t2=None,
option_areaconstant=1, return_tbias_mustmelt=False, return_tbias_mustmelt_wmb=False,
# return_volremaining=False
):
"""
Run the mass balance and calculate the mass balance [mwea]
Parameters
----------
option_areaconstant : Boolean
switch to keep the glacier area constant during the mass balance calculation
Returns
-------
mb_mwea : float
mass balance [m w.e. a-1]
"""
# RUN MASS BALANCE MODEL
mbmod = PyGEMMassBalance(gdir, modelprms, glacier_rgi_table, fls=fls, option_areaconstant=True,
debug=pygem_prms.debug_mb, debug_refreeze=pygem_prms.debug_refreeze)
years = np.arange(0, int(gdir.dates_table.shape[0]/12))
for year in years:
mbmod.get_annual_mb(fls[0].surface_h, fls=fls, fl_id=0, year=year, debug=False)
# Option for must melt condition
if return_tbias_mustmelt:
# Number of years and bins with negative climatic mass balance
nbinyears_negmbclim = len(np.where(mbmod.glac_bin_massbalclim_annual < 0)[0])
return nbinyears_negmbclim
elif return_tbias_mustmelt_wmb:
nbinyears_negmbclim = len(np.where(mbmod.glac_bin_massbalclim_annual < 0)[0])
t1_idx = gdir.mbdata['t1_idx']
t2_idx = gdir.mbdata['t2_idx']
nyears = gdir.mbdata['nyears']
mb_mwea = mbmod.glac_wide_massbaltotal[t1_idx:t2_idx+1].sum() / mbmod.glac_wide_area_annual[0] / nyears
return nbinyears_negmbclim, mb_mwea
# Otherwise return specific mass balance
else:
# Specific mass balance [mwea]
t1_idx = gdir.mbdata['t1_idx']
t2_idx = gdir.mbdata['t2_idx']
nyears = gdir.mbdata['nyears']
mb_mwea = mbmod.glac_wide_massbaltotal[t1_idx:t2_idx+1].sum() / mbmod.glac_wide_area_annual[0] / nyears
return mb_mwea
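# Call patterns used later in this script (gdir, modelprms, glacier_rgi_table, and fls are set up in main()):
#     mb_mwea = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
#     nbinyears_negmbclim = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls, return_tbias_mustmelt=True)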
#def retrieve_tbias_bnds(gdir, modelprms, glacier_rgi_table, fls=None, debug=False):
# """
# Calculate parameters for prior distributions for the MCMC analysis
# """
# # Maximum mass loss [mwea] (based on consensus ice thickness estimate)
# with open(gdir.get_filepath('consensus_mass'), 'rb') as f:
# consensus_mass = pickle.load(f)
# mb_max_loss = -1 * consensus_mass / pygem_prms.density_water / gdir.rgi_area_m2 / (gdir.dates_table.shape[0] / 12)
#
# # ----- TEMPERATURE BIAS UPPER BOUND -----
# # Temperature where no melt
# tbias_max_acc = (-1 * (gdir.historical_climate['temp'] + gdir.historical_climate['lr'] *
# (fls[0].surface_h.min() - gdir.historical_climate['elev'])).max())
# if debug:
# print('tbias_max_acc:', np.round(tbias_max_acc,2))
#
# # Looping forward and backward to ensure optimization does not get stuck
# modelprms['tbias'] = tbias_max_acc
# mb_mwea_1 = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# # use absolute value because with area evolving the maximum value is a limit
# while mb_mwea_1 > mb_max_loss:
# modelprms['tbias'] = modelprms['tbias'] + 1
# mb_mwea_1 = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# if debug:
# print('TC:', np.round(modelprms['tbias'],2), 'mb_mwea_1:', np.round(mb_mwea_1,2),
# 'mb_max_loss:', np.round(mb_max_loss,2))
# # Looping backward for tempchange at max loss
# while mb_mwea_1 < mb_max_loss:
# modelprms['tbias'] = modelprms['tbias'] - 0.05
# mb_mwea_1 = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# if debug:
# print('tbias:', np.round(modelprms['tbias'],2), 'mb_mwea_1:', np.round(mb_mwea_1,2),
# 'mb_max_loss:', np.round(mb_max_loss,2))
# tbias_max_loss = modelprms['tbias']
# tbias_bndhigh = tbias_max_loss
# if debug:
# print('tbias_bndhigh:', np.round(tbias_bndhigh,2), 'mb_max_loss:', np.round(mb_max_loss,2))
#
# # ----- TEMPERATURE BIAS LOWER BOUND -----
# # Since the mass balance ablation is conditional on the glacier evolution, there can be cases where higher
# # temperature biases still have 0 for nbinyears_negmbclim. Hence, the need to loop beyond the first instance, and
# # then go back and check that you're using the good cases from there onward. This ensures starting point is good.
# modelprms['tbias'] = tbias_max_acc
# nbinyears_negmbclim = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls, return_tbias_mustmelt=True)
# nbinyears_negmbclim_list = [nbinyears_negmbclim]
# tbias_negmbclim_list = [modelprms['tbias']]
# tbias_smallstep_switch = False
# while nbinyears_negmbclim < 10 and modelprms['tbias'] < tbias_bndhigh:
# # Switch from large to small step sizes to speed up calculations
# if tbias_smallstep_switch == False:
# tbias_stepsize = 1
# else:
# tbias_stepsize = 0.05
# tbias_old = modelprms['tbias']
# modelprms['tbias'] = modelprms['tbias'] + tbias_stepsize
# nbinyears_negmbclim = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls, return_tbias_mustmelt=True)
#        # Record if using big step and there is no melt, or if using small step and there is melt
# if nbinyears_negmbclim == 0 or (nbinyears_negmbclim > 0 and tbias_smallstep_switch == True):
# nbinyears_negmbclim_list.append(nbinyears_negmbclim)
# tbias_negmbclim_list.append(modelprms['tbias'])
#        # First time nbinyears_negmbclim is > 0, flip switch to use small step and restart with last tbias
# if nbinyears_negmbclim > 0 and tbias_smallstep_switch == False:
# tbias_smallstep_switch = True
# modelprms['tbias'] = tbias_old
# nbinyears_negmbclim = 0
# if debug:
# print('tbias:', np.round(modelprms['tbias'],2), 'nbinyears_negmbclim:', nbinyears_negmbclim)
# tbias_bndlow = tbias_negmbclim_list[np.where(np.array(nbinyears_negmbclim_list) == 0)[0][-1] + 1]
# if debug:
# print('tbias_bndlow:', np.round(tbias_bndlow,2))
#
# return tbias_bndlow, tbias_bndhigh, mb_max_loss
class ExactGPModel(gpytorch.models.ExactGP):
""" Use the simplest form of GP model, exact inference """
def __init__(self, train_x, train_y, likelihood):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=3))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
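# Minimal prediction sketch for the model above (illustrative names; train_x is an [N,3] tensor and
# train_y an [N] tensor, both already normalized):
#     likelihood = gpytorch.likelihoods.GaussianLikelihood()
#     gp = ExactGPModel(train_x, train_y, likelihood)
#     gp.eval(); likelihood.eval()
#     with torch.no_grad():
#         pred = likelihood(gp(test_x))    # MultivariateNormal: pred.mean, pred.confidence_region()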
def create_emulator(glacier_str, sims_df, y_cn,
X_cns=['tbias','kp','ddfsnow'],
em_fp=pygem_prms.output_filepath + 'emulator/', debug=False):
# This is required for the supercomputer such that resources aren't stolen from other cpus
torch.set_num_threads(1)
assert y_cn in sims_df.columns, 'emulator error: y_cn not in sims_df'
X = sims_df.loc[:,X_cns].values
y = sims_df[y_cn].values
# Normalize data
X_mean = X.mean(axis=0)
X_std = X.std(axis=0)
X_norm = (X - X_mean) / X_std
y_mean = y.mean()
y_std = y.std()
y_norm = (y - y_mean) / y_std
# Split into training and test data and cast to torch tensors
X_train,X_test,y_train,y_test = [torch.tensor(x).to(torch.float)
for x in sklearn.model_selection.train_test_split(X_norm,y_norm)]
# Add a small amount of noise
y_train += torch.randn(*y_train.shape)*0.01
# initialize likelihood and model
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X_train, y_train, likelihood)
# Plot test set predictions prior to training
# Get into evaluation (predictive posterior) mode
model.eval()
likelihood.eval()
with torch.no_grad():#, gpytorch.settings.fast_pred_var():
y_pred = likelihood(model(X_test))
idx = np.argsort(y_test.numpy())
with torch.no_grad():
lower, upper = y_pred.confidence_region()
if debug:
f, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.plot(y_test.numpy()[idx], y_pred.mean.numpy()[idx], 'k*')
ax.fill_between(y_test.numpy()[idx], lower.numpy()[idx], upper.numpy()[idx], alpha=0.5)
plt.show()
# ----- Find optimal model hyperparameters -----
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.03) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(1000):
# Zero gradients from previous iteration
optimizer.zero_grad()
# Output from model
output = model(X_train)
# Calc loss and backprop gradients
loss = -mll(output, y_train)
loss.backward()
# if debug and i%100==0:
# print(i, loss.item(), model.covar_module.base_kernel.lengthscale[0],
# model.likelihood.noise.item())
optimizer.step()
# Plot posterior distributions (with test data on x-axis)
# Get into evaluation (predictive posterior) mode
model.eval()
likelihood.eval()
with torch.no_grad():#, gpytorch.settings.fast_pred_var():
y_pred = likelihood(model(X_test))
idx = np.argsort(y_test.numpy())
with torch.no_grad():
lower, upper = y_pred.confidence_region()
if debug:
f, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.plot(y_test.numpy()[idx], y_pred.mean.numpy()[idx], 'k*')
ax.fill_between(y_test.numpy()[idx], lower.numpy()[idx], upper.numpy()[idx],
alpha=0.5)
plt.show()
if debug:
# Compare user-defined parameter sets within the emulator
tbias_set = (np.arange(-7,4,0.5)).reshape(-1,1)
kp_set = np.zeros(tbias_set.shape) + 1
ddf_set = np.zeros(tbias_set.shape) + 0.0041
modelprms_set = np.hstack((tbias_set, kp_set, ddf_set))
modelprms_set_norm = (modelprms_set - X_mean) / X_std
y_set_norm = model(torch.tensor(modelprms_set_norm).to(torch.float)).mean.detach().numpy()
y_set = y_set_norm * y_std + y_mean
f, ax = plt.subplots(1, 1, figsize=(4, 4))
kp_1_idx = np.where(sims_df['kp'] == 1)[0]
ax.plot(sims_df.loc[kp_1_idx,'tbias'], sims_df.loc[kp_1_idx,y_cn])
ax.plot(tbias_set,y_set,'.')
ax.set_xlabel('tbias (degC)')
if y_cn == 'mb_mwea':
ax.set_ylabel('PyGEM MB (mwea)')
elif y_cn == 'nbinyrs_negmbclim':
ax.set_ylabel('nbinyrs_negmbclim (-)')
plt.show()
# Compare the modeled and emulated mass balances
y_em_norm = model(torch.tensor(X_norm).to(torch.float)).mean.detach().numpy()
y_em = y_em_norm * y_std + y_mean
f, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.plot(y,y_em,'.')
ax.plot([y.min(),y.max()], [y.min(), y.max()])
if y_cn == 'mb_mwea':
ax.set_xlabel('emulator MB (mwea)')
ax.set_ylabel('PyGEM MB (mwea)')
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
elif y_cn == 'nbinyrs_negmbclim':
ax.set_xlabel('emulator nbinyrs_negmbclim (-)')
ax.set_ylabel('PyGEM nbinyrs_negmbclim (-)')
plt.show()
# ----- EXPORT EMULATOR -----
# Save emulator (model state, x_train, y_train, etc.)
em_mod_fn = glacier_str + '-emulator-' + y_cn + '.pth'
em_mod_fp = em_fp + 'models/' + glacier_str.split('.')[0].zfill(2) + '/'
if not os.path.exists(em_mod_fp):
os.makedirs(em_mod_fp, exist_ok=True)
torch.save(model.state_dict(), em_mod_fp + em_mod_fn)
# Extra required datasets
em_extra_dict = {'X_train': X_train,
'X_mean': X_mean,
'X_std': X_std,
'y_train': y_train,
'y_mean': y_mean,
'y_std': y_std}
em_extra_fn = em_mod_fn.replace('.pth','_extra.pkl')
with open(em_mod_fp + em_extra_fn, 'wb') as f:
pickle.dump(em_extra_dict, f)
return X_train, X_mean, X_std, y_train, y_mean, y_std, likelihood, model
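# Example (sketch; the glacier string and dataframe are illustrative): build an emulator from a simulations
# dataframe with columns ['tbias','kp','ddfsnow','mb_mwea'] and query it at a new parameter set.
#     X_train, X_mean, X_std, y_train, y_mean, y_std, likelihood, gp = create_emulator('1.00570', sims_df, 'mb_mwea')
#     x_norm = (np.array([[1.0, 1.2, 0.0041]]) - X_mean) / X_std       # order follows X_cns: tbias, kp, ddfsnow
#     mb_mwea = gp(torch.tensor(x_norm).to(torch.float)).mean.detach().numpy()[0] * y_std + y_mean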
#%%
def main(list_packed_vars):
"""
Model simulation
Parameters
----------
list_packed_vars : list
list of packed variables that enable the use of parallels
Returns
-------
netcdf files of the simulation output (specific output is dependent on the output option)
"""
# Unpack variables
count = list_packed_vars[0]
glac_no = list_packed_vars[1]
gcm_name = list_packed_vars[2]
parser = getparser()
args = parser.parse_args()
time_start = time.time()
if args.debug == 1:
debug = True
else:
debug = False
# ===== LOAD GLACIERS =====
main_glac_rgi = modelsetup.selectglaciersrgitable(glac_no=glac_no)
# ===== TIME PERIOD =====
dates_table = modelsetup.datesmodelrun(
startyear=pygem_prms.ref_startyear, endyear=pygem_prms.ref_endyear, spinupyears=pygem_prms.ref_spinupyears,
option_wateryear=pygem_prms.ref_wateryear)
# ===== LOAD CLIMATE DATA =====
# Climate class
assert gcm_name in ['ERA5', 'ERA-Interim'], 'Error: Calibration not set up for ' + gcm_name
gcm = class_climate.GCM(name=gcm_name)
# Air temperature [degC]
gcm_temp, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi, dates_table)
if pygem_prms.option_ablation == 2 and gcm_name in ['ERA5']:
gcm_tempstd, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.tempstd_fn, gcm.tempstd_vn,
main_glac_rgi, dates_table)
else:
gcm_tempstd = np.zeros(gcm_temp.shape)
# Precipitation [m]
gcm_prec, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi, dates_table)
# Elevation [m asl]
gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)
# Lapse rate [degC m-1]
gcm_lr, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi, dates_table)
# ===== LOOP THROUGH GLACIERS TO RUN CALIBRATION =====
for glac in range(main_glac_rgi.shape[0]):
print(main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId'])
# if debug or glac == 0 or glac == main_glac_rgi.shape[0]:
# print(gcm_name,':', main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId'])
# Select subsets of data
glacier_rgi_table = main_glac_rgi.loc[main_glac_rgi.index.values[glac], :]
glacier_str = '{0:0.5f}'.format(glacier_rgi_table['RGIId_float'])
# ===== Load glacier data: area (km2), ice thickness (m), width (km) =====
try:
# if not glacier_rgi_table['TermType'] in [1,5] or pygem_prms.ignore_calving:
gdir = single_flowline_glacier_directory(glacier_str, logging_level='CRITICAL')
# else:
# gdir = single_flowline_glacier_directory_with_calving(glacier_str)
fls = gdir.read_pickle('inversion_flowlines')
glacier_area = fls[0].widths_m * fls[0].dx_meter
# Add climate data to glacier directory
gdir.historical_climate = {'elev': gcm_elev[glac],
'temp': gcm_temp[glac,:],
'tempstd': gcm_tempstd[glac,:],
'prec': gcm_prec[glac,:],
'lr': gcm_lr[glac,:]}
gdir.dates_table = dates_table
# ----- Calibration data -----
try:
mbdata_fn = gdir.get_filepath('mb_obs')
with open(mbdata_fn, 'rb') as f:
gdir.mbdata = pickle.load(f)
# Load data
mb_obs_mwea = gdir.mbdata['mb_mwea']
mb_obs_mwea_err = gdir.mbdata['mb_mwea_err']
# Add time indices consistent with dates_table for mb calculations
t1_year = gdir.mbdata['t1_datetime'].year
t1_month = gdir.mbdata['t1_datetime'].month
t2_year = gdir.mbdata['t2_datetime'].year
t2_month = gdir.mbdata['t2_datetime'].month
t1_idx = dates_table[(t1_year == dates_table['year']) & (t1_month == dates_table['month'])].index.values[0]
t2_idx = dates_table[(t2_year == dates_table['year']) & (t2_month == dates_table['month'])].index.values[0]
# Record indices
gdir.mbdata['t1_idx'] = t1_idx
gdir.mbdata['t2_idx'] = t2_idx
if debug:
print(' mb_data (mwea): ' + str(np.round(mb_obs_mwea,2)) + ' +/- ' + str(np.round(mb_obs_mwea_err,2)))
except:
gdir.mbdata = None
except:
fls = None
# ----- Mass balance model ------
if (fls is not None) and (gdir.mbdata is not None) and (glacier_area.sum() > 0):
modelprms = {'kp': pygem_prms.kp,
'tbias': pygem_prms.tbias,
'ddfsnow': pygem_prms.ddfsnow,
'ddfice': pygem_prms.ddfice,
'tsnow_threshold': pygem_prms.tsnow_threshold,
'precgrad': pygem_prms.precgrad}
# ===== CALIBRATION OPTIONS =====
# 'MCMC' : use MCMC method to determine posterior probability distributions of the three parameters tbias,
# ddfsnow and kp. Then create an ensemble of parameter sets evenly sampled from these
# distributions, and output these sets of parameters and their corresponding mass balances to be
# used in the simulations.
if pygem_prms.option_calibration == 'MCMC':
# ===== Define functions needed for MCMC method =====
def run_MCMC(gdir,
kp_disttype=pygem_prms.kp_disttype,
kp_gamma_alpha=None, kp_gamma_beta=None,
kp_lognorm_mu=None, kp_lognorm_tau=None,
kp_mu=None, kp_sigma=None, kp_bndlow=None, kp_bndhigh=None,
kp_start=None,
tbias_disttype=pygem_prms.tbias_disttype,
tbias_mu=None, tbias_sigma=None, tbias_bndlow=None, tbias_bndhigh=None,
tbias_start=None,
ddfsnow_disttype=pygem_prms.ddfsnow_disttype,
ddfsnow_mu=pygem_prms.ddfsnow_mu, ddfsnow_sigma=pygem_prms.ddfsnow_sigma,
ddfsnow_bndlow=pygem_prms.ddfsnow_bndlow, ddfsnow_bndhigh=pygem_prms.ddfsnow_bndhigh,
ddfsnow_start=pygem_prms.ddfsnow_start,
iterations=10, mcmc_burn_no=pygem_prms.mcmc_burn_no, thin=pygem_prms.thin_interval,
tune_interval=1000, step=None, tune_throughout=True, save_interval=None,
burn_till_tuned=False, stop_tuning_after=5,
verbose=0, progress_bar=args.progress_bar, dbname=None,
use_potentials=True, mb_max_loss=None):
"""
Runs the MCMC algorithm.
Runs the MCMC algorithm by setting the prior distributions and calibrating the probability
distributions of three model parameters for the mass balance function.
Parameters
----------
kp_disttype : str
Distribution type of precipitation factor (either 'lognormal', 'uniform', or 'custom')
kp_lognorm_mu, kp_lognorm_tau : float
Lognormal mean and tau (1/variance) of precipitation factor
kp_mu, kp_sigma, kp_bndlow, kp_bndhigh, kp_start : float
Mean, stdev, lower bound, upper bound, and start value of precipitation factor
tbias_disttype : str
Distribution type of tbias (either 'truncnormal' or 'uniform')
tbias_mu, tbias_sigma, tbias_bndlow, tbias_bndhigh, tbias_start : float
Mean, stdev, lower bound, upper bound, and start value of temperature bias
ddfsnow_disttype : str
Distribution type of degree day factor of snow (either 'truncnormal' or 'uniform')
ddfsnow_mu, ddfsnow_sigma, ddfsnow_bndlow, ddfsnow_bndhigh, ddfsnow_start : float
Mean, stdev, lower bound, upper bound, and start value of degree day factor of snow
iterations : int
Total number of iterations to do (default 10).
mcmc_burn_no : int
Variables will not be tallied until this many iterations are complete (default 0).
thin : int
Variables will be tallied at intervals of this many iterations (default 1).
tune_interval : int
Step methods will be tuned at intervals of this many iterations (default 1000).
step : str
Choice of step method to use (default metropolis-hastings).
tune_throughout : boolean
If true, tuning will continue after the burnin period; otherwise tuning will halt at the end of
the burnin period (default True).
save_interval : int or None
If given, the model state will be saved at intervals of this many iterations (default None).
burn_till_tuned: boolean
If True the Sampler will burn samples until all step methods are tuned. A tuned step method is
one that was not tuned for the last `stop_tuning_after` tuning intervals. The burn-in phase will
have a minimum of 'burn' iterations but could be longer if tuning is needed. After the phase is
done the sampler will run for another (iter - burn) iterations, and will tally the samples
according to the 'thin' argument. This means that the total number of iterations is updated
throughout the sampling procedure. If True, it also overrides the tune_throughout argument, so
no step method will be tuned when samples are being tallied (default False).
stop_tuning_after: int
The number of successive untuned tuning intervals that must be reached for the burn-in
phase to be done (if burn_till_tuned is True) (default 5).
verbose : int
An integer controlling the verbosity of the model's output for debugging (default 0).
progress_bar : boolean
Display progress bar while sampling (default True).
dbname : str
Choice of database name the sample should be saved to (default None).
use_potentials : Boolean
Boolean switch to use potential functions to further constrain the likelihood function
mb_max_loss : float
Mass balance [mwea] at which the glacier completely melts
Returns
-------
pymc.MCMC.MCMC
Returns a model that contains sample traces of tbias, ddfsnow, kp and massbalance. These
samples can be accessed by calling the trace attribute. For example:
model.trace('ddfsnow')[:]
gives the trace of ddfsnow values.
A trace, or Markov Chain, is an array of values output by the MCMC simulation which defines
the posterior probability distribution of the variable at hand.
"""
# ===== EMULATORS FOR FAST PROCESSING =====
# Emulator filepath
em_mod_fp = pygem_prms.emulator_fp + 'models/' + glacier_str.split('.')[0].zfill(2) + '/'
# ----- EMULATOR: Mass balance -----
em_mb_fn = glacier_str + '-emulator-mb_mwea.pth'
if not os.path.exists(em_mod_fp + em_mb_fn):
(X_train, X_mean, X_std, y_train, y_mean, y_std, likelihood, em_model_mb) = (
create_emulator(glacier_str, sims_df, y_cn='mb_mwea'))
else:
# ----- LOAD EMULATOR -----
# This is required for the supercomputer such that resources aren't stolen from other cpus
torch.set_num_threads(1)
state_dict = torch.load(em_mod_fp + em_mb_fn)
emulator_extra_fn = em_mb_fn.replace('.pth','_extra.pkl')
with open(em_mod_fp + emulator_extra_fn, 'rb') as f:
emulator_extra_dict = pickle.load(f)
X_train = emulator_extra_dict['X_train']
X_mean = emulator_extra_dict['X_mean']
X_std = emulator_extra_dict['X_std']
y_train = emulator_extra_dict['y_train']
y_mean = emulator_extra_dict['y_mean']
y_std = emulator_extra_dict['y_std']
# initialize likelihood and model
likelihood = gpytorch.likelihoods.GaussianLikelihood()
# Create a new GP model
em_model_mb = ExactGPModel(X_train, y_train, likelihood)
em_model_mb.load_state_dict(state_dict)
em_model_mb.eval()
# Mass balance emulator function
def run_emulator_mb(modelprms):
""" Run the emulator
"""
modelprms_1d_norm = ((np.array([modelprms['tbias'], modelprms['kp'], modelprms['ddfsnow']]) -
X_mean) / X_std)
modelprms_2d_norm = modelprms_1d_norm.reshape(1,3)
mb_mwea_norm = em_model_mb(
torch.tensor(modelprms_2d_norm).to(torch.float)).mean.detach().numpy()[0]
mb_mwea = mb_mwea_norm * y_std + y_mean
return mb_mwea
# # ----- EMULATOR: nbinyrs_negmbclim -----
# em_meltyrs_fn = glacier_str + '-emulator-nbinyrs_negmbclim.pth'
# if not os.path.exists(em_mod_fp + em_meltyrs_fn):
# (X_train_meltyrs, X_mean_meltyrs, X_std_meltyrs,
# y_train_meltyrs, y_mean_meltyrs, y_std_meltyrs,
# likelihood_meltyrs, em_model_meltyrs) = (
# create_emulator(glacier_str, sims_df, y_cn='nbinyrs_negmbclim'))
# else:
# # ----- LOAD EMULATOR -----
# # This is required for the supercomputer such that resources aren't stolen from other cpus
# torch.set_num_threads(1)
#
# state_dict_meltyrs = torch.load(em_mod_fp + em_meltyrs_fn)
#
# em_meltyrs_extra_fn = em_meltyrs_fn.replace('.pth','_extra.pkl')
# with open(em_mod_fp + em_meltyrs_extra_fn, 'rb') as f:
# em_meltyrs_extra_dict = pickle.load(f)
#
# X_train_meltyrs = em_meltyrs_extra_dict['X_train']
# X_mean_meltyrs = em_meltyrs_extra_dict['X_mean']
# X_std_meltyrs = em_meltyrs_extra_dict['X_std']
# y_train_meltyrs = em_meltyrs_extra_dict['y_train']
# y_mean_meltyrs = em_meltyrs_extra_dict['y_mean']
# y_std_meltyrs = em_meltyrs_extra_dict['y_std']
#
# # initialize likelihood and model
# likelihood_meltyrs = gpytorch.likelihoods.GaussianLikelihood()
#
# # Create a new GP model
# em_model_meltyrs = ExactGPModel(X_train_meltyrs, y_train_meltyrs, likelihood_meltyrs)
# em_model_meltyrs.load_state_dict(state_dict_meltyrs)
# em_model_meltyrs.eval()
#
# # Mass balance emulator function
# def run_emulator_meltyrs(modelprms):
# """ Run the emulator to get nbinyrs_negmbclim """
# modelprms_1d_norm = ((np.array([modelprms['tbias'], modelprms['kp'], modelprms['ddfsnow']]) -
# X_mean_meltyrs) / X_std_meltyrs)
# modelprms_2d_norm = modelprms_1d_norm.reshape(1,3)
# nbinyrs_negmbclim_norm = em_model_meltyrs(
# torch.tensor(modelprms_2d_norm).to(torch.float)).mean.detach().numpy()[0]
# nbinyrs_negmbclim = nbinyrs_negmbclim_norm * y_std_meltyrs + y_mean_meltyrs
# return nbinyrs_negmbclim
# Rough estimate of minimum elevation mass balance function
def calc_mb_total_minelev(modelprms):
""" Approximate estimate of the mass balance at minimum elevation """
fl = fls[0]
min_elev = fl.surface_h.min()
glacier_gcm_temp = gdir.historical_climate['temp']
glacier_gcm_prec = gdir.historical_climate['prec']
glacier_gcm_lr = gdir.historical_climate['lr']
glacier_gcm_elev = gdir.historical_climate['elev']
# Temperature using gcm and glacier lapse rates
# T_bin = T_gcm + lr_gcm * (z_ref - z_gcm) + lr_glac * (z_bin - z_ref) + tempchange
T_minelev = (glacier_gcm_temp + glacier_gcm_lr *
(glacier_rgi_table.loc[pygem_prms.option_elev_ref_downscale] - glacier_gcm_elev) +
glacier_gcm_lr *
(min_elev - glacier_rgi_table.loc[pygem_prms.option_elev_ref_downscale]) +
modelprms['tbias'])
# Precipitation using precipitation factor and precipitation gradient
# P_bin = P_gcm * prec_factor * (1 + prec_grad * (z_bin - z_ref))
P_minelev = (glacier_gcm_prec * modelprms['kp'] * (1 + modelprms['precgrad'] * (min_elev -
glacier_rgi_table.loc[pygem_prms.option_elev_ref_downscale])))
# Accumulation using tsnow_threshold
Acc_minelev = np.zeros(P_minelev.shape)
Acc_minelev[T_minelev <= modelprms['tsnow_threshold']] = (
P_minelev[T_minelev <= modelprms['tsnow_threshold']])
# Melt
# energy available for melt [degC day]
melt_energy_available = T_minelev * dates_table['daysinmonth'].values
melt_energy_available[melt_energy_available < 0] = 0
# assume all snow melt because anything more would melt underlying ice in lowermost bin
# SNOW MELT [m w.e.]
Melt_minelev = modelprms['ddfsnow'] * melt_energy_available
# Total mass balance over entire period at minimum elevation
mb_total_minelev = (Acc_minelev - Melt_minelev).sum()
return mb_total_minelev
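# calc_mb_total_minelev is used below (in the starting-condition check and the must_melt potential) to
# require at least some melt at the glacier terminus; a positive total implies no melt at the lowest bin.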
# ===== CHECK STARTING CONDITIONS (adjust tbias as needed) =====
# Test that the initial model parameters provide a good starting condition
modelprms['kp'] = kp_start
modelprms['tbias'] = tbias_start
modelprms['ddfsnow'] = ddfsnow_start
# check starting mass balance is not less than the maximum mass loss
mb_mwea_start = run_emulator_mb(modelprms)
tbias_step = 0.1
while mb_mwea_start < mb_max_loss:
modelprms['tbias'] = modelprms['tbias'] - tbias_step
mb_mwea_start = run_emulator_mb(modelprms)
# print('tbias:', modelprms['tbias'], mb_mwea_start)
# check melting occurs for starting conditions
mb_total_minelev_start = calc_mb_total_minelev(modelprms)
tbias_smallstep = 0.01
while mb_total_minelev_start > 0 and mb_mwea_start > mb_max_loss:
modelprms['tbias'] = modelprms['tbias'] + tbias_smallstep
mb_total_minelev_start = calc_mb_total_minelev(modelprms)
mb_mwea_start = run_emulator_mb(modelprms)
# print('tbias:', modelprms['tbias'], mb_mwea_start, mb_total_minelev_start)
tbias_start = modelprms['tbias']
# ===== PRIOR DISTRIBUTIONS =====
# Priors dict to record values for export
priors_dict = {}
priors_dict['kp_disttype'] = kp_disttype
priors_dict['tbias_disttype'] = tbias_disttype
priors_dict['ddfsnow_disttype'] = ddfsnow_disttype
# Precipitation factor [-]
if kp_disttype == 'gamma':
kp = pymc.Gamma('kp', alpha=kp_gamma_alpha, beta=kp_gamma_beta, value=kp_start)
priors_dict['kp_gamma_alpha'] = kp_gamma_alpha
priors_dict['kp_gamma_beta'] = kp_gamma_beta
elif kp_disttype =='lognormal':
# lognormal distribution (roughly 0.3 to 3)
kp_start = np.exp(kp_start)
kp = pymc.Lognormal('kp', mu=kp_lognorm_mu, tau=kp_lognorm_tau, value=kp_start)
priors_dict['kp_lognorm_mu'] = kp_lognorm_mu
priors_dict['kp_lognorm_tau'] = kp_lognorm_tau
elif kp_disttype == 'uniform':
kp = pymc.Uniform('kp', lower=kp_bndlow, upper=kp_bndhigh, value=kp_start)
priors_dict['kp_bndlow'] = kp_bndlow
priors_dict['kp_bndhigh'] = kp_bndhigh
# Temperature bias [degC]
if tbias_disttype == 'normal':
tbias = pymc.Normal('tbias', mu=tbias_mu, tau=1/(tbias_sigma**2), value=tbias_start)
priors_dict['tbias_mu'] = tbias_mu
priors_dict['tbias_sigma'] = tbias_sigma
elif tbias_disttype =='truncnormal':
tbias = pymc.TruncatedNormal('tbias', mu=tbias_mu, tau=1/(tbias_sigma**2),
a=tbias_bndlow, b=tbias_bndhigh, value=tbias_start)
priors_dict['tbias_mu'] = tbias_mu
priors_dict['tbias_sigma'] = tbias_sigma
priors_dict['tbias_bndlow'] = tbias_bndlow
priors_dict['tbias_bndhigh'] = tbias_bndhigh
elif tbias_disttype =='uniform':
tbias = pymc.Uniform('tbias', lower=tbias_bndlow, upper=tbias_bndhigh, value=tbias_start)
priors_dict['tbias_bndlow'] = tbias_bndlow
priors_dict['tbias_bndhigh'] = tbias_bndhigh
# Degree day factor of snow [mwe degC-1 d-1]
# always truncated normal distribution with mean 0.0041 mwe degC-1 d-1 and standard deviation of
# 0.0015 (Braithwaite, 2008), since it's based on data; uniform should only be used for testing
if ddfsnow_disttype == 'truncnormal':
ddfsnow = pymc.TruncatedNormal('ddfsnow', mu=ddfsnow_mu, tau=1/(ddfsnow_sigma**2),
a=ddfsnow_bndlow, b=ddfsnow_bndhigh, value=ddfsnow_start)
priors_dict['ddfsnow_mu'] = ddfsnow_mu
priors_dict['ddfsnow_sigma'] = ddfsnow_sigma
priors_dict['ddfsnow_bndlow'] = ddfsnow_bndlow
priors_dict['ddfsnow_bndhigh'] = ddfsnow_bndhigh
elif ddfsnow_disttype == 'uniform':
ddfsnow = pymc.Uniform('ddfsnow', lower=ddfsnow_bndlow, upper=ddfsnow_bndhigh,
value=ddfsnow_start)
priors_dict['ddfsnow_bndlow'] = ddfsnow_bndlow
priors_dict['ddfsnow_bndhigh'] = ddfsnow_bndhigh
# ===== DETERMINISTIC FUNCTION ====
# Define deterministic function for MCMC model based on our a priori probability distributions.
@deterministic(plot=False)
def massbal(tbias=tbias, kp=kp, ddfsnow=ddfsnow):
""" Likelihood function for mass balance [mwea] based on model parameters """
modelprms_copy = modelprms.copy()
if tbias is not None:
modelprms_copy['tbias'] = float(tbias)
if kp is not None:
modelprms_copy['kp'] = float(kp)
if ddfsnow is not None:
modelprms_copy['ddfsnow'] = float(ddfsnow)
modelprms_copy['ddfice'] = modelprms_copy['ddfsnow'] / pygem_prms.ddfsnow_iceratio
# mb_mwea = mb_mwea_calc(gdir, modelprms_copy, glacier_rgi_table, fls=fls)
mb_mwea = run_emulator_mb(modelprms_copy)
return mb_mwea
# ===== POTENTIAL FUNCTIONS =====
# Potential functions are used to impose additional constraints on the model
@pymc.potential
def mb_max(mb_max_loss=mb_max_loss, massbal=massbal):
""" Model parameters cannot completely melt the glacier """
if massbal < mb_max_loss:
return -np.inf
else:
return 0
@pymc.potential
def must_melt(tbias=tbias, kp=kp, ddfsnow=ddfsnow):
"""
Likelihood function for mass balance [mwea] based on model parameters
"""
modelprms_copy = modelprms.copy()
if tbias is not None:
modelprms_copy['tbias'] = float(tbias)
if kp is not None:
modelprms_copy['kp'] = float(kp)
if ddfsnow is not None:
modelprms_copy['ddfsnow'] = float(ddfsnow)
modelprms_copy['ddfice'] = modelprms_copy['ddfsnow'] / pygem_prms.ddfsnow_iceratio
mb_total_minelev = calc_mb_total_minelev(modelprms_copy)
if mb_total_minelev < 0:
return 0
else:
return -np.inf
# @pymc.potential
# def must_melt(tbias=tbias, kp=kp, ddfsnow=ddfsnow):
# """
# Likelihood function for mass balance [mwea] based on model parameters
# """
# modelprms_copy = modelprms.copy()
# if tbias is not None:
# modelprms_copy['tbias'] = float(tbias)
# if kp is not None:
# modelprms_copy['kp'] = float(kp)
# if ddfsnow is not None:
# modelprms_copy['ddfsnow'] = float(ddfsnow)
# modelprms_copy['ddfice'] = modelprms_copy['ddfsnow'] / pygem_prms.ddfsnow_iceratio
## nbinyears_negmbclim = mb_mwea_calc(gdir, modelprms_copy, glacier_rgi_table, fls=fls,
## return_tbias_mustmelt=True)
# nbinyears_negmbclim = run_emulator_meltyrs(modelprms_copy)
# if nbinyears_negmbclim > 0:
# return 0
# else:
# return -np.inf
# ===== OBSERVED DATA =====
# Observed data defines the observed likelihood of mass balances (based on geodetic observations)
obs_massbal = pymc.Normal('obs_massbal', mu=massbal, tau=(1/(mb_obs_mwea_err**2)),
value=float(mb_obs_mwea), observed=True)
# Set model
if use_potentials:
model = pymc.MCMC([{'kp':kp, 'tbias':tbias, 'ddfsnow':ddfsnow,
'massbal':massbal, 'obs_massbal':obs_massbal}, mb_max, must_melt])
else:
model = pymc.MCMC({'kp':kp, 'tbias':tbias, 'ddfsnow':ddfsnow,
'massbal':massbal, 'obs_massbal':obs_massbal})
# Step method (if changed from default)
# Adaptive metropolis is supposed to perform block update, i.e., update all model parameters
# together based on their covariance, which would reduce autocorrelation; however, tests show
# it does not make a difference.
if step == 'am':
model.use_step_method(pymc.AdaptiveMetropolis, [kp, tbias, ddfsnow], delay = 1000)
# Sample
if args.progress_bar == 1:
progress_bar_switch = True
else:
progress_bar_switch = False
model.sample(iter=iterations, burn=mcmc_burn_no, thin=thin,
tune_interval=tune_interval, tune_throughout=tune_throughout,
save_interval=save_interval, verbose=verbose, progress_bar=progress_bar_switch)
# Close database
model.db.close()
return model, priors_dict
try:
# ===== RUNNING MCMC =====
# Prior distributions (specified or informed by regions)
if pygem_prms.priors_reg_fullfn is not None:
# Load priors
priors_df = pd.read_csv(pygem_prms.priors_reg_fullfn)
priors_idx = np.where((priors_df.O1Region == glacier_rgi_table['O1Region']) &
(priors_df.O2Region == glacier_rgi_table['O2Region']))[0][0]
# Precipitation factor priors
kp_gamma_alpha = priors_df.loc[priors_idx, 'kp_alpha']
kp_gamma_beta = priors_df.loc[priors_idx, 'kp_beta']
# Temperature bias priors
tbias_mu = priors_df.loc[priors_idx, 'tbias_mean']
tbias_sigma = priors_df.loc[priors_idx, 'tbias_std']
else:
# Precipitation factor priors
kp_gamma_alpha = pygem_prms.kp_gamma_alpha
kp_gamma_beta = pygem_prms.kp_gamma_beta
# Temperature bias priors
tbias_mu = pygem_prms.tbias_mu
tbias_sigma = pygem_prms.tbias_sigma
modelprms_export = {}
# fit the MCMC model
for n_chain in range(0,pygem_prms.n_chains):
if debug:
print('\n', glacier_str, ' chain' + str(n_chain))
if n_chain == 0:
# Starting values: middle
tbias_start = tbias_mu
kp_start = kp_gamma_alpha / kp_gamma_beta
ddfsnow_start = pygem_prms.ddfsnow_mu
elif n_chain == 1:
# Starting values: lowest
tbias_start = tbias_mu - 1.96 * tbias_sigma
ddfsnow_start = pygem_prms.ddfsnow_mu - 1.96 * pygem_prms.ddfsnow_sigma
kp_start = stats.gamma.ppf(0.05,kp_gamma_alpha, scale=1/kp_gamma_beta)
elif n_chain == 2:
# Starting values: high
tbias_start = tbias_mu + 1.96 * tbias_sigma
ddfsnow_start = pygem_prms.ddfsnow_mu + 1.96 * pygem_prms.ddfsnow_sigma
kp_start = stats.gamma.ppf(0.95,kp_gamma_alpha, scale=1/kp_gamma_beta)
# Determine bounds to check TC starting values and estimate maximum mass loss
modelprms['kp'] = kp_start
modelprms['ddfsnow'] = ddfsnow_start
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
# ----- TEMPERATURE BIAS BOUNDS -----
# Selects from emulator sims dataframe
sims_fp = pygem_prms.emulator_fp + 'sims/' + glacier_str.split('.')[0].zfill(2) + '/'
sims_fn = glacier_str + '-' + str(pygem_prms.emulator_sims) + '_emulator_sims.csv'
sims_df = pd.read_csv(sims_fp + sims_fn)
sims_df_subset = sims_df.loc[sims_df['kp']==1, :]
tbias_bndhigh = sims_df_subset['tbias'].max()
tbias_bndlow = sims_df_subset['tbias'].min()
if debug:
print('tbias_bndlow:', np.round(tbias_bndlow,2), 'tbias_bndhigh:', np.round(tbias_bndhigh,2))
# Adjust tbias_init based on bounds
if tbias_start > tbias_bndhigh:
tbias_start = tbias_bndhigh
elif tbias_start < tbias_bndlow:
tbias_start = tbias_bndlow
# ----- Mass balance max loss -----
# Maximum mass loss [mwea] (based on consensus ice thickness estimate)
# consensus_mass has units of kg
if os.path.exists(gdir.get_filepath('consensus_mass')):
with open(gdir.get_filepath('consensus_mass'), 'rb') as f:
consensus_mass = pickle.load(f)
else:
# Mean global ice thickness from Farinotti et al. (2019) used for missing consensus glaciers
ice_thickness_constant = 224
consensus_mass = glacier_rgi_table.Area * 1e6 * ice_thickness_constant * pygem_prms.density_ice
mb_max_loss = (-1 * consensus_mass / pygem_prms.density_water / gdir.rgi_area_m2 /
(gdir.dates_table.shape[0] / 12))
if debug:
print('\ntbias_start:', np.round(tbias_start,3), 'pf_start:', np.round(kp_start,3),
'ddf_start:', np.round(ddfsnow_start,4), 'mb_max_loss:', np.round(mb_max_loss,2))
model, priors_dict = run_MCMC(
gdir,
iterations=pygem_prms.mcmc_sample_no, mcmc_burn_no=pygem_prms.mcmc_burn_no,
step=pygem_prms.mcmc_step,
kp_gamma_alpha=kp_gamma_alpha, kp_gamma_beta=kp_gamma_beta, kp_start=kp_start,
tbias_mu=tbias_mu, tbias_sigma=tbias_sigma, tbias_start=tbias_start,
ddfsnow_start=ddfsnow_start, mb_max_loss=mb_max_loss,
tbias_bndlow=tbias_bndlow, tbias_bndhigh=tbias_bndhigh,
use_potentials=True)
if debug:
print('\nacceptance ratio:', model.step_method_dict[next(iter(model.stochastics))][0].ratio)
print('mb_mwea_mean:', np.round(np.mean(model.trace('massbal')[:]),3),
'mb_mwea_std:', np.round(np.std(model.trace('massbal')[:]),3),
'\nmb_obs_mean:', np.round(mb_obs_mwea,3), 'mb_obs_std:', np.round(mb_obs_mwea_err,3))
# Store data from model to be exported
chain_str = 'chain_' + str(n_chain)
modelprms_export['tbias'] = {chain_str : list(model.trace('tbias')[:])}
modelprms_export['kp'] = {chain_str : list(model.trace('kp')[:])}
modelprms_export['ddfsnow'] = {chain_str : list(model.trace('ddfsnow')[:])}
modelprms_export['ddfice'] = {chain_str : list(model.trace('ddfsnow')[:] /
pygem_prms.ddfsnow_iceratio)}
modelprms_export['mb_mwea'] = {chain_str : list(model.trace('massbal')[:])}
# Export model parameters
modelprms_export['precgrad'] = [pygem_prms.precgrad]
modelprms_export['tsnow_threshold'] = [pygem_prms.tsnow_threshold]
modelprms_export['mb_obs_mwea'] = [mb_obs_mwea]
modelprms_export['mb_obs_mwea_err'] = [mb_obs_mwea_err]
modelprms_export['priors'] = priors_dict
modelprms_fn = glacier_str + '-modelprms_dict.pkl'
modelprms_fp = (pygem_prms.output_filepath + 'calibration/' + glacier_str.split('.')[0].zfill(2)
+ '/')
if not os.path.exists(modelprms_fp):
os.makedirs(modelprms_fp, exist_ok=True)
modelprms_fullfn = modelprms_fp + modelprms_fn
if os.path.exists(modelprms_fullfn):
with open(modelprms_fullfn, 'rb') as f:
modelprms_dict = pickle.load(f)
modelprms_dict[pygem_prms.option_calibration] = modelprms_export
else:
modelprms_dict = {pygem_prms.option_calibration: modelprms_export}
with open(modelprms_fullfn, 'wb') as f:
pickle.dump(modelprms_dict, f)
# MCMC LOG SUCCESS
mcmc_good_fp = pygem_prms.output_filepath + 'mcmc_success/' + glacier_str.split('.')[0].zfill(2) + '/'
if not os.path.exists(mcmc_good_fp):
os.makedirs(mcmc_good_fp, exist_ok=True)
txt_fn_good = glacier_str + "-mcmc_success.txt"
with open(mcmc_good_fp + txt_fn_good, "w") as text_file:
text_file.write(glacier_str + ' successfully exported mcmc results')
except:
# MCMC LOG FAILURE
mcmc_fail_fp = pygem_prms.output_filepath + 'mcmc_fail/' + glacier_str.split('.')[0].zfill(2) + '/'
if not os.path.exists(mcmc_fail_fp):
os.makedirs(mcmc_fail_fp, exist_ok=True)
txt_fn_fail = glacier_str + "-mcmc_fail.txt"
with open(mcmc_fail_fp + txt_fn_fail, "w") as text_file:
text_file.write(glacier_str + ' failed to complete MCMC')
#%% ===== HUSS AND HOCK (2015) CALIBRATION =====
elif pygem_prms.option_calibration == 'HH2015':
tbias_init = pygem_prms.tbias_init
tbias_step = pygem_prms.tbias_step
kp_init = pygem_prms.kp_init
kp_bndlow = pygem_prms.kp_bndlow
kp_bndhigh = pygem_prms.kp_bndhigh
ddfsnow_init = pygem_prms.ddfsnow_init
ddfsnow_bndlow = pygem_prms.ddfsnow_bndlow
ddfsnow_bndhigh = pygem_prms.ddfsnow_bndhigh
# assert pygem_prms.ddfsnow_iceratio == 0.5, 'Error: ddfsnow_iceratio for HH2015 must be 0.5'
# ----- Initialize model parameters -----
modelprms['tbias'] = tbias_init
modelprms['kp'] = kp_init
modelprms['ddfsnow'] = ddfsnow_init
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
continue_param_search = True
# ----- FUNCTIONS: COMPUTATIONALLY FASTER AND MORE ROBUST THAN SCIPY MINIMIZE -----
def update_bnds(prm2opt, prm_bndlow, prm_bndhigh, prm_mid, mb_mwea_low, mb_mwea_high, mb_mwea_mid,
debug=False):
# Bisection update: narrow the bounds toward the side that still brackets the observed mass balance
if prm2opt == 'kp':
if mb_mwea_mid < mb_obs_mwea:
prm_bndlow_new, mb_mwea_low_new = prm_mid, mb_mwea_mid
prm_bndhigh_new, mb_mwea_high_new = prm_bndhigh, mb_mwea_high
else:
prm_bndlow_new, mb_mwea_low_new = prm_bndlow, mb_mwea_low
prm_bndhigh_new, mb_mwea_high_new = prm_mid, mb_mwea_mid
elif prm2opt == 'ddfsnow':
if mb_mwea_mid < mb_obs_mwea:
prm_bndlow_new, mb_mwea_low_new = prm_bndlow, mb_mwea_low
prm_bndhigh_new, mb_mwea_high_new = prm_mid, mb_mwea_mid
else:
prm_bndlow_new, mb_mwea_low_new = prm_mid, mb_mwea_mid
prm_bndhigh_new, mb_mwea_high_new = prm_bndhigh, mb_mwea_high
elif prm2opt == 'tbias':
if mb_mwea_mid < mb_obs_mwea:
prm_bndlow_new, mb_mwea_low_new = prm_bndlow, mb_mwea_low
prm_bndhigh_new, mb_mwea_high_new = prm_mid, mb_mwea_mid
else:
prm_bndlow_new, mb_mwea_low_new = prm_mid, mb_mwea_mid
prm_bndhigh_new, mb_mwea_high_new = prm_bndhigh, mb_mwea_high
prm_mid_new = (prm_bndlow_new + prm_bndhigh_new) / 2
modelprms[prm2opt] = prm_mid_new
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
mb_mwea_mid_new = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
if debug:
print(prm2opt + '_bndlow:', np.round(prm_bndlow_new,2),
'mb_mwea_low:', np.round(mb_mwea_low_new,2))
print(prm2opt + '_bndhigh:', np.round(prm_bndhigh_new,2),
'mb_mwea_high:', np.round(mb_mwea_high_new,2))
print(prm2opt + '_mid:', np.round(prm_mid_new,2),
'mb_mwea_mid:', np.round(mb_mwea_mid_new,3))
return (prm_bndlow_new, prm_bndhigh_new, prm_mid_new,
mb_mwea_low_new, mb_mwea_high_new, mb_mwea_mid_new)
def single_param_optimizer(modelprms_subset, mb_obs_mwea, prm2opt=None,
kp_bnds=None, tbias_bnds=None, ddfsnow_bnds=None,
mb_mwea_threshold=0.005, debug=False):
assert prm2opt is not None, 'For single_param_optimizer you must specify parameter to optimize'
if prm2opt == 'kp':
prm_bndlow = kp_bnds[0]
prm_bndhigh = kp_bnds[1]
modelprms['tbias'] = modelprms_subset['tbias']
modelprms['ddfsnow'] = modelprms_subset['ddfsnow']
elif prm2opt == 'ddfsnow':
prm_bndlow = ddfsnow_bnds[0]
prm_bndhigh = ddfsnow_bnds[1]
modelprms['kp'] = modelprms_subset['kp']
modelprms['tbias'] = modelprms_subset['tbias']
elif prm2opt == 'tbias':
prm_bndlow = tbias_bnds[0]
prm_bndhigh = tbias_bnds[1]
modelprms['kp'] = modelprms_subset['kp']
modelprms['ddfsnow'] = modelprms_subset['ddfsnow']
# Lower bound
modelprms[prm2opt] = prm_bndlow
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
mb_mwea_low = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# Upper bound
modelprms[prm2opt] = prm_bndhigh
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
mb_mwea_high = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# Middle bound
prm_mid = (prm_bndlow + prm_bndhigh) / 2
modelprms[prm2opt] = prm_mid
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
mb_mwea_mid = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
if debug:
print(prm2opt + '_bndlow:', np.round(prm_bndlow,2), 'mb_mwea_low:', np.round(mb_mwea_low,2))
print(prm2opt + '_bndhigh:', np.round(prm_bndhigh,2), 'mb_mwea_high:', np.round(mb_mwea_high,2))
print(prm2opt + '_mid:', np.round(prm_mid,2), 'mb_mwea_mid:', np.round(mb_mwea_mid,3))
# Optimize the model parameter
if np.absolute(mb_mwea_low - mb_obs_mwea) <= mb_mwea_threshold:
modelprms[prm2opt] = prm_bndlow
mb_mwea_mid = mb_mwea_low
elif np.absolute(mb_mwea_high - mb_obs_mwea) <= mb_mwea_threshold:
modelprms[prm2opt] = prm_bndhigh
mb_mwea_mid = mb_mwea_high
else:
ncount = 0
while np.absolute(mb_mwea_mid - mb_obs_mwea) > mb_mwea_threshold:
if debug:
print('\n ncount:', ncount)
(prm_bndlow, prm_bndhigh, prm_mid, mb_mwea_low, mb_mwea_high, mb_mwea_mid) = (
update_bnds(prm2opt, prm_bndlow, prm_bndhigh, prm_mid,
mb_mwea_low, mb_mwea_high, mb_mwea_mid, debug=debug))
ncount += 1
return modelprms, mb_mwea_mid
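# The routine above is essentially a bisection on a (roughly monotonic) mass-balance response to the
# chosen parameter. Toy sketch of the same idea on an arbitrary increasing function f (illustrative only):
#     lo, hi, target = 0.0, 10.0, 3.0
#     while abs(f((lo + hi) / 2) - target) > 1e-3:
#         mid = (lo + hi) / 2
#         lo, hi = (mid, hi) if f(mid) < target else (lo, mid)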
# ===== ROUND 1: PRECIPITATION FACTOR ======
if debug:
print('Round 1:')
if debug:
print(glacier_str + ' kp: ' + str(np.round(modelprms['kp'],2)) +
' ddfsnow: ' + str(np.round(modelprms['ddfsnow'],4)) +
' tbias: ' + str(np.round(modelprms['tbias'],2)))
# Lower bound
modelprms['kp'] = kp_bndlow
mb_mwea_kp_low = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# Upper bound
modelprms['kp'] = kp_bndhigh
mb_mwea_kp_high = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# Optimal precipitation factor
if mb_obs_mwea < mb_mwea_kp_low:
kp_opt = kp_bndlow
mb_mwea = mb_mwea_kp_low
elif mb_obs_mwea > mb_mwea_kp_high:
kp_opt = kp_bndhigh
mb_mwea = mb_mwea_kp_high
else:
# Single parameter optimizer (computationally more efficient and less prone to fail)
modelprms_subset = {'kp':kp_init, 'ddfsnow': ddfsnow_init, 'tbias': tbias_init}
kp_bnds = (kp_bndlow, kp_bndhigh)
modelprms_opt, mb_mwea = single_param_optimizer(
modelprms_subset, mb_obs_mwea, prm2opt='kp', kp_bnds=kp_bnds, debug=debug)
kp_opt = modelprms_opt['kp']
continue_param_search = False
# Update parameter values
modelprms['kp'] = kp_opt
if debug:
print(' kp:', np.round(kp_opt,2), 'mb_mwea:', np.round(mb_mwea,2))
# ===== ROUND 2: DEGREE-DAY FACTOR OF SNOW ======
if continue_param_search:
if debug:
print('Round 2:')
# Lower bound
modelprms['ddfsnow'] = ddfsnow_bndlow
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
mb_mwea_ddflow = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# Upper bound
modelprms['ddfsnow'] = ddfsnow_bndhigh
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
mb_mwea_ddfhigh = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# Optimal degree-day factor of snow
if mb_obs_mwea < mb_mwea_ddfhigh:
ddfsnow_opt = ddfsnow_bndhigh
mb_mwea = mb_mwea_ddfhigh
elif mb_obs_mwea > mb_mwea_ddflow:
ddfsnow_opt = ddfsnow_bndlow
mb_mwea = mb_mwea_ddflow
else:
# Single parameter optimizer (computationally more efficient and less prone to fail)
modelprms_subset = {'kp':kp_opt, 'ddfsnow': ddfsnow_init, 'tbias': tbias_init}
ddfsnow_bnds = (ddfsnow_bndlow, ddfsnow_bndhigh)
modelprms_opt, mb_mwea = single_param_optimizer(
modelprms_subset, mb_obs_mwea, prm2opt='ddfsnow', ddfsnow_bnds=ddfsnow_bnds, debug=debug)
ddfsnow_opt = modelprms_opt['ddfsnow']
continue_param_search = False
# Update parameter values
modelprms['ddfsnow'] = ddfsnow_opt
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
if debug:
print(' ddfsnow:', np.round(ddfsnow_opt,4), 'mb_mwea:', np.round(mb_mwea,2))
else:
ddfsnow_opt = modelprms['ddfsnow']
# ===== ROUND 3: TEMPERATURE BIAS ======
if continue_param_search:
if debug:
print('Round 3:')
# ----- TEMPBIAS: max accumulation -----
# Lower temperature bound based on no positive temperatures
# Temperature at the lowest bin
# T_bin = T_gcm + lr_gcm * (z_ref - z_gcm) + lr_glac * (z_bin - z_ref) + tbias
tbias_max_acc = (-1 * (gdir.historical_climate['temp'] + gdir.historical_climate['lr'] *
(fls[0].surface_h.min() - gdir.historical_climate['elev'])).max())
tbias_bndlow = tbias_max_acc
modelprms['tbias'] = tbias_bndlow
mb_mwea = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
if debug:
print(' tbias_bndlow:', np.round(tbias_bndlow,2), 'mb_mwea:', np.round(mb_mwea,2))
# Upper bound
while mb_mwea > mb_obs_mwea and modelprms['tbias'] < 20:
modelprms['tbias'] = modelprms['tbias'] + tbias_step
mb_mwea = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
if debug:
print(' tc:', np.round(modelprms['tbias'],2), 'mb_mwea:', np.round(mb_mwea,2))
tbias_bndhigh = modelprms['tbias']
# Single parameter optimizer (computationally more efficient and less prone to fail)
modelprms_subset = {'kp':kp_opt,
'ddfsnow': ddfsnow_opt,
'tbias': modelprms['tbias'] - tbias_step/2}
tbias_bnds = (tbias_bndhigh-tbias_step, tbias_bndhigh)
modelprms_opt, mb_mwea = single_param_optimizer(
modelprms_subset, mb_obs_mwea, prm2opt='tbias', tbias_bnds=tbias_bnds, debug=debug)
# Update parameter values
tbias_opt = modelprms_opt['tbias']
modelprms['tbias'] = tbias_opt
if debug:
print(' tbias:', np.round(tbias_opt,3), 'mb_mwea:', np.round(mb_mwea,3))
else:
tbias_opt = modelprms['tbias']
# Export model parameters
modelprms = modelprms_opt
for vn in ['ddfice', 'ddfsnow', 'kp', 'precgrad', 'tbias', 'tsnow_threshold']:
modelprms[vn] = [modelprms[vn]]
modelprms['mb_mwea'] = [mb_mwea]
modelprms['mb_obs_mwea'] = [mb_obs_mwea]
modelprms['mb_obs_mwea_err'] = [mb_obs_mwea_err]
modelprms_fn = glacier_str + '-modelprms_dict.pkl'
modelprms_fp = (pygem_prms.output_filepath + 'calibration/' + glacier_str.split('.')[0].zfill(2)
+ '/')
if not os.path.exists(modelprms_fp):
os.makedirs(modelprms_fp, exist_ok=True)
modelprms_fullfn = modelprms_fp + modelprms_fn
if os.path.exists(modelprms_fullfn):
with open(modelprms_fullfn, 'rb') as f:
modelprms_dict = pickle.load(f)
modelprms_dict[pygem_prms.option_calibration] = modelprms
else:
modelprms_dict = {pygem_prms.option_calibration: modelprms}
with open(modelprms_fullfn, 'wb') as f:
pickle.dump(modelprms_dict, f)
#%% ===== MODIFIED HUSS AND HOCK (2015) CALIBRATION =====
# used in Rounce et al. (2020; MCMC paper)
# - precipitation factor, then temperature bias (no ddfsnow)
# - parameter ranges differ from HH2015
elif pygem_prms.option_calibration == 'HH2015mod':
tbias_init = pygem_prms.tbias_init
tbias_step = pygem_prms.tbias_step
kp_init = pygem_prms.kp_init
kp_bndlow = pygem_prms.kp_bndlow
kp_bndhigh = pygem_prms.kp_bndhigh
ddfsnow_init = pygem_prms.ddfsnow_init
# ----- Initialize model parameters -----
modelprms['tbias'] = tbias_init
modelprms['kp'] = kp_init
modelprms['ddfsnow'] = ddfsnow_init
modelprms['ddfice'] = modelprms['ddfsnow'] / pygem_prms.ddfsnow_iceratio
# ----- FUNCTIONS -----
def objective(modelprms_subset):
""" Objective function for mass balance data (mimize difference between model and observation).
Parameters
----------
modelprms_subset : list of model parameters [kp, ddfsnow, tbias]
"""
# Subset of model parameters used to reduce number of constraints required
modelprms['kp'] = modelprms_subset[0]
modelprms['tbias'] = tbias_init
if len(modelprms_subset) > 1:
modelprms['tbias'] = modelprms_subset[1]
# Mass balance
mb_mwea = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
# Difference with observation (mwea)
mb_dif_mwea_abs = abs(mb_obs_mwea - mb_mwea)
return mb_dif_mwea_abs
def run_objective(modelprms_init, mb_obs_mwea, modelprms_bnds=None,
run_opt=True, eps_opt=pygem_prms.eps_opt,
ftol_opt=pygem_prms.ftol_opt):
""" Run the optimization for the single glacier objective function.
Parameters
----------
modelprms_init : list of model parameters to calibrate [kp, ddfsnow, tbias]
kp_bnds, tbias_bnds, ddfsnow_bnds, precgrad_bnds : tuples (lower & upper bounds)
run_opt : Boolean switch to run the optimization or bypass it and run with the initial parameters
Returns
-------
modelparams : model parameters dict and specific mass balance (mwea)
"""
# Run the optimization
if run_opt:
modelprms_opt = minimize(objective, modelprms_init, method=pygem_prms.method_opt,
bounds=modelprms_bnds, options={'ftol':ftol_opt, 'eps':eps_opt})
# Record the optimized parameters
modelprms_subset = modelprms_opt.x
else:
modelprms_subset = modelprms.copy()
modelprms['kp'] = modelprms_subset[0]
if len(modelprms_subset) == 2:
modelprms['tbias'] = modelprms_subset[1]
# Re-run the optimized parameters in order to see the mass balance
mb_mwea = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
return modelprms, mb_mwea
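# Shape of a typical call (sketch; the initial value and bounds are illustrative):
#     modelprms_out, mb_mwea = run_objective([kp_init], mb_obs_mwea, modelprms_bnds=((kp_bndlow, kp_bndhigh),))
# scipy.optimize.minimize drives `objective`, which returns |mb_obs_mwea - mb_mwea| in m w.e. a-1.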
# ----- Temperature bias bounds -----
tbias_bndhigh = 0
# Tbias lower bound based on no positive temperatures
tbias_bndlow = (-1 * (gdir.historical_climate['temp'] + gdir.historical_climate['lr'] *
(fls[0].surface_h.min() - gdir.historical_climate['elev'])).max())
modelprms['tbias'] = tbias_bndlow
mb_mwea = mb_mwea_calc(gdir, modelprms, glacier_rgi_table, fls=fls)
if debug:
print(' tbias_bndlow:', np.round(tbias_bndlow,2))
import logging
import numpy as np
from pkg_resources import parse_version
import ibllib.io.raw_data_loaders as raw
from ibllib.io.extractors.base import BaseBpodTrialsExtractor, run_extractor_classes
from ibllib.misc import version
_logger = logging.getLogger('ibllib')
class FeedbackType(BaseBpodTrialsExtractor):
"""
Get the feedback that was delivered to subject.
**Optional:** saves _ibl_trials.feedbackType.npy
Checks in raw datafile for error and reward state.
Will raise an error if more than one of the mutually exclusive states have
been triggered.
Sets feedbackType to -1 if error state was triggered (applies to no-go)
Sets feedbackType to +1 if reward state was triggered
"""
save_names = '_ibl_trials.feedbackType.npy'
var_names = 'feedbackType'
def _extract(self):
feedbackType = np.empty(len(self.bpod_trials))
feedbackType.fill(np.nan)
reward = []
error = []
no_go = []
for t in self.bpod_trials:
reward.append(~np.isnan(t['behavior_data']['States timestamps']['reward'][0][0]))
error.append(~np.isnan(t['behavior_data']['States timestamps']['error'][0][0]))
no_go.append(~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0]))
if not all(np.sum([reward, error, no_go], axis=0) == np.ones(len(self.bpod_trials))):
raise ValueError
feedbackType[reward] = 1
feedbackType[error] = -1
feedbackType[no_go] = -1
feedbackType = feedbackType.astype('int64')
return feedbackType
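# Illustration (toy data) of the mutual-exclusivity check above: each trial must have exactly one of
# reward / error / no_go set.
#     reward, error, no_go = [True, False], [False, True], [False, False]
#     np.sum([reward, error, no_go], axis=0)    # -> array([1, 1]); any other value raises ValueError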
class ContrastLR(BaseBpodTrialsExtractor):
"""
Get left and right contrasts from raw datafile. Optionally, saves
_ibl_trials.contrastLeft.npy and _ibl_trials.contrastRight.npy to alf folder.
Uses signed_contrast to create left and right contrast vectors.
"""
save_names = ('_ibl_trials.contrastLeft.npy', '_ibl_trials.contrastRight.npy')
var_names = ('contrastLeft', 'contrastRight')
def _extract(self):
contrastLeft = np.array([t['contrast']['value'] if np.sign(
t['position']) < 0 else np.nan for t in self.bpod_trials])
contrastRight = np.array([t['contrast']['value'] if np.sign(
t['position']) > 0 else np.nan for t in self.bpod_trials])
return contrastLeft, contrastRight
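# Toy illustration of the split above (the sign of the position decides the side; numbers are made up):
#     position = [-35, 35], contrast value = [0.25, 0.5]
#     -> contrastLeft = [0.25, nan], contrastRight = [nan, 0.5]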
class ProbabilityLeft(BaseBpodTrialsExtractor):
save_names = '_ibl_trials.probabilityLeft.npy'
var_names = 'probabilityLeft'
def _extract(self, **kwargs):
return np.array([t['stim_probability_left'] for t in self.bpod_trials])
class Choice(BaseBpodTrialsExtractor):
"""
Get the subject's choice in every trial.
**Optional:** saves _ibl_trials.choice.npy to alf folder.
Uses signed_contrast and trial_correct.
-1 is a CCW turn (towards the left)
+1 is a CW turn (towards the right)
0 is a no_go trial
If a trial is correct the choice of the animal was the inverse of the sign
of the position.
>>> choice[t] = -np.sign(position[t]) if trial_correct[t]
"""
save_names = '_ibl_trials.choice.npy'
var_names = 'choice'
def _extract(self):
stim_side = np.array([np.sign(t['position']) for t in self.bpod_trials])
trial_correct = np.array([t['trial_correct'] for t in self.bpod_trials])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in self.bpod_trials])
choice = stim_side.copy()
choice[trial_correct] = -choice[trial_correct]
choice[trial_nogo] = 0
choice = choice.astype(int)
return choice
class RepNum(BaseBpodTrialsExtractor):
"""
Count the consecutive repeated trials.
**Optional:** saves _ibl_trials.repNum.npy to alf folder.
Creates trial_repeated from trial['contrast']['type'] == 'RepeatContrast'
>>> trial_repeated = [0, 1, 1, 0, 1, 0, 1, 1, 1, 0]
>>> repNum = [0, 1, 2, 0, 1, 0, 1, 2, 3, 0]
"""
save_names = '_ibl_trials.repNum.npy'
var_names = 'repNum'
def _extract(self):
trial_repeated = np.array(
[t['contrast']['type'] == 'RepeatContrast' for t in self.bpod_trials])
trial_repeated = trial_repeated.astype(int)
repNum = trial_repeated.copy()
c = 0
for i in range(len(trial_repeated)):
if trial_repeated[i] == 0:
c = 0
repNum[i] = 0
continue
c += 1
repNum[i] = c
return repNum
class RewardVolume(BaseBpodTrialsExtractor):
"""
Load reward volume delivered for each trial.
**Optional:** saves _ibl_trials.rewardVolume.npy
Uses the trial's reward amount on correct trials (0 otherwise) to determine the volume delivered.
"""
save_names = '_ibl_trials.rewardVolume.npy'
var_names = 'rewardVolume'
def _extract(self):
trial_volume = [x['reward_amount']
if x['trial_correct'] else 0 for x in self.bpod_trials]
reward_volume = np.array(trial_volume).astype(np.float64)
assert len(reward_volume) == len(self.bpod_trials)
return reward_volume
class FeedbackTimes(BaseBpodTrialsExtractor):
"""
Get the times the water or error tone was delivered to the animal.
**Optional:** saves _ibl_trials.feedback_times.npy
Gets reward and error state init times vectors,
checks that the intersection of nans is empty, then
merges the 2 vectors.
"""
save_names = '_ibl_trials.feedback_times.npy'
var_names = 'feedback_times'
@staticmethod
def get_feedback_times_lt5(session_path, data=False):
if not data:
data = raw.load_data(session_path)
rw_times = [tr['behavior_data']['States timestamps']['reward'][0][0]
for tr in data]
err_times = [tr['behavior_data']['States timestamps']['error'][0][0]
for tr in data]
nogo_times = [tr['behavior_data']['States timestamps']['no_go'][0][0]
for tr in data]
assert sum(np.isnan(rw_times) &
np.isnan(err_times) & np.isnan(nogo_times)) == 0
merge = np.array([np.array(times)[~np.isnan(times)] for times in
zip(rw_times, err_times, nogo_times)]).squeeze()
return np.array(merge)
@staticmethod
def get_feedback_times_ge5(session_path, data=False):
        # get error and no-go trigger times -- look for the trial's BNC2High events -- verify
        # there are only 2 onset times (go tone and noise) and select the 2nd/-1, OR select the one
        # that is greater than the no-go or error trial onset time
if not data:
data = raw.load_data(session_path)
missed_bnc2 = 0
rw_times, err_sound_times, merge = [np.zeros([len(data), ]) for _ in range(3)]
for ind, tr in enumerate(data):
st = tr['behavior_data']['Events timestamps'].get('BNC2High', None)
if not st:
st = np.array([np.nan, np.nan])
missed_bnc2 += 1
# xonar soundcard duplicates events, remove consecutive events too close together
st = np.delete(st, np.where(np.diff(st) < 0.020)[0] + 1)
rw_times[ind] = tr['behavior_data']['States timestamps']['reward'][0][0]
# get the error sound only if the reward is nan
err_sound_times[ind] = st[-1] if st.size >= 2 and np.isnan(rw_times[ind]) else np.nan
if missed_bnc2 == len(data):
_logger.warning('No BNC2 for feedback times, filling error trials NaNs')
merge *= np.nan
merge[~np.isnan(rw_times)] = rw_times[~np.isnan(rw_times)]
merge[~np.isnan(err_sound_times)] = err_sound_times[~np.isnan(err_sound_times)]
return merge
def _extract(self):
# Version check
if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
merge = self.get_feedback_times_ge5(self.session_path, data=self.bpod_trials)
else:
merge = self.get_feedback_times_lt5(self.session_path, data=self.bpod_trials)
return np.array(merge)
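# --- Editorial sketch ------------------------------------------------------------
# The lt5 merge above relies on exactly one of the reward / error / no_go state
# times being non-NaN on each trial; the merged vector keeps that single value.
# Toy data only, not taken from a real session.
_rw, _err, _nogo = [np.nan, 1.2, np.nan], [0.8, np.nan, np.nan], [np.nan, np.nan, 2.5]
_merged = [np.array(times)[~np.isnan(times)][0] for times in zip(_rw, _err, _nogo)]
assert _merged == [0.8, 1.2, 2.5]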
class Intervals(BaseBpodTrialsExtractor):
"""
    Trial start to trial end. Trial end includes 1 or 2 seconds after feedback
    (depending on the feedback) and 0.5 seconds of ITI.
    **Optional:** saves _ibl_trials.intervals.npy
    Uses the corrected Trial start and Trial end timestamp values from PyBpod.
"""
save_names = '_ibl_trials.intervals.npy'
var_names = 'intervals'
def _extract(self):
starts = [t['behavior_data']['Trial start timestamp'] for t in self.bpod_trials]
ends = [t['behavior_data']['Trial end timestamp'] for t in self.bpod_trials]
return np.array([starts, ends]).T
class ResponseTimes(BaseBpodTrialsExtractor):
"""
Time (in absolute seconds from session start) when a response was recorded.
**Optional:** saves _ibl_trials.response_times.npy
Uses the timestamp of the end of the closed_loop state.
"""
save_names = '_ibl_trials.response_times.npy'
var_names = 'response_times'
def _extract(self):
rt = np.array([tr['behavior_data']['States timestamps']['closed_loop'][0][1]
for tr in self.bpod_trials])
return rt
class ItiDuration(BaseBpodTrialsExtractor):
"""
Calculate duration of iti from state timestamps.
**Optional:** saves _ibl_trials.iti_duration.npy
Uses Trial end timestamp and get_response_times to calculate iti.
"""
save_names = '_ibl_trials.itiDuration.npy'
var_names = 'iti_dur'
def _extract(self):
rt, _ = ResponseTimes(self.session_path).extract(
save=False, bpod_trials=self.bpod_trials, settings=self.settings)
ends = np.array([t['behavior_data']['Trial end timestamp'] for t in self.bpod_trials])
iti_dur = ends - rt
return iti_dur
class GoCueTriggerTimes(BaseBpodTrialsExtractor):
"""
Get trigger times of goCue from state machine.
    The current software solution for triggering sounds uses PyBpod soft codes;
    delays can be on the order of tens of ms. This is the time when the command
    to play the sound was executed. To measure the actual sound onset time, use
    the xonar soundcard sync pulse instead (latencies may vary).
"""
save_names = '_ibl_trials.goCueTrigger_times.npy'
var_names = 'goCueTrigger_times'
def _extract(self):
if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
goCue = np.array([tr['behavior_data']['States timestamps']
['play_tone'][0][0] for tr in self.bpod_trials])
else:
goCue = np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in self.bpod_trials])
return goCue
class TrialType(BaseBpodTrialsExtractor):
save_names = '_ibl_trials.type.npy'
    var_names = 'trial_type'
def _extract(self):
trial_type = []
for tr in self.bpod_trials:
if ~np.isnan(tr["behavior_data"]["States timestamps"]["reward"][0][0]):
trial_type.append(1)
elif ~np.isnan(tr["behavior_data"]["States timestamps"]["error"][0][0]):
trial_type.append(-1)
elif ~np.isnan(tr["behavior_data"]["States timestamps"]["no_go"][0][0]):
trial_type.append(0)
else:
_logger.warning("Trial is not in set {-1, 0, 1}, appending NaN to trialType")
trial_type.append(np.nan)
return np.array(trial_type)
class GoCueTimes(BaseBpodTrialsExtractor):
"""
    Get the goCue sound onset times from the BNC2 TTL events.
    The current software solution for triggering sounds uses PyBpod soft codes;
    delays relative to the time the command to play the sound was executed can be
    on the order of 10-100 ms. To measure an accurate onset time, get the sound
    onset either from a microphone or from the xonar soundcard sync pulse; the
    new setup developed by Sanworks guarantees a set latency (in testing).
"""
save_names = '_ibl_trials.goCue_times.npy'
var_names = 'goCue_times'
def _extract(self):
go_cue_times = np.zeros([len(self.bpod_trials), ])
for ind, tr in enumerate(self.bpod_trials):
if raw.get_port_events(tr, 'BNC2'):
bnchigh = tr['behavior_data']['Events timestamps'].get('BNC2High', None)
if bnchigh:
go_cue_times[ind] = bnchigh[0]
continue
bnclow = tr['behavior_data']['Events timestamps'].get('BNC2Low', None)
if bnclow:
go_cue_times[ind] = bnclow[0] - 0.1
continue
go_cue_times[ind] = np.nan
else:
go_cue_times[ind] = np.nan
nmissing = np.sum(np.isnan(go_cue_times))
# Check if all stim_syncs have failed to be detected
if np.all(np.isnan(go_cue_times)):
_logger.warning(
f'{self.session_path}: Missing ALL !! BNC2 TTLs ({nmissing} trials)')
        # Check if any stim_sync has failed to be detected
elif np.any(np.isnan(go_cue_times)):
_logger.warning(f'{self.session_path}: Missing BNC2 TTLs on {nmissing} trials')
return go_cue_times
class IncludedTrials(BaseBpodTrialsExtractor):
save_names = '_ibl_trials.included.npy'
var_names = 'included'
def _extract(self):
if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
trials_included = self.get_included_trials_ge5(
data=self.bpod_trials, settings=self.settings)
else:
trials_included = self.get_included_trials_lt5(data=self.bpod_trials)
return trials_included
@staticmethod
def get_included_trials_lt5(data=False):
trials_included = np.array([True for t in data])
return trials_included
@staticmethod
def get_included_trials_ge5(data=False, settings=False):
trials_included = np.array([True for t in data])
if ('SUBJECT_DISENGAGED_TRIGGERED' in settings.keys() and settings[
'SUBJECT_DISENGAGED_TRIGGERED'] is not False):
idx = settings['SUBJECT_DISENGAGED_TRIALNUM'] - 1
trials_included[idx:] = False
return trials_included
class ItiInTimes(BaseBpodTrialsExtractor):
var_names = 'itiIn_times'
def _extract(self):
if parse_version(self.settings["IBLRIG_VERSION_TAG"]) < parse_version("5.0.0"):
iti_in = np.ones(len(self.bpod_trials)) * np.nan
else:
iti_in = np.array(
[tr["behavior_data"]["States timestamps"]
["exit_state"][0][0] for tr in self.bpod_trials]
)
return iti_in
class ErrorCueTriggerTimes(BaseBpodTrialsExtractor):
var_names = 'errorCueTrigger_times'
def _extract(self):
errorCueTrigger_times = np.zeros(len(self.bpod_trials)) * np.nan
for i, tr in enumerate(self.bpod_trials):
nogo = tr["behavior_data"]["States timestamps"]["no_go"][0][0]
error = tr["behavior_data"]["States timestamps"]["error"][0][0]
if np.all(~np.isnan(nogo)):
errorCueTrigger_times[i] = nogo
elif np.all(~np.isnan(error)):
errorCueTrigger_times[i] = error
return errorCueTrigger_times
class StimFreezeTriggerTimes(BaseBpodTrialsExtractor):
var_names = 'stimFreezeTrigger_times'
def _extract(self):
if parse_version(self.settings["IBLRIG_VERSION_TAG"]) < parse_version("6.2.5"):
return np.ones(len(self.bpod_trials)) * np.nan
freeze_reward = np.array(
[
True
if np.all(~np.isnan(tr["behavior_data"]["States timestamps"]["freeze_reward"][0]))
else False
for tr in self.bpod_trials
]
)
freeze_error = np.array(
[
True
if np.all(~np.isnan(tr["behavior_data"]["States timestamps"]["freeze_error"][0]))
else False
for tr in self.bpod_trials
]
)
no_go = np.array(
[
True
if np.all(~np.isnan(tr["behavior_data"]["States timestamps"]["no_go"][0]))
else False
for tr in self.bpod_trials
]
)
assert (np.sum(freeze_error) + np.sum(freeze_reward) +
np.sum(no_go) == len(self.bpod_trials))
stimFreezeTrigger = np.array([])
for r, e, n, tr in zip(freeze_reward, freeze_error, no_go, self.bpod_trials):
if n:
stimFreezeTrigger = np.append(stimFreezeTrigger, np.nan)
continue
state = "freeze_reward" if r else "freeze_error"
stimFreezeTrigger = np.append(
stimFreezeTrigger, tr["behavior_data"]["States timestamps"][state][0][0]
)
return stimFreezeTrigger
class StimOffTriggerTimes(BaseBpodTrialsExtractor):
var_names = 'stimOffTrigger_times'
def _extract(self):
if parse_version(self.settings["IBLRIG_VERSION_TAG"]) >= parse_version("6.2.5"):
stim_off_trigger_state = "hide_stim"
elif parse_version(self.settings["IBLRIG_VERSION_TAG"]) >= parse_version("5.0.0"):
stim_off_trigger_state = "exit_state"
else:
stim_off_trigger_state = "trial_start"
stimOffTrigger_times = np.array(
[tr["behavior_data"]["States timestamps"][stim_off_trigger_state][0][0]
for tr in self.bpod_trials]
)
# If pre version 5.0.0 no specific nogo Off trigger was given, just return trial_starts
if stim_off_trigger_state == "trial_start":
return stimOffTrigger_times
no_goTrigger_times = np.array(
[tr["behavior_data"]["States timestamps"]["no_go"][0][0] for tr in self.bpod_trials]
)
        # Stim off triggers are either in their own state or in the no_go state if the
        # mouse did not move. If the stim_off_trigger_state always exists
        # (exit_state or trial_start), no NaNs will occur; NaNs might occur on the
        # last trial if the session was stopped after the response.
# if stim_off_trigger_state == "hide_stim":
# assert all(~np.isnan(no_goTrigger_times) == np.isnan(stimOffTrigger_times))
# Patch with the no_go states trig times
stimOffTrigger_times[~np.isnan(no_goTrigger_times)] = no_goTrigger_times[
~np.isnan(no_goTrigger_times)
]
return stimOffTrigger_times
class StimOnTriggerTimes(BaseBpodTrialsExtractor):
save_names = '_ibl_trials.stimOnTrigger_times.npy'
var_names = 'stimOnTrigger_times'
def _extract(self):
# Get the stim_on_state that triggers the onset of the stim
stim_on_state = np.array([tr['behavior_data']['States timestamps']
['stim_on'][0] for tr in self.bpod_trials])
return stim_on_state[:, 0].T
class StimOnTimes_deprecated(BaseBpodTrialsExtractor):
save_names = '_ibl_trials.stimOn_times.npy'
var_names = 'stimOn_times'
def _extract(self):
"""
Find the time of the state machine command to turn on the stim
(state stim_on start or rotary_encoder_event2)
Find the next frame change from the photodiode after that TS.
Screen is not displaying anything until then.
(Frame changes are in BNC1 High and BNC1 Low)
"""
# Version check
_logger.warning("Deprecation Warning: this is an old version of stimOn extraction."
"From version 5., use StimOnOffFreezeTimes")
if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
stimOn_times = self.get_stimOn_times_ge5(self.session_path, data=self.bpod_trials)
else:
stimOn_times = self.get_stimOn_times_lt5(self.session_path, data=self.bpod_trials)
return np.array(stimOn_times)
@staticmethod
def get_stimOn_times_ge5(session_path, data=False):
"""
Find first and last stim_sync pulse of the trial.
stimOn_times should be the first after the stim_on state.
(Stim updates are in BNC1High and BNC1Low - frame2TTL device)
Check that all trials have frame changes.
Find length of stim_on_state [start, stop].
        If either check fails, the HW device failed to detect the stim_sync square change;
        substitute that trial's missing or incorrect value with a NaN.
return stimOn_times
"""
if not data:
data = raw.load_data(session_path)
# Get all stim_sync events detected
stim_sync_all = [raw.get_port_events(tr, 'BNC1') for tr in data]
stim_sync_all = [np.array(x) for x in stim_sync_all]
# Get the stim_on_state that triggers the onset of the stim
stim_on_state = np.array([tr['behavior_data']['States timestamps']
['stim_on'][0] for tr in data])
stimOn_times = np.array([])
for sync, on, off in zip(
stim_sync_all, stim_on_state[:, 0], stim_on_state[:, 1]):
pulse = sync[np.where(np.bitwise_and((sync > on), (sync <= off)))]
if pulse.size == 0:
stimOn_times = np.append(stimOn_times, np.nan)
else:
stimOn_times = np.append(stimOn_times, pulse)
nmissing = np.sum(np.isnan(stimOn_times))
# Check if all stim_syncs have failed to be detected
if np.all(np.isnan(stimOn_times)):
_logger.error(f'{session_path}: Missing ALL BNC1 TTLs ({nmissing} trials)')
        # Check if any stim_sync has failed to be detected
if np.any(np.isnan(stimOn_times)):
_logger.warning(f'{session_path}: Missing BNC1 TTLs on {nmissing} trials')
return stimOn_times
@staticmethod
def get_stimOn_times_lt5(session_path, data=False):
"""
        Find the time of the state machine command to turn on the stim
        (state stim_on start or rotary_encoder_event2).
        Find the next frame change from the photodiode after that TS.
        The screen is not displaying anything until then.
        (Frame changes are in BNC1High and BNC1Low)
"""
if not data:
data = raw.load_data(session_path)
stim_on = []
bnc_h = []
bnc_l = []
for tr in data:
stim_on.append(tr['behavior_data']['States timestamps']['stim_on'][0][0])
if 'BNC1High' in tr['behavior_data']['Events timestamps'].keys():
bnc_h.append(np.array(tr['behavior_data']
['Events timestamps']['BNC1High']))
else:
bnc_h.append(np.array([np.NINF]))
if 'BNC1Low' in tr['behavior_data']['Events timestamps'].keys():
bnc_l.append(np.array(tr['behavior_data']
['Events timestamps']['BNC1Low']))
else:
bnc_l.append(np.array([np.NINF]))
stim_on = np.array(stim_on)
bnc_h = np.array(bnc_h, dtype=object)
bnc_l = np.array(bnc_l, dtype=object)
count_missing = 0
stimOn_times = np.zeros_like(stim_on)
for i in range(len(stim_on)):
hl = np.sort(np.concatenate([bnc_h[i], bnc_l[i]]))
stot = hl[hl > stim_on[i]]
if np.size(stot) == 0:
stot = np.array([np.nan])
count_missing += 1
stimOn_times[i] = stot[0]
if np.all(np.isnan(stimOn_times)):
_logger.error(f'{session_path}: Missing ALL BNC1 TTLs ({count_missing} trials)')
if count_missing > 0:
_logger.warning(f'{session_path}: Missing BNC1 TTLs on {count_missing} trials')
return np.array(stimOn_times)
class StimOnOffFreezeTimes(BaseBpodTrialsExtractor):
"""
Extracts stim on / off and freeze times from Bpod BNC1 detected fronts
"""
save_names = ("_ibl_trials.stimOn_times.npy", None, None)
var_names = ('stimOn_times', 'stimOff_times', 'stimFreeze_times')
def _extract(self):
choice = Choice(self.session_path).extract(
bpod_trials=self.bpod_trials, settings=self.settings, save=False
)[0]
f2TTL = [raw.get_port_events(tr, name="BNC1") for tr in self.bpod_trials]
stimOn_times = np.array([])
stimOff_times = np.array([])
stimFreeze_times = np.array([])
for tr in f2TTL:
if tr and len(tr) == 2:
stimOn_times =
|
np.append(stimOn_times, tr[0])
|
numpy.append
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from collections import namedtuple
import numpy as np
class Augmix(Augmentation):
tags = ['compositor', 'augmix_compose']
def __init__(self, severity=None, im_size=None, augmentation_list=[], width=3, max_depth=3, prob_coeff=1.0, random_depth=True, record=False, float_output=True):
super(Augmix, self).__init__(severity, im_size, record)
self.width = width
self.depth = max_depth
self.random_depth = random_depth
self.prob_coeff = prob_coeff
self.augs = augmentation_list
self.float_output = float_output
self.record_length = max([len(a.convert_to_numpy(a.sample_parameters())) for a in self.augs])\
if self.augs else 0
def transform(self, image, m, ws, aug_record):
if not self.augs:
return image
mix = np.zeros_like(image).astype(np.float32)
for i in range(self.width):
image_aug = image.copy()
for j in range(self.depth):
pos = self.depth * i + j
if aug_record[pos].idx < 0:
continue
op = self.augs[aug_record[pos].idx].transform
image_aug = op(image_aug, **(aug_record[pos].params))
mix += ws[i] * image_aug.astype(np.float32)
mixed = (1 - m) * image.astype(np.float32) + m * mix
if self.float_output:
return mixed
return mixed.astype(np.uint8)
def sample_parameters(self):
ws = np.float32(np.random.dirichlet([self.prob_coeff] * self.width))
m = np.float32(np.random.beta(self.prob_coeff, self.prob_coeff))
if not self.augs:
return { 'm' : m, 'ws' : ws, 'aug_record': []}
aug_idxs = np.random.randint(low=0, high=len(self.augs), size=self.width*self.depth)
if self.random_depth:
for i in range(self.width):
inverse_depth =
|
np.random.randint(1,self.depth+1)
|
numpy.random.randint
|
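# --- Editorial sketch ------------------------------------------------------------
# A minimal, standalone version of the AugMix-style mixing performed by the Augmix
# class above: Dirichlet weights combine `width` augmented copies of the image and
# a Beta-sampled coefficient m blends that mixture with the original image. The two
# placeholder "augmentations" below are hypothetical.
import numpy as np

def augmix_sketch(image, augmentations, width=3, prob_coeff=1.0):
    ws = np.float32(np.random.dirichlet([prob_coeff] * width))
    m = np.float32(np.random.beta(prob_coeff, prob_coeff))
    mix = np.zeros_like(image, dtype=np.float32)
    for i in range(width):
        aug = augmentations[np.random.randint(len(augmentations))]
        mix += ws[i] * aug(image).astype(np.float32)
    return (1 - m) * image.astype(np.float32) + m * mix

_img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
assert augmix_sketch(_img, [lambda x: x, lambda x: 255 - x]).shape == _img.shape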
import numpy as np
def generate(width=100, height=100, polygons=[]):
grid =
|
np.ones((height, width), dtype=np.uint8)
|
numpy.ones
|
# Define Function(s)
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
def txt_2_array(file_name):
f1 = open(file_name,'r')
# Read all lines
all_lines = f1.readlines()
f1.close()
    # Select from the fourth line to the end
first_row_with_data = 3
all_lines=all_lines[first_row_with_data:]
# Calculate number of rows
num_rows=len(all_lines)
num_cols = len( all_lines[0].split()[1::3] )
data_array=np.zeros( (num_rows,num_cols) )
# allocate line by line
for i in range(num_rows):
l=all_lines[i].split()[1::3]
data_array[i,:]=np.array(l)
return data_array
def name_2_dict(file_name):
# create dictionary with information
file_info = {'Mouse': file_name.split("_")[3], \
'Slice': int(file_name.split("_")[4][1]),\
'Year': int(file_name.split("_")[5]),\
'Month': int(file_name.split("_")[6]),\
'Day': int(file_name.split("_")[7])}
return file_info
def T2decay(xdata, T1,Mz):
R1=1.0/T1
return Mz * np.exp(-xdata*R1)
def fitT2(xdata,Ydata):
num_rois=Ydata.shape[1]
initial_guess=[1,.1]
T2_estimates=np.zeros((num_rois,1))
for i in range(num_rois):
ydata=Ydata[:,i] / Ydata[0,i]
pars_hat, cov = curve_fit(T2decay, xdata, ydata,p0=initial_guess,bounds=(0, 1.0))
T2_estimates[i,0]=pars_hat[0]
return T2_estimates
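# --- Editorial sketch ------------------------------------------------------------
# Synthetic single-ROI example of the fit above (toy values, not real data): the
# normalized decay Mz * exp(-x / T1) is recovered by curve_fit within the [0, 1] bounds.
_x = np.linspace(0.01, 0.5, 20)
_y = T2decay(_x, 0.08, 0.9) + np.random.normal(0, 0.005, _x.size)
_pars, _ = curve_fit(T2decay, _x, _y, p0=[1, .1], bounds=(0, 1.0))
# _pars[0] should be roughly 0.08 and _pars[1] roughly 0.9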
def name_2_df(file_name):
# create dataframe with information from file name
my_dict=name_2_dict(file_name)
my_df=pd.DataFrame()
my_df=my_df.from_dict(my_dict,orient='index').transpose()
return my_df
def normalize_data(DataMatrix,normalization_point):
rows,cols = DataMatrix.shape
newData = np.zeros_like(DataMatrix)
for row in range(rows):
newData[row,:]=DataMatrix[row,:]/DataMatrix[row,normalization_point]
return newData
def Lorentzian(sat_offset,Amp,Width,Center):
Width = Width**2; Width=Width/4
xdata = (sat_offset-Center)**2
return (Amp*Width) / (Width +xdata )
def Lorentzian2(sat_offset,a1,w1,c1,a2,w2,c2):
return Lorentzian(sat_offset,a1,w1,c1) + Lorentzian(sat_offset,a2,w2,c2)
def Lorentzian3(sat_offset,a1,w1,c1,a2,w2,c2,a3,w3,c3):
return Lorentzian(sat_offset,a1,w1,c1) + Lorentzian(sat_offset,a2,w2,c2) + Lorentzian(sat_offset,a3,w3,c3)
def Lscale(sat_offset,a1,w1,c1,a2,w2,c2,scale):
return Lorentzian2(sat_offset,a1,w1,c1,a2,w2,c2) + scale
def Lscale3(sat_offset,a1,w1,c1,a2,w2,c2,a3,w3,c3,scale):
return Lorentzian3(sat_offset,a1,w1,c1,a2,w2,c2,a3,w3,c3) + scale
def fit_L2_scale(offsets,ydata):
Signal=1-ydata
# fix xdata
xdata=offsets-offsets[Signal.argmax()]
# allocate fitting based on this
A10, W10, C10 = 0.90, 1, 0
A20, W20, C20 = .1, 1, -4
A1L, W1L, C1L = 0.5, .1, -.1
A2L, W2L, C2L = 0, .1, -6
A1U, W1U, C1U = 1.0, 5, +.1
A2U, W2U, C2U = 1.0, 10, -1.0
scale0, scaleL, scaleU = 0, -1, +1
initial_guess = [A10, W10, C10, A20, W20, C20, scale0]
lb = [A1L, W1L, C1L, A2L, W2L, C2L, scaleL]
ub = [A1U, W1U, C1U, A2U, W2U, C2U, scaleU]
p, cov = curve_fit(Lscale, xdata, Signal,p0=initial_guess,bounds=(lb, ub))
return p;
def fit_L3_scale(offsets,ydata):
Signal=1-ydata
# fix xdata
xdata=offsets-offsets[Signal.argmax()]
# allocate fitting based on this
A10, W10, C10 = 0.90, 1, 0
A20, W20, C20 = .1, 2, -4
A30, W30, C30 = .1, 1, +2
A1L, W1L, C1L = 0.5, .1, -.1
A2L, W2L, C2L = 0, .1, -6
A3L, W3L, C3L = 0, .1, +1
A1U, W1U, C1U = 1.0, 5, +.1
A2U, W2U, C2U = 1.0, 10, -1.0
A3U, W3U, C3U = 1.0, 5, +3.0
scale0, scaleL, scaleU = 0, -1, +1
initial_guess = [A10, W10, C10, A20, W20, C20, A30, W30, C30, scale0]
lb = [A1L, W1L, C1L, A2L, W2L, C2L, A3L, W3L, C3L, scaleL]
ub = [A1U, W1U, C1U, A2U, W2U, C2U, A3U, W3U, C3U, scaleU]
p, cov = curve_fit(Lscale3, xdata, Signal,p0=initial_guess,bounds=(lb, ub))
L=Lscale3(xdata,p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9])
rsq=
|
np.corrcoef(Signal,L)
|
numpy.corrcoef
|
import numpy as np
import scipy.stats as stats
import scipy.linalg
import pytest
import open_cp.kernels as testmod
import open_cp.data
import unittest.mock as mock
import shapely.geometry
def slow_gaussian_kernel_new(pts, mean, var):
"""Test case where `pts`, `mean`, `var` are all of shape 2."""
assert(len(pts.shape) == 2 and len(mean.shape) == 2 and len(var.shape) == 2)
space_dim = pts.shape[0]
num_pts = pts.shape[1]
num_samples = mean.shape[1]
assert(space_dim == mean.shape[0])
assert((space_dim, num_samples) == var.shape)
out = np.empty(num_pts)
for i in range(num_pts):
total = np.empty(num_samples)
for j in range(num_samples):
prod = np.empty(space_dim)
for k in range(space_dim):
v = var[k][j] * 2
prod[k] = np.exp(- (pts[k][i] - mean[k][j]) **
2 / v) / np.sqrt(np.pi * v)
            total[j] = np.prod(prod)
out[i] = np.mean(total)
return out
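# --- Editorial note ---------------------------------------------------------------
# An equivalent vectorized form of the triple loop above (a sketch for clarity,
# not used by the tests): broadcast over (space_dim, num_pts, num_samples).
def fast_gaussian_kernel_new(pts, mean, var):
    v = 2 * var[:, None, :]                    # (k, 1, M)
    diff = pts[:, :, None] - mean[:, None, :]  # (k, N, M)
    dens = np.exp(-diff ** 2 / v) / np.sqrt(np.pi * v)
    return np.mean(np.prod(dens, axis=0), axis=-1)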
def test_slow_gaussian_kernel_single_new():
pts = np.empty((1, 1))
pts[0][0] = 1
mean = np.empty((1, 1))
mean[0][0] = 0.5
var = np.empty((1, 1))
var[0][0] = 3
expected = np.exp(-0.25 / 6) / np.sqrt(6 * np.pi)
got = slow_gaussian_kernel_new(pts, mean, var)
np.testing.assert_allclose(expected, got)
def test_compare_GaussianKernel():
for k in range(1, 6):
for M in range(1, 6):
mean = np.random.random(size=(k,M))
var = 0.0001 + np.random.random(size=(k,M))**2
kernel = testmod.GaussianKernel(mean, var)
for N in range(1, 6):
pts = np.random.random(size=(k,N))
want = slow_gaussian_kernel_new(pts, mean, var)
got = kernel(pts)
print(k,M,N)
np.testing.assert_allclose(got, want)
# Single point case
pts = np.random.random(size=k)
want = slow_gaussian_kernel_new(pts[:,None], mean, var)[0]
got = kernel(pts)
print("Single point case k={}, M={}".format(k,M))
assert want == pytest.approx(got)
def test_compare_GaussianKernel_k1_case():
for M in range(1, 6):
mean = np.random.random(size=M)
var = 0.0001 + np.random.random(size=M)**2
kernel = testmod.GaussianKernel(mean, var)
for N in range(1, 6):
pts = np.random.random(size=N)
want = slow_gaussian_kernel_new(pts[None,:], mean[None,:], var[None,:])
got = kernel(pts)
print(M,N)
np.testing.assert_allclose(got, want)
# Single point case
print("Single point case, M={}".format(M))
pts = np.random.random()
want = slow_gaussian_kernel_new(np.asarray(pts)[None,None], mean[None,:], var[None,:])[0]
got = kernel(pts)
assert want == pytest.approx(got)
def test_1D_kth_distance():
coords = [0,1,2,3,6,7,9,15]
distances = testmod.compute_kth_distance(coords, k=3)
np.testing.assert_allclose(distances, [3,2,2,3,3,4,6,9])
def test_2D_kth_distance():
coords = [[0,0,1,1],[0,1,0,2]]
distances = testmod.compute_kth_distance(coords, k=2)
np.testing.assert_allclose(distances, [1,np.sqrt(2),np.sqrt(2),2])
def slow_kth_nearest(points, index):
"""(k, N) input. Returns ordered list [0,...] of distance to kth nearest point from index"""
if len(points.shape) == 1:
points = points[None, :]
pt = points[:, index]
distances = np.sqrt(np.sum((points - pt[:,None])**2, axis=0))
distances.sort()
return distances
def test_slow_kth_nearest():
pts = np.array([1,2,4,5,7,8,9])
got = slow_kth_nearest(pts, 0)
np.testing.assert_array_equal(got, [0,1,3,4,6,7,8])
got = slow_kth_nearest(pts, 3)
np.testing.assert_array_equal(got, [0,1,2,3,3,4,4])
got = slow_kth_nearest(pts, 4)
np.testing.assert_array_equal(got, [0,1,2,2,3,5,6])
pts = np.array([[0,0],[1,1],[0,1],[1,0],[2,3]]).T
got = slow_kth_nearest(pts, 0)
np.testing.assert_allclose(got, [0,1,1,np.sqrt(2),np.sqrt(13)])
got = slow_kth_nearest(pts, 1)
np.testing.assert_allclose(got, [0,1,1,np.sqrt(2),np.sqrt(5)])
def test_1d_kth_nearest():
    # In the 1D case we don't need to rescale
pts = np.random.random(size=20) * 20 - 10
for k in [1,2,3,4,5]:
distances = [slow_kth_nearest(pts, i)[k] for i in range(len(pts))]
def expected_kernel(x):
value = 0
for i, p in enumerate(pts):
value += stats.norm(loc=p, scale=distances[i]).pdf(x)
return value / len(pts)
kernel = testmod.kth_nearest_neighbour_gaussian_kde(pts, k=k)
test_points =
|
np.random.random(size=10)
|
numpy.random.random
|
import argparse
import pandas as pd
import numpy as np
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import re
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sequence", help="sequences to split into ha and na")
parser.add_argument("--output_fastas", nargs=2, help="FASTA files of split genomes")
args = parser.parse_args()
strains = []
genomes = []
for record in SeqIO.parse(args.sequence, "fasta"):
strains.append(str(record.id))
genomes.append(str(record.seq))
indices_na = []
indices_ha = []
# Split genomes into two based on segment name
for i in range(0, len(strains)):
if(any(ele in strains[i] for ele in ["|N|4", "|S|4","|T|4"])):
indices_ha.append(i)
else:
indices_na.append(i)
strains_na = np.array(strains)[indices_na]
genomes_na =
|
np.array(genomes)
|
numpy.array
|
from manim import *
import numpy as np
from scipy import signal
import scipy.stats
import scipy
# basics
## latex
class latex_formules(Scene):
def construct(self):
latex = MathTex(r"\sum_{n=1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}")
self.play(FadeInFrom(latex))
self.wait()
## algebraic identity (a-b)^2
class IR_amoinsb_2(Scene): # (a-b)^2
def construct(self):
formula = TexMobject(
"(", # 0
"a", # 1
"-", # 2
"b", # 3
")", # 4
"^{2}", # 5
"=", # 6
"a", # 7
"^{2}", # 8
"-", # 9
"2", # 10
"a", # 11
"b", # 12
"+", # 13
"b", # 14
"^{2}" # 15
)
formula.scale(2)
self.play(Write(formula[0:7])) # 0 à 6
formula[7].set_color(RED)
formula[14].set_color(BLUE)
formula[11].set_color(RED)
formula[12].set_color(BLUE)
self.wait()
self.play(
ReplacementTransform(formula[1].copy(), formula[7]),
Write(formula[8]),
ReplacementTransform(formula[2].copy(), formula[9]),
ReplacementTransform(formula[3].copy(), formula[9]),
Write(formula[13]),
ReplacementTransform(formula[3].copy(), formula[14]),
Write(formula[15]),
run_time=2
)
self.wait(1.5)
self.play(
Write(formula[10]),
ReplacementTransform(formula[1].copy(), formula[11]),
ReplacementTransform(formula[3].copy(), formula[12]),
run_time=2
)
self.wait()
## algebraic identity a^2-b^2
class IR_a_b_2(Scene): # a^2-b^2
def construct(self):
formula = Tex(
"a", # 0
"^{2}", # 1
"-", # 2
"b", # 3
"^{2}", # 4
"=", # 5
"(", # 6
"a", # 7
"-", # 8
"b", # 9
")", # 10
"(", # 11
"a", # 12
"+", # 13
"b", # 14
")" # 15
)
formula.scale(2)
self.play(Write(formula[0:6])) # 0 à 5
formula[7].set_color(RED)
formula[9].set_color(BLUE)
formula[14].set_color(RED)
formula[12].set_color(BLUE)
self.wait()
self.play(
ReplacementTransform(formula[0].copy(), formula[7]),
ReplacementTransform(formula[0].copy(), formula[12]),
Write(formula[6]), Write(formula[11]),
Write(formula[10]), Write(formula[15]),
run_time=1.5
)
self.wait(0.5)
self.play(
Write(formula[8]), Write(formula[13]),
ReplacementTransform(formula[3].copy(), formula[9]),
ReplacementTransform(formula[3].copy(), formula[14]),
run_time=1.5
)
self.wait()
## colored and boxed text
class TextColor(Scene): # f(x)=ax+b
def construct(self):
text = Tex("f(x)", "=", "a", "x", "+", "b")
text[0].set_color(WHITE)
text[1].set_color(WHITE)
text[2].set_color(BLUE)
text[3].set_color(WHITE)
text[4].set_color("#FFFFFF") # Hexadecimal color
text[5].set_color(RED)
# text.to_corner(DL)
frameBoxa = SurroundingRectangle(text[2], buff=0.8 * SMALL_BUFF)
frameBoxa.set_stroke(BLUE, 3)
        boxtextea = Tex("{\\normalsize We vary a}")
boxtextea.set_color(BLUE, 3)
boxtextea.next_to(text[2].get_center(), UP, buff=0.7)
frameBoxb = SurroundingRectangle(text[5], buff=0.8 * SMALL_BUFF)
frameBoxb.set_stroke(RED)
        boxtexteb = Tex("{\\normalsize and b}")
boxtexteb.set_color(RED)
boxtexteb.next_to(text[5].get_center(), UP, buff=0.7)
self.play(Write(text))
self.wait(.1)
self.play(ShowCreation(frameBoxa))
self.play(Write(boxtextea))
self.wait(0.4)
self.remove(frameBoxa)
self.wait(0.1)
self.remove(boxtextea)
self.play(ShowCreation(frameBoxb))
self.play(Write(boxtexteb))
self.wait(0.4)
self.remove(frameBoxb)
self.wait(0.1)
self.remove(boxtexteb)
self.wait(0.5)
## align text
class Aligner_text(Scene):
def construct(self):
text1 = Tex("text1").shift(2 * UL) # UpLeft
text2 = Tex("text2")
text3 = Tex("text3").shift(2 * DR) # DownRight
group = VGroup(text1, text2, text3).scale(1.1)
self.add(group)
self.play(group.animate.arrange(RIGHT, .25, center=False))
## gradient-colored line
class LigneGradient(Scene):
def construct(self):
line_gradient = Line(LEFT * 4, RIGHT * 4)
line_gradient.set_color(color=[PURPLE, BLUE, YELLOW, GREEN, RED])
self.add(line_gradient)
self.wait()
## warp square
class WarpSquare(Scene):
def construct(self):
square = Square()
self.add(square)
self.play(ApplyPointwiseFunction(
lambda point: complex_to_R3(np.exp(R3_to_complex(point))),
square
))
self.wait()
## moving a sphere
class movecircle(Scene):
def construct(self):
sphere = Sphere().set_color(RED)
self.add(sphere)
self.play(ApplyMethod(sphere.shift, UP), run_time=2)
self.play(ApplyMethod(sphere.scale, 0.4), run_time=2)
self.wait(1)
# 2D
## polygon
class polygon(GraphScene):
def construct(self):
self.setup_axes(animate=True)
polyg = [self.coords_to_point(0,0), #P1
self.coords_to_point(0,3.5), #P2
self.coords_to_point(3.5,1.75), #P3
self.coords_to_point(3.5,0), #P4
                 self.coords_to_point(0,0)] #P1 to close the figure
plol = Polygon(*polyg).move_to(UP+DOWN)
self.play(ShowCreation(plol))
## vertical line under curve
class Plot_AO(GraphScene):
def construct(self):
self.setup_axes()
self.v_graph = self.get_graph(lambda x: 4 * x - x ** 2, x_min=0, x_max=4)
self.variable_point_label = "x_0"
self.add_T_label(x_val=1)
self.add(self.v_graph)
self.wait()
# 3D
## Animation surfaces
class SurfacesAnimation(ThreeDScene): ####### Surface
def construct(self):
axes = ThreeDAxes()
cylinder = ParametricSurface(
lambda u, v: np.array([
np.cos(TAU * v),
np.sin(TAU * v),
2 * (1 - u)
]),
resolution=(6, 32)).fade(0.5) # Resolution of the surfaces
paraboloid = ParametricSurface(
lambda u, v: np.array([
np.cos(v) * u,
np.sin(v) * u,
u ** 2
]), v_max=TAU,
checkerboard_colors=[PURPLE_D, PURPLE_E],
resolution=(10, 32)).scale(2)
para_hyp = ParametricSurface(
lambda u, v: np.array([
u,
v,
u ** 2 - v ** 2
]), v_min=-2, v_max=2, u_min=-2, u_max=2, checkerboard_colors=[BLUE_D, BLUE_E],
resolution=(15, 32)).scale(1)
cone = ParametricSurface(
lambda u, v: np.array([
u * np.cos(v),
u * np.sin(v),
u
]), v_min=0, v_max=TAU, u_min=-2, u_max=2, checkerboard_colors=[GREEN_D, GREEN_E],
resolution=(15, 32)).scale(1)
hip_one_side = ParametricSurface(
lambda u, v: np.array([
np.cosh(u) * np.cos(v),
np.cosh(u) * np.sin(v),
np.sinh(u)
]), v_min=0, v_max=TAU, u_min=-2, u_max=2, checkerboard_colors=[YELLOW_D, YELLOW_E],
resolution=(15, 32))
ellipsoid = ParametricSurface(
lambda u, v: np.array([
1 * np.cos(u) * np.cos(v),
2 * np.cos(u) * np.sin(v),
0.5 * np.sin(u)
]), v_min=0, v_max=TAU, u_min=-PI / 2, u_max=PI / 2, checkerboard_colors=[TEAL_D, TEAL_E],
resolution=(15, 32)).scale(2)
sphere = ParametricSurface(
lambda u, v: np.array([
1.5 * np.cos(u) *
|
np.cos(v)
|
numpy.cos
|
""" Wrapper class for Intel's PixelLink realisation (text segmentation NN)
text-detection-00[34]
For text-detection-002 you'll need to uncomment string in detect()
"""
import cv2
import numpy as np
from skimage.morphology import label
from skimage.measure import regionprops
from typing import List, Tuple
from skimage.measure._regionprops import RegionProperties
class PixelLinkDetector():
""" Wrapper class for Intel's version of PixelLink text detector
See https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/intel/ \
text-detection-0004/description/text-detection-0004.md
:param xml_model_path: path to XML file
**Example:**
.. code-block:: python
detector = PixelLinkDetector('text-detection-0004.xml')
img = cv2.imread('tmp.jpg')
# ~250ms on i7-6700K
detector.detect(img)
# ~2ms
bboxes = detector.decode()
"""
def __init__(self, xml_model_path: str, txt_threshold=0.5):
"""
:param xml_model_path: path to model's XML file
:param txt_threshold: confidence, defaults to ``0.5``
"""
self._net = cv2.dnn.readNet(xml_model_path, xml_model_path[:-3] + 'bin')
self._txt_threshold = txt_threshold
def detect(self, img: np.ndarray) -> None:
""" GetPixelLink's outputs (BxCxHxW):
+ [1x16x192x320] - logits related to linkage between pixels and their neighbors
+ [1x2x192x320] - logits related to text/no-text classification for each pixel
B - batch size
C - number of channels
H - image height
W - image width
:param img: image as ``numpy.ndarray``
"""
self._img_shape = img.shape
blob = cv2.dnn.blobFromImage(img, 1, (1280, 768))
self._net.setInput(blob)
out_layer_names = self._net.getUnconnectedOutLayersNames()
# for text-detection-002
# self.pixels, self.links = self._net.forward(out_layer_names)
# for text-detection-00[34]
self.links, self.pixels = self._net.forward(out_layer_names)
def get_mask(self) -> np.ndarray:
""" Get binary mask of detected text pixels
"""
pixel_mask = self._get_pixel_scores() >= self._txt_threshold
return pixel_mask.astype(np.uint8)
def _logsumexp(self, a: np.ndarray, axis=-1) -> np.ndarray:
""" Castrated function from scipy
https://github.com/scipy/scipy/blob/v1.6.2/scipy/special/_logsumexp.py
Compute the log of the sum of exponentials of input elements.
"""
a_max = np.amax(a, axis=axis, keepdims=True)
tmp =
|
np.exp(a - a_max)
|
numpy.exp
|
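# --- Editorial sketch ------------------------------------------------------------
# The log-sum-exp trick used by _logsumexp above: subtracting the per-axis maximum
# keeps np.exp from overflowing, and the maximum is added back afterwards.
import numpy as np

def logsumexp_sketch(a, axis=-1):
    a_max = np.amax(a, axis=axis, keepdims=True)
    out = np.log(np.sum(np.exp(a - a_max), axis=axis, keepdims=True)) + a_max
    return np.squeeze(out, axis=axis)

x = np.array([[1000.0, 1000.0], [0.0, np.log(3.0)]])
print(logsumexp_sketch(x))  # ~[1000.693, 1.386]; a naive log(sum(exp(x))) would overflow on the first row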
import os
import shutil
import numpy as np
from scipy.optimize import linear_sum_assignment
from easydict import EasyDict as edict
import configparser
INF = 1e8
def linear_sum_assignment_with_inf(cost_matrix):
"""
This is a workaround from 'https://github.com/scipy/scipy/issues/6900'
"""
cost_matrix = np.asarray(cost_matrix).copy()
min_inf = np.isneginf(cost_matrix).any()
max_inf = np.isposinf(cost_matrix).any()
if min_inf and max_inf:
raise ValueError("matrix contains both inf and -inf")
if min_inf or max_inf:
values = cost_matrix[~np.isinf(cost_matrix)]
m = values.min()
M = values.max()
n = min(cost_matrix.shape)
# strictly positive constant even when added
# to elements of the cost matrix
positive = n * (M - m + np.abs(M) + np.abs(m) + 1)
if max_inf:
place_holder = (M + (n - 1) * (M - m)) + positive
if min_inf:
place_holder = (m + (n - 1) * (m - M)) - positive
cost_matrix[np.isinf(cost_matrix)] = place_holder
return linear_sum_assignment(cost_matrix)
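# --- Editorial example -------------------------------------------------------------
# The workaround above replaces inf entries with a finite placeholder large enough
# that the assignment never picks them when a finite alternative exists.
_cost = np.array([[1.0, np.inf], [np.inf, 2.0]])
_rows, _cols = linear_sum_assignment_with_inf(_cost)
assert list(zip(_rows, _cols)) == [(0, 0), (1, 1)]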
def parseSequences2(seqmapFile):
assert (os.path.exists(seqmapFile)), 'seqmap file {} does not exist'.format(seqmapFile)
with open(seqmapFile) as f:
allseq = [x.strip() for x in f.readlines()[0:]]
return allseq
def boxiou(x1, y1, w1, h1, x2, y2, w2, h2):
def boxIntersect(bboxleft1, bboxright1, bboxbottom1, bboxup1, bboxleft2, bboxright2, bboxbottom2, bboxup2):
hor = np.max((0, np.min((bboxright1, bboxright2)) - np.max((bboxleft1, bboxleft2))))
if hor < 1e-8:
return 0.0
ver = np.max((0, np.min((bboxbottom1, bboxbottom2)) - np.max((bboxup1, bboxup2))))
if ver < 1e-8:
return 0.0
return hor * ver
def boxUnion(bboxleft1, bboxright1, bboxbottom1, bboxup1, bboxleft2, bboxright2, bboxbottom2, bboxup2, isect=None):
a1 = bboxright1 - bboxleft1
b1 = bboxbottom1 - bboxup1
a2 = bboxright2 - bboxleft2
b2 = bboxbottom2 - bboxup2
union = a1 * b1 + a2 * b2
if isect is not None:
bisect = isect
else:
bisect = boxIntersect(bboxleft1, bboxright1, bboxbottom1, bboxup1, bboxleft2, bboxright2, bboxbottom2,
bboxup2)
return union - bisect
bisect = boxIntersect(x1, x1 + w1, y1 + h1, y1, x2, x2 + w2, y2 + h2, y2)
if bisect < 1e-8:
return 0.0
bunion = boxUnion(x1, x1 + w1, y1 + h1, y1, x2, x2 + w2, y2 + h2, y2, bisect)
assert bunion > 0, 'something wrong with union computation'
iou = bisect / bunion
return iou
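# --- Editorial check ---------------------------------------------------------------
# Two unit boxes offset by half a side overlap with IoU = 0.5 / 1.5 = 1/3.
assert abs(boxiou(0, 0, 1, 1, 0.5, 0, 1, 1) - 1.0 / 3.0) < 1e-9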
def bbox_overlap(bbox1, bbox2):
return boxiou(bbox1[0], bbox1[1], bbox1[2], bbox1[3], bbox2[0], bbox2[1], bbox2[2], bbox2[3])
# return boxiou(bbox1[0], bbox1[1], bbox1[2] - bbox1[0], bbox1[3] - bbox1[1], bbox2[0], bbox2[1], bbox2[2] - bbox2[0], bbox2[3] - bbox2[1])
def classIDToString(classID):
labels = ['ped',
'person_on_vhcl',
'car',
'bicycle',
'mbike',
'non_mot_vhcl',
'static_person',
'distractor',
'occluder',
'occluder_on_grnd',
'occluder_full',
'reflection',
'crowd']
if classID < 1 or classID > len(labels):
return 'unknown'
return labels[classID - 1]
def preprocessResult(resFile, seqName, dataDir=None, force=True, minvis=0.0):
def cleanRequired(seqFolder):
return 'CVPR19' in seqFolder or 'MOT16' in seqFolder or 'MOT17' in seqFolder
# assert cleanRequired(seqName), 'preproccessing should only be done for MOT15/16/17 and CVPR 19'
if not os.path.exists(resFile):
print('Results file does not exist')
return
p = os.path.dirname(resFile)
f, e = os.path.splitext(os.path.basename(resFile))
cleanDir = os.path.join(p, 'clean')
if not os.path.exists(cleanDir):
os.makedirs(cleanDir)
resFileClean = os.path.join(cleanDir, f + e)
if not force and os.path.exists(resFileClean):
print('skipping...')
return
tf_ = os.path.getsize(resFile)
if tf_ == 0:
print('Results file empty')
shutil.copy(resFile, resFileClean)
return
def getSeqInfoFromFile(seq, dataDir):
seqFolder = os.path.join(dataDir, seqName)
seqInfoFile = os.path.join(dataDir, seqName, 'seqinfo.ini')
config = configparser.ConfigParser()
config.read(seqInfoFile)
imgFolder = config.get('Sequence', 'imDir')
frameRate = config.getint('Sequence', 'frameRate')
F = config.getint('Sequence', 'seqLength')
imWidth = config.getint('Sequence', 'imWidth')
imHeight = config.getint('Sequence', 'imHeight')
imgExt = config.get('Sequence', 'imExt')
return seqName, seqFolder, imgFolder, frameRate, F, imWidth, imHeight, imgExt
seqName, seqFolder, imgFolder, frameRate, F, imWidth, imHeight, imgExt \
= getSeqInfoFromFile(seqName, dataDir)
resRaw = np.loadtxt(resFile, delimiter=',')
gtFolder = os.path.join(dataDir, seqName, 'gt')
gtFile = os.path.join(gtFolder, 'gt.txt')
gtRaw = np.loadtxt(gtFile, delimiter=',')
assert np.shape(gtRaw)[1] == 9, 'unknown GT format'
if 'CVPR19' in seqName:
distractors = ['person_on_vhcl', 'static_person', 'distractor', 'reflection', 'non_mot_vhcl']
else:
distractors = ['person_on_vhcl', 'static_person', 'distractor', 'reflection']
keepBoxes = np.ones((np.shape(resRaw)[0],), dtype=bool)
td = 0.5
for t in range(1, F + 1):
resInFrame = np.where(resRaw[:, 0] == t)[0]
N = len(resInFrame)
resInFrame = np.reshape(resInFrame, (N,))
GTInFrame = np.where(gtRaw[:, 0] == t)[0]
Ngt = len(GTInFrame)
GTInFrame = np.reshape(GTInFrame, (Ngt,))
allisects = np.zeros((Ngt, N))
g = 0
for gg in GTInFrame:
g = g + 1
r = 0
bxgt, bygt, bwgt, bhgt = gtRaw[gg, 2:6]
for rr in resInFrame:
r = r + 1
bxres, byres, bwres, bhres = resRaw[rr, 2:6]
if bxgt + bwgt < bxres or bxgt > bxres + bwres:
continue
if bygt + bhgt < byres or bygt > byres + bhres:
continue
allisects[g - 1, r - 1] = boxiou(bxgt, bygt, bwgt, bhgt, bxres, byres, bwres, bhres)
tmpai = allisects
tmpai = 1 - tmpai
# tmpai[tmpai > td] = np.inf
# mGT, mRes = linear_sum_assignment_with_inf(tmpai)
tmpai[tmpai > td] = INF
mGT, mRes = linear_sum_assignment(tmpai)
Mtch = np.zeros_like(tmpai)
Mtch[mGT, mRes] = 1
nMtch = len(mGT)
for m in range(nMtch):
g = GTInFrame[mGT[m]]
r = resInFrame[mRes[m]]
if (tmpai[mGT[m]][mRes[m]] == INF):
continue
            gtClassID = gtRaw[g, 7].astype(int)
gtClassString = classIDToString(gtClassID)
if gtClassString in distractors:
keepBoxes[r] = False
if gtRaw[g, 8] < minvis:
keepBoxes[r] = False
resNew = resRaw
resNew = resRaw[keepBoxes, :]
np.savetxt(resFileClean, resNew)
return resFileClean
def clear_mot_hungarian(gtDB, stDB, threshold, world, VERBOSE=False):
# TODO: This function comes from https://github.com/shenh10/mot_evaluation/blob/master/utils/measurements.py
# TO BE reimplemented
"""
compute CLEAR_MOT and other metrics
[recall, precision, FAR, GT, MT, PT, ML, falsepositives, false negatives, idswitches, FRA, MOTA, MOTP, MOTAL]
"""
# st_frames = np.unique(stDB[:, 0])
    gtDB = gtDB.astype(int)
    stDB = stDB.astype(int)
gt_frames = np.unique(gtDB[:, 0])
st_ids = np.unique(stDB[:, 1])
gt_ids = np.unique(gtDB[:, 1])
# f_gt = int(max(max(st_frames), max(gt_frames)))
# n_gt = int(max(gt_ids))
# n_st = int(max(st_ids))
f_gt = len(gt_frames)
n_gt = len(gt_ids)
n_st = len(st_ids)
mme = np.zeros((f_gt,), dtype=float) # ID switch in each frame
c = np.zeros((f_gt,), dtype=float) # matches found in each frame
fp = np.zeros((f_gt,), dtype=float) # false positives in each frame
missed = np.zeros((f_gt,), dtype=float) # missed gts in each frame
g = np.zeros((f_gt,), dtype=float) # gt count in each frame
d = np.zeros((f_gt, n_gt), dtype=float) # overlap matrix
Mout = np.zeros((f_gt, n_gt), dtype=float)
allfps = np.zeros((f_gt, n_st), dtype=float)
gt_inds = [{} for i in range(f_gt)]
st_inds = [{} for i in range(f_gt)]
M = [{} for i in range(f_gt)] # matched pairs hashing gid to sid in each frame
# hash the indices to speed up indexing
for i in range(gtDB.shape[0]):
frame = np.where(gt_frames == gtDB[i, 0])[0][0]
gid = np.where(gt_ids == gtDB[i, 1])[0][0]
gt_inds[frame][gid] = i
gt_frames_list = list(gt_frames)
for i in range(stDB.shape[0]):
# sometimes detection missed in certain frames, thus should be assigned to groundtruth frame id for alignment
frame = gt_frames_list.index(stDB[i, 0])
sid = np.where(st_ids == stDB[i, 1])[0][0]
st_inds[frame][sid] = i
for t in range(f_gt):
g[t] = len(gt_inds[t].keys())
# preserving original mapping if box of this trajectory has large enough iou in avoid of ID switch
if t > 0:
mappings = list(M[t - 1].keys())
sorted(mappings)
for k in range(len(mappings)):
if mappings[k] in gt_inds[t].keys() and M[t - 1][mappings[k]] in st_inds[t].keys():
row_gt = gt_inds[t][mappings[k]]
row_st = st_inds[t][M[t - 1][mappings[k]]]
dist = bbox_overlap(stDB[row_st, 2:6], gtDB[row_gt, 2:6])
if dist >= threshold:
M[t][mappings[k]] = M[t - 1][mappings[k]]
# mapping remaining groundtruth and estimated boxes
unmapped_gt, unmapped_st = [], []
unmapped_gt = [key for key in gt_inds[t].keys() if key not in M[t].keys()]
unmapped_st = [key for key in st_inds[t].keys() if key not in M[t].values()]
if len(unmapped_gt) > 0 and len(unmapped_st) > 0:
square_size = np.max((len(unmapped_gt), len(unmapped_st)))
overlaps = np.zeros((square_size, square_size), dtype=float)
for i in range(len(unmapped_gt)):
row_gt = gt_inds[t][unmapped_gt[i]]
for j in range(len(unmapped_st)):
row_st = st_inds[t][unmapped_st[j]]
dist = 1 - bbox_overlap(stDB[row_st, 2:6], gtDB[row_gt, 2:6])
if dist <= threshold:
overlaps[i][j] = dist
overlaps[overlaps == 0.0] = 1e8
matched_indices = linear_sum_assignment(overlaps)
for matched in zip(*matched_indices):
if overlaps[matched[0], matched[1]] == 1e8:
continue
M[t][unmapped_gt[matched[0]]] = unmapped_st[matched[1]]
# compute statistics
cur_tracked = list(M[t].keys())
fps = [key for key in st_inds[t].keys() if key not in M[t].values()]
for k in range(len(fps)):
allfps[t][fps[k]] = fps[k]
# check miss match errors
if t > 0:
for i in range(len(cur_tracked)):
ct = cur_tracked[i]
est = M[t][ct]
last_non_empty = -1
for j in range(t - 1, 0, -1):
if ct in M[j].keys():
last_non_empty = j
break
if ct in gt_inds[t - 1].keys() and last_non_empty != -1:
mtct, mlastnonemptyct = -1, -1
if ct in M[t]:
mtct = M[t][ct]
if ct in M[last_non_empty]:
mlastnonemptyct = M[last_non_empty][ct]
if mtct != mlastnonemptyct:
mme[t] += 1
c[t] = len(cur_tracked)
fp[t] = len(st_inds[t].keys())
fp[t] -= c[t]
missed[t] = g[t] - c[t]
for i in range(len(cur_tracked)):
ct = cur_tracked[i]
est = M[t][ct]
row_gt = gt_inds[t][ct]
row_st = st_inds[t][est]
d[t][ct] = 1 - bbox_overlap(stDB[row_st, 2:6], gtDB[row_gt, 2:6])
for k in M[t].keys():
Mout[t][k] = M[t][k] + 1;
return mme, c, fp, missed, g, d, Mout, allfps
def CLEAR_MOT_HUN(gtMat, resMat, threshold, world):
metricsInfo = edict()
metricsInfo.names = edict()
metricsInfo.names.long = ['Recall', 'Precision', 'False Alarm Rate',
'GT Tracks', 'Mostly Tracked', 'Partially Tracked', 'Mostly Lost',
'False Positives', 'False Negatives', 'ID Switches', 'Fragmentations',
'MOTA', 'MOTP', 'MOTA Log']
metricsInfo.names.short = ['Rcll', 'Prcn', 'FAR',
'GT', 'MT', 'PT', 'ML',
'FP', 'FN', 'IDs', 'FM',
'MOTA', 'MOTP', 'MOTAL']
metricsInfo.widths = edict()
metricsInfo.widths.long = [6, 9, 16, 9, 14, 17, 11, 13, 15, 15, 11, 14, 5, 5, 8]
metricsInfo.widths.short = [5, 5, 5, 3, 3, 3, 3, 2, 4, 4, 3, 3, 5, 5, 5]
metricsInfo.format = edict()
metricsInfo.format.long = {'.1f', '.1f', '.2f',
'i', 'i', 'i', 'i',
'i', 'i', 'i', 'i', 'i',
'.1f', '.1f', '.1f'}
metricsInfo.format.short = metricsInfo.format.long
additionalInfo = edict()
_, ic = np.unique(gtMat[:, 1], return_inverse=True)
gtMat[:, 1] = ic
_, ic2 = np.unique(resMat[:, 1], return_inverse=True)
resMat[:, 1] = ic2
VERBOSE = False
mme, c, fp, m, g, d, alltracked, allfalsepos = clear_mot_hungarian(gtMat, resMat, threshold, VERBOSE)
# ! Caution: alltracked is 0-indexed
Fgt = np.max(gtMat[:, 0])
Ngt = len(np.unique(gtMat[:, 1]))
F = np.max(resMat[:, 0])
missed = np.sum(m)
falsepositives = np.sum(fp)
idswitches = np.sum(mme)
MOTP = (1.0 - np.sum(np.sum(d)) / np.sum(c)) * 100
if world:
MOTP = MOTP / threshold
if np.isnan(MOTP):
MOTP = 0.0
MOTAL = (1 - ((np.sum(m) + np.sum(fp) + np.log10(np.sum(mme) + 1)) / np.sum(g))) * 100
MOTA = (1 - ((np.sum(m) + np.sum(fp) + (np.sum(mme))) /
|
np.sum(g)
|
numpy.sum
|
import numpy as np
import cifar_input as utils
def load_data(dataset='C10+', is_tune=False, is_crop_filp=False):
    with open('stl10_data/train_X.bin', 'rb') as f:
raw = np.fromfile(f, dtype=np.uint8, count=-1)
raw = np.reshape(raw, (-1, 3, 96, 96))
raw =
|
np.transpose(raw, (0,3,2,1))
|
numpy.transpose
|
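# --- Editorial note ----------------------------------------------------------------
# The reshape/transpose pattern in load_data above: STL-10 stores each image as three
# 96x96 channel planes in column-major order (per the dataset's readme), so reshaping
# to (-1, 3, 96, 96) and transposing with (0, 3, 2, 1) yields N x 96 x 96 x 3 images
# in the usual HWC layout.
import numpy as np
_fake = np.zeros((2, 3, 96, 96), dtype=np.uint8)
assert np.transpose(_fake, (0, 3, 2, 1)).shape == (2, 96, 96, 3)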
import timeit
import numpy as np
import pandas as pd
import pygco
import cv2
class colorize(object):
def __init__(self, original, sobel=True):
self.test = cv2.imread(original,0)
self.original = cv2.imread(original,1)
self.dfpr = pd.read_csv('pred_cost.csv', sep=',',header=None)
        self.colors = pd.read_csv('colors.csv', sep=',', header=None).to_numpy()
        self.pixels = self.dfpr.loc[:, :2].to_numpy()
        self.pred_cost = self.dfpr.loc[:, 2:].to_numpy()
self.rows, self.cols = self.test.shape
self.sobel = sobel
def get_edges(self,blur_width=3):
img_blurred = cv2.GaussianBlur(self.test, (0, 0), blur_width)
vh = cv2.Sobel(img_blurred, -1, 1, 0)
vv = cv2.Sobel(img_blurred, -1, 0, 1)
return 0.5*vv + 0.5*vh
def graphcut(self,label_costs, l=100):
num_classes = len(self.colors)
pairwise_costs = np.zeros((num_classes, num_classes))
for ii in range(num_classes):
for jj in range(num_classes):
c1 = np.array(self.colors[ii])
c2 = np.array(self.colors[jj])
pairwise_costs[ii,jj] =
|
np.linalg.norm(c1-c2)
|
numpy.linalg.norm
|
import radionets.simulations.layouts.layouts as layouts
from radionets.simulations.uv_simulations import Antenna, Source
import cartopy.crs as ccrs
import numpy as np
import matplotlib.pyplot as plt
import cartopy.io.img_tiles as cimgt
from pathlib import Path
from matplotlib.colors import LogNorm
import matplotlib.patches as mpatches
from radionets.evaluation.utils import make_axes_nice
from matplotlib import cm
from matplotlib.colors import ListedColormap
def create_OrBu():
top = cm.get_cmap("Blues_r", 128)
bottom = cm.get_cmap("Oranges", 128)
white =
|
np.array([256 / 256, 256 / 256, 256 / 256, 1])
|
numpy.array
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Utilities for defects module.
"""
import itertools
import logging
import math
import operator
from collections import defaultdict
from copy import deepcopy
import numpy as np
import pandas as pd
from monty.dev import requires
from monty.json import MSONable
from numpy.linalg import norm
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial import Voronoi
from scipy.spatial.distance import squareform
from pymatgen.analysis.local_env import (
LocalStructOrderParams,
MinimumDistanceNN,
cn_opt_params,
)
from pymatgen.analysis.phase_diagram import get_facets
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import pbc_diff
from pymatgen.vis.structure_vtk import StructureVis
try:
from skimage.feature import peak_local_max
peak_local_max_found = True
except ImportError:
peak_local_max_found = False
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2014, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
__date__ = "January 11, 2018"
logger = logging.getLogger(__name__)
hart_to_ev = 27.2114
ang_to_bohr = 1.8897
invang_to_ev = 3.80986
kumagai_to_V = 1.809512739e2 # = Electron charge * 1e10 / VacuumPermittivity Constant
motif_cn_op = {}
for cn, di in cn_opt_params.items():
for mot, li in di.items():
motif_cn_op[mot] = {"cn": int(cn), "optype": li[0]}
motif_cn_op[mot]["params"] = deepcopy(li[1]) if len(li) > 1 else None
class QModel(MSONable):
"""
Model for the defect charge distribution.
A combination of exponential tail and gaussian distribution is used
(see Freysoldt (2011), DOI: 10.1002/pssb.201046289 )
q_model(r) = q [x exp(-r/gamma) + (1-x) exp(-r^2/beta^2)]
without normalization constants
By default, gaussian distribution with 1 Bohr width is assumed.
If defect charge is more delocalized, exponential tail is suggested.
"""
def __init__(self, beta=1.0, expnorm=0.0, gamma=1.0):
"""
Args:
beta: Gaussian decay constant. Default value is 1 Bohr.
When delocalized (eg. diamond), 2 Bohr is more appropriate.
expnorm: Weight for the exponential tail in the range of [0-1].
Default is 0.0 indicating no tail .
For delocalized charges ideal value is around 0.54-0.6.
gamma: Exponential decay constant
"""
self.beta = beta
self.expnorm = expnorm
self.gamma = gamma
self.beta2 = beta * beta
self.gamma2 = gamma * gamma
if expnorm and not gamma:
raise ValueError("Please supply exponential decay constant.")
def rho_rec(self, g2):
"""
Reciprocal space model charge value
for input squared reciprocal vector.
Args:
g2: Square of reciprocal vector
Returns:
Charge density at the reciprocal vector magnitude
"""
return self.expnorm / np.sqrt(1 + self.gamma2 * g2) + (1 - self.expnorm) * np.exp(-0.25 * self.beta2 * g2)
@property
def rho_rec_limit0(self):
"""
Reciprocal space model charge value
close to reciprocal vector 0 .
rho_rec(g->0) -> 1 + rho_rec_limit0 * g^2
"""
return -2 * self.gamma2 * self.expnorm - 0.25 * self.beta2 * (1 - self.expnorm)
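# --- Editorial sketch ----------------------------------------------------------------
# For the default pure-Gaussian model (expnorm=0, beta=1 Bohr, gamma=1), rho_rec
# reduces to exp(-g^2 / 4), so the model charge is exactly 1 at g^2 = 0.
_q = QModel()
assert abs(_q.rho_rec(0.0) - 1.0) < 1e-12
assert abs(_q.rho_rec(4.0) - np.exp(-1.0)) < 1e-12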
def eV_to_k(energy):
"""
Convert energy to reciprocal vector magnitude k via hbar*k^2/2m
Args:
        energy: Energy in eV.
Returns:
(double) Reciprocal vector magnitude (units of 1/Bohr).
"""
return math.sqrt(energy / invang_to_ev) * ang_to_bohr
def genrecip(a1, a2, a3, encut):
"""
Args:
a1, a2, a3: lattice vectors in bohr
encut: energy cut off in eV
Returns:
reciprocal lattice vectors with energy less than encut
"""
vol = np.dot(a1, np.cross(a2, a3)) # 1/bohr^3
b1 = (2 * np.pi / vol) * np.cross(a2, a3) # units 1/bohr
b2 = (2 * np.pi / vol) * np.cross(a3, a1)
b3 = (2 * np.pi / vol) * np.cross(a1, a2)
# create list of recip space vectors that satisfy |i*b1+j*b2+k*b3|<=encut
G_cut = eV_to_k(encut)
    # Figure out max in all reciprocal lattice directions
i_max = int(math.ceil(G_cut / norm(b1)))
j_max = int(math.ceil(G_cut / norm(b2)))
k_max = int(math.ceil(G_cut / norm(b3)))
# Build index list
i = np.arange(-i_max, i_max)
j = np.arange(-j_max, j_max)
k = np.arange(-k_max, k_max)
# Convert index to vectors using meshgrid
indices = np.array(np.meshgrid(i, j, k)).T.reshape(-1, 3)
    # Multiply integer vectors to get reciprocal space vectors
vecs = np.dot(indices, [b1, b2, b3])
# Calculate radii of all vectors
radii = np.sqrt(np.einsum("ij,ij->i", vecs, vecs))
# Yield based on radii
for vec, r in zip(vecs, radii):
if r < G_cut and r != 0:
yield vec
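# --- Editorial usage sketch ------------------------------------------------------------
# genrecip yields every nonzero reciprocal-lattice vector shorter than eV_to_k(encut);
# for a 1 Bohr simple-cubic cell these are the 2*pi*(i, j, k) combinations inside the cutoff.
_a = np.eye(3)  # lattice vectors in Bohr
_vecs = list(genrecip(_a[0], _a[1], _a[2], 200))
assert _vecs and all(np.linalg.norm(v) < eV_to_k(200) for v in _vecs)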
def generate_reciprocal_vectors_squared(a1, a2, a3, encut):
"""
Generate reciprocal vector magnitudes within the cutoff along the specified
lattice vectors.
Args:
a1: Lattice vector a (in Bohrs)
a2: Lattice vector b (in Bohrs)
a3: Lattice vector c (in Bohrs)
encut: Reciprocal vector energy cutoff
Returns:
[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2
        determined by a1, a2, a3 and whose magnitude is less than gcut^2.
"""
for vec in genrecip(a1, a2, a3, encut):
yield np.dot(vec, vec)
def closestsites(struct_blk, struct_def, pos):
"""
Returns closest site to the input position
for both bulk and defect structures
Args:
struct_blk: Bulk structure
struct_def: Defect structure
pos: Position
Return: (site object, dist, index)
"""
blk_close_sites = struct_blk.get_sites_in_sphere(pos, 5, include_index=True)
blk_close_sites.sort(key=lambda x: x[1])
def_close_sites = struct_def.get_sites_in_sphere(pos, 5, include_index=True)
def_close_sites.sort(key=lambda x: x[1])
return blk_close_sites[0], def_close_sites[0]
class StructureMotifInterstitial:
"""
Generate interstitial sites at positions
where the interstitialcy is coordinated by nearest neighbors
in a way that resembles basic structure motifs
    (e.g., tetrahedra, octahedra). The algorithm, called InFiT
    (Interstitialcy Finding Tool), was introduced by
<NAME>, <NAME>, <NAME>,
and <NAME> (Front. Mater., 4, 34, 2017),
and it is used by the Python Charged Defect Toolkit
(PyCDT: <NAME> et al., Comput. Phys. Commun., in press, 2018).
"""
def __init__(
self,
struct,
inter_elem,
motif_types=("tetrahedral", "octahedral"),
op_threshs=(0.3, 0.5),
dl=0.2,
doverlap=1,
facmaxdl=1.01,
verbose=False,
):
"""
Generates symmetrically distinct interstitial sites at positions
where the interstitial is coordinated by nearest neighbors
in a pattern that resembles a supported structure motif
(e.g., tetrahedra, octahedra).
Args:
struct (Structure): input structure for which symmetrically
distinct interstitial sites are to be found.
inter_elem (string): element symbol of desired interstitial.
motif_types ([string]): list of structure motif types that are
to be considered. Permissible types are:
tet (tetrahedron), oct (octahedron).
op_threshs ([float]): threshold values for the underlying order
parameters to still recognize a given structural motif
(i.e., for an OP value >= threshold the coordination pattern
match is positive, for OP < threshold the match is
negative.
dl (float): grid fineness in Angstrom. The input
structure is divided into a grid of dimension
a/dl x b/dl x c/dl along the three crystallographic
directions, with a, b, and c being the lengths of
the three lattice vectors of the input unit cell.
doverlap (float): distance that is considered
to flag an overlap between any trial interstitial site
and a host atom.
facmaxdl (float): factor to be multiplied with the maximum grid
width that is then used as a cutoff distance for the
clustering prune step.
verbose (bool): flag indicating whether (True) or not (False;
default) to print additional information to screen.
"""
# Initialize interstitial finding.
self._structure = struct.copy()
self._motif_types = motif_types[:]
if len(self._motif_types) == 0:
raise RuntimeError("no motif types provided.")
self._op_threshs = op_threshs[:]
self.cn_motif_lostop = {}
self.target_cns = []
for motif in self._motif_types:
if motif not in list(motif_cn_op.keys()):
raise RuntimeError(f"unsupported motif type: {motif}.")
cn = int(motif_cn_op[motif]["cn"])
if cn not in self.target_cns:
self.target_cns.append(cn)
if cn not in list(self.cn_motif_lostop.keys()):
self.cn_motif_lostop[cn] = {}
tmp_optype = motif_cn_op[motif]["optype"]
if tmp_optype == "tet_max":
tmp_optype = "tet"
if tmp_optype == "oct_max":
tmp_optype = "oct"
self.cn_motif_lostop[cn][motif] = LocalStructOrderParams(
[tmp_optype], parameters=[motif_cn_op[motif]["params"]], cutoff=-10.0
)
self._dl = dl
self._defect_sites = []
self._defect_types = []
self._defect_site_multiplicity = []
self._defect_cns = []
self._defect_opvals = []
rots, trans = SpacegroupAnalyzer(struct)._get_symmetry()
nbins = [
int(struct.lattice.a / dl),
int(struct.lattice.b / dl),
int(struct.lattice.c / dl),
]
dls = [
struct.lattice.a / float(nbins[0]),
struct.lattice.b / float(nbins[1]),
struct.lattice.c / float(nbins[2]),
]
maxdl = max(dls)
if verbose:
print(f"Grid size: {nbins[0]} {nbins[1]} {nbins[2]}")
print(f"dls: {dls[0]} {dls[1]} {dls[2]}")
struct_w_inter = struct.copy()
struct_w_inter.append(inter_elem, [0, 0, 0])
natoms = len(list(struct_w_inter.sites))
trialsites = []
# Build index list
i = np.arange(0, nbins[0]) + 0.5
j = np.arange(0, nbins[1]) + 0.5
k = np.arange(0, nbins[2]) + 0.5
# Convert index to vectors using meshgrid
indices = np.array(np.meshgrid(i, j, k)).T.reshape(-1, 3)
# Scale the grid indices by the grid spacing to get fractional coordinates
vecs = np.multiply(indices, np.divide(1, nbins))
# Loop over trial positions that are based on a regular
# grid in fractional coordinate space
# within the unit cell.
for vec in vecs:
struct_w_inter.replace(natoms - 1, inter_elem, coords=vec, coords_are_cartesian=False)
if len(struct_w_inter.get_sites_in_sphere(struct_w_inter.sites[natoms - 1].coords, doverlap)) == 1:
neighs_images_weigths = MinimumDistanceNN(tol=0.8, cutoff=6).get_nn_info(struct_w_inter, natoms - 1)
neighs_images_weigths_sorted = sorted(neighs_images_weigths, key=lambda x: x["weight"], reverse=True)
for nsite in range(1, len(neighs_images_weigths_sorted) + 1):
if nsite not in self.target_cns:
continue
allsites = [neighs_images_weigths_sorted[i]["site"] for i in range(nsite)]
indices_neighs = list(range(len(allsites)))
allsites.append(struct_w_inter.sites[natoms - 1])
for mot, ops in self.cn_motif_lostop[nsite].items():
opvals = ops.get_order_parameters(allsites, len(allsites) - 1, indices_neighs=indices_neighs)
if opvals[0] > op_threshs[motif_types.index(mot)]:
cns = {}
for isite in range(nsite):
site = neighs_images_weigths_sorted[isite]["site"]
if isinstance(site.specie, Element):
elem = site.specie.symbol
else:
elem = site.specie.element.symbol
if elem in list(cns.keys()):
cns[elem] = cns[elem] + 1
else:
cns[elem] = 1
trialsites.append(
{
"mtype": mot,
"opval": opvals[0],
"coords": struct_w_inter.sites[natoms - 1].coords[:],
"fracs": vec,
"cns": dict(cns),
}
)
break
# Prune list of trial sites by clustering and find the site
# with the largest order parameter value in each cluster.
nintersites = len(trialsites)
unique_motifs = []
for ts in trialsites:
if ts["mtype"] not in unique_motifs:
unique_motifs.append(ts["mtype"])
labels = {}
connected = []
for i in range(nintersites):
connected.append([])
for j in range(nintersites):
dist, image = struct_w_inter.lattice.get_distance_and_image(
trialsites[i]["fracs"], trialsites[j]["fracs"]
)
connected[i].append(bool(dist < (maxdl * facmaxdl)))
include = []
for motif in unique_motifs:
labels[motif] = []
for i, ts in enumerate(trialsites):
labels[motif].append(i if ts["mtype"] == motif else -1)
change = True
while change:
change = False
for i in range(nintersites - 1):
if change:
break
if labels[motif][i] == -1:
continue
for j in range(i + 1, nintersites):
if labels[motif][j] == -1:
continue
if connected[i][j] and labels[motif][i] != labels[motif][j]:
if labels[motif][i] < labels[motif][j]:
labels[motif][j] = labels[motif][i]
else:
labels[motif][i] = labels[motif][j]
change = True
break
unique_ids = []
for l in labels[motif]:
if l != -1 and l not in unique_ids:
unique_ids.append(l)
if verbose:
print(f"unique_ids {motif} {unique_ids}")
for uid in unique_ids:
maxq = 0.0
imaxq = -1
for i in range(nintersites):
if labels[motif][i] == uid:
if imaxq < 0 or trialsites[i]["opval"] > maxq:
imaxq = i
maxq = trialsites[i]["opval"]
include.append(imaxq)
# Prune by symmetry.
multiplicity = {}
discard = []
for motif in unique_motifs:
discard_motif = []
for indi, i in enumerate(include):
if trialsites[i]["mtype"] != motif or i in discard_motif:
continue
multiplicity[i] = 1
symposlist = [trialsites[i]["fracs"].dot(np.array(m, dtype=float)) for m in rots]
for t in trans:
symposlist.append(trialsites[i]["fracs"] + np.array(t))
for indj in range(indi + 1, len(include)):
j = include[indj]
if trialsites[j]["mtype"] != motif or j in discard_motif:
continue
for sympos in symposlist:
dist, image = struct.lattice.get_distance_and_image(sympos, trialsites[j]["fracs"])
if dist < maxdl * facmaxdl:
discard_motif.append(j)
multiplicity[i] += 1
break
for i in discard_motif:
if i not in discard:
discard.append(i)
if verbose:
print(
"Initial trial sites: {}\nAfter clustering: {}\n"
"After symmetry pruning: {}".format(len(trialsites), len(include), len(include) - len(discard))
)
for i in include:
if i not in discard:
self._defect_sites.append(
PeriodicSite(
Element(inter_elem),
trialsites[i]["fracs"],
self._structure.lattice,
to_unit_cell=False,
coords_are_cartesian=False,
properties=None,
)
)
self._defect_types.append(trialsites[i]["mtype"])
self._defect_cns.append(trialsites[i]["cns"])
self._defect_site_multiplicity.append(multiplicity[i])
self._defect_opvals.append(trialsites[i]["opval"])
def enumerate_defectsites(self):
"""
Get all defect sites.
Returns:
defect_sites ([PeriodicSite]): list of periodic sites
representing the interstitials.
"""
return self._defect_sites
def get_motif_type(self, i):
"""
Get the motif type of defect with index i (e.g., "tet").
Returns:
motif (string): motif type.
"""
return self._defect_types[i]
def get_defectsite_multiplicity(self, n):
"""
Returns the symmetric multiplicity of the defect site at index n.
"""
return self._defect_site_multiplicity[n]
def get_coordinating_elements_cns(self, i):
"""
Get element-specific coordination numbers of defect with index i.
Returns:
elem_cn (dict): dictionary storing the coordination numbers (int)
with string representation of elements as keys.
(i.e., {elem1 (string): cn1 (int), ...}).
"""
return self._defect_cns[i]
def get_op_value(self, i):
"""
Get order-parameter value of defect with index i.
Returns:
opval (float): OP value.
"""
return self._defect_opvals[i]
def make_supercells_with_defects(self, scaling_matrix):
"""
Generate a sequence of supercells
in which each supercell contains a single interstitial,
except for the first supercell in the sequence
which is a copy of the defect-free input structure.
Args:
scaling_matrix (3x3 integer array): scaling matrix
to transform the lattice vectors.
Returns:
scs ([Structure]): sequence of supercells.
"""
scs = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
scs.append(sc)
for ids, defect_site in enumerate(self._defect_sites):
sc_with_inter = sc.copy()
sc_with_inter.append(
defect_site.species_string,
defect_site.frac_coords,
coords_are_cartesian=False,
validate_proximity=False,
properties=None,
)
if not sc_with_inter:
raise RuntimeError(f"could not generate supercell with interstitial {ids + 1}")
scs.append(sc_with_inter.copy())
return scs
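# Hedged usage sketch (not part of the original module): shows how the class
# above might be driven end to end. It assumes pymatgen's Lattice/Structure are
# importable and that "tetrahedral"/"octahedral" are valid keys of motif_cn_op;
# the Si diamond cell and the "H" interstitial below are purely illustrative.
def _example_structure_motif_interstitial():
    from pymatgen.core import Lattice, Structure
    diamond = Structure.from_spacegroup("Fd-3m", Lattice.cubic(5.43), ["Si"], [[0, 0, 0]])
    finder = StructureMotifInterstitial(diamond, "H", dl=0.4, verbose=True)
    for i, site in enumerate(finder.enumerate_defectsites()):
        # One line per symmetry-distinct interstitial: motif, OP value, position.
        print(finder.get_motif_type(i), finder.get_op_value(i), site.frac_coords)
    # First supercell is defect-free; the rest contain one interstitial each.
    return finder.make_supercells_with_defects([[2, 0, 0], [0, 2, 0], [0, 0, 2]])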
class TopographyAnalyzer:
"""
This is a generalized module to perform topological analyses of a crystal
structure using Voronoi tessellations. It can be used for finding potential
interstitial sites. Applications including using these sites for
inserting additional atoms or for analyzing diffusion pathways.
Note that you typically want to do some preliminary postprocessing after
the initial construction. The initial construction will create a lot of
points, especially for determining potential insertion sites. Some helper
methods are available to perform aggregation and elimination of nodes. A
typical use is something like::
a = TopographyAnalyzer(structure, ["O"], ["P"])
a.cluster_nodes()
a.remove_collisions()
"""
def __init__(
self,
structure,
framework_ions,
cations,
tol=0.0001,
max_cell_range=1,
check_volume=True,
constrained_c_frac=0.5,
thickness=0.5,
):
"""
Init.
Args:
structure (Structure): An initial structure.
framework_ions ([str]): A list of ions to be considered as a
framework. Typically, this would be all anion species. E.g.,
["O", "S"].
cations ([str]): A list of ions to be considered as non-migrating
cations. E.g., if you are looking at Li3PS4 as a Li
conductor, Li is a mobile species. Your cations should be [
"P"]. The cations are used to exclude polyhedra from
diffusion analysis since those polyhedra are already occupied.
tol (float): A tolerance distance for the analysis, used to
determine whether two sites are actually periodic boundary images of
each other. Default is usually fine.
max_cell_range (int): This is the range of periodic images to
construct the Voronoi tessellation. A value of 1 means that we
include all points from (x +- 1, y +- 1, z+- 1) in the
voronoi construction. This is because the Voronoi poly
extends beyond the standard unit cell because of PBC.
Typically, the default value of 1 works fine for most
structures and is fast. But for really small unit
cells with high symmetry, you may need to increase this to 2
or higher.
check_volume (bool): Set to False if a ValueError keeps being raised
even after tuning the tolerance.
constrained_c_frac (float): Fractional c coordinate around which the
topology analysis is constrained. The default of 0.5 is the center
of the cell along c.
thickness (float): Together with constrained_c_frac, limits the
thickness (in fractional coordinates) of the region to explore.
The default of 0.5 covers all sites of the unit cell.
"""
self.structure = structure
self.framework_ions = {get_el_sp(sp) for sp in framework_ions}
self.cations = {get_el_sp(sp) for sp in cations}
# Let us first map all sites to the standard unit cell, i.e.,
# 0 ≤ coordinates < 1.
# structure = Structure.from_sites(structure, to_unit_cell=True)
# lattice = structure.lattice
# We could constrain the region where we want to dope/explore by setting
# the value of constrained_c_frac and thickness. The default mode is
# mapping all sites to the standard unit cell
s = structure.copy()
constrained_sites = []
for i, site in enumerate(s):
if (
site.frac_coords[2] >= constrained_c_frac - thickness
and site.frac_coords[2] <= constrained_c_frac + thickness
):
constrained_sites.append(site)
structure = Structure.from_sites(sites=constrained_sites)
lattice = structure.lattice
# Divide the sites into framework and non-framework sites.
framework = []
non_framework = []
for site in structure:
if self.framework_ions.intersection(site.species.keys()):
framework.append(site)
else:
non_framework.append(site)
# We construct a supercell series of coords. This is because the
# Voronoi polyhedra can extend beyond the standard unit cell. With the
# default max_cell_range=1, a range of -1, 0, 1 is used and is usually fine.
coords = []
cell_range = list(range(-max_cell_range, max_cell_range + 1))
for shift in itertools.product(cell_range, cell_range, cell_range):
for site in framework:
shifted = site.frac_coords + shift
coords.append(lattice.get_cartesian_coords(shifted))
# Perform the voronoi tessellation.
voro = Voronoi(coords)
# Store a mapping of each voronoi node to a set of points.
node_points_map = defaultdict(set)
for pts, vs in voro.ridge_dict.items():
for v in vs:
node_points_map[v].update(pts)
logger.debug(f"{len(voro.vertices)} total Voronoi vertices")
# Vnodes store all the valid voronoi polyhedra. Cation vnodes store
# the voronoi polyhedra that are already occupied by existing cations.
vnodes = []
cation_vnodes = []
def get_mapping(poly):
"""
Helper function to check if a voronoi poly is a periodic image
of one of the existing voronoi polys.
"""
for v in vnodes:
if v.is_image(poly, tol):
return v
return None
# Filter all the voronoi polyhedra so that we only consider those
# which are within the unit cell.
for i, vertex in enumerate(voro.vertices):
if i == 0:
continue
fcoord = lattice.get_fractional_coords(vertex)
poly = VoronoiPolyhedron(lattice, fcoord, node_points_map[i], coords, i)
if np.all([-tol <= c < 1 + tol for c in fcoord]):
if len(vnodes) == 0:
vnodes.append(poly)
else:
ref = get_mapping(poly)
if ref is None:
vnodes.append(poly)
logger.debug(f"{len(vnodes)} voronoi vertices in cell.")
# Eliminate all voronoi nodes which are closest to existing cations.
if len(cations) > 0:
cation_coords = [
site.frac_coords for site in non_framework if self.cations.intersection(site.species.keys())
]
vertex_fcoords = [v.frac_coords for v in vnodes]
dist_matrix = lattice.get_all_distances(cation_coords, vertex_fcoords)
indices = np.where(dist_matrix == np.min(dist_matrix, axis=1)[:, None])[1]
cation_vnodes = [v for i, v in enumerate(vnodes) if i in indices]
vnodes = [v for i, v in enumerate(vnodes) if i not in indices]
logger.debug(f"{len(vnodes)} vertices in cell not with cation.")
self.coords = coords
self.vnodes = vnodes
self.cation_vnodes = cation_vnodes
self.framework = framework
self.non_framework = non_framework
if check_volume:
self.check_volume()
def check_volume(self):
"""
Basic check that the volumes of all voronoi polyhedra sum to the unit cell volume.
Note that this does not apply after poly combination.
"""
vol = sum(v.volume for v in self.vnodes) + sum(v.volume for v in self.cation_vnodes)
if abs(vol - self.structure.volume) > 1e-8:
raise ValueError(
"Sum of voronoi volumes is not equal to original volume of "
"structure! This may lead to inaccurate results. You need to "
"tweak the tolerance and max_cell_range until you get a "
"correct mapping."
)
def cluster_nodes(self, tol=0.2):
"""
Cluster nodes that are too close together using a tol.
Args:
tol (float): A distance tolerance. PBC is taken into account.
"""
lattice = self.structure.lattice
vfcoords = [v.frac_coords for v in self.vnodes]
# Manually generate the distance matrix (which needs to take into
# account PBC).
dist_matrix = np.array(lattice.get_all_distances(vfcoords, vfcoords))
dist_matrix = (dist_matrix + dist_matrix.T) / 2
for i in range(len(dist_matrix)):
dist_matrix[i, i] = 0
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
cn = fcluster(z, tol, criterion="distance")
merged_vnodes = []
for n in set(cn):
poly_indices = set()
frac_coords = []
for i, j in enumerate(np.where(cn == n)[0]):
poly_indices.update(self.vnodes[j].polyhedron_indices)
if i == 0:
frac_coords.append(self.vnodes[j].frac_coords)
else:
fcoords = self.vnodes[j].frac_coords
# We need the image to combine the frac_coords properly.
d, image = lattice.get_distance_and_image(frac_coords[0], fcoords)
frac_coords.append(fcoords + image)
merged_vnodes.append(VoronoiPolyhedron(lattice, np.average(frac_coords, axis=0), poly_indices, self.coords))
self.vnodes = merged_vnodes
logger.debug(f"{len(self.vnodes)} vertices after combination.")
def remove_collisions(self, min_dist=0.5):
"""
Remove vnodes that are too close to existing atoms in the structure
Args:
min_dist(float): The minimum distance that a vertex needs to be
from existing atoms.
"""
vfcoords = [v.frac_coords for v in self.vnodes]
sfcoords = self.structure.frac_coords
dist_matrix = self.structure.lattice.get_all_distances(vfcoords, sfcoords)
all_dist = np.min(dist_matrix, axis=1)
new_vnodes = []
for i, v in enumerate(self.vnodes):
if all_dist[i] > min_dist:
new_vnodes.append(v)
self.vnodes = new_vnodes
def get_structure_with_nodes(self):
"""
Get the modified structure with the voronoi nodes inserted. The
species is set as a DummySpecies X.
"""
new_s = Structure.from_sites(self.structure)
for v in self.vnodes:
new_s.append("X", v.frac_coords)
return new_s
def print_stats(self):
"""
Print stats such as the MSE dist.
"""
latt = self.structure.lattice
def get_min_dist(fcoords):
n = len(fcoords)
dist = latt.get_all_distances(fcoords, fcoords)
all_dist = [dist[i, j] for i in range(n) for j in range(i + 1, n)]
return min(all_dist)
voro = [s.frac_coords for s in self.vnodes]
print(f"Min dist between voronoi vertices centers = {get_min_dist(voro):.4f}")
def get_non_framework_dist(fcoords):
cations = [site.frac_coords for site in self.non_framework]
dist_matrix = latt.get_all_distances(cations, fcoords)
min_dist = np.min(dist_matrix, axis=1)
if len(cations) != len(min_dist):
raise Exception("Could not calculate distance to all cations")
return np.linalg.norm(min_dist), min(min_dist), max(min_dist)
print(len(self.non_framework))
print(f"MSE dist voro = {str(get_non_framework_dist(voro))}")
def write_topology(self, fname="Topo.cif"):
"""
Write topology to a file.
:param fname: Filename
"""
new_s = Structure.from_sites(self.structure)
for v in self.vnodes:
new_s.append("Mg", v.frac_coords)
new_s.to(filename=fname)
def analyze_symmetry(self, tol):
"""
:param tol: Tolerance for SpaceGroupAnalyzer
:return: List
"""
s = Structure.from_sites(self.framework)
site_to_vindex = {}
for i, v in enumerate(self.vnodes):
s.append("Li", v.frac_coords)
site_to_vindex[s[-1]] = i
print(len(s))
finder = SpacegroupAnalyzer(s, tol)
print(finder.get_space_group_operations())
symm_structure = finder.get_symmetrized_structure()
print(len(symm_structure.equivalent_sites))
return [
[site_to_vindex[site] for site in sites]
for sites in symm_structure.equivalent_sites
if sites[0].specie.symbol == "Li"
]
def vtk(self):
"""
Show VTK visualization.
"""
if StructureVis is None:
raise NotImplementedError("vtk must be present to view.")
lattice = self.structure.lattice
vis = StructureVis()
vis.set_structure(Structure.from_sites(self.structure))
for v in self.vnodes:
vis.add_site(PeriodicSite("K", v.frac_coords, lattice))
vis.add_polyhedron(
[PeriodicSite("S", c, lattice, coords_are_cartesian=True) for c in v.polyhedron_coords],
PeriodicSite("Na", v.frac_coords, lattice),
color="element",
draw_edges=True,
edges_color=(0, 0, 0),
)
vis.show()
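# Hedged usage sketch (not part of the original module): follows the workflow
# suggested in the TopographyAnalyzer docstring. The "LiFePO4.cif" filename and
# the O/P framework/cation choices are illustrative assumptions only.
def _example_topography_analysis():
    structure = Structure.from_file("LiFePO4.cif")
    analyzer = TopographyAnalyzer(structure, framework_ions=["O"], cations=["P"])
    analyzer.cluster_nodes(tol=0.2)  # merge Voronoi nodes that are nearly coincident
    analyzer.remove_collisions(min_dist=0.5)  # drop nodes too close to host atoms
    # Candidate interstitial/insertion sites appear as dummy "X" sites.
    return analyzer.get_structure_with_nodes()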
class VoronoiPolyhedron:
"""
Convenience container for a voronoi point in PBC and its associated polyhedron.
"""
def __init__(self, lattice, frac_coords, polyhedron_indices, all_coords, name=None):
"""
:param lattice:
:param frac_coords:
:param polyhedron_indices:
:param all_coords:
:param name:
"""
self.lattice = lattice
self.frac_coords = frac_coords
self.polyhedron_indices = polyhedron_indices
self.polyhedron_coords = np.array(all_coords)[list(polyhedron_indices), :]
self.name = name
def is_image(self, poly, tol):
"""
:param poly: VoronoiPolyhedron
:param tol: Coordinate tolerance.
:return: Whether a poly is an image of the current one.
"""
frac_diff = pbc_diff(poly.frac_coords, self.frac_coords)
if not np.allclose(frac_diff, [0, 0, 0], atol=tol):
return False
to_frac = self.lattice.get_fractional_coords
for c1 in self.polyhedron_coords:
found = False
for c2 in poly.polyhedron_coords:
d = pbc_diff(to_frac(c1), to_frac(c2))
if not np.allclose(d, [0, 0, 0], atol=tol):
found = True
break
if not found:
return False
return True
@property
def coordination(self):
"""
:return: Coordination number
"""
return len(self.polyhedron_indices)
@property
def volume(self):
"""
:return: Volume
"""
return calculate_vol(self.polyhedron_coords)
def __str__(self):
return f"Voronoi polyhedron {self.name}"
class ChargeDensityAnalyzer(MSONable):
"""
Analyzer to find potential interstitial sites based on charge density. The
`total` charge density is used.
"""
def __init__(self, chgcar):
"""
Initialization.
Args:
chgcar (pmg.Chgcar): input Chgcar object.
"""
self.chgcar = chgcar
self.structure = chgcar.structure
self.extrema_coords = [] # list of frac_coords of local extrema
self.extrema_type = None # "local maxima" or "local minima"
self._extrema_df = None # extrema frac_coords - chg density table
self._charge_distribution_df = None # frac_coords - chg density table
@classmethod
def from_file(cls, chgcar_filename):
"""
Init from a CHGCAR.
:param chgcar_filename:
:return:
"""
chgcar = Chgcar.from_file(chgcar_filename)
return cls(chgcar=chgcar)
@property
def charge_distribution_df(self):
"""
:return: Charge distribution.
"""
if self._charge_distribution_df is None:
return self._get_charge_distribution_df()
return self._charge_distribution_df
@property
def extrema_df(self):
"""
:return: The extrema in charge density.
"""
if self.extrema_type is None:
logger.warning("Please run ChargeDensityAnalyzer.get_local_extrema first!")
return self._extrema_df
def _get_charge_distribution_df(self):
"""
Return a complete table of fractional coordinates - charge density.
"""
# Fraction coordinates and corresponding indices
axis_grid = np.array([np.array(self.chgcar.get_axis_grid(i)) / self.structure.lattice.abc[i] for i in range(3)])
axis_index = np.array([range(len(axis_grid[i])) for i in range(3)])
data = {}
for index in itertools.product(*axis_index):
a, b, c = index
f_coords = (axis_grid[0][a], axis_grid[1][b], axis_grid[2][c])
data[f_coords] = self.chgcar.data["total"][a][b][c]
# Fraction coordinates - charge density table
df = pd.Series(data).reset_index()
df.columns = ["a", "b", "c", "Charge Density"]
self._charge_distribution_df = df
return df
def _update_extrema(self, f_coords, extrema_type, threshold_frac=None, threshold_abs=None):
"""Update _extrema_df, extrema_type and extrema_coords"""
if threshold_frac is not None:
if threshold_abs is not None:
logger.warning("Filter can be either threshold_frac or threshold_abs!") # Exit if both filter are set
return
if threshold_frac > 1 or threshold_frac < 0:
raise Exception("threshold_frac range is [0, 1]!")
# Return empty result if coords list is empty
if len(f_coords) == 0:
df = pd.DataFrame({}, columns=["A", "B", "C", "Chgcar"])
self._extrema_df = df
self.extrema_coords = []
logger.info(f"Find {len(df)} {extrema_type}.")
return
data = {}
unit = 1 / np.array(self.chgcar.dim) # pixel along a, b, c
for fc in f_coords:
a, b, c = tuple(map(int, fc / unit))
data[tuple(fc)] = self.chgcar.data["total"][a][b][c]
df = pd.Series(data).reset_index()
df.columns = ["a", "b", "c", "Charge Density"]
ascending = extrema_type == "local minima"
if threshold_abs is None:
threshold_frac = threshold_frac if threshold_frac is not None else 1.0
num_extrema = int(threshold_frac * len(f_coords))
df = df.sort_values(by="Charge Density", ascending=ascending)[0:num_extrema]
df.reset_index(drop=True, inplace=True) # reset major index
else: # threshold_abs is set
df = df.sort_values(by="Charge Density", ascending=ascending)
df = df[df["Charge Density"] <= threshold_abs] if ascending else df[df["Charge Density"] >= threshold_abs]
extrema_coords = []
for row in df.iterrows():
fc = np.array(row[1]["a":"c"])
extrema_coords.append(fc)
self._extrema_df = df
self.extrema_type = extrema_type
self.extrema_coords = extrema_coords
logger.info(f"Find {len(df)} {extrema_type}.")
@requires(
peak_local_max_found,
"get_local_extrema requires skimage.feature.peak_local_max module"
" to be installed. Please confirm your skimage installation.",
)
def get_local_extrema(self, find_min=True, threshold_frac=None, threshold_abs=None):
"""
Get all local extrema fractional coordinates in charge density,
searching for local minimum by default. Note that sites are NOT grouped
symmetrically.
Args:
find_min (bool): True to find local minima, False to find local maxima.
threshold_frac (float): optional fraction of extrema shown, which
returns `threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to show the extrema with 20% highest or lowest
intensity. Value range: 0 <= threshold_frac <= 1
Note that threshold_abs and threshold_frac should not be set at the
same time.
threshold_abs (float): optional filter. When searching for local
minima, only extrema with intensity <= threshold_abs are returned;
when searching for local maxima, only those with intensity >= threshold_abs are returned.
Note that threshold_abs and threshold_frac should not be set at the
same time.
Returns:
extrema_coords (list): list of fractional coordinates corresponding
to local extrema.
"""
sign, extrema_type = 1, "local maxima"
if find_min:
sign, extrema_type = -1, "local minima"
# Make 3x3x3 supercell
# This is a trick to resolve the periodic boundary issue.
total_chg = sign * self.chgcar.data["total"]
total_chg = np.tile(total_chg, reps=(3, 3, 3))
coordinates = peak_local_max(total_chg, min_distance=1)
# Remove duplicated sites introduced by supercell.
f_coords = [coord / total_chg.shape * 3 for coord in coordinates]
f_coords = [f - 1 for f in f_coords if all(np.array(f) < 2) and all(np.array(f) >= 1)]
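# Hedged usage sketch (commented out; the method body above is truncated here):
# a typical driver for ChargeDensityAnalyzer might look like the following,
# assuming a VASP CHGCAR file named "CHGCAR" is available.
#
#     cda = ChargeDensityAnalyzer.from_file("CHGCAR")
#     minima = cda.get_local_extrema(find_min=True, threshold_frac=0.2)
#     print(cda.extrema_df)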
from PIL import ImageOps
from PIL import ImageDraw
from PIL import ImageFont
from PIL import Image
from PIL import _imagingft
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from dipy.align.reslice import reslice
from os.path import join
from contextlib import contextmanager
import subprocess
import nibabel as nib
import numpy as np
import brain_cmaps
import os
import platform
import sys
import gzip
import shutil
import pathlib
import contextlib
import warnings
with warnings.catch_warnings(): # ignore joblib DeprecationWarning caused by importing nilearn
# TODO: Remove this when nilearn is updated
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
import sklearn
from sklearn.neighbors.ball_tree import NeighborsHeap # Only to prevent errors with PyInstaller
from sklearn.neighbors import quad_tree # Only to prevent errors with PyInstaller
import nilearn
from nilearn import masking
from nilearn import plotting
def ungzip(gzipped_file):
"""Un-gzip a gzipped file
Parameters
----------
gzipped_file : str
Absolute path of a gzipped file
Returns
-------
str
Absolute path of ungzipped file
"""
with gzip.open(gzipped_file, 'rb') as f_in:
ungzipped_file = gzipped_file.split('.gz')[0]
with open(ungzipped_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return ungzipped_file
def gzip_file(file_to_gzip):
"""gzip a file
Parameters
----------
file_to_gzip : str
Absolute path of a gzipped file
Returns
-------
str
Absolute path of gzipped file
"""
with open(file_to_gzip, 'rb') as f_in:
gzipped_file = file_to_gzip + '.gz'
with gzip.open(gzipped_file, 'wb') as f_out:
f_out.writelines(f_in)
return gzipped_file
def unzip_mac(zipped_file):
"""Un-zip a zipped file using Mac OS unzip command
The Python zipfile module does not correctly extract aliases on Mac OS and
does not preserve permissions. This function just calls the unzip command.
Parameters
----------
zipped_file : str
Absolute path of a zipped file
Returns
-------
str
Absolute path of unzipped file
"""
out_path = os.path.dirname(zipped_file)
command = ['unzip', zipped_file, '-d', out_path]
subprocess.run(command, stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL, stderr=subprocess.PIPE)
return out_path
def check_spm12app_mac(spm12_path):
"""Unzip spm12_maci64.zip if not already unzipped (only on Mac OS)
Parameters
----------
spm12_path : str
Absolute path to run_spm12.sh
"""
if platform.system() == 'Darwin':
spm12_dir = os.path.dirname(spm12_path)
spm12_app_dir = os.path.join(spm12_dir, 'spm12.app')
spm12_app_zip = os.path.join(spm12_dir, 'spm12_maci64.zip')
if not os.path.exists(spm12_app_dir) and os.path.isfile(spm12_app_zip):
unzip_mac(spm12_app_zip)
def reslice_iso(orig_img):
"""Reslice nifti image into isotropic 1x1x1 space
Parameters
----------
orig_img : nibabel.nifti1.Nifti1Image
nibabel nii image object
Returns
-------
nibabel.nifti1.Nifti1Image
nibabel nii image object in 1x1x1 space
"""
orig_data = orig_img.get_data()
orig_affine = orig_img.affine
orig_zooms = orig_img.header.get_zooms()[:3]
new_zooms = (1., 1., 1.)
new_data, new_affine = reslice(
orig_data, orig_affine, orig_zooms, new_zooms)
new_img = nib.Nifti1Image(new_data, new_affine)
return new_img
def load_RAS_orient(path_to_nii):
"""Load nii and reorient to RAS orientation
Parameters
----------
path_to_nii : str
Path to nii file
Returns
-------
nibabel.nifti1.Nifti1Image
nibabel nii image object in RAS orientation
"""
nii = nib.load(path_to_nii)
nii_RAS = nib.as_closest_canonical(nii)
return nii_RAS
def run_cmd(command):
"""Wraps subprocess.run() to avoid console popping up on Windows with PyInstaller --noconsole
Args:
command (list): List of arguments to pass to subprocess.run()
"""
if platform.system() == 'Windows':
# Prevent PyInstaller executable from popping up a command window by default when
# run with the --noconsole option.
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-subprocess
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
env = os.environ
subprocess.run(command, startupinfo=si, env=env, stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL, stderr=subprocess.PIPE)
else:
subprocess.run(command, stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL, stderr=subprocess.PIPE)
def spm_coregister(target, sources, spm12_path, mcr_path):
"""Wraps SPM's spm.spatial.coreg
Coregistered files are saved into the same dir as `target` and filename is prefixed with 'r'.
Parameters
----------
target : str
Absolute path of target nii file
sources : list of str
Absolute paths of source nii files to coregister to target
spm12_path : str
Absolute path of SPM12 standalone binary (.sh or .exe)
mcr_path : str
Absolute path of Matlab Compiler Runtime (packaged with SPM12 standalone). May be an empty string on Windows systems.
Returns
-------
None
"""
# SPM batch script template
spm_coreg_batch = """
matlabbatch{{1}}.spm.spatial.coreg.estwrite.ref = {{'{target}'}};
matlabbatch{{1}}.spm.spatial.coreg.estwrite.source = {{'{source}'}};
matlabbatch{{1}}.spm.spatial.coreg.estwrite.other = {{''}};
matlabbatch{{1}}.spm.spatial.coreg.estwrite.eoptions.cost_fun = 'nmi';
matlabbatch{{1}}.spm.spatial.coreg.estwrite.eoptions.sep = [4 2];
matlabbatch{{1}}.spm.spatial.coreg.estwrite.eoptions.tol = [0.02 0.02 0.02 0.001 0.001 0.001 0.01 0.01 0.01 0.001 0.001 0.001];
matlabbatch{{1}}.spm.spatial.coreg.estwrite.eoptions.fwhm = [7 7];
matlabbatch{{1}}.spm.spatial.coreg.estwrite.roptions.interp = 4;
matlabbatch{{1}}.spm.spatial.coreg.estwrite.roptions.wrap = [0 0 0];
matlabbatch{{1}}.spm.spatial.coreg.estwrite.roptions.mask = 0;
matlabbatch{{1}}.spm.spatial.coreg.estwrite.roptions.prefix = 'r';
"""
if target.endswith('.gz'):
target = ungzip(target)
for i, source in enumerate(sources):
if source.endswith('.gz'):
source = ungzip(source)
# Create SPM batch file
spm_batch_file = os.path.join(os.path.dirname(
target), 'spm_coregister_batch' + str(i) + '.m')
with open(spm_batch_file, 'w') as batch_file:
batch_file.write(spm_coreg_batch.format(
target=target, source=source))
# Run SPM batch file (Don't include MCR path if using Windows .exe)
if spm12_path.endswith('.exe'):
if os.path.isfile(spm12_path) and os.path.isfile(spm_batch_file):
command = [spm12_path, 'batch', spm_batch_file]
else:
if os.path.isfile(spm12_path) and os.path.isdir(mcr_path) and os.path.isfile(spm_batch_file):
command = [spm12_path, mcr_path, 'batch', spm_batch_file]
# Unzip spm12_maci64.zip on Mac OS if user forgets
check_spm12app_mac(spm12_path)
# Change working dir to spm12_path before running command
# This fixes an issue caused by a relative path in the spm12.app bundle on Mac OS
cwd = os.getcwd()
os.chdir(os.path.dirname(spm12_path))
run_cmd(command)
os.chdir(cwd)
# remove SPM batch file
os.remove(spm_batch_file)
def spm_normalise(source, apply_to, spm12_path, mcr_path):
"""Wraps SPM's spm.tools.oldnorm
Normalised files are saved into the same dir as `source` and filename is prefixed with 'w'.
Parameters
----------
source : str
Absolute path of nii file to use as source (e.g. a T1 MRI)
apply_to : str
Absolute path of a nii file (already coregistered with the source nii) to which the transformation
from source -> MNI152 is applied
spm12_path : str
Absolute path of SPM12 standalone binary (.sh or .exe)
mcr_path : str
Absolute path of Matlab Compiler Runtime (packaged with SPM12 standalone). May be an empty string on Windows systems.
Returns
-------
None
"""
if getattr(sys, 'frozen', False):
# Path for PyInstaller
mni152_path = join(sys._MEIPASS, 'mnisiscom_data', 'MNI152_T1.nii')
else:
mni152_path = join(os.path.dirname(os.path.realpath(__file__)), 'MNI152_T1.nii')
# SPM batch script template
spm_norm_batch = """
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.subj.source = {{'{source},1'}};
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.subj.wtsrc = '';
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.subj.resample = {{'{apply_to},1'}};
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.template = {{'{mni152},1'}};
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.weight = '';
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.smosrc = 8;
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.smoref = 0;
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.regtype = 'mni';
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.cutoff = 25;
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.nits = 16;
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.eoptions.reg = 1;
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.roptions.preserve = 0;
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.roptions.bb = [-78 -112 -70
78 76 85];
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.roptions.vox = [2 2 2];
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.roptions.interp = 1;
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.roptions.wrap = [0 0 0];
matlabbatch{{1}}.spm.tools.oldnorm.estwrite.roptions.prefix = 'w';
"""
if source.endswith('.gz'):
source = ungzip(source)
if apply_to.endswith('.gz'):
apply_to = ungzip(apply_to)
# Create SPM batch file
spm_batch_file = os.path.join(
os.path.dirname(source), 'spm_normalise_batch.m')
with open(spm_batch_file, 'w') as batch_file:
batch_file.write(spm_norm_batch.format(
source=source, apply_to=apply_to, mni152=mni152_path))
# Run SPM batch file (Don't include MCR path if using Windows .exe)
if spm12_path.endswith('.exe'):
if os.path.isfile(spm12_path) and os.path.isfile(spm_batch_file):
command = [spm12_path, 'batch', spm_batch_file]
else:
if os.path.isfile(spm12_path) and os.path.isdir(mcr_path) and os.path.isfile(spm_batch_file):
command = [spm12_path, mcr_path, 'batch', spm_batch_file]
# Unzip spm12_maci64.zip on Mac OS if user forgets
check_spm12app_mac(spm12_path)
# Change working dir to spm12_path before running command
# This fixes an issue caused by a relative path in the spm12.app bundle on Mac OS
cwd = os.getcwd()
os.chdir(os.path.dirname(spm12_path))
run_cmd(command)
os.chdir(cwd)
# Remove SPM batch file
os.remove(spm_batch_file)
# Remove SPM deformations files
deformations_file = os.path.splitext(source)[0] + '_sn.mat'
if os.path.isfile(deformations_file):
os.remove(deformations_file)
def compute_siscom(interictal_nii, ictal_nii, out_dir, threshold=0.5, mask_cutoff=0.6):
"""Given coregistered interictal/ictal nii images, compute SISCOM
Four files are created in out_dir:
- ictal_z.nii.gz
- interictal_z.nii.gz
- siscom_z.nii.gz
- interictal_mask.nii.gz
Parameters
----------
interictal_nii : str
Absolute path of interictal SPECT nii
ictal_nii : str
Absolute path of ictal SPECT nii
out_dir : str
Absolute path of dir in which to save result files
threshold : float
Threshold below which to zero out the SISCOM result
mask_cutoff : float
Lower cutoff passed to nilearn's compute_epi_mask when building the brain mask from the interictal image
Returns
-------
None
"""
# Load interictal and ictal image data
interictal_img = load_RAS_orient(interictal_nii)
interictal = interictal_img.get_data()
ictal_img = load_RAS_orient(ictal_nii)
ictal = ictal_img.get_data()
# Get rid of pesky NaNs
interictal[np.isnan(interictal)] = 0
ictal[np.isnan(ictal)] = 0
# Create a mask from interictal image and mask interictal/ictal images with it
mask_img = masking.compute_epi_mask(
interictal_img, lower_cutoff=mask_cutoff)
mask = mask_img.get_data()
ictal = ictal * mask
interictal = interictal * mask
# Compute z score of each image (individually)
ictal_std = (ictal - np.mean(ictal)) / np.std(ictal)
interictal_std = (interictal - np.mean(interictal)) / np.std(interictal)
# Compute subtraction image
siscom = ictal_std - interictal_std
# siscom[siscom < 0] = 0 # ignore negative (i.e. where interictal > ictal)
#siscom_std = (siscom - np.mean(siscom)) / np.std(siscom)
# Threshold subtraction (ignore voxels where interictal > ictal)
siscom[siscom < threshold] = 0
# Save nifti
new_img = nib.Nifti1Image(interictal_std, interictal_img.affine)
new_img.to_filename(join(out_dir, 'interictal_z.nii.gz'))
new_img = nib.Nifti1Image(ictal_std, ictal_img.affine)
new_img.to_filename(join(out_dir, 'ictal_z.nii.gz'))
new_img = nib.Nifti1Image(siscom, ictal_img.affine)
new_img.to_filename(join(out_dir, 'siscom_z.nii.gz'))
mask_img.to_filename(join(out_dir, 'interictal_mask.nii.gz'))
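# Hedged end-to-end sketch (not part of the original module): chains the helpers
# above in the order a SISCOM analysis would typically use them. All file paths,
# as well as spm12_path/mcr_path, are illustrative assumptions.
def _example_siscom_pipeline(t1, interictal, ictal, out_dir, spm12_path, mcr_path):
    # 1. Coregister both SPECT volumes to the patient's T1 MRI. Per the
    #    spm_coregister docstring, outputs are written with an 'r' prefix.
    spm_coregister(t1, [interictal, ictal], spm12_path, mcr_path)
    r_inter = join(os.path.dirname(t1), 'r' + os.path.basename(interictal))
    r_ictal = join(os.path.dirname(t1), 'r' + os.path.basename(ictal))
    # 2. Compute the z-scored subtraction (SISCOM) from the coregistered images.
    compute_siscom(r_inter, r_ictal, out_dir, threshold=0.5)
    # 3. Render the MRI panel overlay using the files written by compute_siscom.
    make_mri_panel(t1, join(out_dir, 'interictal_z.nii.gz'), join(out_dir, 'ictal_z.nii.gz'),
                   join(out_dir, 'siscom_z.nii.gz'), join(out_dir, 'interictal_mask.nii.gz'),
                   out_dir)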
def get_slides_of_interest(mask_nii, slice_orientation):
"""Short summary.
Parameters
----------
mask_nii : str
Absolute path of mask nii
slice_orientation : str
Either of 'ax', 'cor', or 'sag' for orientation in which to cut
Returns
-------
min_slice : int
index of min slice of interest
max_slice : int
index of max slice of interest
"""
mask_data = reslice_iso(load_RAS_orient(mask_nii)).get_data()
if slice_orientation == 'ax':
nb_slices = mask_data.shape[2]
elif slice_orientation == 'cor':
nb_slices = mask_data.shape[1]
elif slice_orientation == 'sag':
nb_slices = mask_data.shape[0]
else:
raise ValueError(
"Valid options for slice_orientation are 'ax', 'cor', or 'sag'")
min_slice = 0
max_slice = 0
for i in range(nb_slices):
if slice_orientation == 'ax':
slice_avg = np.average(mask_data[:, :, i])
elif slice_orientation == 'cor':
slice_avg = np.average(mask_data[:, i, :])
elif slice_orientation == 'sag':
slice_avg = np.average(mask_data[i, :, :])
if slice_avg > 0 and min_slice == 0:
min_slice = i
elif min_slice != 0 and slice_avg == 0 and max_slice == 0:
max_slice = i - 1
return min_slice, max_slice
def make_mri_panel(t1_nii, interictal_std_nii, ictal_std_nii, siscom_nii, mask_nii, out_dir,
slice_orientation='ax', slice_thickness=5, alpha=0.8, panel_type='all',
t1_window=(0.2, 0.9), spect_window=(0, 4.5), siscom_window=(0, 2)):
"""Plot a panel of MRI slices with SPECT results overlaid
Parameters
----------
t1_nii : str
Absolute path of T1 nii
interictal_std_nii : str
Absolute path of interictal_std nii
ictal_std_nii : str
Absolute path of ictal_std nii
siscom_nii : str
Absolute path of SISCOM nii
mask_nii : str
Absolute path of mask nii
out_dir : str
Absolute path of dir in which to save result files
slice_orientation : str
Either of 'ax', 'cor', or 'sag' for orientation in which to cut
slice_thickness : int
Number of voxels in between slice snapshots
alpha : float
Value between 0 and 1 indicating the transparency of the overlay layer
panel_type : str
Either of 'all', 'mri_panel', or 'mri_slide', indicates the type of layout for output results
t1_window : tuple of int or float
Scaling factor for min and max values of T1 MRI (0 to 1)
spect_window : tuple of int or float
Min and max values for standardised SPECT images (standard deviations)
siscom_window : tuple of int or float
Min and max values for SISCOM image (difference of standard deviations)
Returns
-------
None
"""
# Load colour maps
ge_cmap = brain_cmaps.ge_cmap()
pet_cmap = brain_cmaps.pet_cmap()
# Make results dirs
results_dir = join(out_dir, 'SISCOM_results')
if not os.path.isdir(results_dir):
os.mkdir(results_dir)
# Make slice snapshots
# create dirs
mri_panel_dir = join(results_dir, 'mri_panel')
if not os.path.isdir(mri_panel_dir):
os.mkdir(mri_panel_dir)
dir_labels = ['interictal', 'ictal', 'siscom']
for dir_label in dir_labels:
dir_path = os.path.join(mri_panel_dir, dir_label)
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# load images
t1_img = reslice_iso(load_RAS_orient(t1_nii))
t1 = t1_img.get_data()
t1 = np.squeeze(t1)
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
import h5py
import scipy.linalg as la
from scipy.special import erf
from ...hyp_defs import float_cpu
from ...utils.plotting import plot_gaussian_1D, plot_gaussian_ellipsoid_2D, plot_gaussian_ellipsoid_3D, plot_gaussian_3D
from ...utils.math import invert_pdmat, invert_trimat, symmat2vec, vec2symmat, fullcov_varfloor, logdet_pdmat
from .exp_family import ExpFamily
class Normal(ExpFamily):
def __init__(self, mu=None, Lambda=None, var_floor=1e-5,
update_mu=True, update_Lambda=True, **kwargs):
super(Normal, self).__init__(**kwargs)
self.mu = mu
self.Lambda = Lambda
self.var_floor = var_floor
self.update_mu = update_mu
self.update_Lambda = update_Lambda
self._compute_nat_std()
self._logLambda = None
self._cholLambda = None
self._Sigma = None
def _compute_nat_std(self):
if self.mu is not None and self.Lambda is not None:
self._validate_mu()
self._validate_Lambda()
self._compute_nat_params()
elif self.eta is not None:
self._validate_eta()
self.A = self.compute_A_nat(self.eta)
self._compute_std_params()
@property
def logLambda(self):
if self._logLambda is None:
assert self.is_init
f, L, logL = invert_pdmat(self.Lambda, return_logdet=True)
self._logLambda = logL
self._cholLambda = L.T
return self._logLambda
@property
def cholLambda(self):
if self._cholLambda is None:
assert self.is_init
f, L, logL = invert_pdmat(self.Lambda, return_logdet=True)
self._logLambda = logL
self._cholLambda = L.T
return self._cholLambda
@property
def Sigma(self):
if self._Sigma is None:
assert self.is_init
self._Sigma = invert_pdmat(self.Lambda, return_inv=True)[-1]
return self._Sigma
def initialize(self):
self.validate()
self._compute_nat_std()
def stack_suff_stats(self, F, S=None):
if S is None:
return F
return np.hstack((F,S))
def unstack_suff_stats(self, stats):
F=stats[:self.x_dim]
S=stats[self.x_dim:]
return F, S
def accum_suff_stats(self, x, u_x=None, sample_weight=None, batch_size=None):
if u_x is None:
if sample_weight is None:
N = x.shape[0]
F = np.sum(x, axis=0)
S = symmat2vec(np.dot(x.T, x))
else:
N = np.sum(sample_weight)
wx = sample_weight[:, None]*x
F = np.sum(wx, axis=0)
S = symmat2vec(np.dot(wx.T, x))
return N, self.stack_suff_stats(F, S)
else:
return self._accum_suff_stats_1batch(x, u_x, sample_weight)
def norm_suff_stats(self, N, u_x, return_order2=False):
assert self.is_init
F, S = self.unstack_suff_stats(u_x)
F_norm = np.dot(F-N*self.mu, self.cholLambda.T)
if return_order2:
SS = vec2symmat(S)
Fmu = np.outer(F, self.mu)
SS = SS-Fmu-Fmu.T+N*np.outer(self.mu,self.mu)
SS = np.dot(self.cholLambda, np.dot(SS, self.cholLambda.T))
S = symmat2vec(SS)
return N, self.stack_suff_stats(F_norm, S)
return N, F_norm
def Mstep(self, N, u_x):
F, S = self.unstack_suff_stats(u_x)
if self.update_mu:
self.mu = F/N
if self.update_Lambda:
S = vec2symmat(S/N)
S -= np.outer(self.mu,self.mu)
# S = fullcov_varfloor(S, self.var_floor)
self.Lambda = invert_pdmat(S, return_inv=True)[-1]
self._Sigma = None
self._logLambda = None
self._cholLambda = None
self._compute_nat_params()
def log_prob_std(self, x):
assert self.is_init
mah_dist2 = np.sum(np.dot(x-self.mu,self.cholLambda)**2, axis=1)
return 0.5*self.logLambda-0.5*self.x_dim*np.log(2*np.pi)-0.5*mah_dist2
# def eval_logcdf(self, x):
# delta = np.dot((x-self.mu), self.cholLambda)
# lk = 0.5*(1+erf(delta/np.sqrt(2)))
# print(x-self.mu)
# print(la.cholesky(self.Lambda,lower=True))
# print(self.cholLambda)
# print(delta)
# print(lk)
# return np.sum(np.log(lk+1e-20), axis=-1)
def sample(self, num_samples, rng=None, seed=1024):
assert self.is_init
if rng is None:
rng = np.random.RandomState(seed)
return rng.multivariate_normal(self.mu, self.Sigma,size=(num_samples,)).astype(float_cpu())
# x=rng.normal(size=(num_samples, self.x_dim))
# cholS=la.cholesky(self.Sigma, lower=False, overwrite_a=True)
# return self.mu+np.dot(x, cholS)
def get_config(self):
config = {'var_floor': self.var_floor,
'update_mu': self.update_mu,
'update_lambda': self.update_Lambda }
base_config = super(Normal, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def save_params(self, f):
assert self.is_init
params = {'mu': self.mu,
'Lambda': self.Lambda}
self._save_params_from_dict(f, params)
@classmethod
def load_params(cls, f, config):
param_list = ['mu', 'Lambda']
params = cls._load_params_to_dict(f, config['name'], param_list)
return cls(x_dim=config['x_dim'],
mu=params['mu'], Lambda=params['Lambda'],
var_floor=config['var_floor'],
update_mu=config['update_mu'],
update_Lambda=config['update_lambda'],
name=config['name'])
def _validate_mu(self):
assert(self.mu.shape[0] == self.x_dim)
def _validate_Lambda(self):
assert(self.Lambda.shape == (self.x_dim, self.x_dim))
def _validate_eta(self):
assert(self.eta.shape[0] == (self.x_dim**2+3*self.x_dim)/2)
def validate(self):
if self.mu is not None and self.Lambda is not None:
self._validate_mu()
self._validate_Lambda()
if self.eta is not None:
self._validate_eta()
@staticmethod
def compute_eta(mu, Lambda):
Lmu = np.dot(mu, Lambda)
eta = np.hstack((Lmu, -symmat2vec(Lambda, diag_factor=0.5)))
return eta
@staticmethod
def compute_x_dim_from_eta(eta):
x_dim = 0.5*(-3+np.sqrt(9+8*eta.shape[-1]))
assert(int(x_dim)==x_dim)
return int(x_dim)
@staticmethod
def compute_std(eta):
x_dim = Normal.compute_x_dim_from_eta(eta)
eta1 = eta[:x_dim]
eta2 = vec2symmat(eta[x_dim:], diag_factor=2)/2
Lambda = - 2*eta2
f = invert_pdmat(-eta2, right_inv=True)[0]
mu = 0.5 * f(eta1)
return mu, Lambda
@staticmethod
def compute_A_nat(eta):
x_dim = Normal.compute_x_dim_from_eta(eta)
eta1 = eta[:x_dim]
eta2 = vec2symmat(eta[x_dim:], diag_factor=2)/2
f, _, log_minus_eta2 = invert_pdmat(-eta2, right_inv=True, return_logdet=True)
r1 = 0.5*x_dim*np.log(2*np.pi)
r2 = 0.25*np.inner(f(eta1), eta1)
r3 = - 0.5*x_dim*np.log(2) - 0.5*log_minus_eta2
return r1 + r2 + r3
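# Hedged usage sketch (commented out; illustrative only). The natural-parameter
# mapping implemented above is eta = (Lambda @ mu, -vec(Lambda) with halved
# diagonal), so a round trip through compute_eta / compute_std should recover
# (mu, Lambda), and compute_A_nat gives the log-normalizer in natural parameters:
#
#     mu = np.array([0.5, -1.0])
#     Lambda = np.array([[2.0, 0.3], [0.3, 1.0]])
#     eta = Normal.compute_eta(mu, Lambda)
#     mu2, Lambda2 = Normal.compute_std(eta)   # expect mu2 ~ mu, Lambda2 ~ Lambda
#     A = Normal.compute_A_nat(eta)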
import pytest
import numpy as np
import lifelines
import surpyval
def right_censor(x, tl, frac):
c = np.random.binomial(1, frac, x.shape)
x_out = np.copy(x)
for i, (trunc, value) in enumerate(zip(tl, x)):
if c[i] == 0:
continue
if np.isfinite(trunc):
x_out[i] = np.random.uniform(trunc, value, 1)
else:
x_out[i] = value - np.abs(value * np.random.uniform(0, 1, 1))
return x_out, c
def left_truncate(x, dist, frac, params):
t = np.random.binomial(1, frac, x.shape)
# Find a lower value
tl = np.where((t == 1) & (x > 0), x * np.random.uniform(0, 1, x.size), -np.inf)
tl = np.where((t == 1) & (x < 0), x - np.abs(x * np.random.uniform(0, 1, x.size)), tl)
drop_due_to_truncation = dist.ff(tl, *params)
drop_due_to_truncation[~np.isfinite(drop_due_to_truncation)] = 0
keep = np.ones_like(x)
for i, p in enumerate(drop_due_to_truncation):
if p == 0:
continue
else:
keep[i] = np.random.binomial(1, p)
mask = keep == 1
tl = tl[mask]
x = x[mask]
return x, tl
def test_kaplan_meier_against_lifelines():
kmf = lifelines.KaplanMeierFitter()
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
n = np.ones_like(x) * int(np.random.uniform(1, 5))
x_test = np.random.uniform(x.min()/2, x.max()*2, 100)
ll_est = kmf.fit(x, weights=n).predict(x_test).values
surp_est = surpyval.KaplanMeier.fit(x, n=n).sf(x_test)
if not np.allclose(ll_est, surp_est, 1e-15):
raise AssertionError('Kaplan-Meier different to lifelines?!')
def test_kaplan_meier_censored_against_lifelines():
kmf = lifelines.KaplanMeierFitter()
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
c = np.random.binomial(1, np.random.uniform(0, 1, 1), x.shape)
x = x - np.abs(x * np.random.uniform(0, 1, x.shape))
n = np.ones_like(x) * int(np.random.uniform(1, 5))
x_test = np.random.uniform(x.min()/2, x.max()*2, 100)
ll_est = kmf.fit(x, 1-c, weights=n).predict(x_test).values
surp_est = surpyval.KaplanMeier.fit(x, c=c, n=n).sf(x_test)
if not np.allclose(ll_est, surp_est, 1e-15):
raise AssertionError('Kaplan-Meier different to lifelines?!')
def test_kaplan_meier_censored_and_truncated_against_lifelines():
kmf = lifelines.KaplanMeierFitter()
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
x, tl = left_truncate(x, surpyval.Weibull, 0.1, test_params)
x, c = right_censor(x, tl, 0.2)
n = np.ones_like(x) * int(np.random.uniform(1, 5))
x_test = np.random.uniform(x.min()/2, x.max()*2, 100)
ll_est = kmf.fit(x, 1-c, entry=tl, weights=n).predict(x_test).values
surp_est = surpyval.KaplanMeier.fit(x, c=c, n=n, tl=tl).sf(x_test)
if not np.allclose(ll_est, surp_est, 1e-15):
raise AssertionError('Kaplan-Meier different to lifelines?!')
def test_nelson_aalen_against_lifelines():
naf = lifelines.NelsonAalenFitter(nelson_aalen_smoothing=False)
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
n = np.ones_like(x) * int(np.random.uniform(1, 5))
x_test = np.random.uniform(x.min()/2, x.max()*2, 100)
ll_est = naf.fit(x, weights=n).predict(x_test).values
surp_est = surpyval.NelsonAalen.fit(x, n=n).Hf(x_test)
if not np.allclose(ll_est, surp_est, 1e-15):
raise AssertionError('Nelson-Aalen different to lifelines?!')
def test_nelson_aalen_censored_against_lifelines():
naf = lifelines.NelsonAalenFitter(nelson_aalen_smoothing=False)
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
c = np.random.binomial(1, np.random.uniform(0, 1, 1), x.shape)
x = x - np.abs(x * np.random.uniform(0, 1, x.shape))
n = np.ones_like(x) * int(np.random.uniform(1, 5))
x_test = np.random.uniform(x.min()/2, x.max()*2, 100)
ll_est = naf.fit(x, 1-c, weights=n).predict(x_test).values
surp_est = surpyval.NelsonAalen.fit(x, c=c, n=n).Hf(x_test)
if not np.allclose(ll_est, surp_est, 1e-15):
raise AssertionError('Nelson-Aalen different to lifelines?!')
def test_nelson_aalen_censored_and_truncated_against_lifelines():
naf = lifelines.NelsonAalenFitter(nelson_aalen_smoothing=False)
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
x, tl = left_truncate(x, surpyval.Weibull, 0.1, test_params)
x, c = right_censor(x, tl, 0.2)
n = np.ones_like(x) * int(np.random.uniform(1, 5))
x_test = np.random.uniform(x.min()/2, x.max()*2, 100)
ll_est = naf.fit(x, 1-c, entry=tl, weights=n).predict(x_test).values
surp_est = surpyval.NelsonAalen.fit(x, c=c, n=n, tl=tl).Hf(x_test)
if not np.allclose(ll_est, surp_est, 1e-15):
raise AssertionError('Nelson-Aalen different to lifelines?!')
def test_fleming_harrington_same_as_nelson_aalen_with_no_counts():
naf = lifelines.NelsonAalenFitter(nelson_aalen_smoothing=False)
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
x_test = np.random.uniform(x.min()/2, x.max()*2, 100)
ll_est = naf.fit(x).predict(x_test).values
surp_est = surpyval.FlemingHarrington.fit(x).Hf(x_test)
if not np.allclose(ll_est, surp_est, 1e-15):
raise AssertionError('Fleming-Harrington different to lifelines?!')
def test_fleming_harrington_HF_less_than_or_equal_to_nelson_aalen_with_counts():
naf = lifelines.NelsonAalenFitter(nelson_aalen_smoothing=False)
for i in range(100):
test_params = []
for b in ((1, 100), (0.5, 20)):
test_params.append(np.random.uniform(*b))
test_params = np.array(test_params)
x = surpyval.Weibull.random(int(np.random.uniform(2, 1000, 1)), *test_params)
n = np.ones_like(x)
import matplotlib as mpl
import matplotlib.pyplot as plt
import corner
import juliet
import numpy as np
import pandas as pd
import aux
import os
try:
from astropy.timeseries import LombScargle
except ModuleNotFoundError:
# the timeseries module moved at astropy 3.1 -> 3.2
from astropy.stats import LombScargle
# prevent mpl from needing an X server
mpl.use('Agg')
# style matplotlib plots
try:
from popsyntools import plotstyle
except ModuleNotFoundError:
print('module "popsyntools" not found. Skipping plot styles therein.')
figure = {'dpi' : 200,
'subplot.left' : 0.16, # the left side of the subplots of the figure
'subplot.bottom' : 0.21, # the bottom of the subplots of the figure
'subplot.right' : 0.98, # the right side of the subplots of the figure
'subplot.top' : 0.97, # the top of the subplots of the figure
'subplot.hspace' : 0.15, # height reserved for space between subplots
'figsize' : [4.3, 3.2]}
mpl.rc('figure', **figure)
mpl.rc('savefig', bbox = 'tight', dpi = 200)
colors_rv = ['orangered', 'cornflowerblue']
def plot_posteriors(julietResults, out_folder):
""" plot individual posterior plots."""
# num_samps = len(julietResults.keys())
if not os.path.exists(out_folder+'/posteriorplots/'):
os.mkdir(out_folder+'/posteriorplots/')
# exclude fixed parameters
try:
posteriors = julietResults.posteriors
except AttributeError:
# sometimes, juliet puts julietResults into a tuple
posteriors = julietResults[0].posteriors
for k in posteriors['posterior_samples'].keys():
if k != 'unnamed':
val,valup,valdown = juliet.utils.get_quantiles(posteriors['posterior_samples'][k])
print(k,':',val,' + ',valup-val,' - ',val-valdown)
fig = plt.figure(figsize=(10,7))
plt.hist(posteriors['posterior_samples'][k],
bins=int(len(posteriors['posterior_samples'][k])/50),
histtype='step')
plt.axvline(x=val,color='cornflowerblue',lw=1.5,ls='--',
label='{} = {:.5}'.format(k, val))
plt.axvline(x=valdown,color='cornflowerblue',lw=.5,ls='--')
plt.axvline(x=valup,color='cornflowerblue',lw=.5,ls='--')
plt.title('Posterior of : {}'.format(k))
plt.xlabel(k)
plt.ylabel('Frequency')
plt.legend(loc=1)
if k == 'P_p1':
k = 'Period_p1'
fil2save = out_folder+'/posteriorplots/Period_p1.pdf'
else:
fil2save = out_folder+'/posteriorplots/'+k+'.pdf'
plt.tight_layout()
fig.savefig(fil2save,dpi=400)
# plt.show()
plt.close(fig)
def plot_cornerPlot(julietResults, posterior_names=None, pl=0., pu=1., reverse=False, fig=None, axes=None, **kwargs):
""" Produce a corner plot of posteriors from a juliet fit.
Parameters
------------
julietResults : results object
a results object returned by juliet.fit()
posterior_names : list, optional
labels for the plot. If None, use keys of the params dictionary
Returns
--------
fig : matplotlib figure
figure containing the plot
Notes
------
Assumes quadratic limb darkening for an instrument 'TESSERACT+TESS' and
linear limb darkening for 'CHAT+i' (only when present in julietResults object)
"""
# paramNames = list(params.keys())
# if posterior_names is None:
# posterior_names = paramNames
# back-transform r1, r2 to b, p and q1, q2 to u1, u2
if 'r1_p1' in julietResults.posteriors['posterior_samples']:
r1, r2 = julietResults.posteriors['posterior_samples']['r1_p1'], \
julietResults.posteriors['posterior_samples']['r2_p1']
b, p = juliet.utils.reverse_bp(r1, r2, pl, pu)
else:
b, p = None, None
if 'q1_TESSERACT+TESS' in julietResults.posteriors['posterior_samples']:
q1_tess, q2_tess = julietResults.posteriors['posterior_samples']['q1_TESSERACT+TESS'], \
julietResults.posteriors['posterior_samples']['q2_TESSERACT+TESS']
u1_tess, u2_tess = juliet.utils.reverse_ld_coeffs('quadratic', q1_tess, q2_tess)
else:
u1_tess = None
u2_tess = None
if 'q1_CHAT+i' in julietResults.posteriors['posterior_samples']:
q1_chat = julietResults.posteriors['posterior_samples']['q1_CHAT+i']
u1_chat, u2_chat = juliet.utils.reverse_ld_coeffs('linear', q1_chat, q1_chat)
else:
u1_chat = None
u2_chat = None
if 'secosomega_p1' in julietResults.posteriors['posterior_samples']:
# back-transform ecc, omega parametrization
secosomega = julietResults.posteriors['posterior_samples']['secosomega_p1']
sesinomega = julietResults.posteriors['posterior_samples']['sesinomega_p1']
ecc = secosomega ** 2 + sesinomega ** 2
omega = np.arccos(secosomega / np.sqrt(ecc)) * 180/np.pi
else:
ecc = None
omega = None
# extract posteriors, excluding fixed parameters
try:
posteriorSamples = julietResults.posteriors['posterior_samples'].copy()
except AttributeError:
posteriorSamples = julietResults[0].posteriors['posterior_samples'].copy()
posteriors = []
for name in julietResults.data.priors:
if (name not in ['r1_p1','r2_p1','q1_TESSERACT+TESS','q2_TESSERACT+TESS',
'sigma_w_TESSERACT+TESS',
'q1_CHAT+i', 'secosomega_p1', 'sesinomega_p1']) & \
(julietResults.data.priors[name]['distribution'] != 'fixed'):
# consider all non-fixed params, except special parametrizations
if julietResults.data.priors[name]['distribution'] == 'loguniform':
# plot log. distributed params in log
posteriors.append(('log '+name, np.log10(posteriorSamples[name])))
else:
posteriors.append((name,posteriorSamples[name]))
if (name in ['sigma_w_TESSERACT+TESS']) & \
(julietResults.data.priors[name]['distribution'] != 'fixed'):
# dirty hack for some params that shouldn't be plotted in log
posteriors.append((name, posteriorSamples[name]))
# include special parametrizations
if ecc is not None:
posteriors.append(('ecc', ecc))
posteriors.append(('omega', omega))
if b is not None:
posteriors.append(('b', b))
posteriors.append(('p', p))
if u1_tess is not None:
posteriors.append(('u1_TESSERACT+TESS', u1_tess))
posteriors.append(('u2_TESSERACT+TESS', u2_tess))
if u1_chat is not None:
posteriors.append(('u1_CHAT+i', u1_chat))
# # select parameters for the plot
if posterior_names is not None:
posterior_subset = []
# for label, posterior_samples in zip([p[0] for p in posteriors], posteriors): # [p[1] for p in posteriors]):
# if label in posterior_names:
# posterior_subset.append(posterior_samples)
for label in posterior_names:
posterior_subset.append([label, julietResults.posteriors['posterior_samples'][label]])
else:
posterior_subset = posteriors
posterior_data = np.array([p[1] for p in posterior_subset]).T
fig = corner.corner(posterior_data, fig=fig, axes=axes, #posterior_names,
labels=[aux.format(p[0]) + '\n' for p in posterior_subset], reverse=reverse,
**kwargs)
# tune look of corner figure
if axes is None:
axes = fig.axes
else:
axes = axes.flatten()
if not reverse:
for ax in axes:
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_label_coords(0.5, -.6)
ax.yaxis.set_label_coords(-0.35, 0.5)
else:
for ax in axes:
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.xaxis.set_label_coords(0.5, -0.2)
ax.yaxis.set_label_coords(-0.35, 0.5)
fig.subplots_adjust(left=0.08, right=0.995, bottom=0.09, top=0.97,
wspace=.15, hspace=.15)
return fig
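# Illustrative usage sketch (not part of the original code base): assuming a
# juliet fit has already been run and `aux` provides the label helpers used
# above, a corner plot of all free parameters could be produced like this:
#
#   results = juliet.fit(...)   # hypothetical fit, priors/data set up elsewhere
#   fig = plot_cornerPlot(results)
#   fig.savefig('corner.pdf')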
def plot_photometry(dataset, results, fig=None, axs=None, instrument=None):
""" plot photometry and best fit from transit model.
Parameters
------------
dataset : dataset object
dataset as returned by juliet.load()
results : results object
a results object returned by juliet.fit()
fig : matplotlib figure object, optional
figure to plot on
    axs : list (optional)
list containing axis objects
instrument : string (optional)
name of the instrument
Returns
--------
    fig : matplotlib figure
        figure containing the plot
    axs : list of matplotlib axis objects
        axes containing the plot
"""
if isinstance(results, tuple):
# sometimes, juliet.fit returns a tuple
results = results[0]
if instrument is not None:
instruments = [instrument]
elif dataset.inames_lc is not None:
# make a plot for each photometric instrument. Ignore provided figure or axes.
instruments = dataset.inames_lc
axs = None
else:
# no photometric data in the dataset. Nothing to plot.
return
transit_model, transit_up68, transit_low68 = results.lc.evaluate(instrument,
return_err=True)
transit_model, transit_up95, transit_low95 = results.lc.evaluate(instrument,
return_err=True, alpha=.9545)
if axs is None:
fig, axs = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': [5, 2]},
figsize = [8.6, 3.2])
axs[0].errorbar(dataset.times_lc[instrument]- 2458000, dataset.data_lc[instrument],
yerr=dataset.errors_lc[instrument], **aux.photPlotParams(), label = aux.label(instrument))
axs[0].plot(dataset.times_lc[instrument]- 2458000, transit_model,
lw=1, label='model')
axs[0].fill_between(dataset.times_lc[instrument]- 2458000, transit_up68, transit_low68,
color='cornflowerblue', alpha=0.6, zorder=5)
axs[0].fill_between(dataset.times_lc[instrument]- 2458000, transit_up95, transit_low95,
color='cornflowerblue', alpha=0.2, zorder=5)
# Now the residuals:
axs[1].errorbar(dataset.times_lc[instrument] - 2458000,
(dataset.data_lc[instrument] - transit_model) * 1e6,
dataset.errors_lc[instrument] * 1e6, **aux.photPlotParams(), label='residuals')
axs[1].axhline(0, ls='--', lw=1, color='k', alpha=0.5)
axs[1].set_ylabel('residuals [ppm]')
axs[1].set_xlabel('Time [BJD - 2458000]')
axs[1].set_xlim(np.min(dataset.times_lc[instrument] - 2458000), np.max(dataset.times_lc[instrument] - 2458000))
# Plot portion of the lightcurve, axes, etc.:
# plt.xlim([1326,1332])
# plt.ylim([0.999,1.001])
    axs[0].set_ylabel('relative flux')
leg = axs[0].legend(loc='lower left', ncol=99, bbox_to_anchor=(0., 1.),
frameon=False, columnspacing=1.6)
return fig, axs
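# Illustrative usage sketch (assumptions: `dataset` and `results` come from
# juliet.load()/juliet.fit(), and the instrument name exists in the dataset):
#
#   fig, axs = plot_photometry(dataset, results, instrument='TESSERACT+TESS')
#   fig.savefig('photometry_TESS.pdf', dpi=400)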
def plot_phasedPhotometry(dataset, results, instrument=None, ylabels=True,
narrowAspect=False, color='C0',):
""" plot phased photometry and best fit from transit model.
Parameters
------------
dataset : dataset object
dataset as returned by juliet.load()
results : results object
a results object returned by juliet.fit()
instrument : string (optional)
name of the instrument. If not given, create a plot for each instrument.
ylabels : bool (optional)
should ylabels be drawn?
narrowAspect : bool (optional)
enable narrower aspect ratio for side-by-side plots
color : string (optional)
the color to be used for the CIs. default is the first element of the
default mpl cycle.
Returns
--------
plots : dictionary
dictionary with instrument names as keys. Each entry contains a tuple of
(fig, axs):
axs : list of matplotlib axis objects
axis containing the plot
fig : matplotlib figure
figure containing the plot
"""
if isinstance(results, tuple):
# sometimes, juliet.fit returns a tuple
results = results[0]
if instrument is not None:
instruments = [instrument]
elif dataset.inames_lc is not None:
# make a plot for each photometric instrument. Ignore provided figure or axes.
instruments = dataset.inames_lc
axs = None
else:
# no photometric data in the dataset. Nothing to plot.
return
plots = {}
times_lc = results.data.times_lc
data_lc = results.data.data_lc
errors_lc = results.data.errors_lc
numbering_planets_transit = results.data.numbering_transiting_planets
instruments_lc = results.data.inames_lc
for inst in instruments:
try:
_ = results.lc.evaluate(inst, t = times_lc[inst], \
)
gp_data_model = np.zeros(len(times_lc[inst]))
except:
_ = results.lc.evaluate(inst, t = times_lc[inst], \
GPregressors=times_lc[inst],\
)
gp_data_model = results.lc.model[inst]['GP']
det_data_model = results.lc.model[inst]['deterministic']
for i_transit in numbering_planets_transit:
try:
P = np.median(results.posteriors['posterior_samples']['P_p{}'.format(i_transit)])
except KeyError:
P = results.data.priors['P_p{}'.format(i_transit)]['hyperparameters']
try:
t0 = np.median(results.posteriors['posterior_samples']['t0_p{}'.format(i_transit)])
except KeyError:
t0 = results.data.priors['t0_p{}'.format(i_transit)]['hyperparameters']
if narrowAspect:
fig,axs = plt.subplots(2,1,sharex=True, figsize=[2.6, 3.2], gridspec_kw = {'height_ratios':[5,2]})
else:
fig,axs = plt.subplots(2,1,sharex=True, gridspec_kw = {'height_ratios':[5,2]})
phases_lc = juliet.utils.get_phases(times_lc[inst], P, t0)
model_phases = np.linspace(-0.04,0.04,1000)
model_times = model_phases*P + t0
try:
model_lc, transit_up68, transit_low68, model_components = results.lc.evaluate(inst, t = model_times, \
return_components = True,\
return_err=True, alpha=0.68)
_, transit_up95, transit_low95, _ = results.lc.evaluate(inst, t = model_times, \
return_components = True,\
return_err=True, alpha=0.95)
_, transit_up99, transit_low99, _ = results.lc.evaluate(inst, t = model_times, \
return_components = True,\
return_err=True, alpha=0.99)
except:
model_lc, transit_up68, transit_low68, model_components = results.lc.evaluate(inst, t = model_times, \
return_components = True,\
GPregressors=model_times, \
return_err=True, alpha=0.68)
_, transit_up95, transit_low95, _ = results.lc.evaluate(inst, t = model_times, \
return_components = True,\
GPregressors=model_times, \
return_err=True, alpha=0.95)
_, transit_up99, transit_low99, _ = results.lc.evaluate(inst, t = model_times, \
return_components = True,\
GPregressors=model_times, \
return_err=True, alpha=0.99)
mu = np.median(results.posteriors['posterior_samples']['mflux_{}'.format(inst)])
axs[0].errorbar(phases_lc, data_lc[inst] - gp_data_model + mu, \
yerr = errors_lc[inst], **aux.photPlotParams(), label=aux.label(inst),
zorder=6)
axs[0].plot(model_phases, model_lc + mu, lw=1, color='black', zorder=7)
axs[0].fill_between(model_phases,transit_up68 + mu,transit_low68 + mu,\
color='cornflowerblue', alpha=0.6,zorder=5, label='model')
axs[0].fill_between(model_phases,transit_up95 + mu,transit_low95 + mu,\
color='cornflowerblue',alpha=0.3,zorder=5)
# axs[0].fill_between(model_phases,transit_up99,transit_low99,\
# color='cornflowerblue',alpha=0.2,zorder=5)
axs[1].errorbar(phases_lc, (data_lc[inst]-det_data_model-gp_data_model)*1e6, \
yerr = errors_lc[inst]*1e6, **aux.photPlotParams())
axs[1].axhline(y=0, ls='--', lw=1, color='k', alpha=0.5)
# ax2.yaxis.set_major_formatter(plt.NullFormatter())
# try:
# axs[0].set_title('P = {:.5f} t0 = {:.5f}'.format(P, t0))
# except:
# axs[0].set_title('P = {:.5f} t0 = {:.5f}'.format(np.median(P), np.median(t0)))
# ax2.set_ylim([0.9985,1.0015]) ### CHANGE THIS
axs[0].minorticks_on()
if ylabels:
axs[0].set_ylabel('relative flux')
axs[1].set_ylabel('residuals [ppm]')
axs[1].set_xlabel('orbital phase')
leg = axs[0].legend(loc='lower left', ncol=99, bbox_to_anchor=(0., 1.),
frameon=False, columnspacing=1.6)
axs[1].minorticks_on()
# axs[0].yaxis.set_tick_params(labelsize=fontsize_phot_ticks)
# axs[1].xaxis.set_tick_params(labelsize=fontsize_phot_ticks)
# axs[1].yaxis.set_tick_params(labelsize=fontsize_phot_ticks)
# custom x limits, adapt for specific case
if inst == 'CHAT+i':
plt.xlim([-0.007,0.007])
axs[1].set_ylim([-5200, 5200])
elif inst == 'TESSERACT+TESS':
axs[0].set_xlim([-0.015,0.015])
axs[1].set_ylim([-2500, 2500])
elif inst == 'LCOGT':
axs[0].set_xlim([-0.004, 0.004])
axs[1].set_ylim([-2500, 2500])
else:
axs[0].set_xlim([-0.03,0.03])
# plt.tight_layout()
# fig.subplots_adjust(hspace=0) # to make the space between rows smaller
# plt.savefig(resultsPath+'/phased_lc_{}_pl{}.pdf'.format(inst,i_transit), dpi=700)
plots[inst] = (fig, axs)
return plots
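# Illustrative usage sketch: the function returns one (fig, axs) pair per
# instrument, so the figures can be saved in a loop (file names are assumptions):
#
#   plots = plot_phasedPhotometry(dataset, results)
#   for inst, (fig, axs) in plots.items():
#       fig.savefig('phased_lc_{}.pdf'.format(inst), dpi=400)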
def plot_rv_fit(dataset, results):
""" plot RV time series and best-fit model.
"""
if isinstance(results, tuple):
# sometimes, juliet.fit returns a tuple
results = results[0]
min_time, max_time = np.min(dataset.times_rv['FEROS']) - 10, \
np.max(dataset.times_rv['FEROS']) + 10
model_times = np.linspace(min_time, max_time, 1000)
    # Now evaluate the model in those times, including 1 and 2 sigma CIs,
    # and subtract the systemic velocity to get the Keplerian signal:
    mu_feros = np.median(results.posteriors['posterior_samples']['mu_FEROS'])
    keplerian, up68, low68 = results.rv.evaluate('FEROS', t=model_times,
                                                 return_err=True, all_samples=True)
    keplerian, up68, low68 = keplerian - mu_feros, up68 - mu_feros, low68 - mu_feros
    _, up95, low95 = results.rv.evaluate('FEROS', t=model_times,
                                         return_err=True, all_samples=True, alpha=.9545)
    up95, low95 = up95 - mu_feros, low95 - mu_feros
fig, axs = plt.subplots(2, sharex=True, figsize=[8.6, 3.2],
gridspec_kw={'height_ratios': [5, 2]})
# axs[0].errorbar(dataset.times_rv['FEROS'], dataset.data_rv['FEROS'],
# yerr=dataset.errors_rv['FEROS'], fmt='.', alpha=0.1)
# Now plot the (systematic-velocity corrected) RVs:
instruments = dataset.inames_rv
colors = colors_rv
for i in range(len(instruments)):
instrument = instruments[i]
# Evaluate the median jitter for the instrument:
jitter = np.median(results.posteriors['posterior_samples']['sigma_w_' + instrument])
# Evaluate the median systemic-velocity:
mu = np.median(results.posteriors['posterior_samples']['mu_' + instrument])
# Plot original data with original errorbars:
axs[0].errorbar(dataset.times_rv[instrument] - 2458000, dataset.data_rv[instrument] - mu, \
yerr=dataset.errors_rv[instrument], fmt='o',
markeredgewidth=.75,
mec=colors[i], ecolor=colors[i], elinewidth=1.5, mfc='white', \
ms=3, label=aux.label(instrument), zorder=10)
# Plot original errorbars + jitter (added in quadrature):
axs[0].errorbar(dataset.times_rv[instrument] - 2458000, dataset.data_rv[instrument] - mu, \
yerr=np.sqrt(dataset.errors_rv[instrument] ** 2 + jitter ** 2), fmt='o', \
mec=colors[i], ecolor=colors[i], elinewidth=1.5, mfc='white',
ms=3, alpha=0.5, zorder=8)
# plot residuals
real_model = results.rv.evaluate(instrument, t=dataset.times_rv[instrument], all_samples=True)
axs[1].errorbar(dataset.times_rv[instrument] - 2458000,
dataset.data_rv[instrument] - real_model,
yerr=dataset.errors_rv[instrument], fmt='o', \
markeredgewidth=.75,
mec=colors[i], ecolor=colors[i], elinewidth=1.5, mfc='white', \
ms=3, zorder=10)
# and the error bars for jitter
axs[1].errorbar(dataset.times_rv[instrument] - 2458000,
dataset.data_rv[instrument] - real_model,
yerr=np.sqrt(dataset.errors_rv[instrument] ** 2 + jitter ** 2), fmt='o', \
mec=colors[i], ecolor=colors[i], elinewidth=1.5, mfc='white',
ms=3, alpha=0.5, zorder=8)
# Plot Keplerian model and CIs:
axs[0].fill_between(model_times - 2458000, up68, low68,
color='cornflowerblue', alpha=0.5, zorder=5, label='model')
axs[0].fill_between(model_times - 2458000, up95, low95,
color='cornflowerblue', alpha=0.3, zorder=6)
axs[0].plot(model_times - 2458000, keplerian, color='black', zorder=7, lw=1)
# # plt.title('Log-evidence: {:.2f} $\pm$ {:.2f}'.format(results.posteriors['lnZ'], \
# # results.posteriors['lnZerr']))
axs[0].set_xlim([min_time - 2458000, max_time - 2458000])
axs[0].set_ylabel('RV [m/s]')
axs[1].axhline(0., ls='--', lw=2, color='gray')
axs[1].set_xlabel('time [BJD - 2458000]')
axs[1].set_ylabel('residuals [m/s]')
axs[0].legend(loc='lower left', ncol=99, bbox_to_anchor=(0., 1.),
frameon=False, columnspacing=1.6)
fig.align_ylabels()
return fig, axs
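# Illustrative usage sketch (assumes an RV dataset containing FEROS data, since
# the model time grid above is anchored to the FEROS time stamps):
#
#   fig, axs = plot_rv_fit(dataset, results)
#   fig.savefig('rv_timeseries.pdf', dpi=400)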
def plot_phasedRV(results):
""" plot phase-folded RV time series."""
if isinstance(results, tuple):
# sometimes, juliet.fit returns a tuple
results = results[0]
posteriors = results.posteriors
# print(results.data.priors)
# quit()
dataset = results.data
numbering_planets_rv = dataset.numbering_rv_planets
num_planets = len(numbering_planets_rv)
instruments_rv = dataset.inames_rv
min_time, max_time = np.min([np.min(dataset.times_rv[k]) for k in instruments_rv]) - 4, \
np.max([np.max(dataset.times_rv[k]) for k in instruments_rv]) + 4
model_rv_times = np.linspace(min_time, max_time, int((max_time - min_time) * 5))
plots = {}
for inst in instruments_rv:
keplerian_model, kep_up68, kep_low68, components = results.rv.evaluate(inst,
t=model_rv_times,
return_err=True, alpha=0.68,
return_components=True, )
mu = np.median(posteriors['posterior_samples']['mu_{}'.format(inst)])
keplerian_model -= mu
kep_up68 -= mu
kep_low68 -= mu
for i_rv in numbering_planets_rv:
# To plot the phased rv we need the median period and time-of-transit center:
try:
P = np.median(results.posteriors['posterior_samples']['P_p{}'.format(i_rv)])
except KeyError:
P = dataset.priors['P_p{}'.format(i_rv)]['hyperparameters']
try:
t0 = np.median(results.posteriors['posterior_samples']['t0_p{}'.format(i_rv)])
except KeyError:
t0 = dataset.priors['t0_p{}'.format(i_rv)]['hyperparameters']
# Get phases:
# Now plot the model for planet pl. First get phases of the model:
phases_model = np.linspace(-0.5, 0.5, 1000)
model_times = phases_model * P + t0
try:
model_rv, kep_up68, kep_low68, model_components = results.rv.evaluate(inst, t=model_times, \
return_components=True, \
return_err=True, alpha=0.68)
_, kep_up95, kep_low95, _ = results.rv.evaluate(inst, t=model_times, \
return_components=True, \
return_err=True, alpha=0.95)
_, kep_up99, kep_low99, _ = results.rv.evaluate(inst, t=model_times, \
return_components=True, \
return_err=True, alpha=0.99)
except:
model_rv, kep_up68, kep_low68, model_components = results.rv.evaluate(inst, t=model_times, \
return_components=True, \
GPregressors=model_times, \
return_err=True, alpha=0.68)
_, kep_up95, kep_low95, _ = results.rv.evaluate(inst, t=model_times, \
return_components=True, \
GPregressors=model_times, \
return_err=True, alpha=0.95)
_, kep_up99, kep_low99, _ = results.rv.evaluate(inst, t=model_times, \
return_components=True, \
GPregressors=model_times, \
return_err=True, alpha=0.99)
fig, axs = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [5, 2]})
# Plot phased model:
axs[0].plot(phases_model, model_components['p{}'.format(i_rv)], color='black', alpha=1, lw=1, zorder=3)
axs[0].fill_between(phases_model, kep_up68 - model_components['mu'], kep_low68 - model_components['mu'], \
color='cornflowerblue', alpha=0.6, zorder=1, label='model')
axs[0].fill_between(phases_model, kep_up95 - model_components['mu'], kep_low95 - model_components['mu'], \
color='cornflowerblue', alpha=0.3, zorder=1)
# axs[0].fill_between(phases_model, kep_up99 - model_components['mu'], kep_low99 - model_components['mu'], \
# color='cornflowerblue', alpha=0.4, zorder=1)
# Plot the data
for color, inst in zip(colors_rv, instruments_rv):
phases_data = juliet.get_phases(dataset.times_rv[inst], P, t0) # on the data time
# Extract jitters:
# Evaluate the median jitter for the instrument:
try:
                    jitter = np.median(posteriors['posterior_samples']['sigma_w_' + inst])
"""Dictionary with default tasks and their underlying functions."""
import os
import subprocess
import shlex
import shutil
import glob
import numpy as np
from skpar.core.utils import get_ranges, get_logger, islistoflists
from skpar.core.plot import skparplot
from skpar.core.parameters import update_parameters
from skpar.core.database import Query
LOGGER = get_logger(__name__)
def parse_cmd(cmd):
"""Parse shell command for globbing and environment variables.
"""
if not isinstance(cmd, list):
cmd = shlex.split(cmd)
parsed_cmd = [cmd[0],]
for word in cmd[1:]:
if word[0] == '$':
var = word[1:].strip('{').strip('}')
varval = os.environ.get(var, word)
parsed_cmd.append(varval)
else:
if '*' in word:
items = glob.glob(word)
for item in items:
parsed_cmd.append(item)
else:
parsed_cmd.append(word)
return parsed_cmd
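# Example of the intended expansion (illustrative values): with the environment
# variable WORKDIR set to /tmp/run and files data_1.txt, data_2.txt present in
# the current directory,
#
#   parse_cmd('cat $WORKDIR data_*.txt')
#
# returns ['cat', '/tmp/run', 'data_1.txt', 'data_2.txt'] (glob order may vary).
# Substitution and globbing are applied per whitespace-separated word: a word
# either starts with '$' (environment lookup) or contains '*' (glob), not both.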
def execute(implargs, database, cmd, workdir='.', outfile='run.log',
purge_workdir=False, **kwargs):
"""Execute external command in workdir, streaming output/error to outfile.
Args:
implargs (dict): caller environment variables
database (dict-like): not used, but needed to maintain a task-signature
        cmd (str): command; executed in `implargs['workroot']+workdir`;
if it contains `$` or `*`-globbing, these are shell-expanded
workdir (path-like): execution directory relative to workroot
outfile (str): output file for the stdout/stderr stream; continuously
updated during execution
purge_workdir (bool): if true, any existing working directory is purged
kwargs (dict): passed directly to the underlying `subprocess.call()`
Returns:
None
Raises:
OSError: if `cmd` cannot be executed
RuntimeError: if `cmd` returncode is nonzero
SubprocessError: other possible circumstances
"""
# prepare workdir
origdir = os.getcwd()
workroot = implargs.get('workroot', '.')
_workdir = os.path.abspath(os.path.join(workroot, workdir))
try:
os.makedirs(_workdir)
except OSError:
# directory exists
if purge_workdir:
            # that's a bit brutal, but saves us from worrying about links and subdirs
shutil.rmtree(_workdir)
os.makedirs(_workdir)
os.chdir(_workdir)
# prepare out/err handling
filename = kwargs.pop('stdout', outfile)
if filename:
kwargs['stdout'] = open(filename, 'w')
filename = kwargs.pop('stderr', None)
if filename:
kwargs['stderr'] = open(filename, 'w')
else:
kwargs['stderr'] = subprocess.STDOUT
# execute the command, make sure output is not streamed
_cmd = parse_cmd(cmd)
try:
returncode = subprocess.call(_cmd, **kwargs)
if returncode:
LOGGER.critical('Execution of %s FAILED with exit status %d',
_cmd, returncode)
raise RuntimeError
#
except subprocess.SubprocessError:
        LOGGER.critical('Subprocess call of %s FAILED', _cmd)
raise
#
except (OSError, FileNotFoundError) as exc:
LOGGER.critical("Abnormal termination: OS could not execute %s in %s",
_cmd, _workdir)
        LOGGER.critical("If the command is a script, "
                        "check permissions and that it has a shebang!")
raise
#
finally:
# make sure we return to where we started from in any case!
os.chdir(origdir)
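# Illustrative usage sketch (the paths and the command are placeholders, not
# part of the original task suite):
#
#   env = {'workroot': './_workdir'}
#   execute(env, database=None, cmd='echo hello world', outfile='run.log')
#
# The command runs inside ./_workdir/. and its stdout is captured in run.log.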
def get_model_data(implargs, database, item, source, model,
rm_columns=None, rm_rows=None, scale=1., **kwargs):
"""Get data from file and put it in a database under a given key.
Use numpy.loadtxt to get the data from `source` file and write the data
    to `database` under the `model`.`item` field. If `model` does not exist, it is
created. All `kwargs` are directly passed to numpy.loadtxt. Additionally,
some post-processing can be done (removing rows or columns and scaling).
Args:
implargs(dict): dictionary of implicit arguments from caller
database(object): must support dictionary-like get/update()
source(str): file name source of data; path relative to implargs[workroot]
model(str): model name to be updated in `database`
        item(str): key under which to store the data under `model`
rm_columns: [ index, index, [ilow, ihigh], otherindex, [otherrange]]
rm_rows : [ index, index, [ilow, ihigh], otherindex, [otherrange]]
scale(float): multiplier of the data
"""
logger = implargs.get('logger', LOGGER)
workroot = implargs.get('workroot', '.')
assert isinstance(source, str), \
"source must be a filename string, but is {} instead.".\
format(type(source))
assert isinstance(item, str),\
"item must be a string naming the data, but is {} instead."\
.format(type(item))
# read file
fname = os.path.abspath(os.path.join(workroot, source))
try:
data = np.loadtxt(fname, **kwargs)
except ValueError:
        logger.critical('np.loadtxt cannot understand the contents of %s '
                        'with the given arguments: %s', fname, kwargs)
raise
except (IOError, FileNotFoundError):
logger.critical('np.loadtxt cannot open %s', fname)
raise
# do some filtering on columns and/or rows if requested
# note that file to 2D-array mapping depends on 'unpack' from
# kwargs, which transposes the loaded array.
postprocess = {'rm_columns': rm_columns, 'rm_rows': rm_rows}
if any(postprocess.values()):
if kwargs.get('unpack', False):
# since 'unpack' transposes the array, now row index
# in the original file is along axis 1, while column index
# in the original file is along axis 0.
key1, key2 = ['rm_columns', 'rm_rows']
else:
key1, key2 = ['rm_rows', 'rm_columns']
for axis, key in enumerate([key1, key2]):
rm_rngs = postprocess.get(key, [])
if rm_rngs:
indexes = []
# flatten, combine and sort, then delete corresp. object
for rng in get_ranges(rm_rngs):
indexes.extend(list(range(*rng)))
indexes = list(set(indexes))
indexes.sort()
                data = np.delete(data, obj=indexes, axis=axis)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .utils import _get_embedded, _get_tolerance, _phi
def entropy_approximate(signal, delay=1, dimension=2, tolerance="default", corrected=False, **kwargs):
"""Approximate entropy (ApEn)
Python implementations of the approximate entropy (ApEn) and its corrected version (cApEn).
Approximate entropy is a technique used to quantify the amount of regularity and the unpredictability
of fluctuations over time-series data. The advantages of ApEn include lower computational demand
(ApEn can be designed to work for small data samples (< 50 data points) and can be applied in real
    time) and lower sensitivity to noise. However, ApEn is heavily dependent on the record length and lacks
relative consistency.
This function can be called either via ``entropy_approximate()`` or ``complexity_apen()``, and the
corrected version via ``complexity_capen()``.
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted 'Tau', sometimes referred to as 'lag'). In practice, it is common
to have a fixed time lag (corresponding for instance to the sampling rate; Gautama, 2003), or
to find a suitable value using some algorithmic heuristics (see ``delay_optimal()``).
dimension : int
Embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order'). Typically
2 or 3. It corresponds to the number of compared runs of lagged data. If 2, the embedding returns
an array with two columns corresponding to the original signal and its delayed (by Tau) version.
tolerance : float
Tolerance (similarity threshold, often denoted as 'r'). It corresponds to the filtering level
- max absolute difference between segments. If 'default', will be set to 0.2 times the
standard deviation of the signal (for dimension = 2).
corrected : bool
If true, will compute corrected ApEn (cApEn), see Porta (2007).
**kwargs
Other arguments.
See Also
--------
entropy_shannon, entropy_sample, entropy_fuzzy
Returns
----------
apen : float
The approximate entropy of the single time series.
info : dict
A dictionary containing additional information regarding the parameters used
to compute approximate entropy.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=2, frequency=5)
>>> entropy1, parameters = nk.entropy_approximate(signal)
>>> entropy1 #doctest: +SKIP
>>> entropy2, parameters = nk.entropy_approximate(signal, corrected=True)
>>> entropy2 #doctest: +SKIP
References
-----------
    - `EntroPy <https://github.com/raphaelvallat/entropy>`_
- <NAME>., <NAME>., & <NAME>. (2009). Entropy and complexity measures for EEG signal
classification of schizophrenic and control participants. Artificial intelligence in medicine,
47(3), 263-274.
- <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Entropy analysis of short-term heartbeat
interval time series during regular walking. Entropy, 19(10), 568.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Prepare parameters
info = {"Dimension": dimension, "Delay": delay, "Corrected": corrected}
info["Tolerance"] = _get_tolerance(signal, tolerance=tolerance, dimension=dimension)
out = _entropy_approximate(
signal,
tolerance=info["Tolerance"],
delay=delay,
dimension=dimension,
corrected=corrected,
**kwargs
)
return out, info
def _entropy_approximate(signal, tolerance, delay=1, dimension=2, corrected=False, **kwargs):
if corrected is False:
# Get phi
phi = _phi(signal, delay=delay, dimension=dimension, tolerance=tolerance,
approximate=True, **kwargs)
apen = np.abs(np.subtract(phi[0], phi[1]))
if corrected is True:
__, count1 = _get_embedded(
signal,
delay=delay,
dimension=dimension,
tolerance=tolerance,
distance="chebyshev",
approximate=True,
**kwargs
)
__, count2 = _get_embedded(
signal,
delay=delay,
dimension=dimension + 1,
tolerance=tolerance,
distance="chebyshev",
approximate=True,
**kwargs
)
# Limit the number of vectors to N - (dimension + 1) * delay
upper_limit = len(signal) - (dimension + 1) * delay
# Correction to replace the ratio of count1 and count2 when either is equal to 1
# As when count = 1, only the vector itself is within r distance
correction = 1 / upper_limit
        vector_similarity = np.full(upper_limit, np.nan)
'''test_javabridge.py - test the low-level interface
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
'''
__version__="$Revision$"
import os
import numpy as np
import unittest
import javabridge
jb = javabridge
class TestJavabridge(unittest.TestCase):
def setUp(self):
self.env = javabridge.attach()
def tearDown(self):
javabridge.detach()
def test_01_01_version(self):
major,minor = self.env.get_version()
def test_01_02_find_class(self):
string_class = self.env.find_class('java/lang/String')
self.assertTrue(isinstance(string_class, jb.JB_Class))
def test_01_03_00_new_string_utf(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertTrue(isinstance(jstring, jb.JB_Object))
def test_01_03_01_new_string_unicode(self):
s = u"Hola ni\u00F1os"
jstring = self.env.new_string(s)
self.assertEqual(self.env.get_string_utf(jstring), s)
def test_01_03_02_new_string_string(self):
s = "Hello, world"
jstring = self.env.new_string(s)
self.assertEqual(self.env.get_string_utf(jstring), s)
def test_01_03_03_new_string_zero_length(self):
jstring = self.env.new_string(u"")
self.assertEqual(self.env.get_string_utf(jstring), "")
def test_01_04_00_get_string_utf(self):
jstring = self.env.new_string_utf("Hello, world")
pstring = self.env.get_string_utf(jstring)
self.assertEqual(pstring, "Hello, world")
def test_01_04_01_get_string(self):
s = u"Hola ni\u00F1os"
jstring = self.env.new_string(s)
self.assertTrue(self.env.get_string(jstring), s)
def test_01_05_get_object_class(self):
jstring = self.env.new_string_utf("Hello, world")
string_class = self.env.get_object_class(jstring)
self.assertTrue(isinstance(string_class, jb.JB_Class))
def test_01_06_deallocate_object(self):
jstring = self.env.new_string_utf("Hello, world")
del jstring
def test_01_09_get_method_id(self):
klass = self.env.find_class("java/lang/String")
method_id = self.env.get_method_id(klass,'charAt','(I)C')
self.assertTrue(method_id is not None)
def test_01_10_get_static_method_id(self):
klass = self.env.find_class("java/lang/String")
method_id = self.env.get_static_method_id(klass, 'copyValueOf','([C)Ljava/lang/String;')
self.assertTrue(method_id is not None)
def test_01_11_new_object(self):
klass = self.env.find_class("java/lang/Byte")
method_id = self.env.get_method_id(klass, '<init>','(Ljava/lang/String;)V')
jbyte = self.env.new_object(klass, method_id, self.env.new_string_utf("55"))
self.assertTrue(jbyte is not None)
def test_01_11_01_is_instance_of(self):
klassByte = self.env.find_class("java/lang/Byte")
method_id = self.env.get_method_id(klassByte, '<init>','(Ljava/lang/String;)V')
jbyte = self.env.new_object(klassByte, method_id, self.env.new_string_utf("55"))
klassNumber = self.env.find_class("java/lang/Number")
self.assertTrue(self.env.is_instance_of(jbyte, klassNumber))
def test_01_11_02_isnt_instance_of(self):
klassByte = self.env.find_class("java/lang/Byte")
method_id = self.env.get_method_id(klassByte, '<init>','(Ljava/lang/String;)V')
jbyte = self.env.new_object(klassByte, method_id, self.env.new_string_utf("55"))
klassString = self.env.find_class("java/lang/String")
self.assertFalse(self.env.is_instance_of(jbyte, klassString))
def test_01_12_get_static_field_id(self):
klass = self.env.find_class("java/lang/Boolean")
field_id = self.env.get_static_field_id(klass, "FALSE","Ljava/lang/Boolean;")
self.assertTrue(field_id is not None)
def test_01_13_get_byte_array_elements(self):
pass # see test_03_09_call_method_array for test
def test_01_14_get_object_array_elements(self):
jstring = self.env.new_string_utf("Hello, world")
klass = self.env.get_object_class(jstring)
method_id = self.env.get_method_id(klass, 'split', '(Ljava/lang/String;)[Ljava/lang/String;')
split = self.env.new_string_utf(", ")
result = self.env.call_method(jstring, method_id, split)
result = self.env.get_object_array_elements(result)
self.assertEqual(len(result), 2)
self.assertEqual(self.env.get_string_utf(result[0]), "Hello")
self.assertEqual(self.env.get_string_utf(result[1]), "world")
def test_01_15_make_byte_array(self):
array = np.array([ord(x) for x in "Hello, world"],np.uint8)
jarray = self.env.make_byte_array(array)
klass = self.env.find_class("java/lang/String")
method_id = self.env.get_method_id(klass, '<init>', '([B)V')
result = self.env.new_object(klass, method_id, jarray)
self.assertEqual(self.env.get_string_utf(result), "Hello, world")
def test_01_16_get_array_length(self):
jstring = self.env.new_string_utf("Hello, world")
klass = self.env.get_object_class(jstring)
method_id = self.env.get_method_id(klass, 'split', '(Ljava/lang/String;)[Ljava/lang/String;')
split = self.env.new_string_utf(", ")
result = self.env.call_method(jstring, method_id, split)
self.assertEqual(self.env.get_array_length(result), 2)
def test_01_17_make_object_array(self):
klass = self.env.find_class("java/lang/String")
jarray = self.env.make_object_array(15, klass)
length = self.env.get_array_length(jarray)
self.assertEqual(length, 15)
def test_01_18_set_object_array_element(self):
klass = self.env.find_class("java/lang/String")
jarray = self.env.make_object_array(15, klass)
for i in range(15):
v = self.env.new_string_utf(str(i))
self.env.set_object_array_element(jarray, i, v)
result = self.env.get_object_array_elements(jarray)
self.assertEqual(len(result), 15)
for i, elem in enumerate(result):
v = self.env.get_string_utf(elem)
self.assertEqual(str(i), v)
def test_01_19_0_make_boolean_array(self):
np.random.seed(1190)
array = np.random.uniform(size=105) > .5
jarray = self.env.make_boolean_array(array)
result = self.env.get_boolean_array_elements(jarray)
self.assertTrue(np.all(array == result))
def test_01_19_make_short_array(self):
np.random.seed(119)
array = (np.random.uniform(size=10) * 65535 - 32768).astype(np.int16)
array = np.unique(array)
array.sort()
jarray = self.env.make_short_array(array)
klass = self.env.find_class("java/util/Arrays")
method_id = self.env.get_static_method_id(klass, "binarySearch",
"([SS)I")
for i, value in enumerate(array):
self.assertEqual(i, self.env.call_static_method(
klass, method_id, jarray, array[i]))
def test_01_20_make_int_array(self):
np.random.seed(120)
array = (np.random.uniform(size=10) * (2.0 ** 32-1) - (2.0 ** 31)).astype(np.int32)
array = np.unique(array)
array.sort()
jarray = self.env.make_int_array(array)
klass = self.env.find_class("java/util/Arrays")
method_id = self.env.get_static_method_id(klass, "binarySearch",
"([II)I")
for i, value in enumerate(array):
self.assertEqual(i, self.env.call_static_method(
klass, method_id, jarray, array[i]))
def test_01_21_make_long_array(self):
np.random.seed(121)
array = (np.random.uniform(size=10) * (2.0 ** 64) - (2.0 ** 63)).astype(np.int64)
array = np.unique(array)
array.sort()
jarray = self.env.make_long_array(array)
klass = self.env.find_class("java/util/Arrays")
method_id = self.env.get_static_method_id(klass, "binarySearch",
"([JJ)I")
for i, value in enumerate(array):
self.assertEqual(i, self.env.call_static_method(
klass, method_id, jarray, array[i]))
def test_01_22_make_float_array(self):
np.random.seed(122)
array = np.random.uniform(size=10).astype(np.float32)
array = np.unique(array)
array.sort()
jarray = self.env.make_float_array(array)
klass = self.env.find_class("java/util/Arrays")
method_id = self.env.get_static_method_id(klass, "binarySearch",
"([FF)I")
for i, value in enumerate(array):
self.assertEqual(i, self.env.call_static_method(
klass, method_id, jarray, array[i]))
def test_01_23_make_double_array(self):
np.random.seed(123)
array = np.random.uniform(size=10).astype(np.float64)
array = np.unique(array)
array.sort()
jarray = self.env.make_double_array(array)
klass = self.env.find_class("java/util/Arrays")
method_id = self.env.get_static_method_id(klass, "binarySearch",
"([DD)I")
for i, value in enumerate(array):
self.assertEqual(i, self.env.call_static_method(
klass, method_id, jarray, array[i]))
def test_01_24_get_short_array_elements(self):
np.random.seed(124)
array = (np.random.uniform(size=10) * 65535 - 32768).astype(np.int16)
jarray = self.env.make_short_array(array)
result = self.env.get_short_array_elements(jarray)
        self.assertTrue(np.all(array == result))
"""Functions for image processing
"""
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from PIL import Image, ImageFilter
import os
import math
import random
import numpy as np
from scipy import misc
import cv2
# Calculate the shape for creating new array given (h,w)
from dataset.face_data_augment import face_image_augment_cv
def get_new_shape(images, size=None, n=None):
shape = list(images.shape)
if size is not None:
h, w = tuple(size)
shape[1] = h
shape[2] = w
if n is not None:
shape[0] = n
shape = tuple(shape)
return shape
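# Example: for a float array `images` of shape (10, 169, 120, 3),
#   get_new_shape(images, size=(112, 96)) -> (10, 112, 96, 3)
#   get_new_shape(images, n=4)            -> (4, 169, 120, 3)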
def random_crop(image, size):
_h, _w = image.shape[0], image.shape[1]
h, w = tuple(size)
y = np.random.randint(low=0, high=_h-h+1)
x = np.random.randint(low=0, high=_w-w+1)
image_new = image[y:y+h, x:x+w]
return image_new
def center_crop(image, size):
n, _h, _w = image.shape[:3]
h, w = tuple(size)
assert (_h>=h and _w>=w)
y = int(round(0.5 * (_h - h)))
x = int(round(0.5 * (_w - w)))
image_new = image[:, y:y+h, x:x+w]
return image_new
def random_flip(image):
image_new = image.copy()
if np.random.rand()>=0.5:
        image_new = np.fliplr(image)
import onnxruntime_numpy as onp
from onnxruntime_numpy.types import (
float_types, integer_types, is_unsigned_int, all_types, is_bool,
numeric_types, bool_types)
import pytest
import numpy as np
from .utils import expect
import itertools
def argmax_use_numpy(data, axis=0, keepdims=1):
result = np.argmax(data, axis=axis)
if (keepdims == 1):
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmax_use_numpy_select_last_index(data, axis=0, keepdims=True):
data = np.flip(data, axis)
result = np.argmax(data, axis=axis)
result = data.shape[axis] - result - 1
if keepdims:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmin_use_numpy(data, axis=0, keepdims=1):
result = np.argmin(data, axis=axis)
if (keepdims == 1):
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmin_use_numpy_select_last_index(data, axis=0, keepdims=True):
data = np.flip(data, axis)
result = np.argmin(data, axis=axis)
result = data.shape[axis] - result - 1
if keepdims:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
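# Worked example of the tie-breaking difference (mirrors ONNX's
# select_last_index attribute): for data = [[2, 2]] and axis=1,
#   argmax_use_numpy(data, axis=1)                   -> [[0]]
#   argmax_use_numpy_select_last_index(data, axis=1) -> [[1]]
# i.e. ties resolve to the first occurrence by default and to the last one
# when select_last_index is requested.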
@pytest.mark.parametrize("type_a", [*float_types, *integer_types])
def test_abs(type_a):
if is_unsigned_int(type_a):
# it is invalid to use unsigned int type with negative values
a = onp.array([1, 2, 3], dtype=type_a)
else:
a = onp.array([-1, -2, -3], dtype=type_a)
expected = onp.array([1, 2, 3], dtype=type_a)
result = onp.absolute(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_acos(type_a):
a = onp.array([1., .5, .1], dtype=type_a)
expected = onp.array([0., 1.04719755, 1.47062891], dtype=type_a)
result = onp.acos(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_acosh(type_a):
a = onp.array([1., 2., 3.], dtype=type_a)
expected = onp.array([0., 1.3169579, 1.76274717], dtype=type_a)
result = onp.acosh(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_default_axes_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
keepdims = True
expected = argmax_use_numpy(x, keepdims=keepdims)
result = onp.argmax(onp.array(x))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_default_axes_keepdims_select_last_index(type_a):
x = np.array([[2, 2], [3, 10]], dtype=type_a)
keepdims = True
expected = argmax_use_numpy_select_last_index(x, keepdims=keepdims)
result = onp.argmax(onp.array(x), select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmax_use_numpy(x, axis=axis, keepdims=keepdims)
result = onp.argmax(onp.array(x), axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmax_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_negative_axis_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmax_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_negative_axis_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmax_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_no_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmax_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_no_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmax_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_default_axes_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
keepdims = True
expected = argmin_use_numpy(x, keepdims=keepdims)
result = onp.argmin(onp.array(x))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_default_axes_keepdims_select_last_index(type_a):
x = np.array([[2, 2], [3, 10]], dtype=type_a)
keepdims = True
expected = argmin_use_numpy_select_last_index(x, keepdims=keepdims)
result = onp.argmin(onp.array(x), select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmin_use_numpy(x, axis=axis, keepdims=keepdims)
result = onp.argmin(onp.array(x), axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmin_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_negative_axis_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmin_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_negative_axis_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmin_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_no_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmin_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_no_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmin_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_asin(type_a):
a = onp.array([1., .2, .3], dtype=type_a)
expected = onp.array([1.57079633, 0.20135792, 0.30469265], dtype=type_a)
result = onp.asin(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_asinh(type_a):
a = onp.array([1., .2, .3], dtype=type_a)
expected = onp.array([0.88137359, 0.19869011, 0.29567305], dtype=type_a)
result = onp.asinh(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_atan(type_a):
a = onp.array([1., .2, .3], dtype=type_a)
expected = onp.array([0.78539816, 0.19739556, 0.29145679], dtype=type_a)
result = onp.atan(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_atanh(type_a):
a = onp.array([0., .2, .3], dtype=type_a)
expected = onp.array([0., 0.20273255, 0.3095196], dtype=type_a)
result = onp.atanh(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [*all_types])
@pytest.mark.parametrize("type_b", [*all_types])
def test_cast(type_a, type_b):
a = onp.array([0, 1, 2], dtype=type_a)
if is_bool(type_b) or is_bool(type_a):
expected = onp.array([0, 1, 1], dtype=type_b)
else:
expected = onp.array([0, 1, 2], dtype=type_b)
result = onp.cast(a, type_b)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_ceil(type_a):
a = onp.array([-1.5, 2.49, -3.99], dtype=type_a)
expected = onp.array([-1., 3., -3], dtype=type_a)
result = onp.ceil(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [*numeric_types])
def test_clip(type_a):
if type_a in [np.int16, np.int32, np.uint16, np.uint32]:
return
a = onp.array([0, 1, 2], dtype=type_a)
expected = onp.array([0, 1, 1], dtype=type_a)
result = onp.clip(a, minimum=0, maximum=1)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_cos(type_a):
a = onp.array([1, 2, 3], dtype=type_a)
expected = onp.array([0.54030231, -0.41614684, -0.9899925], dtype=type_a)
result = onp.cos(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_cosh(type_a):
a = onp.array([1, 2, 3], dtype=type_a)
expected = onp.array([1.54308063, 3.76219569, 10.067662], dtype=type_a)
result = onp.cosh(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_det(type_a):
a = onp.array([[1., 2.],
[3., 4.]], dtype=type_a)
expected = onp.array(-2, dtype=type_a)
result = onp.det(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_det_nd(type_a):
a = onp.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]],
[[1, 3], [3, 1]]], dtype=type_a)
expected = onp.array([-2., -3., -8.], dtype=type_a)
result = onp.det(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_erf(type_a):
a = onp.array([[1, 2, 3], [-1, -2, 0]], dtype=type_a)
expected = onp.array([[0.84270079, 0.99532227, 0.99997791],
[-0.84270079, -0.99532227, 0.]],
dtype=type_a)
result = onp.erf(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [*float_types])
def test_exp(type_a):
a = onp.array([-1, 0, 1], dtype=type_a)
expected = onp.array([0.36787945, 1., 2.71828175],
dtype=type_a)
result = onp.exp(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a",
[*float_types, np.uint64, np.int32, np.int64])
@pytest.mark.parametrize("type_b",
[*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_populate_off_main_diagonal(type_a, type_b):
shape = (4, 5)
off_diagonal_offset = 1
if type_a in integer_types:
x = np.random.randint(0, 100, size=shape, dtype=type_a)
elif type_a in float_types:
x = np.random.randn(*shape).astype(type_a)
else:
raise ValueError(f"Invalid type {type_a}")
expected = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=type_b)
result = onp.eye_like(onp.array(x, dtype=type_a),
dtype=type_b, k=off_diagonal_offset)
assert result.dtype == type_b
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a",
[*float_types, np.uint64, np.int32, np.int64])
@pytest.mark.parametrize("type_b",
[*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_with_dtype(type_a, type_b):
shape = (3, 4)
if type_a in integer_types:
x = np.random.randint(0, 100, size=shape, dtype=type_a)
elif type_a in float_types:
x = np.random.randn(*shape).astype(type_a)
else:
raise ValueError(f"Invalid type {type_a}")
expected = np.eye(shape[0], shape[1], dtype=type_b)
result = onp.eye_like(onp.array(x, dtype=type_a), dtype=type_b)
assert result.dtype == type_b
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a",
[*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_without_dtype(type_a):
shape = (4, 4)
if type_a in integer_types:
x = np.random.randint(0, 100, size=shape, dtype=type_a)
elif type_a in float_types:
x = np.random.randn(*shape).astype(type_a)
else:
raise ValueError(f"Invalid type {type_a}")
expected = np.eye(shape[0], shape[1], dtype=type_a)
result = onp.eye_like(onp.array(x, dtype=type_a))
assert result.dtype == type_a
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a",
[*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_with_3d_tensor(type_a):
shape = (4, 4, 1)
if type_a in integer_types:
x = np.random.randint(0, 100, size=shape, dtype=type_a)
elif type_a in float_types:
x = np.random.randn(*shape).astype(type_a)
else:
raise ValueError(f"Invalid type {type_a}")
with pytest.raises(ValueError):
_ = onp.eye_like(onp.array(x, dtype=type_a))
def test_eyelike_unsupported_type():
shape = (4, 4)
x = np.random.randint(0, 100, size=shape, dtype=np.int32)
with pytest.raises(TypeError):
_ = onp.eye_like(onp.array(x), dtype=np.str_)
@pytest.mark.parametrize("type_a", all_types)
def test_flatten(type_a):
shape = (2, 3, 4, 5)
a = np.random.random_sample(shape).astype(type_a)
for i in range(len(shape)):
new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)
expected = np.reshape(a, new_shape)
result = onp.flatten(onp.array(a, dtype=type_a), axis=i)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", all_types)
def test_flatten_negativate_axis(type_a):
shape = (2, 3, 4, 5)
a = np.random.random_sample(shape).astype(type_a)
for i in range(-len(shape), 0):
new_shape = (np.prod(shape[0:i]).astype(int), -1)
expected = np.reshape(a, new_shape)
result = onp.flatten(onp.array(a, dtype=type_a), axis=i)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", all_types)
def test_flatten_with_default_axis(type_a):
shape = (5, 4, 3, 2)
a = np.random.random_sample(shape).astype(type_a)
new_shape = (5, 24)
expected = np.reshape(a, new_shape)
result = onp.flatten(onp.array(a))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_floor(type_a):
x = np.random.randn(3, 4, 5).astype(type_a)
    expected = np.floor(x)
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, EarlyStopping
from keras.utils import to_categorical
from sklearn.metrics import confusion_matrix
from PIL import Image
import numpy as np
from tqdm import tqdm
import os
import argparse
from data import Data
WIDTH = 120
HEIGHT = 169
def define_model():
    # Build the model
model = Sequential()
model.add(Conv2D(64, (10, 10), strides=(2, 2), padding='SAME', input_shape=(HEIGHT, WIDTH,3)))
model.add(MaxPooling2D((4, 4), strides=(2, 2), padding='VALID'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Conv2D(128, (5, 5), strides=(1, 1), padding='SAME'))
model.add(MaxPooling2D((2, 2), strides=(1, 1), padding='VALID'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(2, activation='softmax'))
return model
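# Quick sanity-check sketch (illustrative; not part of the training script):
# building the model and printing its summary should show a (169, 120, 3)
# input tensor and a final 2-way softmax layer.
#
#   model = define_model()
#   model.summary()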
def train_op(model):
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
def train(args):
num_classes = 2
path_true = os.path.join(args.data_dir, 'favarit', 'train')
path_false = os.path.join(args.data_dir, 'not_favarit', 'train')
data_0_obj = Data(data_dir_path=path_true)
data_0 = data_0_obj.data_sets
label_0 = np.zeros(len(data_0))
data_1_obj = Data(data_dir_path=path_false)
data_1 = data_1_obj.data_sets
label_1 = np.ones(len(data_1))
data, labels = np.concatenate([data_0, data_1], axis=0), np.concatenate([label_0, label_1], axis=0)
X_train = data
Y_train = labels
path_true = os.path.join(args.data_dir, 'favarit', 'test')
path_false = os.path.join(args.data_dir, 'not_favarit', 'test')
data_0_obj = Data(data_dir_path=path_true)
data_0 = data_0_obj.data_sets
label_0 = np.zeros(len(data_0))
data_1_obj = Data(data_dir_path=path_false)
data_1 = data_1_obj.data_sets
label_1 = np.ones(len(data_1))
    data, labels = np.concatenate([data_0, data_1], axis=0), np.concatenate([label_0, label_1], axis=0)
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import netket as nk
import numpy as np
from numpy.testing import assert_equal
from netket.utils import group
from itertools import product
from .. import common
pytestmark = common.skipif_mpi
# Tests for group.py and overrides in subclasses
planar_families = [group.planar.C, group.planar.D]
planars = [fn(n) for fn in planar_families for n in range(1, 9)] + [
group.planar.reflection_group(23),
group.planar.glide_group([0.5, 0]).replace(unit_cell=np.eye(2)),
]
planars_proper = [True] * 8 + [False] * 10
uniaxial_families = [group.axial.C, group.axial.Ch, group.axial.S]
uniaxials = [
    fn(n, axis=np.random.standard_normal(3))
"""
Solve the unique lowest-cost assignment problem using the
Hungarian algorithm (also known as Munkres algorithm).
"""
# Based on original code by <NAME>, adapted to numpy by <NAME>
# Copyright (c) 2008 <NAME> <<EMAIL>>, <NAME>
# Author: <NAME>, <NAME>
# LICENSE: BSD
import numpy as np
###############################################################################
# Object-oriented form of the algorithm
class _Hungarian(object):
"""Hungarian algorithm
Calculate the Munkres solution to the classical assignment problem.
Warning: this code is not following scikit-learn standards and will be
refactored.
"""
def compute(self, cost_matrix):
"""
Compute the indices for the lowest-cost pairings.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Does not have to be square.
Returns
-------
indices : 2D array of indices
The pairs of (row, col) indices in the original array giving
the original ordering.
"""
cost_matrix = np.atleast_2d(cost_matrix)
# If there are more rows (n) than columns (m), then the algorithm
# will not be able to work correctly. Therefore, we
# transpose the cost function when needed. Just have to
# remember to swap the result columns later in this function.
doTranspose = (cost_matrix.shape[1] < cost_matrix.shape[0])
if doTranspose:
self.C = (cost_matrix.T).copy()
else:
self.C = cost_matrix.copy()
# At this point, m >= n.
self.n = n = self.C.shape[0]
self.m = m = self.C.shape[1]
        self.row_uncovered = np.ones(n, dtype=bool)
        self.col_uncovered = np.ones(m, dtype=bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n+m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
done = False
step = 1
steps = {1: self._step1,
3: self._step3,
4: self._step4,
5: self._step5,
6: self._step6}
if m == 0 or n == 0:
# No need to bother with assignments if one of the dimensions
# of the cost matrix is zero-length.
done = True
while not done:
try:
func = steps[step]
step = func()
except KeyError:
done = True
# Look for the starred columns
results = np.array(np.where(self.marked == 1)).T
# We need to swap the columns because we originally
# did a transpose on the input cost matrix.
if doTranspose:
results = results[:, ::-1]
return results.tolist()
def _step1(self):
""" Steps 1 and 2 in the wikipedia page.
"""
# Step1: For each row of the matrix, find the smallest element and
# subtract it from every element in its row.
self.C -= self.C.min(axis=1)[:, np.newaxis]
# Step2: Find a zero (Z) in the resulting matrix. If there is no
# starred zero in its row or column, star Z. Repeat for each element
# in the matrix.
for i, j in zip(*np.where(self.C == 0)):
if self.col_uncovered[j] and self.row_uncovered[i]:
self.marked[i, j] = 1
self.col_uncovered[j] = False
self.row_uncovered[i] = False
self._clear_covers()
return 3
def _step3(self):
"""
Cover each column containing a starred zero. If n columns are
covered, the starred zeros describe a complete set of unique
assignments. In this case, Go to DONE, otherwise, Go to Step 4.
"""
marked = (self.marked == 1)
self.col_uncovered[np.any(marked, axis=0)] = False
if marked.sum() >= self.n:
return 7 # done
else:
return 4
def _step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
# We convert to int as numpy operations are faster on int
        C = (self.C == 0).astype(int)
        covered_C = C*self.row_uncovered[:, np.newaxis]
        covered_C *= self.col_uncovered.astype(int)
n = self.n
m = self.m
while True:
# Find an uncovered zero
row, col = np.unravel_index(np.argmax(covered_C), (n, m))
if covered_C[row, col] == 0:
return 6
else:
self.marked[row, col] = 2
# Find the first starred element in the row
star_col = np.argmax(self.marked[row] == 1)
if not self.marked[row, star_col] == 1:
# Could not find one
self.Z0_r = row
self.Z0_c = col
return 5
else:
col = star_col
self.row_uncovered[row] = False
self.col_uncovered[col] = True
covered_C[:, col] = C[:, col] * (
                        self.row_uncovered.astype(int))
covered_C[row] = 0
def _step5(self):
"""
Construct a series of alternating primed and starred zeros as
follows. Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred zero
of the series, star each primed zero of the series, erase all
primes and uncover every line in the matrix. Return to Step 3
"""
count = 0
path = self.path
path[count, 0] = self.Z0_r
path[count, 1] = self.Z0_c
done = False
while not done:
# Find the first starred element in the col defined by
# the path.
row = np.argmax(self.marked[:, path[count, 1]] == 1)
if not self.marked[row, path[count, 1]] == 1:
# Could not find one
done = True
else:
count += 1
path[count, 0] = row
path[count, 1] = path[count-1, 1]
if not done:
# Find the first prime element in the row defined by the
# first path step
col = np.argmax(self.marked[path[count, 0]] == 2)
if self.marked[row, col] != 2:
col = -1
count += 1
path[count, 0] = path[count-1, 0]
path[count, 1] = col
# Convert paths
for i in range(count+1):
if self.marked[path[i, 0], path[i, 1]] == 1:
self.marked[path[i, 0], path[i, 1]] = 0
else:
self.marked[path[i, 0], path[i, 1]] = 1
self._clear_covers()
# Erase all prime markings
self.marked[self.marked == 2] = 0
return 3
def _step6(self):
"""
Add the value found in Step 4 to every element of each covered
row, and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered
lines.
"""
# the smallest uncovered value in the matrix
if np.any(self.row_uncovered) and np.any(self.col_uncovered):
minval = np.min(self.C[self.row_uncovered], axis=0)
minval = np.min(minval[self.col_uncovered])
self.C[
|
np.logical_not(self.row_uncovered)
|
numpy.logical_not
|
import pathlib
import xarray as xr
import os.path as osp
import numpy as np
import astropy.units as au
import astropy.constants as ac
import pandas as pd
from scipy.integrate import simps, trapz
from scipy.interpolate import interp1d
from ..microphysics.dust_draine import DustDraine
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import Normalize, LogNorm
local = pathlib.Path(__file__).parent.absolute()
class SB99(object):
def __init__(self, basename='/projects/EOSTRIKE/SB99/Z1_M1E6/output/',
prefix='Z1_M1E6', logM=6.0, tmax_Myr=100.0):
"""
        Parameters
        ----------
        basename : str
            Directory containing the SB99 output files
        prefix : str
            Filename prefix of the SB99 run
        logM : float
            Mass of a (coeval) cluster in log10 M/Msun
        tmax_Myr : float
            Maximum age in Myr
"""
self.logM = logM
self.tmax_Myr = tmax_Myr
self.basename = basename
self.fnames = dict()
self.fnames['snr'] = osp.join(self.basename, prefix + '.snr1')
self.fnames['power'] = osp.join(self.basename, prefix + '.power1')
self.fnames['spectrum'] = osp.join(self.basename, prefix + '.spectrum1')
# self.dfs = self.read_sn()
# self.dfw = self.read_wind()
# self.rr = self.read_rad()
def read_sn(self):
"""Function to read snr1 (supernova rate) output
"""
names = ['time', 'SN_rate', 'Edot_SN', 'Einj_SN', 'SN_rate_IB',
'Edot_SN_IB','Einj_SN_IB', 'Mpgen_typ', 'Mpgen_min',
'Edot_tot', 'Einj_tot']
        df = pd.read_csv(self.fnames['snr'], names=names, skiprows=7, delimiter=r'\s+')
for c in df.columns:
if c == 'time' or c.startswith('Mpgen'):
continue
df[c] = 10.0**(df[c] - self.logM)
df = df.rename(columns={'time': 'time_yr'})
df['time_Myr'] = df['time_yr']*1e-6
# df['time'] = df['time_Myr']
# Move column
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df[cols]
return df
def read_wind(self):
"""Function to read power1 (stellar wind power) output
"""
names = ['time','Edot_all','Edot_OB','Edot_RSG','Edot_LBV','Edot_WR','Einj_all',
'pdot_all','pdot_OB','pdot_RSG','pdot_LBV','pdot_WR']
        df = pd.read_csv(self.fnames['power'], names=names, skiprows=7, delimiter=r'\s+')
for c in df.columns:
if c == 'time':
continue
df[c] = 10.0**(df[c] - self.logM)
df = df.rename(columns={'time': 'time_yr'})
df['time_Myr'] = df['time_yr']*1e-6
# Wind terminal velocity
for v in ('all', 'OB','RSG', 'LBV', 'WR'):
df['Vw_' + v] = (2.0*df['Edot_' + v]/df['pdot_' + v])/1e5
# Move column
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df[cols]
return df
def read_rad(self):
"""Function to read SB99 spectrum data and mean dust opacity
"""
eV_cgs = (1.0*au.eV).cgs.value
hc_cgs = (ac.h*ac.c).cgs.value
Lsun_cgs = (ac.L_sun).cgs.value
d = DustDraine()
df_dust = d.dfa['Rv31']
f_Cext = interp1d(np.log10(df_dust['lwav']), np.log10(df_dust['Cext']))
f_Cabs = interp1d(np.log10(df_dust['lwav']), np.log10(df_dust['Cext']*(1.0 - df_dust['albedo'])))
        df = pd.read_csv(self.fnames['spectrum'], skiprows=6, sep=r'\s+',
names=['time', 'wav', 'logf', 'logfstar', 'logfneb'])
df = df.rename(columns={'time': 'time_yr'})
df['time_Myr'] = df['time_yr']*1e-6
# Move column
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df[cols]
dfg = df.groupby('time_yr')
ntime = len(dfg.groups.keys())
nwav = len(dfg.groups[list(dfg.groups.keys())[0]])
# Normalize by the cluster mass
time = np.empty(ntime)
wav = np.empty(nwav)
logf = np.empty((ntime, nwav), dtype=float)
# Luminosity
L_tot = []
L_LyC = [] # LyC
L_LW = [] # LW
L_PE = [] # PE
L_OPT = [] # Optical + IR
wav0 = 912.0
wav1 = 1108.0
wav2 = 2068.0
wav3 = 200000.0
for i, (time_, df_) in enumerate(dfg):
if time_*1e-6 > self.tmax_Myr:
continue
time[i] = time_
logf[i, :] = df_.logf
wav = df_.wav
idx0 = wav <= wav0
idx1 = np.logical_and(wav <= wav1, wav > wav0)
idx2 = np.logical_and(wav <= wav2, wav > wav1)
idx3 = np.logical_and(wav <= wav3, wav > wav2)
L_tot.append(simps(10.0**(df_.logf - self.logM), df_.wav)/Lsun_cgs)
L_LyC.append(simps(10.0**(df_.logf[idx0] - self.logM), df_.wav[idx0])/Lsun_cgs)
L_LW.append(simps(10.0**(df_.logf[idx1] - self.logM), df_.wav[idx1])/Lsun_cgs)
L_PE.append(simps(10.0**(df_.logf[idx2] - self.logM), df_.wav[idx2])/Lsun_cgs)
L_OPT.append(simps(10.0**(df_.logf[idx3] - self.logM), df_.wav[idx3])/Lsun_cgs)
# wavelength in micron
l = wav*1e-4
J = 10.0**df_['logf']
f_J = interp1d(np.log10(l), np.log10(J))
if i == 0:
w = dict()
w['LyC'] = np.logspace(np.log10(l.min()), np.log10(wav0*1e-4), 1000)
w['LW'] = np.logspace(np.log10(wav0*1e-4), np.log10(wav1*1e-4), 1000)
w['PE'] = np.logspace(np.log10(wav1*1e-4), np.log10(wav2*1e-4), 1000)
w['OPT'] = np.logspace(np.log10(wav2*1e-4), np.log10(1.0), 1000)
Cext = dict()
Cabs = dict()
hnu = dict()
for k in w.keys():
Cext[k] = []
Cabs[k] = []
hnu[k] = []
for k in w.keys():
Cext[k].append(simps(10.0**f_Cext(np.log10(w[k]))*10.0**f_J(np.log10(w[k]))*w[k], w[k])/ \
simps(10.0**f_J(np.log10(w[k]))*w[k], w[k]))
Cabs[k].append(simps(10.0**f_Cabs(np.log10(w[k]))*10.0**f_J(np.log10(w[k]))*w[k], w[k])/ \
simps(10.0**f_J(np.log10(w[k]))*w[k], w[k]))
hnu[k].append(simps(10.0**f_J(np.log10(w[k])), w[k])/ \
simps(10.0**f_J(np.log10(w[k]))*w[k], w[k]))
if i == 0:
w = dict()
w['LyC'] = np.logspace(np.log10(l.min()), np.log10(wav0*1e-4), 1000)
w['LW'] = np.logspace(np.log10(wav0*1e-4), np.log10(wav1*1e-4), 1000)
w['PE'] = np.logspace(np.log10(wav1*1e-4), np.log10(wav2*1e-4), 1000)
w['OPT'] = np.logspace(np.log10(wav2*1e-4), np.log10(wav3*1e-4), 1000)
L_tot = np.array(L_tot)
L_LyC = np.array(L_LyC)
L_LW = np.array(L_LW)
L_PE = np.array(L_PE)
L_OPT = np.array(L_OPT)
L_UV = L_LyC + L_PE + L_LW
L_FUV = L_PE + L_LW
time_Myr = time*1e-6
L = dict()
L['tot'] = np.array(L_tot)
L['LyC'] = np.array(L_LyC)
L['LW'] = np.array(L_LW)
L['PE'] = np.array(L_PE)
L['OPT'] = np.array(L_OPT)
L['UV'] = L['LyC'] + L['LW'] + L['PE']
L['FUV'] = L['LW'] + L['PE']
# Momentum injection rate (Msun km/s / Myr / Msun)
pdot = dict()
for v in ('tot', 'LyC', 'LW', 'PE', 'OPT', 'UV', 'FUV'):
pdot[v] = (((L[v]*au.L_sun/ac.c).to('g cm s-2')).to('Msun km s-1 Myr-1')).value
# Luminosity-weighted effective timescale
# (e-folding timescale if L is decaying exponentially)
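        # t_decay = int( L(t) * t dt ) / int( L(t) dt ); for L(t) proportional to
        # exp(-t/tau) integrated over 0 <= t < infinity this reduces to tau.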
tdecay_lum = dict()
for k in L.keys():
tdecay_lum[k] = trapz(L[k]*time_Myr, time_Myr)/trapz(L[k], time_Myr)
Cext_mean = dict()
Cabs_mean = dict()
hnu_mean = dict()
# Luminosity-weighted average cross-section, photon energy
for k in Cext.keys():
Cext_mean[k] = np.average(Cext[k], weights=L[k])
Cabs_mean[k] = np.average(Cabs[k], weights=L[k])
hnu_mean[k] = np.average(hnu[k], weights=L[k])
r = dict(df=df, df_dust=df_dust,
time_yr=time, time_Myr=time_Myr,
wav=wav, logf=logf, logM=self.logM,
L=L, pdot=pdot, tdecay_lum=tdecay_lum,
wav0=wav0, wav1=wav1, wav2=wav2, wav3=wav3,
Cabs=Cabs, Cext=Cext, hnu=hnu,
Cabs_mean=Cabs_mean, Cext_mean=Cext_mean, hnu_mean=hnu_mean)
return r
@staticmethod
def plt_spec_sigmad(rr, lambda_Llambda=False, plt_isrf=True, tmax=50.0, nstride=10):
"""Function to plot SB99 spectrum
Parameters
----------
lambda_Llambda : bool
Plot lambda_Llambda instead of Llambda
"""
if plt_isrf:
fig, axes = plt.subplots(3, 2, figsize=(12, 12),
gridspec_kw=dict(width_ratios=(0.98,0.02),
height_ratios=(1/3.0,1/3.0,1/3.0),
wspace=0.05, hspace=0.11),)
else:
fig, axes = plt.subplots(2, 2, figsize=(12, 9),
gridspec_kw=dict(width_ratios=(0.98,0.02),
height_ratios=(0.65,0.35),
wspace=0.05, hspace=0.01),)
# Dust opacity
irow = 0
plt.sca(axes[irow,0])
plt.tick_params(right=False, which='both', axis='y')
from pyathena.microphysics import dust_draine
muH = (1.4*au.u).cgs.value
d = dust_draine.DustDraine()
df = d.dfa['Rv31']
plt.semilogy(df.lwav*1e4, df.Cext/muH, c='k', label='ext')
plt.semilogy(df.lwav*1e4, df.kappa_abs, c='k', ls='--', label='abs')
plt.xlim(1e2,2068)
plt.ylim(1e2,2.5e3)
plt.ylabel(r'$\kappa_{\rm d}\;[{\rm cm}^2\,{\rm g}^{-1}]$')
plt.legend()
def kappa2sigma(x):
return x*muH
def sigma2kappa(x):
return x/muH
sax1 = plt.gca().secondary_yaxis('right', functions=(kappa2sigma,sigma2kappa))
sax1.set_ylabel(r'$\sigma_{\rm d}\;[{\rm cm}^2\,{\rm H}^{-1}]$')
# axes[1,0].tick_params(right=False, labelright=False)
def l_to_hnu(x):
return ac.h.cgs.value*ac.c.cgs.value/1e-8/(1.0*au.eV).cgs.value/x
def hnu_to_l(x):
return 1.0/(ac.h.cgs.value*ac.c.cgs.value/1e-8/(1.0*au.eV).cgs.value)/x
axes[irow,0].tick_params(top=False, labeltop=False)
sax0 = plt.gca().secondary_xaxis('top', functions=(l_to_hnu,hnu_to_l))
tick_loc = np.array([6.0, 11.2, 13.6, 50.0])
def ftick(x):
return ["%.1f" % z for z in x]
# secax.set_xlim(ax1.get_xlim())
sax0.set_xticks(tick_loc)
sax0.set_xticklabels(ftick(tick_loc))
sax0.set_xlabel(r'$h\nu\;[{\rm eV}]$', labelpad=10)
# secax.tick_params(axis='x', which='major', pad=15)
ytext = 1.8e3
plt.annotate('LyC', ((912+plt.gca().get_xlim()[0])*0.5,ytext),
xycoords='data', ha='center')
plt.annotate('LW', ((912+1108)*0.5,ytext), xycoords='data', ha='center')
plt.annotate('PE', ((1108+2068)*0.5,ytext), xycoords='data', ha='center')
plt.sca(axes[irow,1])
plt.axis('off')
irow += 1
plt.sca(axes[irow,0])
norm = mpl.colors.Normalize(0.0, tmax)
cmap = mpl.cm.jet_r
dfg = rr['df'].groupby('time_Myr')
logM = rr['logM']
for i, (time_, df_) in enumerate(dfg):
if time_ > tmax:
continue
if i % nstride == 0:
print('{0:.1f}'.format(time_), end=' ')
if lambda_Llambda:
plt.plot(df_.wav, df_.wav*10.0**(df_.logf - logM),
c=cmap(norm(time_)))#, marker='o', ms=3)
else:
plt.plot(df_.wav, 10.0**(df_.logf - logM),
c=cmap(norm(time_)))#, marker='o', ms=3)
plt.xlim(100, 2068)
if lambda_Llambda:
plt.ylim(1e31, 1e38)
else:
plt.ylim(1e28, 1e35)
plt.yscale('log')
plt.ylabel(r'$L_{\lambda}/M_{\ast}\;[{\rm erg}\,{\rm s}^{-1}\,\AA^{-1}\,M_{\odot}^{-1}]$')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=Normalize(0, tmax))
plt.colorbar(sm, cax=axes[irow,1], label=r'$t_{\rm age}\;[{\rm Myr}]$')
for ax in axes[:,0]:
plt.sca(ax)
plt.axvspan(0,912, color='grey', ymin=0, alpha=0.1)
plt.axvspan(912,1108, color='grey', ymin=0, alpha=0.15)
plt.axvspan(1108,2068, color='grey', ymin=0, alpha=0.2)
plt.xlabel(r'$\lambda\;[\AA]$')
plt.xlim(1e2,2068)
if plt_isrf:
irow += 1
plt.sca(axes[irow,1])
plt.axis('off')
plt_nuJnu_mid_plane_parallel(ax=axes[irow,0])
return fig
@staticmethod
def plt_lum_evol(ax, rr, rw, rs, plt_sn=False):
#pa.set_plt_fancy()
plt.sca(ax)
plt.plot(rr['time_Myr'], rr['L']['tot'], label=r'Bolometric', c='k')
plt.plot(rr['time_Myr'], rr['L']['UV'], label=r'${\rm LyC+FUV}\;(<2068\,{\rm \AA})$', c='k', ls='--')
plt.plot(rr['time_Myr'], rr['L']['LyC'], label=r'${\rm LyC}\;(<912\,{\rm \AA})$', c='C0')
plt.plot(rr['time_Myr'], rr['L']['LW'], label=r'${\rm LW}\;(912$-$1108\,{\rm \AA})$', c='C1')
plt.plot(rr['time_Myr'], rr['L']['PE'], label=r'${\rm PE}\;(1108$-$2068\,{\rm \AA})$', c='C2')
plt.plot(rr['time_Myr'], rr['L']['OPT'], label=r'${\rm OPT}\;(2068$-$10000\,{\rm \AA})$', c='C3')
plt.plot(rw['time_Myr'], rw['Edot_all']/(1.0*au.L_sun).cgs.value, c='C7',
label=r'$L_{\rm w}/M_{\ast}$')
if plt_sn:
plt.plot(rs['time_Myr'], rs['Edot_SN']/(1.0*au.L_sun).cgs.value, c='C8',
label=r'$L_{\rm sn}/M_{\ast}$')
plt.yscale('log')
plt.xlim(0, 20)
plt.ylim(1e-1,2e3)
plt.xlabel(r'$t_{\rm age}\;[{\rm Myr}]$')
#plt.ylabel(r'$\Psi\,{\rm and}\,\Psi_w \;[L_{\odot}\,M_{\odot}^{-1}]$')
plt.ylabel(r'$L/M_{\ast} \;[L_{\odot}\,M_{\odot}^{-1}]$')
plt.legend(fontsize='small', loc=4)
return ax
@staticmethod
def plt_pdot_evol(ax, rr, rw, rs):
plt.sca(ax)
plt.plot(rr['time_Myr'], (rr['L']['tot']*au.L_sun/ac.c/au.M_sun).to('km s-1 Myr-1'),
label=r'Bolometric', c='k')
plt.plot(rr['time_Myr'], (rr['L']['LyC']*au.L_sun/ac.c/au.M_sun).to('km s-1 Myr-1'),
label=r'${\rm LyC}\;(<912\,{\rm \AA})$', c='C0', ls='-')
plt.plot(rr['time_Myr'], (rr['L']['UV']*au.L_sun/ac.c/au.M_sun).to('km s-1 Myr-1'),
label=r'${\rm LyC+FUV}\;(<2068\,{\rm \AA})$', c='k', ls='--')
plt.plot(rw['time_Myr'], (rw['pdot_all'].values*au.dyne/au.M_sun).to('km s-1 Myr-1'),
label=r'$\dot{p}_{\rm wind}/M_{\ast}$', c='C7')
plt.xlim(0,20)
plt.ylim(1e-1,5e1)
plt.yscale('log')
plt.xlabel(r'$t_{\rm age}\;[{\rm Myr}]$')
plt.ylabel(r'$\dot{p}/M_{\ast} \;[{\rm km}\,{\rm s}^{-1}\,{\rm Myr}^{-1}]$')
#plt.legend()
return ax
@staticmethod
def plt_lum_cumul(ax, rr, rw, rs, normed=True, plt_sn=False):
from scipy.integrate import cumulative_trapezoid
integrate_L_cum = lambda L, t: cumulative_trapezoid((L*au.L_sun).cgs.value,
(t*au.yr).cgs.value, initial=0.0)
L_tot_cum = integrate_L_cum(rr['L']['tot'], rr['time_yr'])
L_UV_cum = integrate_L_cum(rr['L']['UV'], rr['time_yr'])
if normed:
norm = L_tot_cum
else:
norm = 1.0
plt.sca(ax)
plt.plot(rr['time_Myr'], integrate_L_cum(rr['L']['LyC'], rr['time_yr'])/norm,
label='LyC', c='C0')
plt.plot(rr['time_Myr'], integrate_L_cum(rr['L']['LW'], rr['time_yr'])/norm,
label='LW', c='C1')
plt.plot(rr['time_Myr'], integrate_L_cum(rr['L']['PE'], rr['time_yr'])/norm,
label='PE', c='C2')
plt.plot(rr['time_Myr'], integrate_L_cum(rr['L']['UV'], rr['time_yr'])/norm,
label=r'${\rm LyC+FUV}\;(<2068\,{\rm \AA})$', c='k', ls='--')
plt.plot(rr['time_Myr'], integrate_L_cum(rr['L']['tot'], rr['time_yr'])/norm,
label='Bolometric', c='k')
plt.plot(rw['time_Myr'], rw['Einj_all'], c='C7', label=r'$L_{\rm w}$')
if plt_sn:
plt.plot(rs['time_Myr'], rs['Einj_SN'], c='C8', label=r'$L_{\rm sn}$')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$t_{\rm age}\;[{\rm Myr}]$')
if not normed:
plt.ylabel(r'$\int (L/M_{\ast}) dt\;[{\rm erg}\,M_{\odot}^{-1}]$')
else:
plt.ylabel(r'$\int L dt/ \int L_{\rm UV}dt$')
plt.xlim(3e-1, 5e1)
plt.ylim(1e47, 2e51)
# plt.legend()
return ax
@staticmethod
def plt_pdot_cumul(ax, rr, rw, rs, normed=True, plt_sn=False):
from scipy.integrate import cumulative_trapezoid
integrate_pdot = lambda pdot, t: cumulative_trapezoid(
pdot, t*au.Myr, initial=0.0)
pdot_tot_cum = integrate_pdot(rr['L']['tot'], rr['time_Myr'])
if normed:
norm = pdot_tot_cum
else:
norm = 1.0
plt.sca(ax)
plt.plot(rr['time_Myr'], integrate_pdot(rr['pdot']['LyC'], rr['time_Myr'])/norm,
label='LyC', c='C0')
# Skip PE and LW
# plt.plot(rr['time_Myr'], integrate_pdot(rr['pdot']['LW'], rr['time_Myr'])/norm,
# label='LW', c='C1')
# plt.plot(rr['time_Myr'], integrate_pdot(rr['pdot']['PE'], rr['time_Myr'])/norm,
# label='PE', c='C2')
plt.plot(rr['time_Myr'], integrate_pdot(rr['pdot']['UV'], rr['time_Myr'])/norm,
label=r'${\rm LyC+FUV}\;(<2068\,{\rm \AA})$', c='k', ls='--')
plt.plot(rr['time_Myr'], integrate_pdot(rr['pdot']['tot'], rr['time_Myr'])/norm,
label='Bolometric', c='k')
# from cgs to astro units
pdot_conv = (1.0*au.g*au.cm/au.s**2).to('Msun km s-1 Myr-1')
plt.plot(rw['time_Myr'], integrate_pdot(rw['pdot_all']*pdot_conv,
rw['time_Myr'])/norm,
c='C7', label=r'$L_{\rm w}$')
# if plt_sn:
# plt.plot(rs['time_Myr'], rs['Einj_SN'], c='C8', label=r'$L_{\rm sn}$')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$t_{\rm age}\;[{\rm Myr}]$')
if not normed:
plt.ylabel(r'$\int (\dot{p}/M_{\ast}) dt\;[{\rm km}\,{\rm s}^{-1}]$')
else:
plt.ylabel(r'$\int \dot{p} dt/ \int \dot{p}_{\rm UV} dt$')
plt.xlim(3e-1, 5e1)
plt.ylim(5e-1,5e2)
# plt.legend()
return ax
@staticmethod
def plt_Edot_pdot_evol_cumul(rr, rw, rs, plt_sn=True, normed=False):
fig, axes = plt.subplots(2,2,figsize=(12, 10), constrained_layout=True,
gridspec_kw=dict(height_ratios=[0.5,0.5]))
axes = axes.flatten()
SB99.plt_lum_evol(axes[0], rr, rw, rs, plt_sn=plt_sn)
SB99.plt_pdot_evol(axes[1], rr, rw, rs)
SB99.plt_lum_cumul(axes[2], rr, rw, rs, normed=normed, plt_sn=plt_sn)
SB99.plt_pdot_cumul(axes[3], rr, rw, rs, normed=normed, plt_sn=plt_sn)
for ax in axes:
ax.grid()
#ax.set_xlim(0,50)
#ax.set_xscale('linear')
return fig
def plt_nuJnu_mid_plane_parallel(ax, Sigma_gas=10.0*au.M_sun/au.pc**2, plt_dr78=True):
sb2 = SB99('/projects/EOSTRIKE/SB99/Z1_SFR1/output/', prefix='Z1_SFR1', logM=0.0)
rr = sb2.read_rad()
w = rr['wav'].values*1e-4
d = DustDraine()
dfdr = d.dfa['Rv31']
f_Cext = interp1d(np.log10(dfdr['lwav']), np.log10(dfdr['Cext']),
bounds_error=False)
f_Cabs = interp1d(np.log10(dfdr['lwav']), np.log10(dfdr['Cext']*(1.0 - dfdr['albedo'])),
bounds_error=False)
Sigma_SFR = 2.5e-3
Llambda = Sigma_SFR*10.0**rr['logf'][-1,:]*au.erg/au.s/au.angstrom
Sigma = 10.0*au.M_sun/au.pc**2
area = (1.0*au.kpc)**2
muH = 1.4*au.u
kappa_dust_ext = (10.0**f_Cext(np.log10(w))*au.cm**2/au.u).cgs
kappa_dust_abs = (10.0**f_Cabs(np.log10(w))*au.cm**2/au.u).cgs
tau_perp = (Sigma*kappa_dust_abs).to('').value
from scipy.special import expn
# Intensity at the midplane (see Ostriker et al. 2010)
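    # i.e. J_lambda = (L_lambda / A) / (4 * pi * tau_perp) * [1 - E_2(tau_perp / 2)],
    # where E_2 is the second exponential integral, scipy.special.expn(2, x).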
Jlambda = (Llambda/area/(4.0*np.pi*tau_perp)*
(1.0 - expn(2, 0.5*tau_perp))).to('erg s-1 cm-2 angstrom-1')
# Naive estimation without attenuation
Jlambda0 = (Llambda/area/4.0).to('erg s-1 cm-2 angstrom-1')
plt.sca(ax)
l, = plt.loglog(rr['wav'], #rr['wav']*
Jlambda, label=r'SB99 + Ostriker et al. (2010)')
plt.loglog(rr['wav'], #rr['wav']*
Jlambda0, c=l.get_color(), alpha=0.5, ls='--', label=r'')
if plt_dr78:
from pyathena.util import rad_isrf
wav2 = np.logspace(np.log10(912),
|
np.log10(2068)
|
numpy.log10
|
import os
import numpy as np
import pandas as pd
from xgboost import XGBRegressor
import lightgbm as lgb
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor,ExtraTreesRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.linear_model import LinearRegression,BayesianRidge
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler, MinMaxScaler,PolynomialFeatures
import warnings
warnings.filterwarnings("ignore")
def rmse(y_true,y_pred):
return mean_squared_error(y_true=y_true,y_pred=y_pred) ** 0.5
params_dict = {
'random_forest':{
'n_estimators':range(10,150,10),
'criterion':['mse']
},
'extra_trees':{
'n_estimators':range(10,150,10),
'criterion':['mse']
},
'bagging':{
'n_estimators':range(10,150,10),
},
'mlp':{
'alpha':[0.01,0.001,0.1],
'hidden_layer_sizes':[(10,10)],
'solver':['lbfgs'],
'activation':['identity'],
'learning_rate':['constant']
},
# 'xgboost':{
# 'n_estimators':range(100,150,5),
# 'max_depth':range(10,15,2),
# 'learning_rate':np.linspace(0.05,0.1,2),
# 'subsample':np.linspace(0.7,0.9,2)
# }
'xgboost':{
'n_estimators':[10000],
'max_depth':[15],
'learning_rate':[0.05],
'subsample':[0.8]
},
'lgb':{
'num_leaves': [60],
'min_data_in_leaf': [30],
'learning_rate': [0.001],
"min_child_samples": [30],
"feature_fraction": [0.9],
},
'lr':{
'normalize':[False]
},
'br':{
'n_iter':[3000]
},
'poly':{
'fit_intercept':[False]
}
}
class ML_Classifier(object):
'''
    Machine-learning model wrapper for grid-search training (the estimators used here are regressors)
    Args:
    - clf_name, string, one of ['random_forest','extra_trees','bagging','mlp','xgboost','lgb','lr','br','poly']
    - params, dict, parameter grid for the specified estimator (see params_dict above)
'''
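    # Hypothetical usage sketch (dataframe and column names are made up for
    # illustration; `metric` must be a scorer dict containing 'neg_rmse' because
    # GridSearchCV below refits on that key):
    #   model = ML_Classifier(clf_name='random_forest', params=params_dict['random_forest'])
    #   model.trainer(train_df, target_key='target', metric=scoring, k_fold=5,
    #                 pred_flag=True, test_df=test_df, save_path='./result.csv')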
def __init__(self,clf_name=None,params=None):
super(ML_Classifier,self).__init__()
self.clf_name = clf_name
self.params = params
self.clf = self._get_clf()
def trainer(self,train_df,target_key,random_state=21,metric=None,k_fold=5,scale_factor=None,pred_flag=False,test_df=None,test_csv=None,save_path=None):
params = self.params
fea_list= [f for f in train_df.columns if f != target_key]
print('feature list:',fea_list)
Y = np.asarray(train_df[target_key])
X = np.asarray(train_df[fea_list])
test = np.asarray(test_df[fea_list])
kfold = KFold(n_splits=k_fold,shuffle=True,random_state=random_state)
if self.clf_name == 'poly':
# poly = PolynomialFeatures(interaction_only=True)
poly = PolynomialFeatures()
X = poly.fit_transform(X)
test = poly.fit_transform(test)
print(X)
print(test)
print(X.shape,test.shape)
predictions = []
for fold_num,(train_index,val_index) in enumerate(kfold.split(X)):
print(f'***********fold {fold_num+1} start!!***********')
x_train, x_val = X[train_index], X[val_index]
y_train, y_val = Y[train_index], Y[val_index]
# print(x_val,y_val)
model = GridSearchCV(estimator=self.clf,
param_grid=params,
cv=kfold,
scoring=metric,
refit='neg_rmse',
verbose=True,
return_train_score=True)
model = model.fit(x_train,y_train)
best_score = -1.0*model.best_score_
best_model = model.best_estimator_
train_pred = model.predict(x_train)
train_score = rmse(y_train,train_pred)
test_pred = model.predict(x_val)
test_score = rmse(y_val,test_pred)
print("MSE Evaluation:")
print("fold {} Best score:{}".format(fold_num + 1,best_score))
print("fold {} Train score:{}".format(fold_num + 1,train_score))
print("fold {} Test score:{}".format(fold_num + 1,test_score))
print("fold {} Best parameter:\n".format(fold_num + 1))
for key in params.keys():
print('%s:'%key)
print(best_model.get_params()[key])
if self.clf_name == 'random_forest' or self.clf_name == 'extra_trees':
if self.clf_name == 'random_forest':
new_grid = RandomForestRegressor(random_state=0,bootstrap=True)
elif self.clf_name == 'extra_trees':
new_grid = ExtraTreesRegressor(random_state=0,bootstrap=True)
new_grid.set_params(**model.best_params_)
new_grid = new_grid.fit(x_train,y_train)
importances = new_grid.feature_importances_
feat_labels = fea_list
# print(feat_labels)
indices =
|
np.argsort(importances)
|
numpy.argsort
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import os.path
import shutil
from typing import Any, Dict, Iterable, List, Mapping, Sequence, Tuple
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tree
from tensorflow_federated.python.program import file_release_manager
from tensorflow_federated.python.program import file_utils
from tensorflow_federated.python.program import test_utils
def _read_values_from_csv(
file_path: os.PathLike) -> Tuple[List[str], List[Dict[str, Any]]]:
with tf.io.gfile.GFile(file_path, 'r') as file:
reader = csv.DictReader(file)
fieldnames = list(reader.fieldnames)
values = list(reader)
return fieldnames, values
def _write_values_to_csv(file_path: os.PathLike, fieldnames: Sequence[str],
values: Iterable[Mapping[str, Any]]):
with tf.io.gfile.GFile(file_path, 'w') as file:
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
for value in values:
writer.writerow(value)
class CSVFileReleaseManagerInitTest(parameterized.TestCase):
def test_creates_file_path(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
self.assertFalse(os.path.exists(temp_file))
file_release_manager.CSVFileReleaseManager(file_path=temp_file)
self.assertTrue(os.path.exists(temp_file))
def test_creates_file_dir(self):
temp_dir = self.create_tempdir()
shutil.rmtree(temp_dir)
self.assertFalse(os.path.exists(temp_dir))
temp_file = os.path.join(temp_dir, 'a')
file_release_manager.CSVFileReleaseManager(file_path=temp_file)
self.assertTrue(os.path.exists(temp_file))
def test_initializes_with_empty_file(self):
temp_file = self.create_tempfile()
_write_values_to_csv(file_path=temp_file, fieldnames=['key'], values=[])
self.assertTrue(os.path.exists(temp_file))
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
self.assertIsNone(release_mngr._latest_key)
def test_initializes_with_existing_file(self):
temp_file = self.create_tempfile()
_write_values_to_csv(
file_path=temp_file,
fieldnames=['key', 'a', 'b'],
values=[{
'key': 1,
'a': 10,
'b': 20
}])
self.assertTrue(os.path.exists(temp_file))
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
self.assertEqual(release_mngr._latest_key, 1)
def test_does_not_raise_type_error_with_file_path_str(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
try:
file_release_manager.CSVFileReleaseManager(file_path=temp_file)
except TypeError:
self.fail('Raised TypeError unexpectedly.')
def test_does_not_raise_type_error_with_file_path_path_like(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
try:
file_release_manager.CSVFileReleaseManager(file_path=temp_file)
except TypeError:
self.fail('Raised TypeError unexpectedly.')
@parameterized.named_parameters(
('none', None),
('bool', True),
('int', 1),
('list', []),
)
def test_raises_type_error_with_file_path(self, file_path):
with self.assertRaises(TypeError):
file_release_manager.CSVFileReleaseManager(file_path=file_path)
def test_raises_value_error_with_file_path_empty(self):
with self.assertRaises(ValueError):
file_release_manager.CSVFileReleaseManager(file_path='')
def test_does_not_raise_type_error_with_save_mode(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
try:
file_release_manager.CSVFileReleaseManager(
file_path=temp_file,
save_mode=file_release_manager.CSVSaveMode.APPEND)
except TypeError:
self.fail('Raised TypeError unexpectedly.')
@parameterized.named_parameters(
('none', None),
('bool', True),
('int', 1),
('str', 'a'),
('list', []),
)
def test_raises_type_error_with_save_mode(self, save_mode):
temp_file = self.create_tempfile()
os.remove(temp_file)
with self.assertRaises(TypeError):
file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=save_mode)
def test_does_not_raise_type_error_with_key_fieldname(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
try:
file_release_manager.CSVFileReleaseManager(
file_path=temp_file, key_fieldname='z')
except TypeError:
self.fail('Raised TypeError unexpectedly.')
@parameterized.named_parameters(
('none', None),
('bool', True),
('int', 1),
('list', []),
)
def test_raises_type_error_with_key_fieldname(self, key_fieldname):
temp_file = self.create_tempfile()
os.remove(temp_file)
with self.assertRaises(TypeError):
file_release_manager.CSVFileReleaseManager(
file_path=temp_file, key_fieldname=key_fieldname)
def test_raises_value_error_with_key_fieldname_empty(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
with self.assertRaises(ValueError):
file_release_manager.CSVFileReleaseManager(
file_path=temp_file, key_fieldname='')
def test_raises_incompatible_file_error_with_unknown_key_fieldname(self):
temp_file = self.create_tempfile()
_write_values_to_csv(
file_path=temp_file,
fieldnames=['z', 'a', 'b'],
values=[{
'z': 1,
'a': 10,
'b': 20
}])
with self.assertRaises(
file_release_manager.FileReleaseManagerIncompatibleFileError):
file_release_manager.CSVFileReleaseManager(file_path=temp_file)
def test_raises_incompatible_file_error_with_unknown_file(self):
temp_file = self.create_tempfile()
with self.assertRaises(
file_release_manager.FileReleaseManagerIncompatibleFileError):
file_release_manager.CSVFileReleaseManager(file_path=temp_file)
class CSVFileReleaseManagerReadValuesTest(parameterized.TestCase):
def test_returns_values_from_empty_file(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
fieldnames, values = release_mngr._read_values()
self.assertEqual(fieldnames, ['key'])
self.assertEqual(values, [])
# pyformat: disable
@parameterized.named_parameters(
('no_values', ['key', 'a', 'b'], []),
('one_value', ['key', 'a', 'b'], [{'key': 1, 'a': 10, 'b': 20}]),
('two_values', ['key', 'a', 'b'],
[{'key': 1, 'a': 10, 'b': 20},
{'key': 1, 'a': 11, 'b': 21}]),
)
# pyformat: enable
def test_returns_values_from_existing_file(self, fieldnames, values):
temp_file = self.create_tempfile()
_write_values_to_csv(
file_path=temp_file, fieldnames=fieldnames, values=values)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
actual_fieldnames, actual_values = release_mngr._read_values()
self.assertEqual(actual_fieldnames, fieldnames)
expected_values = tree.map_structure(str, values)
self.assertEqual(actual_values, expected_values)
class CSVFileReleaseManagerWriteValuesTest(parameterized.TestCase):
# pyformat: disable
@parameterized.named_parameters(
('no_values', ['key', 'a', 'b'], []),
('one_value', ['key', 'a', 'b'], [{'key': 1, 'a': 10, 'b': 20}]),
('two_values', ['key', 'a', 'b'],
[{'key': 1, 'a': 10, 'b': 20},
{'key': 1, 'a': 11, 'b': 21}]),
)
# pyformat: enable
def test_writes_values_to_empty_file(self, fieldnames, values):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
release_mngr._write_values(fieldnames=fieldnames, values=values)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
self.assertEqual(actual_fieldnames, fieldnames)
expected_values = tree.map_structure(str, values)
self.assertEqual(actual_values, expected_values)
# pyformat: disable
@parameterized.named_parameters(
('no_values', ['key', 'a', 'b'], []),
('one_value', ['key', 'a', 'b'], [{'key': 1, 'a': 10, 'b': 20}]),
('two_values', ['key', 'a', 'b'],
[{'key': 1, 'a': 10, 'b': 20},
{'key': 1, 'a': 11, 'b': 21}]),
)
# pyformat: enable
def test_writes_values_to_existing_file(self, fieldnames, values):
temp_file = self.create_tempfile()
_write_values_to_csv(
file_path=temp_file,
fieldnames=['key', 'a', 'b'],
values=[{
'key': 1,
'a': 10,
'b': 20
}])
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
release_mngr._write_values(fieldnames=fieldnames, values=values)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
self.assertEqual(actual_fieldnames, fieldnames)
expected_values = tree.map_structure(str, values)
self.assertEqual(actual_values, expected_values)
class CSVFileReleaseManagerWriteValueTest(parameterized.TestCase):
# pyformat: disable
@parameterized.named_parameters(
('empty', {}),
('more_fields', {'a': 11, 'b': 21, 'c': 31}),
)
# pyformat: enable
def test_writes_value_to_empty_file(self, value):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=file_release_manager.CSVSaveMode.WRITE)
release_mngr._write_value(value)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
expected_fieldnames = ['key']
expected_fieldnames.extend(
[x for x in value.keys() if x not in expected_fieldnames])
self.assertEqual(actual_fieldnames, expected_fieldnames)
expected_value = {name: '' for name in expected_fieldnames}
expected_value.update(value)
expected_values = [expected_value]
expected_values = tree.map_structure(str, expected_values)
self.assertEqual(actual_values, expected_values)
# pyformat: disable
@parameterized.named_parameters(
('empty', {}),
('same_fields', {'a': 11, 'b': 21}),
('less_fields', {'a': 11}),
('more_fields', {'a': 11, 'b': 21, 'c': 31}),
)
# pyformat: enable
def test_writes_value_to_existing_file(self, value):
temp_file = self.create_tempfile()
existing_fieldnames = ['key', 'a', 'b']
existing_value = {'key': 1, 'a': 10, 'b': 20}
_write_values_to_csv(
file_path=temp_file,
fieldnames=existing_fieldnames,
values=[existing_value])
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=file_release_manager.CSVSaveMode.WRITE)
release_mngr._write_value(value)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
expected_fieldnames = existing_fieldnames.copy()
expected_fieldnames.extend(
[x for x in value.keys() if x not in expected_fieldnames])
self.assertEqual(actual_fieldnames, expected_fieldnames)
expected_value1 = {name: '' for name in expected_fieldnames}
expected_value1.update(existing_value)
expected_value2 = {name: '' for name in expected_fieldnames}
expected_value2.update(value)
expected_values = [expected_value1, expected_value2]
expected_values = tree.map_structure(str, expected_values)
self.assertEqual(actual_values, expected_values)
class CSVFileReleaseManagerAppendValueTest(parameterized.TestCase):
# pyformat: disable
@parameterized.named_parameters(
('empty', {}),
('more_fields', {'a': 11, 'b': 21, 'c': 31}),
)
# pyformat: enable
def test_appends_value_to_empty_file(self, value):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=file_release_manager.CSVSaveMode.APPEND)
release_mngr._append_value(value)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
expected_fieldnames = ['key']
expected_fieldnames.extend(
[x for x in value.keys() if x not in expected_fieldnames])
self.assertEqual(actual_fieldnames, expected_fieldnames)
expected_value = {name: '' for name in expected_fieldnames}
expected_value.update(value)
expected_values = [expected_value]
expected_values = tree.map_structure(str, expected_values)
self.assertEqual(actual_values, expected_values)
# pyformat: disable
@parameterized.named_parameters(
('empty', {}),
('same_fields', {'a': 11, 'b': 21}),
('less_fields', {'a': 11}),
('more_fields', {'a': 11, 'b': 21, 'c': 31}),
)
# pyformat: enable
def test_appends_value_to_existing_file(self, value):
temp_file = self.create_tempfile()
existing_fieldnames = ['key', 'a', 'b']
existing_value = {'key': 1, 'a': 10, 'b': 20}
_write_values_to_csv(
file_path=temp_file,
fieldnames=existing_fieldnames,
values=[existing_value])
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=file_release_manager.CSVSaveMode.APPEND)
release_mngr._append_value(value)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
expected_fieldnames = existing_fieldnames.copy()
expected_fieldnames.extend(
[x for x in value.keys() if x not in expected_fieldnames])
self.assertEqual(actual_fieldnames, expected_fieldnames)
expected_value1 = {name: '' for name in expected_fieldnames}
expected_value1.update(existing_value)
expected_value2 = {name: '' for name in expected_fieldnames}
expected_value2.update(value)
expected_values = [expected_value1, expected_value2]
expected_values = tree.map_structure(str, expected_values)
self.assertEqual(actual_values, expected_values)
def test_raises_permission_denied_error(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=file_release_manager.CSVSaveMode.APPEND)
with mock.patch.object(csv.DictWriter, 'writerow') as mock_writerow:
mock_writerow.side_effect = csv.Error()
with self.assertRaises(
file_release_manager.FileReleaseManagerPermissionDeniedError):
release_mngr._append_value({})
class CSVFileReleaseManagerRemoveValuesGreaterThanTest(parameterized.TestCase):
@parameterized.named_parameters(
('0', 0),
('1', 1),
)
def test_removes_values_from_empty_file(self, key):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
release_mngr._remove_values_greater_than(key)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
self.assertEqual(actual_fieldnames, ['key'])
self.assertEqual(actual_values, [])
@parameterized.named_parameters(
('0', 0),
('1', 1),
('2', 2),
)
def test_removes_values_from_existing_file(self, key):
temp_file = self.create_tempfile()
existing_fieldnames = ['key', 'a', 'b']
existing_values = [
{
'key': 1,
'a': 10,
'b': 20
},
{
'key': 2,
'a': 11,
'b': 21
},
]
_write_values_to_csv(
file_path=temp_file,
fieldnames=existing_fieldnames,
values=existing_values)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
release_mngr._remove_values_greater_than(key)
actual_fieldnames, actual_values = _read_values_from_csv(temp_file)
if key == 0:
expected_fieldnames = ['key']
else:
expected_fieldnames = existing_fieldnames
self.assertEqual(actual_fieldnames, expected_fieldnames)
expected_values = existing_values[0:key]
expected_values = tree.map_structure(str, expected_values)
self.assertEqual(actual_values, expected_values)
@parameterized.named_parameters(
('none', None),
('str', 'a'),
('list', []),
)
def test_raises_type_error_with_key(self, key):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
with self.assertRaises(TypeError):
release_mngr._remove_values_greater_than(key)
class CSVFileReleaseManagerReleaseTest(parameterized.TestCase):
def test_calls_remove_values_greater_than_with_empty_file(self):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
with mock.patch.object(
release_mngr,
'_remove_values_greater_than') as mock_remove_values_greater_than:
release_mngr.release({'a': 10, 'b': 20}, 1)
mock_remove_values_greater_than.assert_called_with(0)
self.assertEqual(release_mngr._latest_key, 1)
def test_calls_remove_values_greater_than_with_existing_file(self):
temp_file = self.create_tempfile()
_write_values_to_csv(
file_path=temp_file,
fieldnames=['key', 'a', 'b'],
values=[{
'key': 1,
'a': 10,
'b': 20
}])
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file)
with mock.patch.object(
release_mngr,
'_remove_values_greater_than') as mock_remove_values_greater_than:
release_mngr.release({'a': 11, 'b': 21}, 1)
mock_remove_values_greater_than.assert_called_with(0)
self.assertEqual(release_mngr._latest_key, 1)
# pyformat: disable
@parameterized.named_parameters(
('empty', {}, 1),
('more_fields', {'a': 10, 'b': 20}, 1),
)
# pyformat: enable
def test_calls_append_value(self, value, key):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=file_release_manager.CSVSaveMode.APPEND)
with mock.patch.object(release_mngr, '_append_value') as mock_append_value:
release_mngr.release(value, key)
mock_append_value.assert_called_once()
call = mock_append_value.mock_calls[0]
_, args, _ = call
actual_value, = args
expected_fieldnames = ['key']
expected_fieldnames.extend(
[x for x in value.keys() if x not in expected_fieldnames])
expected_value = {name: '' for name in expected_fieldnames}
expected_value.update({'key': key})
expected_value.update(value)
self.assertEqual(actual_value, expected_value)
self.assertEqual(release_mngr._latest_key, key)
# pyformat: disable
@parameterized.named_parameters(
('empty', {}, 1),
('more_fields', {'a': 10, 'b': 20}, 1),
)
# pyformat: enable
def test_calls_write_value(self, value, key):
temp_file = self.create_tempfile()
os.remove(temp_file)
release_mngr = file_release_manager.CSVFileReleaseManager(
file_path=temp_file, save_mode=file_release_manager.CSVSaveMode.WRITE)
with mock.patch.object(release_mngr, '_write_value') as mock_write_value:
release_mngr.release(value, key)
mock_write_value.assert_called_once()
call = mock_write_value.mock_calls[0]
_, args, _ = call
actual_value, = args
expected_fieldnames = ['key']
expected_fieldnames.extend(
[x for x in value.keys() if x not in expected_fieldnames])
expected_value = {name: '' for name in expected_fieldnames}
expected_value.update({'key': key})
expected_value.update(value)
self.assertEqual(actual_value, expected_value)
self.assertEqual(release_mngr._latest_key, key)
# pyformat: disable
@parameterized.named_parameters(
('none', None, [{'key': '1', '': ''}]),
('bool', True, [{'key': '1', '': 'True'}]),
('int', 1, [{'key': '1', '': '1'}]),
('str', 'a', [{'key': '1', '': 'a'}]),
('list',
[True, 1, 'a'],
[{'key': '1', '0': 'True', '1': '1', '2': 'a'}]),
('list_empty', [], [{'key': '1'}]),
('list_nested',
[[True, 1], ['a']],
[{'key': '1', '0/0': 'True', '0/1': '1', '1/0': 'a'}]),
('dict',
{'a': True, 'b': 1, 'c': 'a'},
[{'key': '1', 'a': 'True', 'b': '1', 'c': 'a'}]),
('dict_empty', {}, [{'key': '1'}]),
('dict_nested',
{'x': {'a': True, 'b': 1}, 'y': {'c': 'a'}},
[{'key': '1', 'x/a': 'True', 'x/b': '1', 'y/c': 'a'}]),
('attr',
test_utils.TestAttrObject1(True, 1),
[{'key': '1', 'a': 'True', 'b': '1'}]),
('attr_nested',
{'a': [test_utils.TestAttrObject1(True, 1)],
'b': test_utils.TestAttrObject2('a')},
[{'key': '1', 'a/0/a': 'True', 'a/0/b': '1', 'b/a': 'a'}]),
('tensor_int', tf.constant(1), [{'key': '1', '': '1'}]),
('tensor_str', tf.constant('a'), [{'key': '1', '': 'b\'a\''}]),
('tensor_2d',
tf.ones((2, 3)),
[{'key': '1', '': '[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]'}]),
('tensor_nested',
{'a': [tf.constant(True), tf.constant(1)], 'b': [tf.constant('a')]},
[{'key': '1', 'a/0': 'True', 'a/1': '1', 'b/0': 'b\'a\''}]),
('numpy_int', np.int32(1), [{'key': '1', '': '1'}]),
('numpy_2d',
np.ones((2, 3)),
[{'key': '1', '': '[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]'}]),
('numpy_nested',
{'a': [np.bool(True),
|
np.int32(1)
|
numpy.int32
|
import numpy as np
import xarray as xr
from itertools import combinations
import dask.array as dsa
import dask
from xcape.core import calc_cape
from xcape.core import calc_srh
from .fixtures import empty_dask_array, dataset_soundings, dataset_ERA5pressurelevel
import pytest
@pytest.fixture(scope='module')
def p_t_td_1d(nlevs=20):
p = np.random.rand(nlevs)
t = np.random.rand(nlevs)
td = np.random.rand(nlevs)
return p, t, td
@pytest.fixture(scope='module')
def p_t_td_3d(nlevs=20, nx=10, ny=5):
p = np.random.rand(ny, nx, nlevs)
t = np.random.rand(ny, nx, nlevs)
td = np.random.rand(ny, nx, nlevs)
return p, t, td
@pytest.fixture(scope='module')
def p_t_td_surface(nx=10, ny=5):
ps = np.random.rand(ny, nx)
ts =
|
np.random.rand(ny, nx)
|
numpy.random.rand
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import seaborn
import datetime
flno = [2,3,4,6,7,8]
#colors = ['black','red','orange','lime','black','green','blue','purple']
colors = ["k","#045275","#0C7BDC","#7CCBA2","k","#FED976","#F0746E","#7C1D6F"]
maxlag = [0,0,5,10,10,20]
#c = plt.rcParams['axes.prop_cycle'].by_key()['color']
#c = ["#FF1F58","#009ADE","#FFC61E","blue","green"]
c = ["#009ADE","#FF1F58","k","green","orange"]
def make_means(dat,pt):
chiA, chiB = np.zeros((len(pt),3)), np.zeros((len(pt),3))
flaA, flaB = np.zeros((len(pt),3)), np.zeros((len(pt),3))
fishA, fishB = np.zeros((len(pt),3)), np.zeros((len(pt),3))
chiA[:,0], flaA[:,0], fishA[:,0] = pt, pt, pt
chiB[:,0], flaB[:,0], fishB[:,0] = pt, pt, pt
# add cloudy flag
dat['CLOUDY'] = ((dat['NICE'] > 0) | (dat['MASBR'] >= 1.2)).astype(int)
for i,f in enumerate(flno):
for lag in np.arange(1,maxlag[i]):
dat.loc[(dat['FLIGHT'] == f),'CLOUDY'] = np.maximum(dat.loc[(dat['FLIGHT'] == f),'CLOUDY'],
dat[(dat['FLIGHT'] == f)].shift(periods=lag, fill_value=0.0)['CLOUDY'])
# add ascent/descent flag
dz = (dat['ALT'] - dat.shift(periods=1)['ALT'])*1e3
dt = dat['TIME'] - dat.shift(periods=1)['TIME']
vert = np.abs(dz / dt)
vert_avg = vert.rolling(window=20).mean()
dat['ASCENT_FLAG'] = ((vert_avg > 10) | (dat['ALT'] < 12)).astype(int)
# add chiwis flag
dat['CELL_FLAG'] = ((dat['PRES_CELL'] < 30.0) | (dat['PRES_CELL'] > 45.0) | (dat['FLAG'] == 1)).astype(int)
#dat['CELL_FLAG'] = ((dat['PRES_CELL'] < 20.0) | (dat['PRES_CELL'] > 45.0) | (dat['FLAG'] == 1)).astype(int)
#dat['OOR'] = ((dat['PRES_CELL'] < 20.0) | (dat['PRES_CELL'] > 30.0) | (dat['FLAG'] == 1)).astype(int)
# FL7 dive flag
dat['F7_DIVE'] = ((dat['FLIGHT'] == 7) & (dat['TIME'] > 19.9e3) & (dat['TIME'] < 20.2e3)).astype('int')
for i,pti in enumerate(pt):
datA = dat[(dat['ASCENT_FLAG'] == 0) & (dat['PT'] >= pti-2.0) & (dat['PT'] < pti+2.0) & (dat['FLIGHT'] < 5)]
dat_chiwisA = datA[(datA['CELL_FLAG'] == 0)]
#dat_chiwisA_oor = datA[(datA['OOR'] == 0)]
dat_flashA = datA[(datA['F7_DIVE'] == 0)]
dat_clr_fishA = datA[(datA['CLOUDY'] == 0)]
chiA[i,1], chiA[i,2] = np.mean(dat_chiwisA['H2O']), np.std(dat_chiwisA['H2O'])
#chiA_oor[i,1], chiA_oor[i,2] = np.mean(dat_chiwisA_oor['H2O']), np.std(dat_chiwisA_oor['H2O'])
flaA[i,1], flaA[i,2] = np.mean(dat_flashA['FLH2O']), np.std(dat_flashA['FLH2O'])
fishA[i,1], fishA[i,2] = np.mean(dat_clr_fishA['FIH2O']), np.std(dat_clr_fishA['FIH2O'])
datB = dat[(dat['ASCENT_FLAG'] == 0) & (dat['PT'] >= pti-2.0) & (dat['PT'] < pti+2.0) & (dat['FLIGHT'] > 5)]
dat_chiwisB = datB[(datB['CELL_FLAG'] == 0)]
#dat_chiwisB_oor = datB[(datB['OOR'] == 0)]
dat_flashB = datB[(datB['F7_DIVE'] == 0)]
dat_clr_fishB = datB[(datB['CLOUDY'] == 0)]
chiB[i,1], chiB[i,2] = np.mean(dat_chiwisB['H2O']), np.std(dat_chiwisB['H2O'])
flaB[i,1], flaB[i,2] =
|
np.mean(dat_flashB['FLH2O'])
|
numpy.mean
|
# coding: utf-8
""" Utilities for dealing with spectra. """
from __future__ import division, print_function
__author__ = "<NAME> <<EMAIL>>"
__all__ = ["Spectrum1D", "Spectrum"]
# Standard library
import logging
import os
# Third-party
import numpy as np
import pyfits
logger = logging.getLogger(__name__)
class Spectrum(object):
""" A general spectrum class. """
@classmethod
def load(cls, filename, **kwargs):
"""
Load a spectrum from a filename.
:param filename:
The filename to load.
:returns:
A spectrum.
:rtype: :class:`Spectrum1D`
"""
# Try as a Spectrum1D class first
methods = (Spectrum1D.load, )#, OTHER_LOAD_FUNCTIONS
for method in methods:
try:
spectrum = method(filename)
except:
continue
else:
if isinstance(spectrum, Spectrum1D) and spectrum.variance is None:
raise ValueError("no variance array found")
return spectrum
raise IOError("could not interpret spectrum in {0}".format(filename))
class Spectrum1D(object):
"""
This is a temporary class holder for a Spectrum1D object until the
:class:`astropy.specutils.Spectrum1D` module has sufficiently matured.
"""
def __init__(self, disp, flux, variance=None, headers=None):
"""Initializes a `Spectrum1D` object with the given dispersion and flux
arrays.
:param disp:
Dispersion of the spectrum (i.e. the wavelength points).
:type disp:
:class:`numpy.array`
:param flux:
Flux points for each `disp` point.
:type flux:
:class:`numpy.array`
:param variance:
The variance in flux points for each dispersion point.
:type variance:
:class:`numpy.array`
:returns:
A spectrum.
"""
if len(disp) != len(flux):
raise ValueError("dispersion and flux must have the same length")
if len(disp) == 0:
raise ValueError("dispersion and flux cannot be empty arrays")
self.disp = disp
self.flux = flux
if variance is None:
variance = flux
self.variance = variance
# Better to send an extra array (ivariance) around than calculate it at
# every single likelihood call.
self.ivariance = 1.0/variance
if headers is not None:
self.headers = headers
else:
self.headers = {}
return None
def copy(self):
""" Creates a copy of the object. """
return self.__class__(self.disp.copy(), self.flux.copy(),
variance=self.variance, headers=self.headers)
@classmethod
def load(cls, filename, **kwargs):
"""
Load a Spectrum1D from a given filename.
:param filename:
Path of the filename to load. Can be either simple FITS extension
or an ASCII filename.
:type filename:
str
:notes:
If you are loading from an non-standard ASCII file, you can pass
kwargs to :func:`numpy.loadtxt` through this function.
"""
if not os.path.exists(filename):
raise IOError("path {0} does not exist".format(filename))
variance = None
if filename.endswith(".fits"):
image = pyfits.open(filename, **kwargs)
header = image[0].header
# Check for a tabular data structure
if len(image) > 1 and image[0].data is None:
names = [name.lower() for name in image[1].data.names]
dispersion_key = "wave" if "wave" in names else "disp"
disp, flux = image[1].data[dispersion_key], image[1].data["flux"]
if "error" in names or "variance" in names:
variance_key = "error" if "error" in names else "variance"
variance = image[1].data[variance_key]
else:
# According to http://iraf.net/irafdocs/specwcs.php ....
#li = a.headers["LTM1_1"] * np.arange(a.headers["NAXIS1"]) + a.headers["LTV1"]
#a.headers["CRVAL1"] + a.headers["CD1_1"] * (li - a.headers["CRPIX1"])
if np.all([key in header.keys() for key in ("CDELT1", "NAXIS1", "CRVAL1")]):
disp = header["CRVAL1"] + np.arange(header["NAXIS1"]) * header["CDELT1"]
if "LTV1" in header.keys():
disp -= header["LTV1"] * header["CDELT1"]
flux = image[0].data
# Check for logarithmic dispersion
if "CTYPE1" in header.keys() and header["CTYPE1"] == "AWAV-LOG":
disp = np.exp(disp)
# Add the headers in
headers = {}
for row in header.items():
key, value = row
# Check the value is valid
try:
str(value)
except TypeError:
continue
if len(key) == 0 or len(str(value)) == 0: continue
if key in headers.keys():
if not isinstance(headers[key], list):
headers[key] = [headers[key]]
headers[key].append(value)
else:
headers[key] = value
for key, value in headers.iteritems():
if isinstance(value, list):
headers[key] = "\n".join(map(str, value))
else:
headers = {}
# Try for variance too first
try:
disp, flux, variance = np.loadtxt(filename, unpack=True, **kwargs)
except:
disp, flux = np.loadtxt(filename, unpack=True, **kwargs)
return cls(disp, flux, variance=variance, headers=headers)
def save(self, filename, clobber=True):
"""
Saves the spectrum to disk.
:param filename:
The filename to save the spectrum to.
:type filename:
str
:param clobber: [optional]
Whether to overwite the ``filename`` if it already exists.
:type clobber:
bool
:raises IOError:
If the filename exists and we are not asked to clobber it.
"""
if os.path.exists(filename) and not clobber:
raise IOError("Filename already exists and we have been asked not \
to clobber it." % (filename, ))
if not filename.endswith("fits"):
# ASCII
if self.variance is not None:
data = np.hstack([
self.disp.reshape(-1, 1),
self.flux.reshape(-1, 1),
self.variance.reshape(-1, 1)
])
else:
data = np.hstack([self.disp.reshape(len(self.disp), 1),
self.flux.reshape(len(self.disp), 1)])
np.savetxt(filename, data)
else:
# FITS
crpix1, crval1 = 1, self.disp.min()
cdelt1 = np.mean(np.diff(self.disp))
test_disp = (crval1 + np.arange(len(self.disp), dtype=self.disp.dtype)\
* cdelt1).astype(self.disp.dtype)
if np.max(self.disp - test_disp) > 10e-2 or self.variance is not None:
# Non-linear dispersion map, or we have variance information too
# Create a tabular FITS format.
col_disp = pyfits.Column(name="disp", format="1D", array=self.disp)
col_flux = pyfits.Column(name="flux", format="1D", array=self.flux)
if self.variance is not None:
col_variance = pyfits.Column(name="variance", format="1D",
array=self.variance)
table_hdu = pyfits.new_table([col_disp, col_flux, col_variance])
else:
table_hdu = pyfits.new_table([col_disp, col_flux])
# Create Primary HDU
hdu = pyfits.PrimaryHDU()
# Update primary HDU with headers
for key, value in self.headers.iteritems():
if len(key) > 8:
# To deal with ESO compatibility
hdu.header.update("HIERARCH {0}".format(key), value)
try:
hdu.header.update(key, value)
except ValueError:
logger.warn("Could not save header: {0} = {1}".format(
key, value))
# Create HDU list with our tables
hdulist = pyfits.HDUList([hdu, table_hdu])
hdulist.writeto(filename, clobber=clobber)
else:
# Linear dispersion map.
# Create a PrimaryHDU file.
# Ensure we have an array!
hdu = pyfits.PrimaryHDU(np.array(self.flux))
headers = self.headers.copy()
headers.update({
"CRVAL1": crval1,
"CRPIX1": crpix1,
"CDELT1": cdelt1
})
for key, value in headers.iteritems():
if len(key) > 8:
# To deal with ESO compatibility
hdu.header.update("HIERARCH {0}".format(key), value)
else:
try:
hdu.header.update(key, value)
except ValueError:
logger.warn("Could not save header: %s = %s".format(
key, value))
hdu.writeto(filename, clobber=clobber)
def cross_correlate(observed, template, wavelength_range=None):
"""
Return a redshift by cross correlation of a template and observed spectra.
:param observed:
The observed spectrum.
:type observed:
:class:`Spectrum1D`
:param template:
The template spectrum, expected to be at rest-frame.
:type template:
:class:`Spectrum1D`
:param wavelength_range: [optional]
The (observed) start and ending wavelengths to use for the cross correlation.
:type wavelength_range:
tuple
:returns:
The relative velocity and associated uncertainty in km/s.
:rtype:
tuple
"""
# Put the spectra on the same dispersion mapping
if wavelength_range is not None:
        if not isinstance(wavelength_range, (list, tuple, np.ndarray)) \
or len(wavelength_range) != 2:
raise TypeError("wavelength range must either be None or a two-length"\
" tuple-like object with the start and end wavelength ranges")
indices = observed.disp.searchsorted(wavelength_range)
dispersion = observed.disp[indices[0]:indices[1] + 1]
observed_flux = observed.flux[indices[0]:indices[1] + 1]
else:
dispersion = observed.disp
observed_flux = observed.flux
template_flux = np.interp(dispersion, template.disp, template.flux,
left=1, right=1)
# Be forgiving, although we shouldn't have to be.
N = np.min(map(len, [dispersion, observed_flux, template_flux]))
# Ensure an even number of points
if N % 2 > 0:
N -= 1
dispersion = dispersion[:N]
observed_flux = observed_flux[:N]
template_flux = template_flux[:N]
assert len(dispersion) == len(observed_flux)
assert len(observed_flux) == len(template_flux)
# Set up z array
m = N // 2
z_array = dispersion / dispersion[m] - 1.0
# Apodize edges
edge_buffer = 0.1 * (dispersion[-1] - dispersion[0])
low_w_indices = np.nonzero(dispersion < dispersion[0] + edge_buffer)[0]
high_w_indices = np.nonzero(dispersion > dispersion[-1] - edge_buffer)[0]
apod_curve = np.ones(N, dtype='d')
apod_curve[low_w_indices] = (1.0 + np.cos(np.pi*(1.0 - \
(dispersion[low_w_indices] - dispersion[0])/edge_buffer)))/2.
apod_curve[high_w_indices] = (1.0 + np.cos(np.pi*(1.0 - \
(dispersion[-1] - dispersion[high_w_indices])/edge_buffer)))/2.
apod_observed_flux = observed_flux * apod_curve
apod_template_flux = template_flux * apod_curve
fft_observed_flux = np.fft.fft(apod_observed_flux)
fft_template_flux = np.fft.fft(apod_template_flux)
template_flux_corr = (fft_observed_flux * fft_template_flux.conjugate()) \
/ np.sqrt(np.inner(apod_observed_flux, apod_observed_flux) \
* np.inner(apod_template_flux, apod_template_flux))
correlation =
|
np.fft.ifft(template_flux_corr)
|
numpy.fft.ifft
|
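# The body of cross_correlate is truncated above at the inverse FFT of the
# correlation. A hedged sketch (an assumption, not the original implementation)
# of how the peak of np.fft.ifft(template_flux_corr) could be mapped onto a
# relative velocity in km/s through the z_array grid, assuming numpy is
# imported as np:
def _correlation_peak_velocity(template_flux_corr, z_array, c=299792.458):
    """Illustrative helper: velocity (and a crude one-pixel error) from the peak."""
    # Shift so that zero lag sits at the centre index, where z_array is ~0.
    correlation = np.fft.fftshift(np.fft.ifft(template_flux_corr).real)
    peak = int(np.argmax(correlation))
    velocity = c * z_array[peak]
    # One grid step of z as a rough uncertainty; a parabolic fit around the
    # peak would refine both the peak position and the error estimate.
    velocity_err = c * float(np.abs(np.mean(np.diff(z_array))))
    return velocity, velocity_err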
'''
Based on the OpenAI A2C implementation in the "baselines" package:
https://github.com/openai/baselines
'''
import os.path as osp
import gym
import time
import joblib
import logging
import numpy as np
import tensorflow as tf
from baselines import logger
from baselines.a2c.utils import discount_with_dones
from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables
from baselines.a2c.utils import cat_entropy, mse
import random
class Runner(object):
def __init__(self, env, model,modelsavepath, nsteps=5, gamma=0.99):
self.env = env
self.model = model
obs_space = self.env.ObsSpace
nh = sum(obs_space)
self.batch_ob_shape = (nsteps, nh)
self.obs = np.zeros((1, nh), dtype=np.uint8)
#obs = env.reset()
#self.update_obs(obs)
self.gamma = gamma
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(1)]
self.mb_Classes = []
self.sumOfRewards = 0
self.modelsavepath = modelsavepath
# Load Model params if save exist
self.model.load(self.modelsavepath)
self.ActionStats = self.ResetActionStats()
def on_Message(observation, reward, done, comID, step):
#Update self.observation
observation = observation.reshape(self.obs.shape)
#print("OBS_SINGLE:" + str(observation))
self.update_obs(observation)
#print("OBS_SINGLE__SELF:" + str(self.obs))
#set reward and done to arrays
self.sumOfRewards = self.sumOfRewards + reward
reward = [reward]
done = [done]
self.dones = done
#if first message for communication create new mbs class
# if not first step append reward and done
if step == 1:
mbs = mb_class(comID, self.gamma, self.nsteps)
self.mb_Classes.append(mbs)
else:
mbs = [m for m in self.mb_Classes if m.ID == comID][0]
#for n, done in enumerate(done):
#if done:
#self.obs = self.obs*0
mbs.mb_rewards.append(reward)
#Add done to mb class => the dones list is one element longer than the others
mbs.mb_dones.append(self.dones)
#decide whether to train the model, i.e. call the train callback
# if response to nth action ............ OR if done ???
if (step != 1 and step%self.nsteps == 1) or done[0]:
#transform arrays and call learn callback function
lastValues = self.model.value(self.obs, self.states, self.dones).tolist()
'''
RESHAPE ACTION AND REWARD AND VALUES!!!!!
'''
mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_advantages = mbs.transform(self.states, self.batch_ob_shape,lastValues)
self.learn_cb(mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values,mb_advantages,self.sumOfRewards )
#reset mbs class
mbs.reset()
#Add last done statement back to mbs class
mbs.mb_dones.append(self.dones)
actions, values, states = self.model.step(self.obs, self.states, self.dones)
actions = self.exploration(actions)
mbs.mb_obs.append(np.copy(self.obs))
mbs.mb_actions.append(actions)
mbs.mb_values.append(values)
#store last state --------- shouldn't it be stored in mbs class since it depends on comID??????????
mbs.mb_states.append(np.copy(self.states))
self.states = states
#mbs.mb_dones.append(self.dones)
self.actionStats(actions)
if done[0]:
#remove mbs from list
self.mb_Classes.remove(mbs)
self.states = model.initial_state
#call action
self.env.emit(comID,actions)
self.env.SetCallbackFunction(on_Message)
self.update = 1
self.train_writer = tf.summary.FileWriter('/usr/src/app/log')
def learning(obs, states, rewards, masks, actions, values,advantages,sumOfRewards):
#print("OBS:" + str(obs))
#print("REW:" +str(rewards))
#print("ACT:"+str(actions))
log_interval=100
save_interval = 1000
update = self.update
policy_loss, value_loss, policy_entropy,summary = model.train(obs, states, rewards, masks, actions, values,advantages)
nbatch = 1*nsteps
tstart = time.time()
nseconds = time.time() - tstart
# Guard against a zero elapsed time; fps as computed here is only indicative.
fps = int((update * nbatch) / max(nseconds, 1e-8))
self.train_writer.add_summary(summary,update)
if update % log_interval == 0:
actStat = self.ActionStats
logger.record_tabular("action_CTYP_AGENT",actStat['ChatType'][0])
logger.record_tabular("action_CTYP_WORKER",actStat['ChatType'][1])
logger.record_tabular("action_CTYP_KNOWLEDGE",actStat['ChatType'][2])
#logger.record_tabular("action_CTYP_REVIEW",actStat['ChatType'][3])
logger.record_tabular("action_CNR_0",actStat['ChatNr'][0])
logger.record_tabular("action_CNR_1",actStat['ChatNr'][1])
logger.record_tabular("action_MTYP_QUESTION",actStat['MessageType'][0])
logger.record_tabular("action_MTYP_ANSWER",actStat['MessageType'][1])
logger.record_tabular("action_MTYP_OBSERVATION",actStat['MessageType'][2])
logger.record_tabular("nupdates", update)
logger.record_tabular("avg_reward",sumOfRewards/(nbatch*log_interval))
logger.record_tabular("sum_reward",sumOfRewards)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.dump_tabular()
self.sumOfRewards = 0
self.ResetActionStats()
if update % save_interval == 0:
self.model.save(self.modelsavepath)
self.update = update+ 1
self.learn_cb = learning
def listen(self):
self.env.ListenToSocketIO()
#print('ListenCom')
def update_obs(self, obs):
# Do frame-stacking here instead of the FrameStack wrapper to reduce
# IPC overhead
#print(str(obs))
#self.obs = np.roll(self.obs, shift=-1, axis=3)
#self.obs[:, :, :, -1] = obs[:, :, :, 0]
self.obs = obs
#self.obs[:,-1] = obs[:,0]
def exploration(self,actions):
epsilon = 0.8
if random.random() > epsilon:
ret = actions
else:
ncty = self.env.ChatType.n
ncnr = self.env.ChatNumber.n
nmty = self.env.MessageType.n
nmsg = self.env.MessageText.n
nrew = self.env.FeedbackReward.n
actspace = self.env.ActSpace
cty = [random.sample(range(ncty),1)[0] for _ in range(actspace[0])]
cnr = [random.sample(range(ncnr),1)[0] for _ in range(actspace[1])]
mty = [random.sample(range(nmty),1)[0] for _ in range(actspace[2])]
msg = [random.sample(range(nmsg),1)[0] for _ in range(actspace[3])]
rew = [random.sample(range(nrew),1)[0] for _ in range(actspace[4])]
ret = np.reshape(np.concatenate((cty,cnr,mty,msg,rew)),(1,int(np.sum(actspace))))
return ret
def ResetActionStats(self):
ncty = self.env.ChatType.n
ncnr = self.env.ChatNumber.n
nmty = self.env.MessageType.n
nrew = self.env.FeedbackReward.n
dic = {}
dic['ChatType'] = np.zeros(ncty)
dic['ChatNr'] = np.zeros(ncnr)
dic['MessageType'] = np.zeros(nmty)
dic['Reward'] = np.zeros(nrew)
self.ActionStats = dic
return dic
def actionStats(self, actions):
ctyi,cnri, mtyi, mtxt, reward = np.split(actions,np.cumsum(self.env.ActSpace), axis=1)[:-1]
ctyi = ctyi[0][0]
cnri = cnri[0][0]
mtyi = mtyi[0][0]
reward = reward[0][0]
self.ActionStats['ChatType'][ctyi] += 1
self.ActionStats['ChatNr'][cnri] += 1
self.ActionStats['MessageType'][mtyi] += 1
self.ActionStats['Reward'][reward] += 1
class mb_class():
def __init__(self,id, gamma, nsteps):
self.ID = id
self.gamma = gamma
self.nsteps = nsteps
self.gae_lambda = 0.96
self.mb_obs, self.mb_rewards, self.mb_actions, self.mb_values, self.mb_dones, self.mb_states = [],[],[],[],[], []
def reset(self):
self.mb_obs, self.mb_rewards, self.mb_actions, self.mb_values, self.mb_dones, self.mb_states = [],[],[],[],[], []
def transform(self, mb_states, batch_ob_shape, last_values):
if len(self.mb_obs) < self.nsteps:
short = True
else:
short = False
mb_obs = self.mb_obs
mb_rewards = self.mb_rewards
mb_actions = self.mb_actions
mb_values = self.mb_values
mb_dones = self.mb_dones
mb_states = self.mb_states
mb_masks = []
[mb_masks.append([1]) for i in range(len(mb_obs))]
[mb_masks.append([0]) for i in range(self.nsteps-len(mb_masks))]
#append missing nsteps
[mb_obs.append(np.zeros(mb_obs[0].shape)) for i in range(self.nsteps-len(mb_obs))]
[mb_rewards.append([0]) for i in range(self.nsteps-len(mb_rewards))]
[mb_actions.append(np.zeros(mb_actions[0].shape)) for i in range(self.nsteps-len(mb_actions))]
[mb_values.append(np.zeros(mb_values[0].shape)) for i in range(self.nsteps-len(mb_values))]
[mb_dones.append([True]) for i in range(self.nsteps +1-len(mb_dones))]
[mb_states.append(np.zeros(mb_states[0].shape)) for i in range(self.nsteps-len(mb_states))]
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(1, 0)
mb_masks = np.asarray(mb_masks, dtype=np.float32).swapaxes(1, 0)
#mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
#last_values = self.model.value(self.obs, self.states, self.dones).tolist()
#discount/bootstrap off value fn
#print('mbrewards: ' +str(mb_rewards))
mb_advantages = self.GenrelaziedAdvantageEstimate(mb_rewards,mb_values,mb_dones,last_values)
for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
#print('before disc: ' +str(rewards))
#print('dones: ' +str(dones))
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] #Don't get it why not use last?
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
#print('after disc: ' +str(rewards))
mb_rewards[n] = rewards
mb_rewards = np.squeeze(mb_rewards, axis=0)
mb_advantages =
|
np.squeeze(mb_advantages, axis=0)
|
numpy.squeeze
|
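# mb_class.transform() above calls self.GenrelaziedAdvantageEstimate(...), whose
# definition is not shown in this excerpt. A minimal sketch of a standard
# GAE(lambda) computation over a single rollout is given below; the name,
# signature and shapes are assumptions rather than the original implementation.
import numpy as np

def generalized_advantage_estimate(rewards, values, dones, last_value,
                                   gamma=0.99, lam=0.96):
    """Compute GAE(lambda) advantages for one trajectory of 1-D arrays."""
    rewards = np.asarray(rewards, dtype=np.float32)
    values = np.asarray(values, dtype=np.float32)
    dones = np.asarray(dones, dtype=np.float32)
    advantages = np.zeros_like(rewards)
    gae = 0.0
    next_value = float(last_value)
    # Work backwards through the rollout, resetting at episode boundaries.
    for t in reversed(range(len(rewards))):
        nonterminal = 1.0 - dones[t]
        delta = rewards[t] + gamma * next_value * nonterminal - values[t]
        gae = delta + gamma * lam * nonterminal * gae
        advantages[t] = gae
        next_value = values[t]
    return advantages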
# -*- coding: utf-8 -*-
"""Simulated Minimum Distance
"""
##############
# 1. imports #
##############
import time
import numpy as np
from numba import njit, prange
from scipy import optimize
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
markers = ['.','s','P','D','v','^','*']
style = ['-','--','-.',':','-','--','-.']
############
# 2. model #
############
class SimulatedMinimumDistanceClass():
#########
# setup #
#########
def __init__(self,est_par,mom_func,datamoms=None,options=None):
""" initialize """
# a. the parameters that should be estimated
self.est_par = est_par
# b. the function that calculates moments
self.mom_func = mom_func
# c. the moments in the data to be matched
self.datamoms = datamoms
# d. estimation options
self.options = options
def bootstrap_mom_var(self,data,num_obs,num_boot,num_moms,seed=9210):
""" bootstrap moment covariance matrix """
# a. set seed
np.random.seed(seed)
# b. allocate memory
boot = np.empty((num_moms,num_boot))
# c. draw random samples
for b in range(num_boot):
ids = np.random.randint(low=0,high=num_obs,size=num_obs)
boot[:,b] = self.mom_func(data,ids)
# d. calculate covariance
Omega = np.cov(boot)
# e. return Omega (scaled due to averages in moments)
return Omega*num_obs
def estimate(self,model,W,do_print_initial=True):
""" estimate """
# a. initial guesses
est_par = self.est_par
theta0 = np.array([val['guess'] for key,val in est_par.items()])
names = [key for key,val in est_par.items()]
# b. bounds
lower = np.array([val['lower'] for key,val in est_par.items()])
upper = np.array([val['upper'] for key,val in est_par.items()])
# c. evaluate the objective function
if do_print_initial: print(f'objective function at starting values: {self.obj_func(theta0,model,W,names,lower,upper)}')
# d. call numerical solver
method = 'nelder-mead'
res = optimize.minimize(self.obj_func,theta0,args=(model,W,names,lower,upper),
method=method)
est = {'theta':res.x , 'obj_val':res.fun}
for i,val in enumerate(res.x):
key = names[i]
est[key] = val
return est
def obj_func(self,theta,model,W,names,lower,upper):
""" calculate objective function """
# a. impose bounds and calculate penalty
penalty = 0.0
theta_clipped = theta.copy()
for i in range(theta.size):
# i. clip
if (lower[i] is not None) or (upper[i] is not None):
theta_clipped[i] = np.clip(theta_clipped[i],lower[i],upper[i])
# ii. penalty
penalty += 10_000.0*(theta[i]-theta_clipped[i])**2
# b. calculate the vector of differences between moments in data and in simulated data
diff = self.diff_vec_func(theta_clipped,model,names)
# c. return the objective function
objval = diff.T @ W @ diff
return objval + penalty
def diff_vec_func(self,theta,model,names):
""" difference between data and simulated model moments """
# a. update parameters in par
for i in range(theta.size):
setattr(model.par,names[i],theta[i])
# b. solve model
model.solve(do_print=False)
# c. simulate model
model.simulate(do_print=False)
# calculate moments in simulated data
moms_sim = self.mom_func(model.sim)
# return the vector of differences
return self.datamoms - moms_sim
###################
# standard errors #
###################
def num_grad(self,theta,model,names,step=1.0e-5,num_moms=None):
""" calulcate numerical gradient vector """
# a. determine number of moments and parameters
num_par = theta.size
if num_moms is None:
num_moms = self.diff_vec_func(theta,model,names).size
# b. allocate memory
grad = np.empty((num_moms,num_par))
# c. loop through parameters
for par in range(num_par):
# i. construct step as a function of scale of theta
step_now = np.zeros(num_par)
step_now[par] = np.fmax(np.abs(theta[par]*step),step)
# ii. update theta's
theta_plus = theta.copy() + step_now
theta_minus = theta.copy() - step_now
# iii. calculate moments at these parameters
mom_plus = self.diff_vec_func(theta_plus,model,names)
mom_minus = self.diff_vec_func(theta_minus,model,names)
# iv. store the gradient
grad[:,par] = (mom_plus - mom_minus)/(2*step_now[par])
# d. re-set all parameters
for par in range(num_par):
setattr(model.par,names[par],theta[par])
return grad
def calc_influence_function(self,theta,model,W):
""" calculate influence function (Gamma) """
# gradient wrt. theta parameters
names = [key for key,val in self.est_par.items()]
G = self.num_grad(theta,model,names)
# return Gamma
return - np.linalg.inv(G.T @ W @ G) @ G.T @ W , G
########################
# sensitivity measures #
########################
def informativeness_moments(self,grad,Omega,W):
""" calculate informativeness of moments """
info = dict()
# a. calculate objects re-used below
GW = grad.T @ W
GWG = GW @ grad
GWG_inv = np.linalg.inv(GWG)
GSi = grad.T @ np.linalg.inv(Omega)
GSiG = GSi @ grad
Avar = GWG_inv @ (GW @ Omega @ GW.T) @ GWG_inv
AvarOpt = np.linalg.inv(GSiG)
# b. informativenss measures
info['M1'] = - GWG_inv @ GW
num_mom = len(Omega)
num_par = len(grad[0])
shape = (num_par,num_mom)
info['M2'] = np.nan + np.zeros(shape)
info['M3'] = np.nan + np.zeros(shape)
info['M4'] = np.nan + np.zeros(shape)
info['M5'] = np.nan + np.zeros(shape)
info['M6'] = np.nan + np.zeros(shape)
info['M2e'] = np.nan + np.zeros(shape)
info['M3e'] = np.nan + np.zeros(shape)
info['M4e'] = np.nan + np.zeros(shape)
info['M5e'] = np.nan + np.zeros(shape)
info['M6e'] = np.nan + np.zeros(shape)
for k in range(num_mom):
# pick out the kk'th element: Okk
O = np.zeros((num_mom,num_mom))
O[k,k] = 1
M2kk = (np.linalg.inv(GSiG) @ (GSi @ O @ GSi.T)) @ np.linalg.inv(GSiG) # num_par-by-num_par
M3kk = GWG_inv @ (GW @ O @ GW.T) @ GWG_inv
M6kk = - GWG_inv @ (grad.T@ O @ grad) @ Avar \
+ GWG_inv @ (grad.T @ O @ Omega @ W @ grad) @ GWG_inv \
+ GWG_inv @ (grad.T @ W @ Omega @ O @ grad) @ GWG_inv \
- Avar @ (grad.T @ O @ grad) @ GWG_inv # num_par-by-num_par
info['M2'][:,k] = np.diag(M2kk) # store only the diagonal: the effect on the variance of a given parameter from a slight change in the variance of the kth moment
info['M3'][:,k] = np.diag(M3kk) # store only the diagonal: the effect on the variance of a given parameter from a slight change in the variance of the kth moment
info['M6'][:,k] = np.diag(M6kk) # store only the diagonal: the effect on the variance of a given parameter from a slight change in the variance of the kth moment
info['M2e'][:,k] = info['M2'][:,k]/np.diag(AvarOpt) * Omega[k,k] # store only the diagonal: the effect on the variance of a given parameter from a slight change in the variance of the kth moment
info['M3e'][:,k] = info['M3'][:,k]/np.diag(Avar) * Omega[k,k] # store only the diagonal: the effect on the variance of a given parameter from a slight change in the variance of the kth moment
info['M6e'][:,k] = info['M6'][:,k]/np.diag(Avar) * W[k,k] # store only the diagonal: the effect on the variance of a given parameter from a slight change in the variance of the kth moment
# remove the kth moment from the weight matrix and
# calculate the asymptotic variance without this moment
W_now = W.copy()
W_now[k,:] = 0
W_now[:,k] = 0
GW_now = grad.T@W_now
GWG_now = GW_now@grad
Avar_now = (np.linalg.inv(GWG_now) @ (GW_now@ Omega @GW_now.T)) @ np.linalg.inv(GWG_now)
info['M4'][:,k] = np.diag(Avar_now) - np.diag(Avar)
info['M4e'][:,k] = info['M4'][:,k] / np.diag(Avar)
# optimal version
Omega_now = np.delete(Omega,k,axis=0)
Omega_now = np.delete(Omega_now,k,axis=1)
grad_now = np.delete(grad,k,axis=0)
AvarOpt_now = np.linalg.inv((grad_now.T @ np.linalg.inv(Omega_now)) @ grad_now)
info['M5'][:,k] =
|
np.diag(AvarOpt_now)
|
numpy.diag
|
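# A hedged usage sketch for the estimator above. The model object (with .par,
# .solve(), .simulate() and .sim) and the moment function are placeholders;
# only the structure of est_par and the estimate() call follow the class as
# written.
import numpy as np

def example_mom_func(data, ids=None):
    # Example moments: mean and variance of an outcome `y` (attribute assumed).
    y = data.y if ids is None else data.y[ids]
    return np.array([np.mean(y), np.var(y)])

example_est_par = {
    'sigma': {'guess': 0.5, 'lower': 0.0, 'upper': 2.0},
    'beta': {'guess': 0.9, 'lower': 0.5, 'upper': 1.0},
}
example_datamoms = np.array([1.0, 0.2])  # would come from the data in practice
smd = SimulatedMinimumDistanceClass(example_est_par, example_mom_func,
                                    datamoms=example_datamoms)
W = np.eye(len(example_datamoms))  # identity weighting matrix
# est = smd.estimate(model, W)     # `model` must be supplied by the user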
from numpy import pi, array, cos, sin, dot
import cv2 as cv
import numpy as np
from .Point import Point
confirm_box = False
draw_rectangle = False
box_x = 0
box_y = 0
box_w = 0
box_h = 0
k = 0
p = None
class Shape:
def __init__(self, points):
self.points = []
for p in points:
point = Point(*p)
self.points.append(point)
self.centroid()
def to_image(self, i, frame):
cv.imwrite('image{}.jpg'.format(i), self.extract_content(frame))
def to_array(self):
return np.array([np.array(p.to_tuple()) for p in self.points])
def centroid(self):
points = array([array(p.to_tuple())
for p in self.points]) / len(self.points)
self.center = Point(*points.sum(axis=0))
def translate_x(self, x):
for point in self.points:
point.translate_x(x)
def translate_y(self, y):
for point in self.points:
point.translate_y(y)
# translate the first point of the shape, then the whole shape, to the point
# TODO Translate Shape by its center
def translate_to(self, x, y):
point = self.points[0].to_tuple()
self.to_rectangle()
xx = self.max_x
yy = self.max_y
x_distance = x - xx
y_distance = y - yy
for p in self.points:
p.translate_x(x_distance)
p.translate_y(y_distance)
self.centroid()
def rotate_around_center(self, theta):
P = array([array(list(p.to_tuple())) for p in self.points])
self.centroid()
C = self.center.to_tuple()
C = array([array(list(C)) for i in range(len(self.points))])
R = array([[cos(theta), sin(theta)], [-sin(theta), cos(theta)]])
P_res =
|
dot(R, (P - C).T)
|
numpy.dot
|
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import colors
import copy
from tqdm import tqdm
class IsingSquare:
# initialise a spin lattice and populate with random spins
def __init__(self, order, interactionVal=1, magMoment=1):
if order < 3:
raise ValueError('Order number needs to be greater than 2.')
self.temp = 0.0
self.beta = 0.0
self.boltzmann = 1.38064852 * (10 ** -23)
self.order = order
self.J = float(interactionVal)
self.h = float(magMoment)
self.magList = []
self.specHeatList = []
self.energyList = []
self.suscepList = []
self.spins = []
self.resetSpins()
# reset the spin lattice to a random configuration
def resetSpins(self):
vals = np.array([-1, 1])
self.spins = np.random.choice(vals, size=(self.order, self.order))
# returns an array of an atom's 4 nearest neighbours
def neighbours(self, row, col):
return np.asarray([self.spins[row][col - 1], #left
self.spins[row][(col + 1) % self.order], #right
self.spins[row - 1][col], #up
self.spins[(row + 1) % self.order][col]]) #down
# calculates the energy of a single atom, using the Hamiltonian
def singleEnergy(self, row, col):
neighbours = self.neighbours(row, col)
selfSpin = self.spins[row][col]
return -self.J * selfSpin * np.sum(np.sum(neighbours)) - self.h * selfSpin
# calculates the magnitude of the entire energy of the lattice
def totalEnergy(self):
energy = 0.0
for i in np.arange(self.order):
for j in np.arange(self.order):
energy += self.singleEnergy(i, j)
# to avoid counting pairs twice, divide by two
# divide by maximum possible energy to normalise
return energy# / (self.order * self.order * (-4 * self.J - self.h) )
# calculates the magnitude of the residual magnetic spin of the lattice
# normalise by dividing by order of lattice squared
def totalMag(self):
return np.sum(np.sum(self.spins)) / (self.order ** 2)
def specHeat(self, energy, energySquared, temp):
return (energySquared - energy ** 2) * (1 / (self.order * self.order * 2 * temp * temp))
def suscep(self, mag, magSquared, temp):
return (magSquared - mag ** 2) / temp
# attempts to flip a random spin using the metropolis algorithm and the Boltzmann distribution
def tryFlip(self, row, col):
# energy change = -2 * E_initial
# so accept change if E_initial >= 0
energy = self.singleEnergy(row, col)
if energy >= 0 or np.random.random() <= math.exp(self.beta * 2 * energy):
self.spins[row][col] *= -1
# closes plot window
def close_event(self):
plt.close() # timer calls this function after 3 seconds and closes the window
# plots a meshgrid of the initial and final spin lattices
def plotStartEndSpins(self, spinsList, iters=1000000):
cmap = colors.ListedColormap(['red', 'yellow'])
bounds = [-1, 0, 1]
norm = colors.BoundaryNorm(bounds, cmap.N)
plt.subplots(nrows=1, ncols=2)
plt.tight_layout()
plt.subplot(1,2,1)
plt.imshow(spinsList[0], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Initial Configuration')
plt.subplot(1, 2, 2)
plt.imshow(spinsList[1], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Final Configuration')
title = "Temperature (J/K_B) = {0}, J = {1}, h = {2}, Iterations = {3}".format(self.temp, self.J, self.h, iters) + "\n" + "Order: {0} x {1}".format(self.order, self.order)
plt.suptitle(title)
# timer = fig.canvas.new_timer(
# interval=graphInterval) # creating a timer object and setting an interval of 3000 milliseconds
# timer.add_callback(self.close_event)
# timer.start()
plt.show()
# simulates the lattice at a constant temperature temp, for iters iterations, plots the resulting lattices, and returns the spin configurations
def basicIter(self, iters=1000000, temp=1, plot=False):
self.resetSpins()
spinsList = [copy.deepcopy(self.spins)]
self.temp = temp
self.beta = 1.0 / self.temp
for i in np.arange(iters + 1):
row, col = np.random.randint(self.order), np.random.randint(self.order)
self.tryFlip(row, col)
spinsList.append(self.spins)
if plot:
self.plotStartEndSpins(spinsList, iters)
else:
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
return spinsList
# simulates the lattice over a temperature range tempRange, with itersPerTemp iterations per temperature
# plotProperties: plot the residual spin, total energy, susceptibility and specific heat
def tempRangeIter(self, tempRange=np.arange(start=0.8, stop=3.2, step=0.05), itersPerTemp=100000, plotProperties=False):
self.resetSpins()
# store the averages here
energyList = []
magList = []
specHeatList = []
suscepList = []
for temp in tqdm(tempRange):
self.beta = 1.0 / temp
#print("Calculating temp:", temp)
# allow to reach equilibrium
for i in np.arange(itersPerTemp + 1):
row, col = np.random.randint(0, self.order), np.random.randint(0, self.order)
self.tryFlip(row, col)
#do a further 500,000 iterations to compute averages, storing the properties every 5,000 iterations
#store the values used to calculate averages here
magListEquilib = []
energyListEquilib = []
for i in np.arange(500000):
if i % 5000 == 0:
energy = self.totalEnergy()
mag = self.totalMag()
energyListEquilib.append(energy)
magListEquilib.append(mag)
row, col = np.random.randint(0, self.order), np.random.randint(0, self.order)
self.tryFlip(row, col)
energyAvg = np.average(energyListEquilib)
energySquaredAvg = np.average(np.square(energyListEquilib))
magAvg = np.average(magListEquilib)
magSquaredAvg = np.average(np.square(magListEquilib))
energyList.append(energyAvg)
magList.append(magAvg)
specHeatList.append(self.specHeat(energyAvg, energySquaredAvg, temp))
suscepList.append(self.suscep(magAvg, magSquaredAvg, temp))
# reset the spins for the next temperature
self.resetSpins()
if plotProperties:
plt.tight_layout()
plt.subplot(2, 2, 1)
plt.plot(tempRange, energyList)
plt.title("Total Energy")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.subplot(2, 2, 2)
plt.plot(tempRange, magList)
plt.title("Residual Spin")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.subplot(2, 2, 3)
plt.plot(tempRange, specHeatList)
plt.title("Specific Heat Capacity")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.subplot(2, 2, 4)
plt.plot(tempRange, suscepList)
plt.title("Susceptibility")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.show()
return {"tempRange": tempRange,
"energyList": energyList,
"magList": magList,
"specHeatList": specHeatList,
"suscepList": suscepList}
class IsingTriangle:
# initialise a spin lattice and populate with random spins
def __init__(self, order, interactionVal=1, magMoment=1):
if order < 4:
raise ValueError('Order number needs to be greater than 3.')
self.temp = 0.0
self.beta = 0.0
self.boltzmann = 1.38064852 * (10 ** -23)
self.order = order
self.J = float(interactionVal)
self.h = float(magMoment)
self.magList = []
self.specHeatList = []
self.energyList = []
self.suscepList = []
self.spins = []
self.resetSpins()
# reset the spin lattice to a random configuration
def resetSpins(self):
self.spins = []
vals = np.array([1, -1])
for i in np.arange(self.order):
self.spins.append(list(np.random.choice(vals, size=i + 1)))
# returns an array of an atom's 6 nearest neighbours
def neighbours(self, row, col):
# centre atoms
if 1 < row < self.order - 1 and 0 < col < row:
return np.asarray([self.spins[row - 1][col - 1],
self.spins[row - 1][col],
self.spins[row][col - 1],
self.spins[row][col + 1],
self.spins[row + 1][col],
self.spins[row + 1][col + 1]])
# left side central
elif 0 < row < self.order - 1 and col == 0:
return np.asarray([self.spins[row - 1][0],
self.spins[row][1],
self.spins[row + 1][0],
self.spins[row + 1][1],
self.spins[row][row],
self.spins[row - 1][row - 1]])
# right side central
elif 0 < row < self.order - 1 and col == row:
return np.asarray([self.spins[row - 1][row - 1],
self.spins[row - 1][0],
self.spins[row][row - 1],
self.spins[row][0],
self.spins[row + 1][row],
self.spins[row + 1][row + 1]])
# bottom side central
elif row == self.order - 1 and 0 < col < row:
return np.asarray([self.spins[row - 1][col - 1],
self.spins[row - 1][col],
self.spins[row][col - 1],
self.spins[row][col + 1],
self.spins[0][0],
self.spins[0][0]])
# very top
elif row == 0:
return np.asarray([self.spins[1][0],
self.spins[1][1],
self.spins[self.order - 1][0],
self.spins[self.order - 1][self.order - 1],
self.spins[self.order - 1][1],
self.spins[self.order - 1][self.order - 2]])
# bottom left
elif row == self.order - 1 and col == 0:
return np.asarray([self.spins[row - 1][0],
self.spins[row - 1][row - 1],
self.spins[row][1],
self.spins[row][row],
self.spins[0][0],
self.spins[0][0]])
# bottom right
elif row == self.order - 1 and (col == row):
return np.asarray([self.spins[row - 1][0],
self.spins[row - 1][row - 1],
self.spins[row][0],
self.spins[row][row - 1],
self.spins[0][0],
self.spins[0][0]])
# calculates the energy of a single atom, using the Hamiltonian
def singleEnergy(self, row, col):
neighbours = self.neighbours(row, col)
selfSpin = self.spins[row][col]
return self.J * selfSpin * np.sum(np.sum(neighbours)) - self.h * selfSpin
# calculates the magnitude of the entire energy of the lattice
def totalEnergy(self):
energy = 0.0
for i in np.arange(self.order):
for j in np.arange(len(self.spins[i])):
energy += self.singleEnergy(i, j)
# to avoid counting pairs twice, divide by two
# divide by maximum possible energy to normalise
return -math.fabs(energy / ((-6 * self.J - self.h) * ((self.order ** 2 + self.order) / 2)))
# calculates the magnitude of the residual magnetic spin of the lattice
# normalise by dividing by order of lattice squared
def totalMag(self):
return math.fabs((np.sum(np.sum(self.spins)) * 2) / (self.order ** 2 + self.order))
def specHeat(self, energy, energySquared, temp):
return (energySquared - energy ** 2) * (1 / (self.order * self.order * 2 * temp * temp))
def suscep(self, mag, magSquared, temp):
return self.J * (magSquared - mag ** 2) * (1 / (self.order * self.order * 2 * temp))
# attempts to flip a random spin using the metropolis algorithm and the Boltzmann distribution
def tryFlip(self, row, col):
# energy change = -2 * E_initial
# so accept change if E_initial <= 0
energy = self.singleEnergy(row, col)
if energy <= 0 or np.random.random() <= math.exp(-self.beta * 2 * energy):
self.spins[row][col] *= -1
# closes plot window
def close_event(self):
plt.close() # timer calls this function after 3 seconds and closes the window
# plots a meshgrid of the initial and final spin lattices
def plotStartEndSpins(self, spinsList, iters=1000000):
for i in np.arange(self.order):
for j in np.arange(self.order - i - 1):
spinsList[0][i].append(8)
spinsList[1][i].append(8)
cmap = colors.ListedColormap(['red', 'yellow', 'white'])
bounds = [-1, 0, 2, 10]
norm = colors.BoundaryNorm(bounds, cmap.N)
plt.subplots(nrows=1, ncols=2)
plt.tight_layout()
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
plt.subplot(1,2,1)
plt.imshow(spinsList[0], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Initial Configuration')
plt.subplot(1, 2, 2)
plt.imshow(spinsList[1], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Final Configuration')
title = "Temperature (J/K_B) = {0}, J = {1}, h = {2}, Iterations = {3}".format(self.temp, self.J, self.h, iters) + "\n" + "Order: {0}".format(self.order,)
plt.suptitle(title)
# timer = fig.canvas.new_timer(
# interval=graphInterval) # creating a timer object and setting an interval of 3000 milliseconds
# timer.add_callback(self.close_event)
# timer.start()
plt.show()
# simulates the lattice at a constant temperature temp, for iters iterations, and returns the spin configurations
def basicIter(self, iters=1000000, temp=1, plot=False):
self.resetSpins()
spinsList = [copy.deepcopy(self.spins)]
self.temp = temp
self.beta = 1.0 / self.temp
for i in np.arange(iters + 1):
row = np.random.randint(self.order)
col = np.random.randint(row + 1)
self.tryFlip(row, col)
spinsList.append(self.spins)
print(spinsList[0])
print(spinsList[1])
if plot:
self.plotStartEndSpins(spinsList, iters)
else:
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
return spinsList
# simulates the lattice over a temperature range tempRange, with itersPerTemp iterations per temperature
# plotProperties: plot the residual spin, total energy, susceptibility and specific heat
def tempRangeIter(self, tempRange=np.arange(start=1, stop=5, step=0.2), itersPerTemp=100000, plotProperties=False):
self.resetSpins()
# store the averages here
energyList = []
magList = []
specHeatList = []
suscepList = []
for temp in tqdm(tempRange):
self.beta = 1.0 / temp
#print("Calculating temp:", temp)
# allow to reach equilibrium
for i in np.arange(itersPerTemp + 1):
row = np.random.randint(self.order)
col = np.random.randint(row + 1)
self.tryFlip(row, col)
#do a further ten thousand iterations to get average, and every two hundred iterations, store the properties
if plotProperties:
#store the values used to calculate averages here
magListEquilib = []
energyListEquilib = []
for i in np.arange(10000):
if i % 200 == 0:
energy = self.totalEnergy()
mag = self.totalMag()
energyListEquilib.append(energy)
magListEquilib.append(mag)
row = np.random.randint(self.order)
col = np.random.randint(row + 1)
self.tryFlip(row, col)
energyAvg = np.average(energyListEquilib)
energySquaredAvg = np.average(np.square(energyListEquilib))
magAvg = np.average(magListEquilib)
magSquaredAvg = np.average(np.square(magListEquilib))
energyList.append(energyAvg)
magList.append(magAvg)
specHeatList.append(self.specHeat(energyAvg, energySquaredAvg, temp))
suscepList.append(self.suscep(magAvg, magSquaredAvg, temp))
# reset the spins for the next temperature
self.resetSpins()
if plotProperties:
plt.tight_layout()
plt.subplot(2, 2, 1)
plt.plot(tempRange, energyList)
plt.title("Total Energy")
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.subplot(2, 2, 2)
plt.plot(tempRange, magList)
plt.title("Residual Spin")
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.subplot(2, 2, 3)
plt.plot(tempRange, specHeatList)
plt.title("Specific Heat Capacity")
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.subplot(2, 2, 4)
plt.plot(tempRange, suscepList)
plt.title("Susceptibility")
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.show()
class IsingHexagon:
# initialise a spin lattice and populate with random spins
def __init__(self, order, interactionVal=1, magMoment=1):
if order < 2:
raise ValueError('Order number needs to be greater than 1.')
self.temp = 0.0
self.beta = 0.0
self.boltzmann = 1.38064852 * (10 ** -23)
self.order = order
self.J = float(interactionVal)
self.h = float(magMoment)
self.magList = []
self.specHeatList = []
self.energyList = []
self.suscepList = []
self.resetSpins()
# reset the spin lattice to a random configuration
def resetSpins(self):
self.spins = []
vals = np.array([1, -1])
if self.order == 1:
self.spins.append(list(np.random.choice(vals, size=2)))
self.spins.append(list(np.random.choice(vals, size=2)))
self.spins.append(list(np.random.choice(vals, size=2)))
self.spins = np.array(self.spins)
return
# top layers
iter = 2
while self.order >= iter / 2.0 and not iter == 2 * self.order:
self.spins.append(list(np.random.choice(vals, size=iter)))
iter += 2
# middle layers
for i in np.arange(5 + 2 * (self.order - 2)):
self.spins.append(list(np.random.choice(vals, size=2 * self.order)))
# bottom layers
iter = 2 * self.order - 2
while iter > 0:
self.spins.append(list(np.random.choice(vals, size= iter)))
iter -= 2
# returns the nearest neighbours for centre atoms, used in the main nearest neighbour function
def centreReturn(self, left, row, col): # left is boolean, when true, use atom to left, otherwise, use atom to right
if left:
return np.asarray([self.spins[row - 1][col],
self.spins[row][col - 1],
self.spins[row + 1][col]])
else:
return np.asarray([self.spins[row - 1][col],
self.spins[row][col + 1],
self.spins[row + 1][col]])
# returns an array of an atom's 3 nearest neighbours
def neighbours(self, row, col):
# centre atoms
if 1 < row < 4 * self.order - 3:
if self.order - 1 < row < 3 * self.order - 1 and 0 < col < len(self.spins[row]) - 1:
# handles centre atoms
if self.order % 2 == 0:
if (row - col) % 2 == 0:
return self.centreReturn(True, row, col)
else:
return self.centreReturn(False, row, col)
else:
if (row - col) % 2 == 0:
return self.centreReturn(False, row, col)
else:
return self.centreReturn(True, row, col)
elif (row < self.order or row > 3 * self.order - 2) and 1 < col < len(self.spins[row]) - 2:
# handles centre atoms
if self.order % 2 == 0:
if (row - col) % 2 == 0:
return self.centreReturn(True, row, col)
else:
return self.centreReturn(False, row, col)
else:
if (row - col) % 2 == 0:
return self.centreReturn(False, row, col)
else:
return self.centreReturn(True, row, col)
# left
# incorrect but works
if self.order - 1 < row < (3 * self.order - 1) and col < 2:
return np.asarray([self.spins[row - 1][0],
self.spins[row + 1][0],
self.spins[row][len(self.spins[row]) - 1]])
# right
# incorrect but works
elif self.order - 1 < row < (3 * self.order - 1) and col > len(self.spins[row]) - 3:
return np.asarray([self.spins[row - 1][col],
self.spins[row + 1][col],
self.spins[row][0]])
# annoying left corner atoms
elif (row < self.order or row > 3 * self.order - 2) and col < 2 and len(self.spins[row]) > 2:
if col == 0:
# top left corner
if row < 2 * (self.order - 1):
return np.asarray([self.spins[row + 1][1],
self.spins[row][1],
self.spins[row][len(self.spins[row]) - 1]])
# bottom left corner
else:
return np.asarray([self.spins[row - 1][1],
self.spins[row][1],
self.spins[row][len(self.spins[row]) - 1]])
else:
# top left corner
if row < 2 * (self.order - 1):
return np.asarray([self.spins[row][0],
self.spins[row - 1][0],
self.spins[row + 1][2]])
# bottom left corner
else:
return np.asarray([self.spins[row][0],
self.spins[row - 1][2],
self.spins[row + 1][0]])
# annoying right corner atoms
elif (row < self.order or row > 3 * self.order - 2) and col > len(self.spins[row]) - 3 and len(self.spins[row]) > 2:
if not col % 2 == 0:
# top right corner
if row < 2 * (self.order - 1):
try:
return np.asarray([self.spins[row + 1][col + 1],
self.spins[row][col - 1],
self.spins[row][0]])
except IndexError:
return np.asarray([self.spins[row + 1][col],
self.spins[row][col - 1],
self.spins[row][0]])
# bottom right corner
else:
try:
return np.asarray([self.spins[row - 1][col + 1],
self.spins[row][col - 1],
self.spins[row][0]])
except IndexError:
return np.asarray([self.spins[row - 1][col],
self.spins[row][col - 1],
self.spins[row][0]])
else:
# top right corner
if row < 2 * (self.order - 1):
return np.asarray([self.spins[row][col + 1],
self.spins[row - 1][col - 1],
self.spins[row + 1][col]])
# bottom right corner
else:
return np.asarray([self.spins[row][col + 1],
self.spins[row - 1][col + 1],
self.spins[row + 1][col - 1]])
# top
elif row == 0:
if col == 0:
return np.asarray([self.spins[0][1],
self.spins[1][1],
self.spins[4 * self.order - 2][0]])
else:
return np.asarray([self.spins[0][0],
self.spins[1][2],
self.spins[4 * self.order - 2][1]])
# bottom
# don't have to check for anything, only option remaining
elif col == 0:
return np.asarray([self.spins[row][1],
self.spins[row - 1][1],
self.spins[0][0]])
else:
return np.asarray([self.spins[row][0],
self.spins[1][2],
self.spins[0][1]])
# calculates the energy of a single atom, using the Hamiltonian
def singleEnergy(self, row, col):
neighbours = self.neighbours(row, col)
selfSpin = self.spins[row][col]
return self.J * selfSpin * np.sum(np.sum(neighbours)) - self.h * selfSpin
# calculates the magnitude of the entire energy of the lattice
def totalEnergy(self):
energy = 0.0
for i in np.arange(len(self.spins)):
for j in np.arange(len(self.spins[i])):
energy += self.singleEnergy(i, j)
# to avoid counting pairs twice, divide by two
# divide by maximum possible energy to normalise
return -math.fabs(energy / ((3 * self.J + self.h) * (6 * self.order * self.order))) #( * (-3 * self.J - self.h)
# calculates the magnitude of the residual magnetic spin of the lattice
# normalise by dividing by order of lattice squared
def totalMag(self):
sum = 0
for i in np.arange(len(self.spins)):
sum += np.sum(self.spins[i])
return math.fabs(float(sum) / (6 * self.order ** 2))
def specHeat(self, energy, energySquared, temp):
return (energySquared - energy ** 2) * (1 / (self.order * self.order * 2 * temp * temp))
def suscep(self, mag, magSquared, temp):
return self.J * (magSquared - mag ** 2) * (1 / (self.order * self.order * 2 * temp))
# attempts to flip a random spin using the metropolis algorithm and the Boltzmann distribution
def tryFlip(self, row, col):
# energy change = -2 * E_initial
# so accept change if E_initial <= 0
energy = self.singleEnergy(row, col)
if energy <= 0 or np.random.random() <= math.exp(-self.beta * 2 * energy):
self.spins[row][col] *= -1
# closes plot window
def close_event(self):
plt.close() # timer calls this function after 3 seconds and closes the window
# plots a meshgrid of the initial and final spin lattices
def plotStartEndSpins(self, spinsList, iters=1000000):
for i in np.arange(2):
for j in spinsList[i]:
while len(j) < 2 * self.order:
j.insert(0, 8)
j.append(8)
cmap = colors.ListedColormap(['red', 'yellow', 'white'])
bounds = [-1, 0, 2, 10]
norm = colors.BoundaryNorm(bounds, cmap.N)
plt.subplots(nrows=1, ncols=2)
plt.tight_layout()
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
plt.subplot(1,2,1)
plt.imshow(spinsList[0], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Initial Configuration')
plt.subplot(1, 2, 2)
plt.imshow(spinsList[1], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Final Configuration')
title = "Temperature (J/K_B) = {0}, J = {1}, h = {2}, Iterations = {3}".format(self.temp, self.J, self.h, iters) + "\n" + "Order: {0}".format(self.order,)
plt.suptitle(title)
# timer = fig.canvas.new_timer(
# interval=graphInterval) # creating a timer object and setting an interval of 3000 milliseconds
# timer.add_callback(self.close_event)
# timer.start()
plt.show()
# simulates the lattice at a constant temperature temp, for iters iterations, and returns the spin configurations
def basicIter(self, iters=1000000, temp=1, plot=False):
self.resetSpins()
spinsList = [copy.deepcopy(self.spins)]
self.temp = temp
self.beta = 1.0 / self.temp
for i in np.arange(iters + 1):
row = np.random.randint(4 * self.order - 1)
col = np.random.randint(len(self.spins[row]))
self.tryFlip(row, col)
spinsList.append(self.spins)
if plot:
self.plotStartEndSpins(spinsList, iters)
else:
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
return spinsList
# simulates the lattice over a temperature range tempRange, with itersPerTemp iterations per temperature
# plotProperties: plot the residual spin, total energy, susceptibility and specific heat
def tempRangeIter(self, tempRange=np.arange(start=1, stop=5, step=0.2), itersPerTemp=100000, plotProperties=False):
self.resetSpins()
# store the averages here
energyList = []
magList = []
specHeatList = []
suscepList = []
for temp in tqdm(tempRange):
self.beta = 1.0 / temp
#print("Calculating temp:", temp)
# allow to reach equilibrium
for i in np.arange(itersPerTemp + 1):
row =
|
np.random.randint(4 * self.order - 1)
|
numpy.random.randint
|
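# A minimal standalone sketch of the Metropolis acceptance rule shared by the
# tryFlip methods above: flipping one spin changes the local energy by
# dE = -2 * E_initial, and the flip is accepted with probability
# min(1, exp(-beta * dE)). Illustrative only, independent of the lattice classes.
import math
import numpy as np

def metropolis_accept(e_initial, beta, rng=np.random):
    """Return True if a single-spin flip with dE = -2 * e_initial is accepted."""
    d_energy = -2.0 * e_initial
    return d_energy <= 0 or rng.random() <= math.exp(-beta * d_energy)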
""" MuJoCo env wrappers. """
# Adapted from https://github.com/openai/baselines
import gym
import numpy as np
class RunningMeanVar:
""" Computes running mean and variance.
Args:
eps (float): a small constant used to initialize mean to zero and
variance to 1.
shape tuple(int): shape of the statistics.
"""
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, eps=1e-4, shape=()):
self.mean = np.zeros(shape)
self.var = np.ones(shape)
self.count = eps
def update(self, batch):
""" Updates the running statistics given a batch of samples. """
if batch.shape[1:] != self.mean.shape:
raise ValueError(f"batch has invalid shape: {batch.shape}, "
f"expected shape {(None,) + self.mean.shape}")
batch_mean = np.mean(batch, axis=0)
batch_var = np.var(batch, axis=0)
batch_count = batch.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
""" Updates the running statistics given their new values on new data. """
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def update_mean_var_count_from_moments(mean, var, count,
batch_mean, batch_var, batch_count):
""" Updates running mean statistics given a new batch. """
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
new_var = (
var * (count / tot_count)
+ batch_var * (batch_count / tot_count)
+ np.square(delta) * (count * batch_count / tot_count ** 2))
new_count = tot_count
return new_mean, new_var, new_count
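# Quick illustrative check (not part of the original wrapper; the helper name is
# hypothetical): merging batch statistics through RunningMeanVar should agree
# with the mean and variance of the concatenated data, up to the eps initialisation.
def _check_running_mean_var():
    rng = np.random.default_rng(0)
    a = rng.normal(size=(100, 3))
    b = rng.normal(size=(50, 3))
    rmv = RunningMeanVar(shape=(3,))
    rmv.update(a)
    rmv.update(b)
    both = np.concatenate([a, b], axis=0)
    assert np.allclose(rmv.mean, both.mean(axis=0), atol=1e-4)
    assert np.allclose(rmv.var, both.var(axis=0), atol=1e-4)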
class Normalize(gym.Wrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
# pylint: disable=too-many-arguments
def __init__(self, env, obs=True, ret=True,
clipobs=10., cliprew=10., gamma=0.99, eps=1e-8):
super().__init__(env)
self.obs_rmv = (RunningMeanVar(shape=self.observation_space.shape)
if obs else None)
self.ret_rmv = RunningMeanVar(shape=()) if ret else None
self.clipob = clipobs
self.cliprew = cliprew
self.ret = np.zeros(getattr(self.env.unwrapped, "nenvs", 1))
self.gamma = gamma
self.eps = eps
def observation(self, obs):
""" Preprocesses a given observation. """
if not self.obs_rmv:
return obs
rmv_batch = (
|
np.expand_dims(obs, 0)
|
numpy.expand_dims
|
import ioplin
import efficientnet
import tensorflow as tf
import os
import numpy as np
import PIL.Image
from sklearn.metrics import roc_auc_score, roc_curve
import argparse
here = os.path.abspath(os.path.dirname(os.getcwd()))
def getData(type='train'):
y = []
path = os.path.join(here,'miniset',type)
for root,dirs,files in os.walk(path):
num = len(files)
j = 0
for name in files:
if name != 'groundtruth.txt':
p = os.path.join(path, name)
img_o = PIL.Image.open(p)
img = np.array(img_o,'uint8')
img = img.reshape((img.shape[0],img.shape[1],1))
if j == 0:
img_matrix =
|
np.zeros((num-1,img.shape[0],img.shape[1],1),'uint8')
|
numpy.zeros
|
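# The body of getData() above is truncated at the allocation of img_matrix.
# A hedged sketch of the intended pattern (allocate the uint8 stack once, then
# fill one slot per image); the names and file handling here are assumptions.
import numpy as np
import PIL.Image

def load_grayscale_stack(paths):
    """Load equally sized grayscale images into an (N, H, W, 1) uint8 array."""
    stack = None
    for j, p in enumerate(paths):
        img = np.array(PIL.Image.open(p), 'uint8')
        img = img.reshape((img.shape[0], img.shape[1], 1))
        if stack is None:
            stack = np.zeros((len(paths),) + img.shape, 'uint8')
        stack[j] = img
    return stack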
'''
Computes a bunch of estimates from an event study data set:
- AKM variance decomposition
- Andrews bias correction
- KSS bias correction
Does this through class FEEstimator
'''
import warnings
from pathlib import Path
import pyamg
import numpy as np
import pandas as pd
from bipartitepandas import update_dict, logger_init
from scipy.sparse import csc_matrix, coo_matrix, diags, linalg
import time
# import pyreadr
import os
from multiprocessing import Pool, TimeoutError, set_start_method
from timeit import default_timer as timer
import itertools
import pickle
import time
import json
import glob, sys
# Try to use tqdm
try:
from tqdm import tqdm, trange
except ImportError:
trange = range
# def pipe_qcov(df, e1, e2): # FIXME I moved this from above, also this is used only in commented out code
# v1 = df.eval(e1)
# v2 = df.eval(e2)
# return np.cov(v1, v2)[0][1]
class FEEstimator:
'''
Uses multigrid and partialing out to solve two way fixed effect model.
@ FIXME I think delete everything below this, it's basically contained in the class/functions within the class
takes as an input this adata
and creates associated A = [J W] matrix which are AKM dummies
provides methods to do A x Y but also (A'A)^-1 A'Y solve method
'''
def __init__(self, data, params):
'''
Arguments:
data (Pandas DataFrame): cross-section labor data. Data contains the following columns:
i (worker id)
j1 (firm id 1)
j2 (firm id 2)
y1 (compensation 1)
y2 (compensation 2)
t1 (last period of observation 1)
t2 (last period of observation 2)
w1 (weight 1)
w2 (weight 2)
m (0 if stayer, 1 if mover)
cs (0 if not in cross section, 1 if in cross section)
params (dict): dictionary of parameters for FE estimation
Dictionary parameters:
ncore (int, default=1): number of cores to use
batch (int, default=1): batch size to send in parallel
ndraw_pii (int, default=50): number of draws to use in approximation for leverages
levfile (str, default=''): file to load precomputed leverages
ndraw_tr (int, default=5): number of draws to use in approximation for traces
he (bool, default=False): if True, compute heteroskedastic correction
out (str, default='res_fe.json'): outputfile where results are saved
statsonly (bool, default=False): if True, return only basic statistics
feonly (bool, default=False): if True, compute only fixed effects and not variances
Q (str, default='cov(alpha, psi)'): which Q matrix to consider. Options include 'cov(alpha, psi)' and 'cov(psi_t, psi_{t+1})'
seed (int, default=None): NumPy RandomState seed
'''
# Start logger
logger_init(self)
# self.logger.info('initializing FEEstimator object')
self.adata = data
try:
self.adata.sort_values(['i', 't'], inplace=True)
except KeyError:
self.adata.sort_values(['i', 't1'], inplace=True)
# Define default parameter dictionaries
default_params = {
'ncore': 1, # Number of cores to use
'batch': 1, # Batch size to send in parallel
'ndraw_pii': 50, # Number of draws to use in approximation for leverages
'levfile': '', # File to load precomputed leverages
'ndraw_tr': 5, # Number of draws to use in approximation for traces
'he': False, # If True, compute heteroskedastic correction
'out': 'res_fe.json', # Outputfile where results are saved
'statsonly': False, # If True, return only basic statistics
'feonly': False, # If True, compute only fixed effects and not variances
'Q': 'cov(alpha, psi)', # Which Q matrix to consider. Options include 'cov(alpha, psi)' and 'cov(psi_t, psi_{t+1})'
# 'con': False, # Computes the smallest eigen values, this is the filepath where these results are saved FIXME not used
# 'logfile': '', # Log output to a logfile FIXME not used
# 'check': False # If True, compute the non-approximated estimates as well FIXME not used
'seed': None # np.random.RandomState() seed
}
self.params = update_dict(default_params, params)
self.res = {} # Results dictionary
self.summary = {} # Summary results dictionary
# Save some commonly used parameters as attributes
self.ncore = self.params['ncore'] # Number of cores to use
self.ndraw_pii = self.params['ndraw_pii'] # Number of draws to compute leverage
self.ndraw_trace = self.params['ndraw_tr'] # Number of draws to compute heteroskedastic correction
self.compute_he = self.params['he']
# Store some parameters in results dictionary
self.res['cores'] = self.ncore
self.res['ndp'] = self.ndraw_pii
self.res['ndt'] = self.ndraw_trace
# Create NumPy Generator instance
self.rng = np.random.default_rng(self.params['seed'])
# self.logger.info('FEEstimator object initialized')
def __getstate__(self):
'''
Defines how the model is pickled.
'''
odict = {k: self.__dict__[k] for k in self.__dict__.keys() - {'ml'}}
return odict
def __setstate__(self, d):
'''
Defines how the model is unpickled.
Arguments:
d (dict): attribute dictionary
'''
# Need to recreate the simple model and the search representation
self.__dict__ = d # Make d the attribute dictionary
self.ml = pyamg.ruge_stuben_solver(self.M)
@staticmethod
def __load(filename):
'''
Load files for heteroskedastic correction.
Arguments:
filename (string): file to load
Returns:
fes: loaded file
'''
fes = None
with open(filename, 'rb') as infile:
fes = pickle.load(infile)
return fes
def __save(self, filename):
'''
Save FEEstimator class to filename as pickle.
Arguments:
filename (string): filename to save to
'''
with open(filename, 'wb') as outfile:
pickle.dump(self, outfile)
def fit_1(self):
'''
Run FE solver, part 1. Before fit_2(), modify adata to allow creation of Q matrix.
'''
self.start_time = time.time()
# Begin cleaning and analysis
self.__prep_vars() # Prepare data
self.__prep_JWM() # Use cleaned adata to generate some attributes
self.__compute_early_stats() # Use cleaned data to compute some statistics
def fit_2(self):
'''
Run FE solver, part 2.
'''
if self.params['statsonly']: # If only returning early statistics
self.__save_early_stats()
else: # If running analysis
self.__create_fe_solver() # Solve FE model
self.__get_fe_estimates() # Add fixed effect columns
if not self.params['feonly']: # If running full model
self.__compute_trace_approximation_ho() # Compute trace approximation
# If computing heteroskedastic correction
if self.compute_he:
self.__compute_leverages_Pii() # Solve heteroskedastic model
self.__compute_trace_approximation_he() # Compute trace approximation
self.__collect_res() # Collect all results
end_time = time.time()
self.res['total_time'] = end_time - self.start_time
del self.start_time
self.__save_res() # Save results to json
self.__drop_cols() # Drop irrelevant columns
self.logger.info('------ DONE -------')
def __prep_vars(self):
'''
Generate some initial class attributes and results.
'''
self.logger.info('preparing the data')
self.nf = self.adata.n_firms() # Number of firms
self.nw = self.adata.n_workers() # Number of workers
self.nn = len(self.adata) # Number of observations
self.logger.info('data firms={} workers={} observations={}'.format(self.nf, self.nw, self.nn))
self.res['n_firms'] = self.nf
self.res['n_workers'] = self.nw
self.res['n_movers'] = len(np.unique(self.adata[self.adata['m'] == 1]['i']))
self.res['n_stayers'] = self.res['n_workers'] - self.res['n_movers']
self.logger.info('data movers={} stayers={}'.format(self.res['n_movers'], self.res['n_stayers']))
# Prepare 'cs' column (0 if observation is first for a worker, 1 if intermediate, 2 if last for a worker)
worker_first_obs = (self.adata['i'] != self.adata['i'].shift(1))
worker_last_obs = (self.adata['i'] != self.adata['i'].shift(-1))
self.adata['cs'] = 1
self.adata.loc[(worker_first_obs) & ~(worker_last_obs), 'cs'] = 0
self.adata.loc[(worker_last_obs) & ~(worker_first_obs), 'cs'] = 2
#res['year_max'] = int(sdata['year'].max())
#res['year_min'] = int(sdata['year'].min())
def __prep_JWM(self):
'''
Generate J, W, and M matrices.
'''
# Matrices for the cross-section
J = csc_matrix((np.ones(self.nn), (self.adata.index, self.adata['j'])), shape=(self.nn, self.nf)) # Firms
J = J[:, range(self.nf - 1)] # Normalize one firm to 0
self.J = J
W = csc_matrix((np.ones(self.nn), (self.adata.index, self.adata['i'])), shape=(self.nn, self.nw)) # Workers
self.W = W
if 'w' in self.adata.columns:
# Diagonal weight matrix
Dp = diags(self.adata['w'])
# Dwinv = diags(1.0 / ((W.T @ Dp @ W).diagonal())) # linalg.inv(csc_matrix(W.T @ Dp @ W))
else:
# Diagonal weight matrix - all weight one
Dp = diags(np.ones(len(self.adata)))
Dwinv = diags(1.0 / ((W.T @ Dp @ W).diagonal()))
self.Dp = Dp
self.Dp_sqrt = np.sqrt(Dp)
self.Dwinv = Dwinv
self.logger.info('Prepare linear solver')
# Finally create M
M = J.T @ Dp @ J - J.T @ Dp @ W @ Dwinv @ W.T @ Dp @ J
self.M = M
self.ml = pyamg.ruge_stuben_solver(M)
# Save time variable
self.last_invert_time = 0
def __weighted_quantile(self, values, quantiles, sample_weight=None, values_sorted=False, old_style=False): # FIXME was formerly a function outside the class
'''
Very close to numpy.percentile, but supports weights.
NOTE: quantiles should be in [0, 1]!
Arguments:
values (NumPy Array): data
quantiles (array-like): quantiles to compute
sample_weight (array-like): weights, must be the same length as `values`
values_sorted (bool): if True, skips sorting of initial array
old_style (bool): if True, changes output to be consistent with numpy.percentile
Returns:
(NumPy Array): computed quantiles
'''
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be consistent with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
def __weighted_var(self, v, w): # FIXME was formerly a function outside the class
'''
Compute weighted variance.
Arguments:
v: vector to weight
w: weights
Returns:
v0: weighted variance
'''
m0 = np.sum(w * v) / np.sum(w)
v0 = np.sum(w * (v - m0) ** 2) / np.sum(w)
return v0
def __weighted_cov(self, v1, v2, w): # FIXME was formerly a function outside the class
'''
Compute weighted covariance.
Arguments:
v1: vector to weight
v2: vector to weight
w: weights
Returns:
v0: weighted covariance
'''
m1 = np.sum(w * v1) / np.sum(w)
m2 = np.sum(w * v2) / np.sum(w)
v0 = np.sum(w * (v1 - m1) * (v2 - m2)) / np.sum(w)
return v0
def __compute_early_stats(self):
'''
Compute some early statistics.
'''
fdata = self.adata.groupby('j').agg({'m': 'sum', 'y': 'mean', 'i': 'count'})
self.res['mover_quantiles'] = self.__weighted_quantile(fdata['m'], np.linspace(0, 1, 11), fdata['i']).tolist()
self.res['size_quantiles'] = self.__weighted_quantile(fdata['i'], np.linspace(0, 1, 11), fdata['i']).tolist()
self.res['between_firm_var'] = self.__weighted_var(fdata['y'], fdata['i'])
self.res['var_y'] = self.__weighted_var(self.adata['y'], self.Dp)
self.logger.info('total variance: {:0.4f}'.format(self.res['var_y']))
# extract woodcock moments using sdata and jdata
# get averages by firms for stayers
#dsf = adata.query('cs==1').groupby('j1').agg(y1sj=('y1','mean'), nsj=('y1','count'))
#ds = pd.merge(adata.query('cs==1'), dsf, on="j1")
#ds.eval("y1s_lo = (nsj * y1sj - y1) / (nsj - 1)",inplace=True)
#res['woodcock_var_psi'] = ds.query('nsj > 1').pipe(pipe_qcov, 'y1', 'y1s_lo')
#res['woodcock_var_alpha'] = np.minimum( jdata.pipe(pipe_qcov, 'y1','y2'), adata.query('cs==1')['y1'].var() - res['woodcock_var_psi'] )
#res['woodcock_var_eps'] = adata.query('cs==1')['y1'].var() - res['woodcock_var_alpha'] - res['woodcock_var_psi']
#self.logger.info("[woodcock] var psi = {}", res['woodcock_var_psi'])
#self.logger.info("[woodcock] var alpha = {}", res['woodcock_var_alpha'])
#self.logger.info("[woodcock] var eps = {}", res['woodcock_var_eps'])
def __save_early_stats(self):
'''
Save the early statistics computed in compute_early_stats().
'''
with open(self.params['out'], 'w') as outfile:
json.dump(self.res, outfile)
self.logger.info('saved results to {}'.format(self.params['out']))
self.logger.info('--statsonly was passed as argument, so we skip all estimation.')
self.logger.info('------ DONE -------')
# sys.exit() # FIXME I don't think this is necessary (does it even work?) since this is now a class object
def construct_Q(self):
'''
Generate columns in adata necessary to construct Q.
'''
if self.params['Q'] == 'cov(alpha, psi)':
# Which rows to select
self.adata['Jq'] = 1
self.adata['Wq'] = 1
# Rows for csc_matrix
self.adata['Jq_row'] = self.adata['Jq'].cumsum() - 1
self.adata['Wq_row'] = self.adata['Wq'].cumsum() - 1
# Columns for csc_matrix
self.adata['Jq_col'] = self.adata['j']
self.adata['Wq_col'] = self.adata['i']
elif self.params['Q'] == 'cov(psi_t, psi_{t+1})':
self.adata['Jq'] = (self.adata['m'] == 1) & ((self.adata['cs'] == 0) | (self.adata['cs'] == 1))
self.adata['Jq_row'] = self.adata['Jq'].cumsum() - 1
self.adata['Jq_col'] = self.adata['j']
self.adata['Wq'] = (self.adata['m'] == 1) & ((self.adata['cs'] == 1) | (self.adata['cs'] == 2))
self.adata['Wq_row'] = self.adata['Wq'].cumsum() - 1
self.adata['Wq_col'] = self.adata['j']
elif self.params['Q'] == 'cov(psi_i, psi_j)': # Code doesn't work
self.adata['Jq'] = (self.adata['m'] == 1) & (self.adata['cs'] == 1)
self.adata['Jq_row'] = self.adata['j1']
self.adata['Jq_col'] = self.adata['j1']
self.adata['Wq'] = (self.adata['m'] == 1) & (self.adata['cs'] == 0)
# Recall j1, j2 swapped for m==1 and cs==0
self.adata['Wq_row'] = self.adata['j2']
self.adata['Wq_col'] = self.adata['j1']
def __construct_Jq_Wq(self):
'''
Construct Jq and Wq matrices.
Returns:
Jq (Pandas DataFrame): left matrix for computing Q
Wq (Pandas DataFrame): right matrix for computing Q
'''
# Construct Jq, Wq matrices
Jq = self.adata[self.adata['Jq'] == 1].reset_index(drop=True)
self.Yq = Jq['y']
nJ = len(Jq)
nJ_row = Jq['Jq_row'].max() + 1 # FIXME len(Jq['Jq_row'].unique())
nJ_col = Jq['Jq_col'].max() + 1 # FIXME len(Jq['Jq_col'].unique())
Jq = csc_matrix((np.ones(nJ), (Jq['Jq_row'], Jq['Jq_col'])), shape=(nJ_row, nJ_col))
if nJ_col == self.nf: # If looking at firms, normalize one to 0
Jq = Jq[:, range(self.nf - 1)]
Wq = self.adata[self.adata['Wq'] == 1].reset_index(drop=True)
nW = len(Wq)
nW_row = Wq['Wq_row'].max() + 1 # FIXME len(Wq['Wq_row'].unique())
nW_col = Wq['Wq_col'].max() + 1 # FIXME len(Wq['Wq_col'].unique())
Wq = csc_matrix((np.ones(nW), (Wq['Wq_row'], Wq['Wq_col'])), shape=(nW_row, nW_col)) # FIXME Should we use nJ because require Jq, Wq to have the same size?
# if nW_col == self.nf: # If looking at firms, normalize one to 0
# Wq = Wq[:, range(self.nf - 1)]
return Jq, Wq
def __create_fe_solver(self):
'''
Solve FE model.
'''
self.Y = self.adata['y']
# try to pickle the object to see its size
# self.save('tmp.pkl') # FIXME should we delete these 2 lines?
self.logger.info('extract firm effects')
self.psi_hat, self.alpha_hat = self.__solve(self.Y)
self.logger.info('solver time {:2.4f} seconds'.format(self.last_invert_time))
self.logger.info('expected total time {:2.4f} minutes'.format( (self.ndraw_trace * (1 + self.compute_he) + self.ndraw_pii * self.compute_he) * self.last_invert_time / 60))
self.E = self.Y - self.__mult_A(self.psi_hat, self.alpha_hat)
self.res['solver_time'] = self.last_invert_time
fe_rsq = 1 - np.power(self.E, 2).mean() /
|
np.power(self.Y, 2)
|
numpy.power
|
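# Illustrative sketch (not part of the original file): the weighted-quantile logic in
# __weighted_quantile above, as a standalone function checked against numpy.percentile
# for uniform weights. The input numbers below are made up.
import numpy as np

def weighted_quantile(values, quantiles, sample_weight):
    # Same interpolation scheme as the method above, written as a free function.
    sorter = np.argsort(values)
    values = np.asarray(values, dtype=float)[sorter]
    sample_weight = np.asarray(sample_weight, dtype=float)[sorter]
    wq = np.cumsum(sample_weight) - 0.5 * sample_weight
    wq /= np.sum(sample_weight)
    return np.interp(quantiles, wq, values)

vals = [3.0, 1.0, 4.0, 2.0]
print(weighted_quantile(vals, [0.25, 0.5, 0.75], np.ones(4)))
print(np.percentile(vals, [25, 50, 75]))  # close, though not identical, to the weighted version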
"""Provide basic models to build up image pipelines."""
import cv2
import numpy as np
from . import helper
class Canvas2:
"""
The superclass for any single-channel image elements.
:self.attribute canvas: a numpy image matrix of the same size as the image to be processed.
"""
def __init__(self, img):
"""
Store the input image and initialize self.canvas as a black image of the same size.
:param img: the feature image numpy matrix
"""
self.img = img
self.canvas = np.zeros_like(img)
def show_layer(self, key=False):
if key:
helper.image_show(self.canvas)
def save_layer(self, name="image", suffix=".jpg", path="./"):
img_name = helper.image_save(self.canvas, name,suffix, path)
print("image saved as", img_name)
return self
def img_normalization(self):
self.canvas = helper.image_normalization(self.canvas)
def __and__(self, other):
"""Return the bitwise-and result of 2 image matrices"""
self.canvas = helper.image_normalization(cv2.bitwise_and(self.canvas, other.canvas))
return self.canvas
def __or__(self, other):
"""Return the bitwise-or result of 2 image matrices"""
self.canvas = helper.image_normalization(cv2.bitwise_or(self.canvas, other.canvas))
return self.canvas
def __xor__(self, other):
"""Return the bitwise-xor result of 2 images matrices"""
self.canvas = helper.image_normalization(cv2.bitwise_xor(self.canvas, other.canvas))
return self.canvas
def __add__(self, other):
"""Combine the 2 image features by setting them to 2 color channels."""
self.canvas = helper.image_normalization(
np.stack((self.canvas, other.canvas, np.zeros_like(self.canvas)), axis=2))
return self.canvas
class Canvas3(Canvas2):
"""
The superclass for any three-channel image elements.
:self.attribute canvas: an image matrix of the same size as the image to be processed.
:self.attribute canvas3: the original 3-channel BGR image matrix.
"""
def Canvas2GRAY(self):
"""Turn the Canvas to gray scale image."""
self.canvas = cv2.cvtColor(cv2.cvtColor(self.canvas, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
return self
def __add__(self, other):
"""Combine the 2 image features by setting them to 2 color channels."""
self.canvas = helper.image_normalization(
np.stack((self.canvas[0], other.canvas[0], np.zeros_like(self.canvas[0])), axis=2))
return self.canvas
class ImgFeature2(Canvas2):
"""
The class takes in a single-channel image matrix and can do multiple openCV operations on the image.
:self.attribute canvas: the feature image(single channel)
:self.attribute img: the original image(single channel)
"""
def __init__(self, img):
"""
The initialization takes in an image matrix.
For 2-dim images, format must be GRAY;
for 3-dim images, format must be BGR.
:param img: 2-dim image matrix
"""
Canvas2.__init__(self, img)
self.img = img
self.canvas = img.copy()
def binary_threshold(self, thresholds=(0, 255), show_key=False):
"""Create a binary image, in which 0 refers to the region within the thresholds. """
self.canvas = helper.image_normalization((self.canvas > thresholds[0]) & (self.canvas < thresholds[1]))
self.show_layer(show_key)
return self
def gaussian_blur(self, sigma=10, k_size=(3, 3), show_key=False):
"""Use a Gaussian Kernel to blur the image"""
self.canvas = cv2.GaussianBlur(self.canvas, k_size, sigma)
self.show_layer(show_key)
return self
def sobel_convolute(self, method, k_size=3, show_key=False):
"""
Use a Sobel kernel to calculate the derivative of the image.
"""
img_gray = self.canvas
if method == 'x':
self.canvas = np.absolute(cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=k_size))
elif method == 'y':
self.canvas = np.absolute(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=k_size))
elif method == 'dir':
dx_img_sobel = np.absolute(cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=k_size))
dy_img_sobel = np.absolute(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=k_size))
self.canvas = np.arctan2(np.absolute(dy_img_sobel), np.absolute(dx_img_sobel))
else:
dx_img_sobel = np.absolute(cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=k_size))
dy_img_sobel = np.absolute(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=k_size))
self.canvas = np.sqrt(np.square(dx_img_sobel) + np.square(dy_img_sobel))
# self.img_normalization()
self.show_layer(show_key)
return self
def canny_detection(self, threshold_low=0, threshold_high=255, apertureSize=3, show_key=False):
"""
Apply a Canny detector to the image.
:param threshold_low: integer 0~255, the lower threshold for canny detection
:param threshold_high: integer 0~255, the higher threshold for canny detection
"""
self.canvas = cv2.Canny(self.canvas[:, :, 0], threshold_low, threshold_high, apertureSize=apertureSize)
self.show_layer(show_key)
return self
class ImgFeature3(Canvas3, ImgFeature2):
"""
The class takes in a three-channel image matrix and append some channel generating image_functions
to the ImageFeature2 superclass.
:self.attribute img: the feature image(single channel)
"""
def __init__(self, img):
ImgFeature2.__init__(self, img)
def binary_threshold(self, thresholds=((0, 0, 0), (255, 255, 255)), show_key=False):
"""For 3-channel images, thresholds can be tuples."""
return ImgFeature2.binary_threshold(self, thresholds, show_key)
def channel_selection(self, label, show_key=False):
"""
Get the specified channel image.
:param label: Supported labels:
('R', 'G', 'B', 'H', 'L', 'S')
"""
if label.upper() == 'GRAY':
self.canvas = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
elif label.upper() in 'BGR':
self.canvas = self.img[:, :, 'BGR'.index(label.upper())]
elif label.upper() in 'HLS':
self.canvas = cv2.cvtColor(self.img, cv2.COLOR_BGR2HLS)[:, :, 'HLS'.index(label.upper())]
else:
print("Sorry but this channel is not supported, return GRAY Scale instead.")
self.canvas = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
self.canvas = cv2.cvtColor(self.canvas, cv2.COLOR_GRAY2BGR)
self.img_normalization()
self.show_layer(show_key)
return self
def canny_detection(self, threshold_low=0, threshold_high=255, apertureSize=3, show_key=False):
"""
Apply a Canny detector to the image.
:param threshold_low: integer 0~255, the lower threshold for canny detection
:param threshold_high: integer 0~255, the higher threshold for canny detection
"""
ImgFeature2.canny_detection(self, threshold_low, threshold_high, apertureSize=apertureSize)
self.canvas = cv2.cvtColor(self.canvas, cv2.COLOR_GRAY2BGR)
self.show_layer(show_key)
return self
class ImgMask2(Canvas2):
"""
Create a binary image mask using different kinds of edge extraction techniques.
:self.attribute img: an image mask matrix of the same size as the image to be processed.
"""
def geometrical_mask(self, vertices, show_key=False):
"""
mask out the region outside of the vertices.
:param vertices: numpy matrix of vertices, size: num_vertices x num_edges x 2
"""
mask = np.zeros_like(self.canvas) # defining a blank mask to start with
ignore_mask_color = 255
vertices = np.array([vertices,], np.int32)
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
self.canvas = mask
if show_key:
self.show_layer(show_key)
return self
def fill_region(self, fill_x, fill_y, color=255, show_key=True):
"""
Fill in a certain region with the color designated.
:param fill_x: the x coordinates of pixels to be filled in
:param fill_y: the y coordinates of pixels to be filled in
:param color: the color of the fill-in
"""
self.canvas[fill_x, fill_y] = color
if show_key:
self.show_layer(show_key)
return self
def straight_lines(self, lines, color=(255, 255, 255), thickness=6, show_key=False):
"""Create a mask with lines drawn with the parameters provided."""
self.canvas = helper.draw_lines(self.canvas, lines, color, thickness)
if show_key:
self.show_layer(show_key)
return self
def curves(self, params, color=(255, 255, 255), thickness=3, show_key=False):
"""
Create a mask with curves drawn with the parameters provided
:param params: the parameters of the curve, tuple(degree_of_curve+1)
:param color: color of the curve, tuple(3)
:param thickness: thickness of the curve, float
:param show_key: bool
"""
self.canvas = helper.draw_multinomials(self.canvas, params, color, thickness)
if show_key:
self.show_layer(show_key)
return self
def polylines(self, vertices, closed=False, color=(255,255,255), thickness=3, show_key=False):
self.canvas = helper.draw_polylines(self.canvas, vertices, closed, color, thickness)
if show_key:
self.show_layer(show_key)
return self
def puttext(self, text, position=(30, 60), size=1.5, color=(255,255,255), thickness=4, show_key=False):
"""
Put some text onto the image.
:param text: string, the text string
:param position: tuple[2], gives the position of the left-bottom corner
:param size: uint, the size of the text
:param color: tuple[3], BGR value of the color the text
:param thickness: thickness of text lines
"""
cv2.putText(self.canvas, text, position, cv2.FONT_HERSHEY_SIMPLEX, size, color, thickness)
if show_key:
self.show_layer(show_key)
return self
class ImgMask3(Canvas3, ImgMask2):
"""Image mask in 3 channels"""
def __init__(self, img):
ImgMask2.__init__(self, img)
def geometrical_mask(self, vertices, ignore_mask_color=(0,255,0), show_key=False):
"""
mask out the region outside of the vertices.
:param vertices: numpy matrix of vertices, size: num_vertices x num_edges x 2
"""
vertices = np.array([vertices,], np.int32)
if len(self.img.shape) == 2:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(self.canvas, vertices, ignore_mask_color)
self.show_layer(show_key)
return self
def fill_region(self, fill_x, fill_y, color=(0, 0, 255), show_key=False):
"""
Fill in a certain region with the color designated.
:param fill_x: the x coordinates of pixels to be filled in
:param fill_y: the y coordinates of pixels to be filled in
:param color: the color of the fill-in
"""
ImgMask2.fill_region(self, fill_x, fill_y, color, show_key)
return self
def straight_lines(self, lines, color_BGR=(0, 0, 255), thickness=6, show_key=False):
"""Create a mask with lines drawn with the parameters provided."""
return ImgMask2.straight_lines(self, lines, color_BGR, thickness, show_key)
def curves(self, params, color_BGR=(0, 255, 0), thickness=3, show_key=False):
"""
Create a mask with curves drawn with the parameters provided
:param params: the parameters of the curve, tuple(degree_of_curve+1)
:param color: color of the curve, tuple(3)
:param thickness: thickness of the curve, float
:param show_key: bool
"""
return ImgMask2.curves(self, params, color_BGR, thickness, show_key)
class FeatureCollector:
"""
Collects a list of features extracted from a single image.
Use them for showing, combination, or simply acts as a pipeline.
:self.attribute img: the BGR or GRAY image matrix
:self.attribute layers_dict: list of image_feature instance
:self.attribute color_model: the color model of image
:self.attribute cameraMtx: camera matrix for calibration
:self.attribute dist_coef: distortion coefficients for calibration
"""
def __init__(self, img, color_model='BGR', calibrators=(0, 0, 0, np.eye(3))):
"""
The initialization takes in an image matrix.
Acceptable formats include:
GRAY scale
all valid color formats supported by OpenCV
Images are by default stored as uint8 in BGR or GRAY.
If the format is not BGR for a 3-dim image, a color_model must be assigned.
:param img: 2-dim or 3-dim image matrix
:param color_model: labels among: BAYER_BG, HLS, HSV, LAB, RGB, BGR, GRAY...
:param calibrators: calibration parameters list following the order(number of
chessboard images fed, Camera Matrix, Distortion Coefficient, Warp Matrix)
"""
self.img = helper.image_normalization(img)
self.img_processed = self.img.copy()
self.layers_dict = {}
# self.add_layer('main_canvas', "mask")
self.calibrators = {"number_of_img": calibrators[0], "CamMtx": calibrators[1],
"DistCoe": calibrators[2], "WarpMtx": calibrators[3]}
if len(self.img.shape) == 2:
self.color_model = 'GRAY'
elif color_model != 'BGR':
l_valid_color_format = [key for key in cv2.__dict__.keys()
if key.startswith('COLOR')
and key.endswith('2BGR')
and len(key.split('_')) == 2]
if color_model in l_valid_color_format:
cvt_method = "cv2.COLOR_" + color_model + "2BGR"
self.img = cv2.cvtColor(self.img, eval(cvt_method))
else:
print('Unknown color model, please manually transfer to BGR.')
self.color_model = 'BGR'
self.add_layer("main", "feature")
def image_reload(self, img):
"""
Reload an image into the instance. Calibrators and color_model are kept.
:param img:
"""
self.img = helper.image_normalization(img)
self.img_processed = self.img.copy()
self.layers_dict = {}
self.add_layer("main", "feature")
return self
def add_layer(self, key='layer', type='feature', use_calibrated=False, layer=None):
"""Add a new key:ImgFeature/ImgMask instance to the self.layers_dict."""
if key == 'layer':
key = 'layer_' + str(len(self.layers_dict))
if layer is not None:
self.layers_dict[key] = layer
else:
if use_calibrated and self.layers_dict["calibrated"] is not None:
img_template = self.layers_dict["calibrated"].img
else:
img_template = self.img
if type == 'feature':
if self.color_model == "GRAY":
self.layers_dict[key] = ImgFeature2(img_template)
else:
self.layers_dict[key] = ImgFeature3(img_template)
else:
if self.color_model == "GRAY":
self.layers_dict[key] = ImgMask2(img_template)
else:
self.layers_dict[key] = ImgMask3(img_template)
def get_chessboard_calibrators(self, chessboard_imgs_list, num_x=2, num_y=2, show_key=False):
"""
Get calibrators using a chessboard image and the specified number of corners.
By inputting an image, which is laid on the surface on which the features are,
and shot by the original camera lens, this function would calculate out the parameters
for both image undistortion and perspective transformation.
:param chessboard_imgs_list: A list of chess board images
:param num_x, num_y: the number of inner chessboard corners in the x and y directions
"""
obj_points = []
img_points = []
number_of_imgs = 0
objp = np.zeros((1, num_x * num_y, 3), np.float32)
objp[0, :, :2] = np.mgrid[0:num_x, 0:num_y].T.reshape(-1, 2)
for chessboard_img in chessboard_imgs_list:
chessboard_img_gray = cv2.cvtColor(chessboard_img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(chessboard_img_gray, (num_x, num_y), cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
if ret:
number_of_imgs += 1
obj_points.append(objp)
corners2 = cv2.cornerSubPix(chessboard_img_gray, corners, (11, 11), (-1, -1), (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
img_points.append(corners2)
else:
print("Unable to detect corners for this image!")
ret, mtx, dist, _, _ = cv2.calibrateCamera(obj_points, img_points,
chessboard_img_gray.shape[::-1], None, None)
self.calibrators["number_of_img"] = number_of_imgs
self.calibrators["CamMtx"] = mtx
self.calibrators["DistCoe"] = dist
i = 0
for chessboard_img in chessboard_imgs_list[::5]:
i+=1
chessboard_img_gray = cv2.cvtColor(chessboard_img, cv2.COLOR_BGR2GRAY)
chessboard_undistorted_gray = cv2.undistort(chessboard_img_gray, mtx, dist)
# helper.image_save(chessboard_img, "chessboard"+str(i))
# helper.image_save(chessboard_undistorted_gray, "chessboard_undistorted"+str(i))
if show_key:
helper.image_show(chessboard_undistorted_gray)
def get_warp_params(self, src_vertices):
"""
Get the warp matrix from a set of source vertices.
:param src_vertices:
:return:
"""
y_m, x_m, _ = self.img.shape
margin = 10
dst = np.array([[margin, y_m - margin], [x_m - margin, y_m - margin], [x_m - margin, margin], [margin, margin]],
dtype=np.float32)
warp_mtx = cv2.getPerspectiveTransform(src_vertices, dst)
self.calibrators["WarpMtx"] = warp_mtx
def undistort(self, key="img"):
"""
Undistort the image using the provided parameters.
If key == "img", the undistorted will be stored under the key "calibrated",
else, only the img_processed will be undistorted.
"""
if self.calibrators["number_of_img"] == 0:
print("Please calibrate with a chessboard image first. Undistortion will not be conducted. ")
return self
if key.lower()=="img":
img_undistorted = cv2.undistort(self.img, self.calibrators["CamMtx"], self.calibrators["DistCoe"])
try:
self.layers_dict["calibrated"].img = img_undistorted
except:
self.layers_dict["calibrated"] = ImgFeature3(img_undistorted)
else:
img_undistorted = cv2.undistort(self.img_processed, self.calibrators["CamMtx"], self.calibrators["DistCoe"])
self.img_processed = img_undistorted
return self
def warp(self, key="img", reverse=False):
"""
Warp the image using a perspective transformation matrix.
If key == "img", the unwarped will be stored under the key "calibrated",
else, unwarp the designated key.
:param key: the content to be warped
"""
WarpMtx = self.calibrators["WarpMtx"]
if reverse:
WarpMtx = np.linalg.inv(WarpMtx)
if key.lower()=="img":
try:
img_warped = cv2.warpPerspective(self.layers_dict["calibrated"].img, WarpMtx, self.img.shape[1::-1])
self.layers_dict["calibrated"].img = img_warped
except:
img_warped = cv2.warpPerspective(self.img, WarpMtx, self.img.shape[1::-1])
self.layers_dict["calibrated"] = ImgFeature3(img_warped)
elif key.lower()=="img_processed":
img_warped = cv2.warpPerspective(self.img_processed, WarpMtx, self.img.shape[1::-1])
else:
try:
img_warped = cv2.warpPerspective(self.layers_dict[key].canvas, WarpMtx, self.img.shape[1::-1])
self.layers_dict[key].canvas = img_warped
except:
print("Invalid Keys in warpping! Return warpping of original image instead")
img_warped = cv2.warpPerspective(self.img, WarpMtx, self.img.shape[1::-1])
self.img_processed = img_warped
# self.image_save("image_warped")
return self
def image_show(self, show_key=True):
if show_key:
helper.image_show(self.img_processed)
def image_save(self, name="image", suffix=".jpg", path="./"):
img_name = helper.image_save(self.img_processed, name, suffix, path)
print("image saved as", img_name)
def combine(self, key1, key2, method='and', parameter=(1, 0.5, 0)):
"""
Return the Combination of 2 features in the self.layers_dict according to the method.
:param key1, key2: The keys of canvases to be combined.
:param method: Choose from ("and", "or", "xor", "add", "mix")
"""
try:
layer1 = self.layers_dict[key1]
layer2 = self.layers_dict[key2]
except:
print("Invalid keys!")
return
if method == 'and':
self.img_processed = layer1 & layer2
elif method == 'or':
self.img_processed = layer1 | layer2
elif method == 'xor':
self.img_processed = layer1 ^ layer2
elif method == 'add':
self.img_processed = layer1 + layer2
elif method == 'mix':
self.img_processed = helper.weighted_img(layer1.canvas, layer2.canvas, parameter[0], parameter[1],
parameter[2])
self.layers_dict[key1].canvas = self.img_processed
else:
print("Doesn't support such method, sorry.")
return
class LaneFeatureCollector(FeatureCollector):
"""
This class adds some attributes for storing lane coefficients.
"""
def __init__(self, img, color_model='BGR', calibrators=(0, 0, 0, np.eye(3)), params_cache_size=10):
FeatureCollector.__init__(self, img, color_model, calibrators)
self.lane_params_list =
|
np.array([])
|
numpy.array
|
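# Illustrative sketch (not part of the original file): the Canvas2 operator overloads
# above boil down to OpenCV bitwise operations on equally sized single-channel masks.
# The two toy masks below are made up.
import numpy as np
import cv2

a = np.zeros((4, 4), dtype=np.uint8)
a[:, :2] = 255                      # left half on
b = np.zeros((4, 4), dtype=np.uint8)
b[:2, :] = 255                      # top half on
print(cv2.bitwise_and(a, b))        # intersection: top-left quadrant
print(cv2.bitwise_or(a, b))         # union: top half plus left half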
import got3
import arrow
from textblob import TextBlob
import numpy as np
from termcolor import colored
def dates_to_sentiment(dates, ticker, max_tweets):
ticker = ticker
print(colored("Calculating sentiment for:" + ticker, 'white'))
sentiments = []
positives = []
negatives = []
arrow_date = dates
tweetCriteria = got3.manager.TweetCriteria().setQuerySearch("{}{}".format("#", ticker)).setMaxTweets(max_tweets)
tweets = got3.manager.TweetManager.getTweets(tweetCriteria)
sents_per_date = []
subjectivity = []
for t in tweets:
blob = TextBlob(t.text)
sent = blob.sentiment[0] #get the polarity
subjectives = blob.sentiment[1] #get the subjectivity
sents_per_date.append(sent) #Saving polarity to sents_per_date
subjectivity.append(subjectives) #Saving subjectivity to subjectivity
if blob.sentiment[0] > 0: #Separating positive and negative tweets to lists
positives.append(t)
else:
negatives.append(t)
standard_dev_array = np.asarray(sents_per_date)
if len(sents_per_date) >= 1:
mean_polarity = sum(sents_per_date) / len(sents_per_date)
mean_subjectivity = sum(subjectivity) / len(sents_per_date)
percent_positive = len(positives) / len(sents_per_date)
standard_deviation_polarity = np.std(standard_dev_array)
else:
mean_polarity = 0
mean_subjectivity = 0
percent_positive = .5
standard_deviation_polarity = 0
#Mean Polarity
try:
sentiments.append(mean_polarity)
except:
sentiments.append(0)
#Mean Subjectivity
try:
sentiments.append(mean_subjectivity)
except:
sentiments.append(0)
#Percentage of Tweets that are positive
try:
sentiments.append(percent_positive)
except:
sentiments.append(0.5)
#Standard Deviation of tweet sentiment Polarity
try:
sentiments.append(standard_deviation_polarity)
except:
sentiments.append(0)
split_symbol = ticker.split('/')
ticker = split_symbol[1]
print(colored("Calculating sentiment for:" + ticker, 'red'))
positives = []
negatives = []
subjectivity = []
sents_per_date = []
tweetCriteria = got3.manager.TweetCriteria().setQuerySearch("{}{}".format("#", ticker)).setMaxTweets(max_tweets)
tweets = got3.manager.TweetManager.getTweets(tweetCriteria)
for t in tweets:
blob = TextBlob(t.text)
sent = blob.sentiment[0] #get the polarity
subjectives = blob.sentiment[1] #get the subjectivity
sents_per_date.append(sent) #Saving polarity to sents_per_date
subjectivity.append(subjectives) #Saving subjectivity to subjectivity
if blob.sentiment[0] > 0: #Separating positive and negative tweets to lists
positives.append(t)
else:
negatives.append(t)
standard_dev_array = np.asarray(sents_per_date)
if len(sents_per_date) >= 1:
mean_polarity_from = sum(sents_per_date) / len(sents_per_date)
mean_subjectivity_from = sum(subjectivity) / len(sents_per_date)
percent_positive_from = len(positives) / len(sents_per_date)
standard_deviation_polarity_from = np.std(standard_dev_array)
else:
mean_polarity_from = 0
mean_subjectivity_from = 0
percent_positive_from = .5
standard_deviation_polarity_from = 0
#Mean Polarity
try:
sentiments.append(mean_polarity_from)
except:
sentiments.append(0)
#Mean Subjectivity
try:
sentiments.append(mean_subjectivity_from)
except:
sentiments.append(0)
#Percentage of Tweets that are positive
try:
sentiments.append(percent_positive_from)
except:
sentiments.append(0.5)
#Standard Deviation of tweet sentiment Polarity
try:
sentiments.append(standard_deviation_polarity_from)
except:
sentiments.append(0)
ticker = split_symbol[0]
print(colored("Calculating sentiment for:" + ticker, 'green'))
positives = []
negatives = []
tweetCriteria = got3.manager.TweetCriteria().setQuerySearch("{}{}".format("#", ticker)).setMaxTweets(max_tweets)
tweets = got3.manager.TweetManager.getTweets(tweetCriteria)
sents_per_date = []
subjectivity = []
for t in tweets:
blob = TextBlob(t.text)
sent = blob.sentiment[0] #get the polarity
subjectives = blob.sentiment[1] #get the subjectivity
sents_per_date.append(sent) #Saving polarity to sents_per_date
subjectivity.append(subjectives) #Saving subjectivity to subjectivity
if blob.sentiment[0] > 0: #Separating positive and negative tweets to lists
positives.append(t)
else:
negatives.append(t)
standard_dev_array = np.asarray(sents_per_date)
if len(sents_per_date) >= 1:
mean_polarity_to = sum(sents_per_date) / len(sents_per_date)
mean_subjectivity_to = sum(subjectivity) / len(sents_per_date)
percent_positive_to = len(positives) / len(sents_per_date)
standard_deviation_polarity_to = np.std(standard_dev_array)
else:
mean_polarity_to = 0
mean_subjectivity_to = 0
percent_positive_to = .5
standard_deviation_polarity_to = 0
#Mean Polarity
try:
sentiments.append(mean_polarity_to)
except:
sentiments.append(0)
#Mean Subjectivity
try:
sentiments.append(mean_subjectivity_to)
except:
sentiments.append(0)
#Percentage of Tweets that are positive
try:
sentiments.append(percent_positive_to)
except:
sentiments.append(0.5)
#Standard Deviation of tweet sentiment Polarity
try:
sentiments.append(standard_deviation_polarity_to)
except:
sentiments.append(0)
sentiments =
|
np.asarray(sentiments)
|
numpy.asarray
|
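# Illustrative sketch (not part of the original file): the four features appended per
# ticker above are mean polarity, mean subjectivity, share of positive tweets, and the
# polarity standard deviation. The example strings below are made up; no scraping is done.
import numpy as np
from textblob import TextBlob

texts = ["Great earnings, very bullish!", "Terrible quarter, selling everything."]
polarities = np.array([TextBlob(t).sentiment[0] for t in texts])      # in [-1, 1]
subjectivities = np.array([TextBlob(t).sentiment[1] for t in texts])  # in [0, 1]
features = np.asarray([polarities.mean(),
                       subjectivities.mean(),
                       (polarities > 0).mean(),
                       polarities.std()])
print(features)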
# Import stuff
import pystan
import numpy as np
import sys
import scipy.interpolate as interpolate
import netCDF4
import tqdm
from mpi4py import MPI
import pickle
from utils.utils import *
# Results directory and run name
results_dir = 'results'
results_filename = 'BASIC_V1_2017'
# How many MCMC chains/samples to do
n_chains = 1
warmup = 1
iterations = 3
n_samples = n_chains*(iterations-warmup)
# MPI comm, number of processes and rank info
comm = MPI.COMM_WORLD
nprocs=comm.Get_size()
myrank=comm.Get_rank()
# Import the DLM model
model_kalman_ar1 = pickle.load(open('models/dlm_vanilla_ar1.pkl', 'rb'))
# Import the data
# Import data from a netCDF
data = netCDF4.Dataset('data/BASIC_V1_2017_lotus_seascyc_gcsw2017_fac2.nc')
# Extract time, pressure and latitude variables from the netCDF
T = data['time'][:]
P = data['pressure'][:]
L = data['latitude'][:]
# How many time steps are there? (ie how long is the time-series)
N = len(T)
# Import the regressors and project them onto the time grid corresponding to the imported data
# ENSO
regressor_data = np.loadtxt('regressors/ENSO_MEI_1950_201802.txt')
Y = interpolate.InterpolatedUnivariateSpline(regressor_data[:,0], regressor_data[:,1])
enso = Y(T)
# SOLAR
regressor_data = np.loadtxt('regressors/Flux_F30_monthly_195111_201803_absolute.txt')
Y = interpolate.InterpolatedUnivariateSpline(regressor_data[:,0], regressor_data[:,1])
solar = Y(T)
# QBO30
regressor_data = np.loadtxt('regressors/multi_qbo30_1953_2018.txt')
Y = interpolate.InterpolatedUnivariateSpline(regressor_data[:,0], regressor_data[:,1])
qbo30 = Y(T)
# QBO50
regressor_data =
|
np.loadtxt('regressors/multi_qbo50_1953_2018.txt')
|
numpy.loadtxt
|
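# Illustrative sketch (not part of the original file): each regressor is a two-column
# (time, value) table resampled onto the data's time grid with a spline. The grids and
# values below are synthetic stand-ins, not the actual regressor files.
import numpy as np
import scipy.interpolate as interpolate

src_t = np.linspace(1980.0, 2017.0, 445)            # regressor's native monthly grid
src_v = np.sin(2.0 * np.pi * src_t / 11.0)          # stand-in for e.g. a solar index
spline = interpolate.InterpolatedUnivariateSpline(src_t, src_v)
T = np.linspace(1985.0, 2016.0, 373)                # target time grid of the dataset
print(spline(T).shape)                              # (373,)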
# Third-party
from astropy.utils.misc import isiterable
import numpy as np
# Gala
from ....dynamics import Orbit
__all__ = ['static_to_constantrotating', 'constantrotating_to_static']
def rodrigues_axis_angle_rotate(x, vec, theta):
"""
Rotate the input vector or set of vectors `x` around the axis
`vec` by the angle `theta`.
Parameters
----------
x : array_like
The vector or array of vectors to transform. Must have shape
"""
x = np.array(x).T
vec = np.array(vec).T
theta = np.array(theta).T[...,None]
out = np.cos(theta)*x + np.sin(theta)*
|
np.cross(vec, x)
|
numpy.cross
|
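# Illustrative sketch (not part of the original file): the truncated expression above is
# the Rodrigues axis-angle formula v_rot = v*cos(theta) + (k x v)*sin(theta)
# + k*(k . v)*(1 - cos(theta)) for a unit axis k. This is a plain re-derivation with a
# numeric check, not the gala implementation.
import numpy as np

def rodrigues_rotate(v, axis, theta):
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)               # the formula assumes a unit axis
    v = np.asarray(v, dtype=float)
    return (v * np.cos(theta)
            + np.cross(axis, v) * np.sin(theta)
            + axis * np.dot(axis, v) * (1.0 - np.cos(theta)))

# Rotating the x-axis by 90 degrees about z should give (approximately) the y-axis.
print(rodrigues_rotate([1.0, 0.0, 0.0], [0.0, 0.0, 1.0], np.pi / 2))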
from __future__ import absolute_import
from __future__ import print_function
import os
import argparse
import warnings
import numpy as np
from sklearn.neighbors import KernelDensity
from keras.models import load_model
from util import (get_data, get_noisy_samples, get_mc_predictions,
get_deep_representations, score_samples, normalize,
get_lids_random_batch, get_kmeans_random_batch)
# In the original paper, the author used optimal KDE bandwidths dataset-wise
# that were determined from CV tuning
BANDWIDTHS = {'mnist': 3.7926, 'cifar': 0.26, 'svhn': 1.00}
# Here we further tune bandwidth for each of the 10 classes in mnist, cifar and svhn
# Run tune_kernal_density.py to get the following settings.
# BANDWIDTHS = {'mnist': [0.2637, 0.1274, 0.2637, 0.2637, 0.2637, 0.2637, 0.2637, 0.2069, 0.3360, 0.2637],
# 'cifar': [0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000],
# 'svhn': [0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1274, 0.1000, 0.1000]}
PATH_DATA = "data/"
PATH_IMAGES = "plots/"
def merge_and_generate_labels(X_pos, X_neg):
"""
Merge positive and negative artifacts and generate labels.
:param X_pos: positive samples
:param X_neg: negative samples
:return: X: merged samples, 2D ndarray
y: generated labels (0/1), 2D ndarray with one column (same number of rows as X)
"""
X_pos = np.asarray(X_pos, dtype=np.float32)
print("X_pos: ", X_pos.shape)
X_pos = X_pos.reshape((X_pos.shape[0], -1))
X_neg = np.asarray(X_neg, dtype=np.float32)
print("X_neg: ", X_neg.shape)
X_neg = X_neg.reshape((X_neg.shape[0], -1))
X = np.concatenate((X_pos, X_neg))
y = np.concatenate((np.ones(X_pos.shape[0]), np.zeros(X_neg.shape[0])))
y = y.reshape((X.shape[0], 1))
return X, y
def get_kd(model, X_train, Y_train, X_test, X_test_noisy, X_test_adv):
"""
Get kernel density scores
:param model:
:param X_train:
:param Y_train:
:param X_test:
:param X_test_noisy:
:param X_test_adv:
:return: artifacts: positive and negative examples with kd values,
labels: adversarial (label: 1) and normal/noisy (label: 0) examples
"""
# Get deep feature representations
print('Getting deep feature representations...')
X_train_features = get_deep_representations(model, X_train,
batch_size=args.batch_size)
X_test_normal_features = get_deep_representations(model, X_test,
batch_size=args.batch_size)
X_test_noisy_features = get_deep_representations(model, X_test_noisy,
batch_size=args.batch_size)
X_test_adv_features = get_deep_representations(model, X_test_adv,
batch_size=args.batch_size)
# Train one KDE per class
print('Training KDEs...')
class_inds = {}
for i in range(Y_train.shape[1]):
class_inds[i] = np.where(Y_train.argmax(axis=1) == i)[0]
kdes = {}
warnings.warn("Using pre-set kernel bandwidths that were determined "
"optimal for the specific CNN models of the paper. If you've "
"changed your model, you'll need to re-optimize the "
"bandwidth.")
print('bandwidth %.4f for %s' % (BANDWIDTHS[args.dataset], args.dataset))
for i in range(Y_train.shape[1]):
kdes[i] = KernelDensity(kernel='gaussian',
bandwidth=BANDWIDTHS[args.dataset]) \
.fit(X_train_features[class_inds[i]])
# Get model predictions
print('Computing model predictions...')
preds_test_normal = model.predict_classes(X_test, verbose=0,
batch_size=args.batch_size)
preds_test_noisy = model.predict_classes(X_test_noisy, verbose=0,
batch_size=args.batch_size)
preds_test_adv = model.predict_classes(X_test_adv, verbose=0,
batch_size=args.batch_size)
# Get density estimates
print('computing densities...')
densities_normal = score_samples(
kdes,
X_test_normal_features,
preds_test_normal
)
densities_noisy = score_samples(
kdes,
X_test_noisy_features,
preds_test_noisy
)
densities_adv = score_samples(
kdes,
X_test_adv_features,
preds_test_adv
)
print("densities_normal:", densities_normal.shape)
print("densities_adv:", densities_adv.shape)
print("densities_noisy:", densities_noisy.shape)
## skip the normalization, you may want to try different normalizations later
## so at this step, just save the raw values
# densities_normal_z, densities_adv_z, densities_noisy_z = normalize(
# densities_normal,
# densities_adv,
# densities_noisy
# )
densities_pos = densities_adv
densities_neg = np.concatenate((densities_normal, densities_noisy))
artifacts, labels = merge_and_generate_labels(densities_pos, densities_neg)
return artifacts, labels
def get_bu(model, X_test, X_test_noisy, X_test_adv):
"""
Get Bayesian uncertainty scores
:param model:
:param X_train:
:param Y_train:
:param X_test:
:param X_test_noisy:
:param X_test_adv:
:return: artifacts: positive and negative examples with bu values,
labels: adversarial (label: 1) and normal/noisy (label: 0) examples
"""
print('Getting Monte Carlo dropout variance predictions...')
uncerts_normal = get_mc_predictions(model, X_test,
batch_size=args.batch_size) \
.var(axis=0).mean(axis=1)
uncerts_noisy = get_mc_predictions(model, X_test_noisy,
batch_size=args.batch_size) \
.var(axis=0).mean(axis=1)
uncerts_adv = get_mc_predictions(model, X_test_adv,
batch_size=args.batch_size) \
.var(axis=0).mean(axis=1)
print("uncerts_normal:", uncerts_normal.shape)
print("uncerts_noisy:", uncerts_noisy.shape)
print("uncerts_adv:", uncerts_adv.shape)
## skip the normalization, you may want to try different normalizations later
## so at this step, just save the raw values
# uncerts_normal_z, uncerts_adv_z, uncerts_noisy_z = normalize(
# uncerts_normal,
# uncerts_adv,
# uncerts_noisy
# )
uncerts_pos = uncerts_adv
uncerts_neg = np.concatenate((uncerts_normal, uncerts_noisy))
artifacts, labels = merge_and_generate_labels(uncerts_pos, uncerts_neg)
return artifacts, labels
def get_lid(model, X_test, X_test_noisy, X_test_adv, k=10, batch_size=100, dataset='mnist'):
"""
Get local intrinsic dimensionality
:param model:
:param X_train:
:param Y_train:
:param X_test:
:param X_test_noisy:
:param X_test_adv:
:return: artifacts: positive and negative examples with lid values,
labels: adversarial (label: 1) and normal/noisy (label: 0) examples
"""
print('Extract local intrinsic dimensionality: k = %s' % k)
lids_normal, lids_noisy, lids_adv = get_lids_random_batch(model, X_test, X_test_noisy,
X_test_adv, dataset, k, batch_size)
print("lids_normal:", lids_normal.shape)
print("lids_noisy:", lids_noisy.shape)
print("lids_adv:", lids_adv.shape)
## skip the normalization, you may want to try different normalizations later
## so at this step, just save the raw values
# lids_normal_z, lids_adv_z, lids_noisy_z = normalize(
# lids_normal,
# lids_adv,
# lids_noisy
# )
lids_pos = lids_adv
lids_neg = np.concatenate((lids_normal, lids_noisy))
artifacts, labels = merge_and_generate_labels(lids_pos, lids_neg)
return artifacts, labels
def get_kmeans(model, X_test, X_test_noisy, X_test_adv, k=10, batch_size=100, dataset='mnist'):
"""
Calculate the average distance to k nearest neighbours as a feature.
This is used to compare density vs. LID (to probe why density alone doesn't work).
:param model:
:param X_train:
:param Y_train:
:param X_test:
:param X_test_noisy:
:param X_test_adv:
:return: artifacts: positive and negative examples with lid values,
labels: adversarial (label: 1) and normal/noisy (label: 0) examples
"""
print('Extract k means feature: k = %s' % k)
kms_normal, kms_noisy, kms_adv = get_kmeans_random_batch(model, X_test, X_test_noisy,
X_test_adv, dataset, k, batch_size,
pca=True)
print("kms_normal:", kms_normal.shape)
print("kms_noisy:", kms_noisy.shape)
print("kms_adv:", kms_adv.shape)
## skip the normalization, you may want to try different normalizations later
## so at this step, just save the raw values
# kms_normal_z, kms_noisy_z, kms_adv_z = normalize(
# kms_normal,
# kms_noisy,
# kms_adv
# )
kms_pos = kms_adv
kms_neg = np.concatenate((kms_normal, kms_noisy))
artifacts, labels = merge_and_generate_labels(kms_pos, kms_neg)
return artifacts, labels
def main(args):
assert args.dataset in ['mnist', 'cifar', 'svhn'], \
"Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all'], \
"Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
"'jsma' or 'cw-l2'"
assert args.characteristic in ['kd', 'bu', 'lid', 'km', 'all'], \
"Characteristic(s) to use 'kd', 'bu', 'lid', 'km', 'all'"
model_file = os.path.join(PATH_DATA, "model_%s.h5" % args.dataset)
assert os.path.isfile(model_file), \
'model file not found... must first train model using train_model.py.'
adv_file = os.path.join(PATH_DATA, "Adv_%s_%s.npy" % (args.dataset, args.attack))
assert os.path.isfile(adv_file), \
'adversarial sample file not found... must first craft adversarial ' \
'samples using craft_adv_samples.py'
print('Loading the data and model...')
# Load the model
model = load_model(model_file)
# Load the dataset
X_train, Y_train, X_test, Y_test = get_data(args.dataset)
# Check attack type, select adversarial and noisy samples accordingly
print('Loading noisy and adversarial samples...')
if args.attack == 'all':
# TODO: implement 'all' option
# X_test_adv = ...
# X_test_noisy = ...
raise NotImplementedError("'All' types detector not yet implemented.")
else:
# Load adversarial samples
X_test_adv = np.load(adv_file)
print("X_test_adv: ", X_test_adv.shape)
# Noisy examples have parameters to tune, so their generation happens here rather than
# in the adversarial-crafting step, which can take many hours.
noisy_file = os.path.join(PATH_DATA, 'Noisy_%s_%s.npy' % (args.dataset, args.attack))
if os.path.isfile(noisy_file):
X_test_noisy = np.load(noisy_file)
else:
# Craft an equal number of noisy samples
print('Crafting %s noisy samples. ' % args.dataset)
X_test_noisy = get_noisy_samples(X_test, X_test_adv, args.dataset, args.attack)
np.save(noisy_file, X_test_noisy)
# Check model accuracies on each sample type
for s_type, dataset in zip(['normal', 'noisy', 'adversarial'],
[X_test, X_test_noisy, X_test_adv]):
_, acc = model.evaluate(dataset, Y_test, batch_size=args.batch_size,
verbose=0)
print("Model accuracy on the %s test set: %0.2f%%" %
(s_type, 100 * acc))
# Compute and display average perturbation sizes
if not s_type == 'normal':
l2_diff = np.linalg.norm(
dataset.reshape((len(X_test), -1)) -
X_test.reshape((len(X_test), -1)),
axis=1
).mean()
print("Average L-2 perturbation size of the %s test set: %0.2f" %
(s_type, l2_diff))
# Refine the normal, noisy and adversarial sets to only include samples for
# which the original version was correctly classified by the model
preds_test = model.predict_classes(X_test, verbose=0,
batch_size=args.batch_size)
inds_correct = np.where(preds_test == Y_test.argmax(axis=1))[0]
print("Number of correctly predict images: %s" % (len(inds_correct)))
X_test = X_test[inds_correct]
X_test_noisy = X_test_noisy[inds_correct]
X_test_adv = X_test_adv[inds_correct]
print("X_test: ", X_test.shape)
print("X_test_noisy: ", X_test_noisy.shape)
print("X_test_adv: ", X_test_adv.shape)
if args.characteristic == 'kd':
# extract kernel density
characteristics, labels = get_kd(model, X_train, Y_train, X_test, X_test_noisy, X_test_adv)
print("KD: [characteristic shape: ", characteristics.shape, ", label shape: ", labels.shape)
# save to file
bandwidth = BANDWIDTHS[args.dataset]
file_name = os.path.join(PATH_DATA, 'kd_%s_%s_%.4f.npy' % (args.dataset, args.attack, bandwidth))
data = np.concatenate((characteristics, labels), axis=1)
np.save(file_name, data)
elif args.characteristic == 'bu':
# extract Bayesian uncertainty
characteristics, labels = get_bu(model, X_test, X_test_noisy, X_test_adv)
print("BU: [characteristic shape: ", characteristics.shape, ", label shape: ", labels.shape)
# save to file
file_name = os.path.join(PATH_DATA, 'bu_%s_%s.npy' % (args.dataset, args.attack))
data = np.concatenate((characteristics, labels), axis=1)
np.save(file_name, data)
elif args.characteristic == 'lid':
# extract local intrinsic dimensionality
characteristics, labels = get_lid(model, X_test, X_test_noisy, X_test_adv,
args.k_nearest, args.batch_size, args.dataset)
print("LID: [characteristic shape: ", characteristics.shape, ", label shape: ", labels.shape)
# save to file
file_name = os.path.join(PATH_DATA, 'lid_%s_%s.npy' % (args.dataset, args.attack))
# file_name = os.path.join('../data_grid_search/lid_large_batch/', 'lid_%s_%s_%s.npy' %
# (args.dataset, args.attack, args.k_nearest))
data = np.concatenate((characteristics, labels), axis=1)
np.save(file_name, data)
elif args.characteristic == 'km':
# extract k means distance
characteristics, labels = get_kmeans(model, X_test, X_test_noisy, X_test_adv,
args.k_nearest, args.batch_size, args.dataset)
print("K-Mean: [characteristic shape: ", characteristics.shape, ", label shape: ", labels.shape)
# save to file
file_name = os.path.join(PATH_DATA, 'km_pca_%s_%s.npy' % (args.dataset, args.attack))
data = np.concatenate((characteristics, labels), axis=1)
np.save(file_name, data)
elif args.characteristic == 'all':
# extract kernel density
characteristics, labels = get_kd(model, X_train, Y_train, X_test, X_test_noisy, X_test_adv)
file_name = os.path.join(PATH_DATA, 'kd_%s_%s.npy' % (args.dataset, args.attack))
data = np.concatenate((characteristics, labels), axis=1)
np.save(file_name, data)
# extract Bayesian uncertainty
characteristics, labels = get_bu(model, X_test, X_test_noisy, X_test_adv)
file_name = os.path.join(PATH_DATA, 'bu_%s_%s.npy' % (args.dataset, args.attack))
data = np.concatenate((characteristics, labels), axis=1)
|
np.save(file_name, data)
|
numpy.save
|
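# Illustrative sketch (not part of the original file): the kernel-density detector above
# fits one Gaussian KernelDensity per class on training features and scores test features
# under the predicted class. The feature arrays and bandwidth below are made up.
import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
train_feats = rng.normal(size=(200, 8))              # stand-in for one class's deep features
kde = KernelDensity(kernel='gaussian', bandwidth=1.0).fit(train_feats)
test_feats = rng.normal(size=(5, 8))
print(kde.score_samples(test_feats))                 # log-density of each test sample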
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import pyworld
import soundfile as sf
# Load the wav files into a list
# wav_dir: directory
# sr: sampling rate
def load_wavs(wav_dir, sr):
wavs = list()
filenames = list()
for file in os.listdir(wav_dir):
file_path = os.path.join(wav_dir, file)
wav, _ = librosa.load(file_path, sr=sr, mono=True)
wavs.append(wav)
filenames.append(file)
return wavs, filenames
# Extract the f0, the spectral envelope and the aperiodicity
# of an audio file using the WORLD vocoder
# wav: audio signal
# fs: sampling rate
# frame_period: duration of one frame
def world_decompose(wav, fs, frame_period=5.0):
wav = wav.astype(np.float64)
# Extract the f0 and the temporal position of each frame
f0, timeaxis = pyworld.harvest(
wav, fs, frame_period=frame_period, f0_floor=71.0, f0_ceil=800.0)
# Extract the spectral envelope
sp = pyworld.cheaptrick(wav, f0, timeaxis, fs)
# Extract the aperiodicity
ap = pyworld.d4c(wav, f0, timeaxis, fs)
return f0, timeaxis, sp, ap
# Generate the Mel-cepstral coefficient (MCEP) representation
# sp: spectral envelope
# fs: sampling rate
# dim: desired number of MCEP dimensions
def world_encode_spectral_envelop(sp, fs, dim=34):
# Get Mel-Cepstral coefficients (MCEP)
coded_sp = pyworld.code_spectral_envelope(sp, fs, dim)
return coded_sp
def world_encode_data(wave, fs, frame_period=5.0, coded_dim=34):
f0s = list()
timeaxes = list()
sps = list()
aps = list()
coded_sps = list()
for wav in wave:
f0, timeaxis, sp, ap = world_decompose(wav=wav,
fs=fs,
frame_period=frame_period)
coded_sp = world_encode_spectral_envelop(sp=sp, fs=fs, dim=coded_dim)
f0s.append(f0)
timeaxes.append(timeaxis)
sps.append(sp)
aps.append(ap)
coded_sps.append(coded_sp)
return f0s, timeaxes, sps, aps, coded_sps
# Compute the mean and standard deviation of the
# fundamental frequency
def logf0_statistics(f0s):
log_f0s_concatenated = np.ma.log(np.concatenate(f0s))
log_f0s_mean = log_f0s_concatenated.mean()
log_f0s_std = log_f0s_concatenated.std()
return log_f0s_mean, log_f0s_std
# Transpose each array in a list
def transpose_in_list(lst):
transposed_lst = list()
for array in lst:
transposed_lst.append(array.T)
return transposed_lst
# Normalize the Mel-cepstral coefficient (MCEP) representation
def coded_sps_normalization_fit_transform(coded_sps):
coded_sps_concatenated =
|
np.concatenate(coded_sps, axis=1)
|
numpy.concatenate
|
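# Illustrative sketch (not part of the original file): the function above is cut off
# mid-assignment; the usual fit-transform pattern for this step concatenates all
# utterances along the frame axis, takes per-coefficient mean and standard deviation,
# and normalizes each utterance with them. The arrays below are random stand-ins and
# this is not the file's actual continuation.
import numpy as np

coded_sps = [np.random.randn(34, 120), np.random.randn(34, 95)]   # (dim, frames) per utterance
concatenated = np.concatenate(coded_sps, axis=1)
mean = np.mean(concatenated, axis=1, keepdims=True)
std = np.std(concatenated, axis=1, keepdims=True)
normalized = [(sp - mean) / std for sp in coded_sps]
print(normalized[0].shape, normalized[1].shape)                    # (34, 120) (34, 95)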
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os, sys, time, copy
import yaml, h5py, shutil
from os import path
from pyDOE import lhs
from pyTrajectoryUtils.pyTrajectoryUtils.utils import *
from .trajSampler import TrajSampler
def meta_low_fidelity(poly, alpha_set, t_set_sta, points, debug=True, multicore=False, lb=0.6, ub=1.4):
t_dim = t_set_sta.shape[0]
lb_i = np.ones(t_dim)*lb
ub_i = np.ones(t_dim)*ub
label = np.zeros(alpha_set.shape[0])
if multicore:
data_list = []
for it in range(alpha_set.shape[0]):
alpha_tmp = lb_i + np.multiply(alpha_set[it,:],ub_i-lb_i)
data_list.append((points, t_set_sta, alpha_tmp))
results = parmap(poly.wrapper_sanity_check, data_list)
else:
results = []
for it in range(alpha_set.shape[0]):
alpha_tmp = lb_i + np.multiply(alpha_set[it,:],ub_i-lb_i)
results.append(poly.wrapper_sanity_check((points, t_set_sta, alpha_tmp)))
for it in range(alpha_set.shape[0]):
if results[it]:
label[it] = 1
if debug:
print("Succeeded")
else:
if debug:
print("Failed")
return label
def meta_high_fidelity(poly, alpha_set, t_set_sim, points, lb=0.6, ub=1.4, return_snap=False):
t_dim = t_set_sim.shape[0]
lb_i = np.ones(t_dim)*lb
ub_i = np.ones(t_dim)*ub
label = np.zeros(alpha_set.shape[0])
if return_snap:
snap_array = np.ones(alpha_set.shape[0])
for it in range(alpha_set.shape[0]):
alpha_tmp = lb_i + np.multiply(alpha_set[it,:],ub_i-lb_i)
if return_snap:
_, _, _, res_snap = poly.update_traj_(points, t_set_sim, alpha_tmp, flag_return_snap=True)
snap_array[it] = res_snap
continue
t_set_tmp, d_ordered_tmp, d_ordered_yaw_tmp = poly.update_traj_(points, t_set_sim, alpha_tmp)
if poly.run_sim_loop(t_set_tmp, d_ordered_tmp, d_ordered_yaw_tmp):
label[it] = 1
if return_snap:
return snap_array
else:
return label
def check_dataset_init(name, t_dim, N_L=200, N_H=20, lb=0.6, ub=1.4, sampling_mode=1, dataset_dir="./mfbo_data", flag_robot=False):
path_dataset_low = "{}/{}/low_fidelity_data_sta_{}_{}_smode{}.yaml" \
.format(dataset_dir,str(name),np.int(10*lb),np.int(10*ub),sampling_mode)
if path.exists(path_dataset_low):
X_L = []
Y_L = []
with open(path_dataset_low, "r") as input_stream:
yaml_data_in = yaml.load(input_stream, Loader=yaml.FullLoader)
alpha_sim = yaml_data_in["alpha_sim"]
if flag_robot:
alpha_robot = yaml_data_in["alpha_robot"]
X_L_t = yaml_data_in["X_L"]
Y_L_t = yaml_data_in["Y_L"]
if len(Y_L_t) >= N_L:
flag_generate_dataset = False
X_L += X_L_t[:np.int(N_L/2)]
Y_L += Y_L_t[:np.int(N_L/2)]
X_L += X_L_t[np.int(len(Y_L_t)/2):np.int(len(Y_L_t)/2)+np.int(N_L/2)]
Y_L += Y_L_t[np.int(len(Y_L_t)/2):np.int(len(Y_L_t)/2)+np.int(N_L/2)]
X_L = np.array(X_L)
Y_L = np.array(Y_L)
X_H = []
Y_H = []
H_init_step = 1./N_H
for i in range(np.int(N_H/2)):
val = np.ones(t_dim)*(0.45-i*H_init_step)
X_H.append(val)
Y_H.append(0.)
for i in range(np.int(N_H/2)):
val = np.ones(t_dim)*(0.5+i*H_init_step)
X_H.append(val)
Y_H.append(1.)
X_H = np.array(X_H)
Y_H = np.array(Y_H)
if flag_robot:
return True, (alpha_sim, alpha_robot, X_L, Y_L, X_H, Y_H)
else:
return True, (alpha_sim, X_L, Y_L, X_H, Y_H)
return False, None
def get_dataset_init(name, \
alpha_sim, \
low_fidelity, \
high_fidelity, \
t_dim, \
N_L=200, N_H=20, \
plot=False, \
t_set_sim=None, \
lb=0.6, ub=1.4, sampling_mode=1, \
batch_size=100, dataset_dir="./mfbo_data", alpha_robot=None):
path_dataset_low = "{}/{}/low_fidelity_data_sta_{}_{}_smode{}.yaml" \
.format(dataset_dir,str(name),np.int(10*lb),np.int(10*ub),sampling_mode)
X_L_0 = np.empty((0,t_dim))
X_L_1 = np.empty((0,t_dim))
X_L = np.empty((0,t_dim))
Y_L = np.empty(0)
if sampling_mode == 0:
sample_data = lambda N_sample: lhs(t_dim, N_sample)
elif sampling_mode == 1:
traj_sampler = TrajSampler(N=t_dim, sigma=0.2, flag_load=False, cov_mode=1, flag_pytorch=False)
sample_data = lambda N_sample: traj_sampler.rsample(N_sample=N_sample)
elif sampling_mode == 2:
traj_sampler = TrajSampler(N=t_dim, sigma=0.2, flag_load=False, cov_mode=1, flag_pytorch=False)
sample_data = lambda N_sample: np.concatenate((lhs(t_dim, N_sample),traj_sampler.rsample(N_sample=N_sample)),axis=0)
elif sampling_mode == 3:
traj_sampler = TrajSampler(N=t_dim, sigma=0.5, flag_load=False, cov_mode=1, flag_pytorch=False)
sample_data = lambda N_sample: traj_sampler.rsample(N_sample=N_sample)
elif sampling_mode == 4:
traj_sampler = TrajSampler(N=t_dim, sigma=1.0, flag_load=False, cov_mode=1, flag_pytorch=False)
sample_data = lambda N_sample: traj_sampler.rsample(N_sample=N_sample)
elif sampling_mode == 5:
traj_sampler = TrajSampler(N=t_dim, sigma=20.0, flag_load=False, cov_mode=1, flag_pytorch=False)
sample_data = lambda N_sample: traj_sampler.rsample(N_sample=N_sample)
elif sampling_mode == 6:
traj_sampler = TrajSampler(N=t_dim, sigma=0.05, flag_load=False, cov_mode=1, flag_pytorch=False)
sample_data = lambda N_sample: traj_sampler.rsample(N_sample=N_sample)
else:
raise("Not Implemented")
while True:
X_L_t = sample_data(batch_size)
labels_low = low_fidelity(X_L_t, debug=False)
Y_L_t = 1.0*labels_low
if np.where(Y_L_t == 0)[0].shape[0] > 0:
X_L_0 = np.concatenate((X_L_0, X_L_t[np.where(Y_L_t == 0)]))
if np.where(Y_L_t > 0)[0].shape[0] > 0:
X_L_1 = np.concatenate((X_L_1, X_L_t[np.where(Y_L_t > 0)]))
print("N_L_0: {}, N_L_1: {}".format(X_L_0.shape[0],X_L_1.shape[0]))
if X_L_0.shape[0] >= N_L/2 and X_L_1.shape[0] >= N_L/2:
X_L = np.concatenate((X_L_0[:np.int(N_L/2),:],X_L_1[:np.int(N_L/2),:]))
Y_L = np.zeros(N_L)
Y_L[np.int(N_L/2):] = 1
break
directory = os.path.dirname(path_dataset_low)
if not os.path.exists(directory):
os.makedirs(directory)
yaml_data = {"X_L":X_L, "Y_L":Y_L}
yamlFile = path_dataset_low
yaml_out = open(yamlFile,"w")
yaml_out.write("alpha_sim: {}\n\n".format(alpha_sim))
if
|
np.all(alpha_robot != None)
|
numpy.all
|
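# Illustrative sketch (not part of the original file): sampling_mode 0 above draws
# candidate time-scaling vectors by Latin hypercube sampling in the unit cube and maps
# them into [lb, ub], the same rescaling meta_low_fidelity applies. Dimensions and
# bounds below are made up.
import numpy as np
from pyDOE import lhs

t_dim, n_samples = 5, 8
unit_samples = lhs(t_dim, n_samples)        # n_samples points in [0, 1]^t_dim
lb, ub = 0.6, 1.4
alphas = lb + unit_samples * (ub - lb)      # rescale into the [lb, ub] box
print(alphas.shape)                         # (8, 5)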
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import copy
matplotlib.rcParams["font.size"] = 13
def phase(z):
val = np.angle(z)
# val = np.rad2deg(np.unwrap(np.angle((z))))
return val
class DataView(object):
"""
Provides viewing functions for Data
This can be inherited by XXX
"""
def set_xyz(self, x, y, z, normal="Z", geometry="grid"):
self.normal = normal
self.geometry = geometry
if geometry.upper() == "GRID":
if normal.upper() == "X":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = 1, y.size, z.size
self.Y, self.Z = np.meshgrid(y, z)
self.xyz = np.c_[
x * np.ones(self.ncy * self.ncz), self.Y.flatten(), self.Z.flatten()
]
elif normal.upper() == "Y":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, 1, z.size
self.X, self.Z = np.meshgrid(x, z)
self.xyz = np.c_[
self.X.flatten(), y * np.ones(self.ncx * self.ncz), self.Z.flatten()
]
elif normal.upper() == "Z":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, y.size, 1
self.X, self.Y = np.meshgrid(x, y)
self.xyz = np.c_[
self.X.flatten(), self.Y.flatten(), z * np.ones(self.ncx * self.ncy)
]
elif geometry.upper() == "PROFILE":
if normal.upper() == "X":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = 1, y.size, 1
self.Y, self.Z = self.y, self.z
self.xyz = np.c_[x * np.ones_like(self.y), self.Y, self.Z]
elif normal.upper() == "Y":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, 1, 1
self.Y, self.Z = self.y, self.z
self.xyz = np.c_[self.x, y * np.ones_like(self.x), self.Z]
elif normal.upper() == "Z":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, 1, 1
self.Y, self.Z = self.y, self.z
self.xyz = np.c_[self.x, self.y, z * np.ones_like(self.x)]
def eval_loc(self, srcLoc, obsLoc, log_sigvec, log_fvec, orientation, normal, func):
self.srcLoc = srcLoc
self.obsLoc = obsLoc
self.log_sigvec = log_sigvec
self.log_fvec = log_fvec
self.sigvec = 10.0 ** log_sigvec
self.fvec = 10.0 ** log_fvec
self.orientation = orientation
self.normal = normal
self.func1D = func
self.val_xfs = np.zeros((len(log_sigvec), len(log_fvec)), dtype=complex)
self.val_yfs = np.zeros((len(log_sigvec), len(log_fvec)), dtype=complex)
self.val_zfs = np.zeros((len(log_sigvec), len(log_fvec)), dtype=complex)
for n in range(len(log_sigvec)):
self.val_xfs[n], self.val_yfs[n], self.val_zfs[n] = func(
self.obsLoc,
srcLoc,
10.0 ** log_sigvec[n],
10.0 ** log_fvec,
orientation=self.orientation,
)
def eval(self, xyz, srcLoc, sig, f, orientation, func, normal="Z", t=0.0):
val_x, val_y, val_z = func(xyz, srcLoc, sig, f, orientation=orientation, t=t)
return val_x, val_y, val_z
def eval_TD(self, xyz, srcLoc, sig, t, orientation, func, normal="Z"):
val_x, val_y, val_z = func(xyz, srcLoc, sig, t, orientation=orientation)
return val_x, val_y, val_z
def eval_2D(self, srcLoc, sig, f, orientation, func, t=0.0):
self.func2D = func
self.srcLoc = srcLoc
self.sig = sig
self.t = f
self.orientation = orientation
self.val_x, self.val_y, self.val_z = func(
self.xyz, srcLoc, sig, f, orientation=orientation, t=t
)
if self.normal.upper() == "X":
def Freshape(v):
return v.reshape(self.ncy, self.ncz)
elif self.normal.upper() == "Y":
def Freshape(v):
return v.reshape(self.ncx, self.ncz)
elif self.normal == "Z":
def Freshape(v):
return v.reshape(self.ncx, self.ncy)
self.VAL_X = Freshape(self.val_x)
self.VAL_Y = Freshape(self.val_y)
self.VAL_Z = Freshape(self.val_z)
self.VEC_R_amp = np.sqrt(
self.VAL_X.real ** 2 + self.VAL_Y.real ** 2 + self.VAL_Z.real ** 2
)
self.VEC_I_amp = np.sqrt(
self.VAL_X.imag ** 2 + self.VAL_Y.imag ** 2 + self.VAL_Z.imag ** 2
)
self.VEC_A_amp = np.sqrt(
np.abs(self.VAL_X) ** 2 + np.abs(self.VAL_Y) ** 2 + np.abs(self.VAL_Z) ** 2
)
self.VEC_P_amp = np.sqrt(
phase(self.VAL_X) ** 2 + phase(self.VAL_Y) ** 2 + phase(self.VAL_Z) ** 2
)
def eval_2D_TD(self, srcLoc, sig, t, orientation, func):
self.func2D = func
self.srcLoc = srcLoc
self.sig = sig
self.t = t
self.orientation = orientation
self.val_x, self.val_y, self.val_z = func(
self.xyz, srcLoc, sig, t, orientation=orientation
)
if self.normal.upper() == "X":
def Freshape(v):
return v.reshape(self.ncy, self.ncz)
elif self.normal.upper() == "Y":
def Freshape(v):
return v.reshape(self.ncx, self.ncz)
elif self.normal.upper() == "Z":
def Freshape(v):
return v.reshape(self.ncx, self.ncy)
self.VAL_X = Freshape(self.val_x)
self.VAL_Y = Freshape(self.val_y)
self.VAL_Z = Freshape(self.val_z)
self.VEC_amp = np.sqrt(
self.VAL_X.real ** 2 + self.VAL_Y.real ** 2 + self.VAL_Z.real ** 2
)
def plot2D_FD(
self,
component="real",
view="vec",
ncontour=20,
logamp=True,
clim=None,
showcontour=False,
levels=None,
ax=None,
colorbar=True,
cmap="viridis",
):
"""
2D visualization of dipole fields
"""
if ax is None:
plt.figure(figsize=(6.5, 5))
ax = plt.subplot(111)
if component == "real":
VAL_X = self.VAL_X.real
VAL_Y = self.VAL_Y.real
VAL_Z = self.VAL_Z.real
VEC_amp = self.VEC_R_amp
elif component == "imag":
VAL_X = self.VAL_X.imag
VAL_Y = self.VAL_Y.imag
VAL_Z = self.VAL_Z.imag
VEC_amp = self.VEC_I_amp
elif component == "amplitude":
VAL_X = abs(self.VAL_X)
VAL_Y = abs(self.VAL_Y)
VAL_Z = abs(self.VAL_Z)
VEC_amp = self.VEC_A_amp
elif component == "phase":
VAL_X = phase(self.VAL_X)
VAL_Y = phase(self.VAL_Y)
VAL_Z = phase(self.VAL_Z)
VEC_amp = self.VEC_P_amp
else:
raise Exception("component should be in real, imag, amplitude, or phase!")
if view == "amp" or view == "vec":
val = VEC_amp
elif view.upper() == "X":
val = VAL_X
elif view.upper() == "Y":
val = VAL_Y
elif view.upper() == "Z":
val = VAL_Z
if logamp is True:
zeroind = val == 0
val = np.log10(abs(val))
val[zeroind] = val[~zeroind].min()
if self.normal.upper() == "X":
a, b = self.y, self.z
vec_a, vec_b = self.VAL_Y, self.VAL_Z
xlabel = "Y (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Y":
a, b = self.x, self.z
vec_a, vec_b = self.VAL_X, self.VAL_Z
xlabel = "X (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Z":
a, b = self.x, self.y
vec_a, vec_b = self.VAL_X, self.VAL_Y
xlabel = "X (m)"
ylabel = "Y (m)"
if clim is None:
vmin, vmax = val.min(), val.max()
else:
vmin, vmax = clim[0], clim[1]
dat = ax.contourf(
a, b, val, ncontour, clim=(vmin, vmax), vmin=vmin, vmax=vmax, cmap=cmap
)
if showcontour:
ax.contour(a, b, val, levels, colors="k", linestyles="-")
if colorbar:
if logamp is True:
plt.colorbar(
dat, ax=ax, format="$10^{%.1f}$", ticks=np.linspace(vmin, vmax, 3)
)
else:
plt.colorbar(
dat, ax=ax, format="%.1e", ticks=np.linspace(vmin, vmax, 3)
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if view == "vec":
# nx = self.x.size
# nskip = int(nx / 15)
if component == "real":
# ax.quiver(a[::nskip], b[::nskip], (vec_a.real/VEC_amp)[::nskip,::nskip], (vec_b.real/VEC_amp)[::nskip,::nskip], color="w", linewidth=0.5)
ax.streamplot(a, b, vec_a.real, vec_b.real, color="w", linewidth=0.5)
elif component == "imag":
# ax.quiver(a, b, vec_a.imag/VEC_amp, vec_b.imag/VEC_amp, color="w", linewidth=0.5)
ax.streamplot(a, b, vec_a.imag, vec_b.imag, color="w", linewidth=0.5)
if component == "amplitude":
# ax.quiver(a, b, abs(vec_a)/VEC_amp, abs(vec_b)/VEC_amp, color="w", linewidth=0.5)
ax.streamplot(a, b, abs(vec_a), abs(vec_b), color="w", linewidth=0.5)
elif component == "phase":
# ax.quiver(a, b, phase(vec_a)/VEC_amp, phase(vec_b)/VEC_amp, color="w", linewidth=0.5)
ax.streamplot(
a, b, phase(vec_a), phase(vec_b), color="w", linewidth=0.5
)
return ax, dat
def plot2D_TD(
self,
view="vec",
ncontour=20,
logamp=True,
clim=None,
showcontour=False,
levels=None,
ax=None,
colorbar=True,
cmap="viridis",
):
"""
2D visualization of dipole fields
"""
if ax is None:
plt.figure(figsize=(6.5, 5))
ax = plt.subplot(111)
if view == "amp" or view == "vec":
val = self.VEC_amp
elif view.upper() == "X":
val = self.VAL_X
elif view.upper() == "Y":
val = self.VAL_Y
elif view.upper() == "Z":
val = self.VAL_Z
if logamp is True:
zeroind = val == 0
val = np.log10(abs(val))
val[zeroind] = val[~zeroind].min()
if self.normal.upper() == "X":
a, b = self.y, self.z
vec_a, vec_b = self.VAL_Y, self.VAL_Z
xlabel = "Y (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Y":
a, b = self.x, self.z
vec_a, vec_b = self.VAL_X, self.VAL_Z
xlabel = "X (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Z":
a, b = self.x, self.y
vec_a, vec_b = self.VAL_X, self.VAL_Y
xlabel = "X (m)"
ylabel = "Y (m)"
if clim is None:
vmin, vmax = val.min(), val.max()
else:
vmin, vmax = clim[0], clim[1]
dat = ax.contourf(
a, b, val, ncontour, clim=(vmin, vmax), vmin=vmin, vmax=vmax, cmap=cmap
)
if showcontour:
ax.contour(a, b, val, levels, colors="k", linestyles="-")
if colorbar:
if logamp is True:
plt.colorbar(
dat, ax=ax, format="$10^{%.1f}$", ticks=np.linspace(vmin, vmax, 3)
)
else:
plt.colorbar(
dat, ax=ax, format="%.1e", ticks=np.linspace(vmin, vmax, 3)
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if view == "vec":
# nx = self.x.size
# nskip = int(nx / 15)
# ax.quiver(a[::nskip], b[::nskip], (vec_a.real/VEC_amp)[::nskip,::nskip], (vec_b.real/VEC_amp)[::nskip,::nskip], color="w", linewidth=0.5)
ax.streamplot(a, b, vec_a, vec_b, color="w", linewidth=0.5)
return ax, dat
def plot_profile_FD(
self,
start,
end,
nbmp,
component="real",
view="x",
logamp=True,
ax=None,
color="black",
):
if ax is None:
plt.figure(figsize=(6.5, 5))
ax = plt.subplot(111)
if self.geometry.upper() == "PROFILE":
start = self.xyz[0]
end = self.xyz[-1]
self1D = copy.deepcopy(self)
# Pr for Profile
Pr = self.xyz
elif self.geometry.upper() == "GRID":
self1D = DataView()
Pr = np.zeros(shape=(nbmp, 3))
Pr[:, 0] = np.linspace(start[0], end[0], nbmp)
Pr[:, 1] = np.linspace(start[1], end[1], nbmp)
Pr[:, 2] = np.linspace(start[2], end[2], nbmp)
self1D.set_xyz(
Pr[:, 0], Pr[:, 1], Pr[:, 2], normal=self.normal, geometry="profile"
)
self1D.eval_2D(self.srcLoc, self.sig, self.f, self.orientation, self.func2D)
# Distance from starting point
D = np.sqrt(
(Pr[0, 0] - Pr[:, 0]) ** 2
+ (Pr[:, 1] - Pr[0, 1]) ** 2
+ (Pr[:, 2] - Pr[0, 2]) ** 2
)
# if self.normal.upper() == "Z":
# self1D.set_xyz(Pr[:,0],Pr[:,1],self.z,normal=self.normal,geometry="profile")
# elif self.normal.upper() == "Y":
# self1D.set_xyz(Pr[:,0],self.y,Pr[:,1],normal=self.normal,geometry="profile")
# elif self.normal.upper() == "X":
# self1D.set_xyz(self.x,Pr[:,0],Pr[:,1],normal=self.normal,geometry="profile")
pltvalue = []
if view.upper() == "X":
pltvalue = self1D.val_x
elif view.upper() == "Y":
pltvalue = self1D.val_y
elif view.upper() == "Z":
pltvalue = self1D.val_z
if component.upper() == "REAL":
ax.plot(D, pltvalue.real, color=color)
ax.set_ylabel("E field, Real part (V/m)")
elif component.upper() == "IMAG":
ax.plot(D, pltvalue.imag, color=color)
ax.set_ylabel("E field, Imag part (V/m)")
elif component.upper() == "AMPLITUDE":
if logamp is True:
ax.set_yscale("log")
ax.plot(D, np.absolute(pltvalue), color=color)
ax.set_ylabel("E field, Amplitude (V/m)")
elif component.upper() == "PHASE":
ax.plot(D, phase(pltvalue), color=color)
ax.set_ylabel("E field, Phase")
ax.set_xlabel("Distance from startinng point (m)")
return ax
def plot_1D_RI_section(self, start, end, nbmp, view, ax0, ax1):
self1D = DataView()
# Pr for Profile
Pr = np.zeros(shape=(nbmp, 2))
Pr[:, 0] = np.linspace(start[0], end[0], nbmp)
Pr[:, 1] = np.linspace(start[1], end[1], nbmp)
# Distance from starting point
D =
|
np.sqrt((Pr[0, 0] - Pr[:, 0]) ** 2 + (Pr[:, 1] - Pr[0, 1]) ** 2)
|
numpy.sqrt
|
import math
import os
import numpy as np
import pygame
from gym import spaces
from gym.utils import seeding
from scipy.spatial import distance as ssd
from .._utils import Agent
FPS = 15
class Archea(Agent):
def __init__(self, idx, radius, n_sensors, sensor_range, max_accel, speed_features=True):
self._idx = idx
self._radius = radius
self._n_sensors = n_sensors
self._sensor_range = sensor_range
self._max_accel = max_accel
# Number of observation coordinates from each sensor
self._sensor_obscoord = 5
if speed_features:
self._sensor_obscoord += 3
self._sensor_obs_coord = self._n_sensors * self._sensor_obscoord
self._obs_dim = self._sensor_obs_coord + 2 # +1 for is_colliding_evader, +1 for is_colliding_poison
self._position = None
self._velocity = None
# Generate self._n_sensors angles, evenly spaced from 0 to 2pi
# We generate 1 extra angle and remove it because linspace[0] = 0 = 2pi = linspace[-1]
angles = np.linspace(0., 2. * np.pi, self._n_sensors + 1)[:-1]
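        # e.g. n_sensors = 4 gives angles [0, pi/2, pi, 3*pi/2]; the duplicate 2*pi
        # endpoint produced by linspace is removed by the [:-1] slice above.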
# Convert angles to x-y coordinates
sensor_vectors = np.c_[np.cos(angles), np.sin(angles)]
self._sensors = sensor_vectors
@property
def observation_space(self):
return spaces.Box(low=np.float32(-np.sqrt(2)), high=np.float32(2 * np.sqrt(2)), shape=(self._obs_dim,),
dtype=np.float32)
@property
def action_space(self):
return spaces.Box(low=np.float32(-self._max_accel), high=np.float32(self._max_accel), shape=(2,),
dtype=np.float32)
@property
def position(self):
assert self._position is not None
return self._position
@property
def velocity(self):
assert self._velocity is not None
return self._velocity
def set_position(self, pos):
assert pos.shape == (2,)
self._position = pos
def set_velocity(self, velocity):
assert velocity.shape == (2,)
self._velocity = velocity
@property
def sensors(self):
assert self._sensors is not None
return self._sensors
def sensed(self, object_coord, object_radius, same=False):
"""Whether object would be sensed by the pursuers"""
relative_coord = object_coord - np.expand_dims(self.position, 0)
# Projection of object coordinate in direction of sensor
sensorvals = self.sensors.dot(relative_coord.T)
# Set sensorvals to np.inf when object should not be seen by sensor
distance_squared = (relative_coord ** 2).sum(axis=1)[None, :]
sensorvals[
(sensorvals < 0) # Wrong direction (by more than 90 degrees in both directions)
| (sensorvals - object_radius > self._sensor_range) # Outside sensor range
| (distance_squared - sensorvals ** 2 > object_radius ** 2) # Sensor does not intersect object
] = np.inf
if same:
# Set sensors values for sensing the current object to np.inf
sensorvals[:, self._idx - 1] = np.inf
return sensorvals
def sense_barriers(self, min_pos=0, max_pos=1):
sensor_vectors = self.sensors * self._sensor_range
# Let's try a different method---polar!
# The key insight is that there is a triangle formed when the particle moves outside of the circle
# Namely, the triangle with vertices---center, particle, and intersection
# So the sides are---position vector, velocity vector, and radius
# TODO: get rid of magic number 0.5
position = self.position - 0.5
# We first need the angle formed by center, particle, and particle movement direction
# For vectorized calculation, I will use reshape so that all sensors could be calculated together
unit_vector_1 = (position / np.linalg.norm(position)).reshape(-1, 1)
unit_vector_2 = sensor_vectors / np.linalg.norm(sensor_vectors)
dot_product = unit_vector_2 @ unit_vector_1
theta_two = np.arccos(dot_product)
# Now we apply the sine law once and find the other angle that we can know
theta_one = np.arcsin(np.linalg.norm(position) * np.sin(theta_two) / 0.5)
# Finally, we find the last angle
# As well as the corresponding triangle side because that tells us where the particle will end up
theta_three = np.pi - theta_one - theta_two
max_length = np.linalg.norm(position) * np.sin(theta_three) / np.sin(theta_one)
clipped_vectors = max_length / np.linalg.norm(sensor_vectors, axis=1).reshape(self._n_sensors,
1) * sensor_vectors
# Find the ratio of the clipped sensor vector to the original sensor vector
# Scaling the vector by this ratio will limit the end of the vector to the barriers
ratios = np.divide(clipped_vectors, sensor_vectors, out=np.ones_like(clipped_vectors),
where=np.abs(sensor_vectors) > 0.00000001)
# Find the minimum ratio (x or y) of clipped endpoints to original endpoints
minimum_ratios = np.amin(ratios, axis=1)
# Convert to 2d array of size (n_sensors, 1)
sensor_values = np.expand_dims(minimum_ratios, 0)
# Set values beyond sensor range to infinity
does_sense = minimum_ratios < (1.0 - 1e-4)
does_sense = np.expand_dims(does_sense, 0)
sensor_values[np.logical_not(does_sense)] = np.inf
# Convert -0 to 0
sensor_values[sensor_values == -0] = 0
return sensor_values.T
class MAWaterWorld():
def __init__(self, n_pursuers=5, n_evaders=5, n_poison=10, n_coop=2, n_sensors=30, sensor_range=0.2,
radius=0.015, obstacle_radius=0.2, obstacle_coord=(0.5, 0.5),
pursuer_max_accel=0.01, evader_speed=0.01, poison_speed=0.01, poison_reward=-1.0,
food_reward=10.0, encounter_reward=0.01, thrust_penalty=-0.5, local_ratio=1.0,
speed_features=True, max_cycles=500):
"""
n_pursuers: number of pursuing archea (agents)
n_evaders: number of evader archea
n_poison: number of poison archea
n_coop: number of pursuing archea (agents) that must be touching food at the same time to consume it
n_sensors: number of sensors on all pursuing archea (agents)
sensor_range: length of sensor dendrite on all pursuing archea (agents)
radius: archea base radius. Pursuer: radius, evader: 2 x radius, poison: 3/4 x radius
obstacle_radius: radius of obstacle object
obstacle_coord: coordinate of obstacle object. Can be set to `None` to use a random location
pursuer_max_accel: pursuer archea maximum acceleration (maximum action size)
evader_speed: evading archea speed
poison_speed: poison archea speed
poison_reward: reward for pursuer consuming a poison object (typically negative)
        food_reward: reward for pursuers consuming an evading archea
encounter_reward: reward for a pursuer colliding with an evading archea
thrust_penalty: scaling factor for the negative reward used to penalize large actions
local_ratio: Proportion of reward allocated locally vs distributed globally among all agents
speed_features: toggles whether pursuing archea (agent) sensors detect speed of other archea
max_cycles: After max_cycles steps all agents will return done
"""
self.seed()
self.n_pursuers = n_pursuers
self.n_evaders = n_evaders
self.n_coop = n_coop
self.n_poison = n_poison
self.obstacle_radius = obstacle_radius
obstacle_coord = np.array(obstacle_coord)
self.initial_obstacle_coord = self.np_random.uniform(0, 1, 2) if obstacle_coord is None else obstacle_coord
self.pursuer_max_accel = pursuer_max_accel
self.evader_speed = evader_speed
self.poison_speed = poison_speed
self.radius = radius
self.n_sensors = n_sensors
self.sensor_range = np.ones(self.n_pursuers) * min(sensor_range, (math.ceil(math.sqrt(2) * 100) / 100.0))
self.poison_reward = poison_reward
self.food_reward = food_reward
self.thrust_penalty = thrust_penalty
self.encounter_reward = encounter_reward
self.last_rewards = [np.float64(0) for _ in range(self.n_pursuers)]
self.control_rewards = [0 for _ in range(self.n_pursuers)]
self.last_dones = [False for _ in range(self.n_pursuers)]
self.last_obs = [None for _ in range(self.n_pursuers)]
self.n_obstacles = 1
self.local_ratio = local_ratio
self._speed_features = speed_features
self.max_cycles = max_cycles
# TODO: Look into changing hardcoded radius ratios
self._pursuers = [
Archea(pursuer_idx + 1, self.radius, self.n_sensors, sensor_range, self.pursuer_max_accel,
speed_features=self._speed_features)
for pursuer_idx in range(self.n_pursuers)
]
self._evaders = [
Archea(evader_idx + 1, self.radius * 2, self.n_pursuers, 0, self.evader_speed)
for evader_idx in range(self.n_evaders)
]
self._poisons = [
Archea(poison_idx + 1, self.radius * 3 / 4, self.n_poison, 0, self.poison_speed)
for poison_idx in range(self.n_poison)
]
self.num_agents = self.n_pursuers
self.action_space = [agent.action_space for agent in self._pursuers]
self.observation_space = [
agent.observation_space for agent in self._pursuers]
self.renderOn = False
self.pixel_scale = 30 * 25
self.cycle_time = 1.0 * 15. / FPS
self.frames = 0
self.reset()
def close(self):
if self.renderOn:
# pygame.event.pump()
pygame.display.quit()
pygame.quit()
@property
def agents(self):
return self._pursuers
def get_param_values(self):
return self.__dict__
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _generate_coord(self, radius):
# sample points in the circle
# I use the length-angle method of sampling
# There needs to be both a lower bound and upper bound on the length
# because we don't want the object to go outside or start where the obstacle is
# TODO: get rid of magic numbers here: 0.5 is radius of circle
length = self.np_random.uniform(10 * self.radius * (2 ** (1 / 2)), 0.5 - radius * 2)
angle = np.pi * self.np_random.uniform(0, 2)
x = length * np.cos(angle)
y = length * np.sin(angle)
coord = np.array([self.initial_obstacle_coord[0] + x, self.initial_obstacle_coord[1] + y])
# Create random coordinate that avoids obstacles
while ssd.cdist(coord[None, :], self.obstacle_coords) <= radius * 2 + self.obstacle_radius:
length = self.np_random.uniform(10 * self.radius * (2 ** (1 / 2)), 0.5 - radius * 2)
angle = np.pi * self.np_random.uniform(0, 2)
x = length * np.cos(angle)
y = length * np.sin(angle)
coord = np.array([self.initial_obstacle_coord[0] + x, self.initial_obstacle_coord[1] + y])
return coord
def reset(self):
self.frames = 0
# Initialize obstacles
if self.initial_obstacle_coord is None:
# Generate obstacle positions in range [0, 1)
self.obstacle_coords = self.np_random.rand(self.n_obstacles, 2)
else:
self.obstacle_coords = self.initial_obstacle_coord[None, :]
# Set each obstacle's velocity to 0
# TODO: remove if obstacles should never move
self.obstacle_speeds = np.zeros((self.n_obstacles, 2))
# Initialize pursuers
for pursuer in self._pursuers:
pursuer.set_position(self._generate_coord(pursuer._radius))
pursuer.set_velocity(np.zeros(2))
# Initialize evaders
for evader in self._evaders:
evader.set_position(self._generate_coord(evader._radius))
# Generate velocity such that speed <= self.evader_speed
velocity = self.np_random.rand(2) - 0.5
speed = np.linalg.norm(velocity)
if speed > self.evader_speed:
# Limit speed to self.evader_speed
velocity = velocity / speed * self.evader_speed
evader.set_velocity(velocity)
# Initialize poisons
for poison in self._poisons:
poison.set_position(self._generate_coord(poison._radius))
# Generate both velocity components from range [-self.poison_speed, self.poison_speed)
# Generate velocity such that speed <= self.poison_speed
velocity = self.np_random.rand(2) - 0.5
speed = np.linalg.norm(velocity)
if speed > self.poison_speed:
# Limit speed to self.poison_speed
velocity = velocity / speed * self.poison_speed
poison.set_velocity(velocity)
rewards = np.zeros(self.n_pursuers)
sensor_features, collided_pursuer_evader, collided_pursuer_poison, rewards \
= self.collision_handling_subroutine(rewards, True)
obs_list = self.observe_list(
sensor_features, collided_pursuer_evader, collided_pursuer_poison)
self.last_rewards = [np.float64(0) for _ in range(self.n_pursuers)]
self.control_rewards = [0 for _ in range(self.n_pursuers)]
self.last_dones = [False for _ in range(self.n_pursuers)]
self.last_obs = obs_list
return obs_list[0]
def _caught(self, is_colliding_x_y, n_coop):
""" Check whether collision results in catching the object
This is because you need `n_coop` agents to collide with the object to actually catch it
"""
# Number of collisions for each y
n_collisions = is_colliding_x_y.sum(axis=0)
# List of y that have been caught
caught_y = np.where(n_collisions >= n_coop)[0]
# Boolean array indicating which x caught any y in caught_y
did_x_catch_y = is_colliding_x_y[:, caught_y]
# List of x that caught corresponding y in caught_y
x_caught_y = np.where(did_x_catch_y >= 1)[0]
return caught_y, x_caught_y
def _closest_dist(self, closest_object_idx, input_sensorvals):
"""Closest distances according to `idx`"""
sensorvals = []
for pursuer_idx in range(self.n_pursuers):
sensors = np.arange(self.n_sensors) # sensor indices
objects = closest_object_idx[pursuer_idx, ...] # object indices
sensorvals.append(input_sensorvals[pursuer_idx, ..., sensors, objects])
return np.c_[sensorvals]
def _extract_speed_features(self, object_velocities, object_sensorvals, sensed_mask):
# sensed_mask is a boolean mask of which sensor values detected an object
sensorvals = []
for pursuer in self._pursuers:
relative_speed = object_velocities - np.expand_dims(pursuer.velocity, 0)
sensorvals.append(pursuer.sensors.dot(relative_speed.T))
sensed_speed = np.c_[sensorvals] # Speeds in direction of each sensor
speed_features = np.zeros((self.n_pursuers, self.n_sensors))
sensorvals = []
for pursuer_idx in range(self.n_pursuers):
sensorvals.append(
sensed_speed[pursuer_idx, :, :][np.arange(self.n_sensors), object_sensorvals[pursuer_idx, :]]
)
# Set sensed values, all others remain 0
speed_features[sensed_mask] = np.c_[sensorvals][sensed_mask]
return speed_features
def collision_handling_subroutine(self, rewards, is_last):
# Stop pursuers upon hitting a wall
for pursuer in self._pursuers:
# Here we are trying to clip based on a circle, not a square
# Given the current position of the pursuer (outside of the circle) and its velocity,
# we want to "pull it back" along the direction of the velocity vector into the circle again
# TODO: get rid of the 0.5 magic number
# The code below will make a lot more sense if you reference this website
# https://codereview.stackexchange.com/questions/86421/line-segment-to-circle-collision-algorithm
# The code is optimized so that we find the intersection between the velocity vector and the circle
# using vector calculus, and that intersection will be where the pursuer tangents the circle
distance = abs(pursuer.position - 0.5) + pursuer._radius
# if we are outside the circle
if (distance[0] ** 2 + distance[1] ** 2 > 0.5 ** 2):
# again, you should reference the link above to make sure this velocity vector makes sense
# We are treating the pursuer position as the starting point of the velocity vector
if (pursuer.velocity[0] == 0 and pursuer.velocity[1] == 0) or (
pursuer.velocity[0] == -0 and pursuer.velocity[1] == -0):
# If this happens then we have no velocity to rely on.
# In that case, we just bring the particle back directly towards the center
v = 0.5 - pursuer.position
else:
v = -1 * pursuer.velocity
# The determinant of this quadratic equation must always be non-negative because
# there will always be an intersection between the velocity and the circle
# In fact, there will always be two intersections
# We are looking for the closest one, hence the t with the smaller absolute value
q = self.initial_obstacle_coord
p = pursuer.position
a = v.dot(v)
b = 2 * v.dot(p - q)
c = p.dot(p) + q.dot(q) - 2 * p.dot(q) - 0.5 ** 2
disc = b ** 2 - 4 * a * c
assert disc >= 0
sqrt_disc = math.sqrt(disc)
sol = [(-b + sqrt_disc) / (2 * a), (-b - sqrt_disc) / (2 * a)]
abs_sol = [abs(number) for number in sol]
min_abs = min(abs_sol)
idx = abs_sol.index(min_abs)
t = sol[idx]
# The last term is because the pursuer has a radius that we need to account for
pursuer.set_position(
pursuer.position + t * v + 2 * pursuer._radius / np.linalg.norm(0.5 - pursuer.position) * (
0.5 - pursuer.position))
pursuer.set_velocity(v)
def rebound_particles(particles, n):
collisions_particle_obstacle = np.zeros(n)
# Particles rebound on hitting an obstacle
for idx, particle in enumerate(particles):
# We find whether the particle is colliding with any of the four sides our hourglass obstacle
# In graphics the four corners are actually 10 * self.radius
# However, the collision needs to account for an extra radius length
center = self.obstacle_coords[0]
topleft = np.array([center[0] - 11 * self.radius, center[1] - 11 * self.radius])
topright = np.array([center[0] + 11 * self.radius, center[1] - 11 * self.radius])
bottomleft = np.array([center[0] - 11 * self.radius, center[1] + 11 * self.radius])
bottomright = np.array([center[0] + 11 * self.radius, center[1] + 11 * self.radius])
topdist = np.linalg.norm(np.cross(topright - topleft, topleft - particle.position)) / np.linalg.norm(
topright - topleft)
top = (topdist <= particle._radius) and (
topleft[0] <= particle.position[0] and topright[0] >= particle.position[0])
leftdist = np.linalg.norm(
np.cross(bottomright - topleft, topleft - particle.position)) / np.linalg.norm(
bottomright - topleft)
left = (leftdist <= particle._radius) and (
topleft[0] <= particle.position[0] and bottomright[0] >= particle.position[0]) and (
topleft[1] <= particle.position[1] and bottomright[1] >= particle.position[1])
rightdist = np.linalg.norm(
np.cross(topright - bottomleft, bottomleft - particle.position)) / np.linalg.norm(
topright - bottomleft)
right = (rightdist <= particle._radius) and (
topleft[0] <= particle.position[0] and bottomright[0] >= particle.position[0]) and (
topleft[1] <= particle.position[1] and bottomright[1] >= particle.position[1])
bottomdist = np.linalg.norm(
np.cross(bottomright - bottomleft, bottomleft - particle.position)) / np.linalg.norm(
bottomright - bottomleft)
bottom = (bottomdist <= particle._radius) and (
topleft[0] <= particle.position[0] and topright[0] >= particle.position[0])
is_colliding = top or left or right or bottom
collisions_particle_obstacle[idx] = is_colliding.sum()
if collisions_particle_obstacle[idx] > 0:
# Rebound the particle that collided with an obstacle
velocity_scale = particle._radius + self.obstacle_radius - \
ssd.euclidean(particle.position, self.obstacle_coords)
pos_diff = particle.position - self.obstacle_coords[0]
new_pos = particle.position + velocity_scale * pos_diff
particle.set_position(new_pos)
collision_normal = particle.position - self.obstacle_coords[0]
# project current velocity onto collision normal
current_vel = particle.velocity
proj_numer = np.dot(current_vel, collision_normal)
cllsn_mag = np.dot(collision_normal, collision_normal)
proj_vel = (proj_numer / cllsn_mag) * collision_normal
perp_vel = current_vel - proj_vel
total_vel = perp_vel - proj_vel
particle.set_velocity(total_vel)
rebound_particles(self._pursuers, self.n_pursuers)
if is_last:
rebound_particles(self._evaders, self.n_evaders)
rebound_particles(self._poisons, self.n_poison)
positions_pursuer = np.array([pursuer.position for pursuer in self._pursuers])
positions_evader = np.array([evader.position for evader in self._evaders])
positions_poison = np.array([poison.position for poison in self._poisons])
# Find evader collisions
distances_pursuer_evader = ssd.cdist(positions_pursuer, positions_evader)
# Generate n_evaders x n_pursuers matrix of boolean values for collisions
collisions_pursuer_evader = distances_pursuer_evader <= np.asarray([
pursuer._radius + evader._radius for pursuer in self._pursuers
for evader in self._evaders
]).reshape(self.n_pursuers, self.n_evaders)
# Number of collisions depends on n_coop, how many are needed to catch an evader
caught_evaders, pursuer_evader_catches = self._caught(
collisions_pursuer_evader, self.n_coop)
# Find poison collisions
distances_pursuer_poison = ssd.cdist(positions_pursuer, positions_poison)
collisions_pursuer_poison = distances_pursuer_poison <= np.asarray([
pursuer._radius + poison._radius for pursuer in self._pursuers
for poison in self._poisons
]).reshape(self.n_pursuers, self.n_poison)
caught_poisons, pursuer_poison_collisions = self._caught(
collisions_pursuer_poison, 1)
# Find sensed obstacles
sensorvals_pursuer_obstacle = np.array(
[pursuer.sensed(self.obstacle_coords, self.obstacle_radius) for pursuer in self._pursuers])
# Find sensed barriers
sensorvals_pursuer_barrier = np.array(
[pursuer.sense_barriers() for pursuer in self._pursuers])
# Find sensed evaders
sensorvals_pursuer_evader = np.array(
[pursuer.sensed(positions_evader, self.radius * 2) for pursuer in self._pursuers])
# Find sensed poisons
sensorvals_pursuer_poison = np.array(
[pursuer.sensed(positions_poison, self.radius * 3 / 4) for pursuer in self._pursuers])
# Find sensed pursuers
sensorvals_pursuer_pursuer = np.array(
[pursuer.sensed(positions_pursuer, self.radius, same=True) for pursuer in self._pursuers])
# Collect distance features
def sensor_features(sensorvals):
closest_idx_array = np.argmin(sensorvals, axis=2)
closest_distances = self._closest_dist(closest_idx_array, sensorvals)
finite_mask =
|
np.isfinite(closest_distances)
|
numpy.isfinite
|
# import Modules ----------------------------------
import numpy as np
import matplotlib.pyplot as plt
# Create Functions for Rays ----------------------
# rays are defined as ax+by+c=0
# math ref: https://www.cuemath.com/geometry/intersection-of-two-lines/
# generate parallel rays
def Generate_Rays(Theta,Spacing,Num):
# takes angle, spacing and number of rays
# return a matrix of a,b,c values for each ray
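    # Worked example (illustration): Num=3, Spacing=1, Theta=0 yields c = [-1, 0, 1],
    # i.e. three horizontal lines y = 1, y = 0 and y = -1 centred about the origin.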
Rays = np.zeros([3,Num]) # empty matrix with a,b and c values ax+by+c=0
if (Theta == 0): #horizontal lines
Rays[0,:] = 0
Rays[1,:] = 1
Rays[2,:] = np.arange(0, Spacing*Num,Spacing) - Num*Spacing/2 + Spacing/2
elif (Theta == np.pi/2): # vertical lines
Rays[0,:] = 1
Rays[1,:] = 0
Rays[2,:] = np.arange(0, Spacing*Num,Spacing) - Num*Spacing/2 + Spacing/2
else:
Rays[0,:] = -
|
np.tan(Theta)
|
numpy.tan
|
# -------------------------------------------------------------------- #
# #
# Python script for calculating the bandstructure of C60 #
# #
# This script calculates the electron transport #
# of the Bucky Ball (C60) #
# #
# Written by <NAME> (<EMAIL>) #
# #
# -------------------------------------------------------------------- #
# --------------------------Import Libraries-------------------------- #
from matplotlib import pyplot as plt # Pyplot for nice graphs
from mpl_toolkits.mplot3d import Axes3D # Used for 3D plots
from matplotlib.widgets import Slider, Button
import numpy as np # NumPy
from numpy import linalg as LA
from collections import Counter
Vppi = -1
# np.set_printoptions(threshold=np.inf)
# BB = molecule('C60')
# xyz = BB.get_positions()
xyz = np.array([[1.3624, 1.5632, 2.8359], [2.0435, 0.36748, 2.7818],
[1.6002, 2.5246, 1.8519], [0.0036388, 1.2996, 3.3319],
[1.2172, -0.64172, 3.2237], [2.9886, 0.13386, 1.8164],
[0.50174, 3.3131, 1.2672], [2.5073, 2.2423, 0.85514],
[-1.1397, 2.0362, 2.6753], [-0.086852, -0.055936, 3.5613],
[1.3122, -1.9012, 2.6354], [3.0831, -1.0979, 1.2391],
[3.2202, 1.0708, 0.8538], [-0.90772, 2.9856, 1.7068],
[0.78701, 3.4713, -0.071127], [2.0706, 2.8055, -0.32213],
[-2.2925, 1.2502, 2.225], [-1.3338, -0.83053, 3.1472],
[2.2289, -2.0986, 1.6273], [0.10933, -2.6948, 2.338],
[3.3729, -0.9212, -0.082145], [3.4595, 0.4197, -0.32075],
[-1.9189, 2.7734, 0.66243], [-0.30423, 3.3175, -1.1239],
[2.3151, 2.1454, -1.5248], [-2.718, 1.7289, 1.0219],
[-2.4072, -0.1101, 2.4492], [-1.2414, -2.0783, 2.5771],
[1.6915, -2.9709, 0.70985], [0.34387, -3.3471, 1.1603],
[2.7975, -1.7395, -1.0186], [2.9824, 0.94083, -1.4955],
[-1.6529, 2.9328, -0.68622], [-0.061038, 2.6748, -2.3153],
[1.2982, 2.0899, -2.5875], [-3.3109, 0.91875, 0.095886],
[-3.0017, -0.92892, 1.5037], [-2.3116, -2.2045, 1.5437],
[1.9754, -2.7766, -0.63964], [-0.75087, -3.4335, 0.13085],
[2.3593, -1.2416, -2.2239], [2.4601, 0.1258, -2.4726],
[-2.2474, 2.1044, -1.6233], [-1.2886, 1.912, -2.6947],
[1.3859, 0.85338, -3.1625], [-3.5067, -0.40969, 0.32408],
[-3.1274, 1.1072, -1.2394], [-2.0814, -2.8689, 0.37769],
[0.92735, -2.9321, -1.6567], [-0.48135, -3.2351, -1.1932],
[1.1636, -1.9938, -2.6284], [-1.1972, 0.6892, -3.2868],
[0.12809, 0.10609, -3.5141], [-3.4109, -1.1172, -0.94606],
[-3.1772, -0.1844, -1.9062], [-2.6065, -2.3553, -0.91036],
[-1.6415, -2.5559, -1.8293], [0.018087, -1.2314, -3.2618],
[-2.1215, -0.40907, -2.9139], [-1.3879, -1.5381, -2.8789]])
Ham = np.zeros((xyz.shape[0], xyz.shape[0]))
for i in range(xyz.shape[0]):
for j in range(xyz.shape[0]):
Ham[i, j] = LA.norm(np.subtract(xyz[i], xyz[j]))
Ham = np.where(Ham < 1.6, Vppi, 0)
Ham = np.subtract(Ham, Vppi * np.identity(xyz.shape[0]))
print(Ham.shape)
print(np.sum(Ham))
plt.imshow(Ham)
plt.colorbar()
plt.show()
e, v = LA.eig(Ham)
e = np.round(e, decimals=3)
w = e.real
c = Counter(w)
y = np.array([p for k, p in sorted(c.items())])
x = np.asarray(sorted([*c]))
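# x now holds the distinct (rounded) eigenenergies and y the degeneracy of each level,
# which the stem-style plot below visualizes.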
fig, ax = plt.subplots(figsize=(16, 10), dpi=80)
ax.vlines(x=x, ymin=0, ymax=y,
color='firebrick', alpha=0.7, linewidth=2)
ax.scatter(x=x, y=y, s=75, color='firebrick', alpha=0.7)
ax.set_title('Energy degeneracy', fontdict={'size': 22})
ax.set_ylabel('Degeneracy')
ax.set_xlabel('Energy')
ax.set_ylim(0, 10)
ax.tick_params(axis='both', which='both')
ax.spines['left'].set_position('center')
plt.grid(which='both')
for i in range(x.size):
ax.text(x[i], y[i] + .5, s=x[i], horizontalalignment='center',
verticalalignment='bottom', fontsize=14)
plt.show()
xlin = np.array([[0, 0]])
ylin = np.array([[0, 0]])
zlin = np.array([[0, 0]])
for i in range(xyz.shape[0]):
for j in range(xyz.shape[0]):
if LA.norm(np.subtract(xyz[i], xyz[j])) < 1.6:
TmpArr = np.array([[xyz[i, 0], xyz[j, 0]]])
xlin = np.append(xlin, TmpArr, axis=0)
TmpArr = np.array([[xyz[i, 1], xyz[j, 1]]])
ylin = np.append(ylin, TmpArr, axis=0)
TmpArr = np.array([[xyz[i, 2], xyz[j, 2]]])
zlin = np.append(zlin, TmpArr, axis=0)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(xlin.shape[0]):
ax.plot(xlin[i], ylin[i], zlin[i])
ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], zdir='z', s=300)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
val = 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(xlin.shape[0]):
ax.plot(xlin[i], ylin[i], zlin[i])
s = np.zeros(v.shape[0])
c = np.zeros(v.shape[0])
val = 1
s = np.absolute(v[:, val - 1])
s = s * 900
c = np.where(v[:, val - 1] > 0, 0, 1)
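# Marker area is proportional to the eigenvector amplitude on each atom; c encodes the
# sign of the coefficient (intended for the "bwr" colormap set below).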
Stateplot = ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], zdir='z', s=s)
Stateplot.set_cmap("bwr")
plt.subplots_adjust(bottom=0.25)
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
state = Slider(axfreq, 'State', 1, 30, valinit=1, valstep=1)
def update(val):
val = state.val
val = int(val)
s =
|
np.absolute(v[:, val - 1])
|
numpy.absolute
|
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Suite of tests for all estimators.
adapted from scikit-learn's estimator_checks
"""
__author__ = ["mloning", "fkiraly"]
import numbers
import pickle
import types
from copy import deepcopy
from inspect import getfullargspec, isclass, signature
import joblib
import numpy as np
import pytest
from sklearn import clone
from sklearn.utils._testing import set_random_state
from sklearn.utils.estimator_checks import (
check_get_params_invariance as _check_get_params_invariance,
)
from sklearn.utils.estimator_checks import check_set_params as _check_set_params
from sktime.base import BaseEstimator
from sktime.dists_kernels._base import (
BasePairwiseTransformer,
BasePairwiseTransformerPanel,
)
from sktime.exceptions import NotFittedError
from sktime.registry import all_estimators
from sktime.tests._config import (
EXCLUDE_ESTIMATORS,
EXCLUDED_TESTS,
NON_STATE_CHANGING_METHODS,
VALID_ESTIMATOR_BASE_TYPES,
VALID_ESTIMATOR_TAGS,
VALID_ESTIMATOR_TYPES,
VALID_TRANSFORMER_TYPES,
)
from sktime.utils._testing._conditional_fixtures import (
create_conditional_fixtures_and_names,
)
from sktime.utils._testing.deep_equals import deep_equals
from sktime.utils._testing.estimator_checks import (
_assert_array_almost_equal,
_assert_array_equal,
_get_args,
_has_capability,
_list_required_methods,
)
from sktime.utils._testing.scenarios_getter import retrieve_scenarios
class BaseFixtureGenerator:
"""Fixture generator for base testing functionality in sktime.
Test classes inheriting from this and not overriding pytest_generate_tests
will have estimator and scenario fixtures parameterized out of the box.
Descendants can override:
estimator_type_filter: str, class variable; None or scitype string
e.g., "forecaster", "transformer", "classifier", see BASE_CLASS_SCITYPE_LIST
which estimators are being retrieved and tested
fixture_sequence: list of str
sequence of fixture variable names in conditional fixture generation
_generate_[variable]: object methods, all (test_name: str, **kwargs) -> list
generating list of fixtures for fixture variable with name [variable]
to be used in test with name test_name
can optionally use values for fixtures earlier in fixture_sequence,
these must be input as kwargs in a call
is_excluded: static method (test_name: str, est: class) -> bool
whether test with name test_name should be excluded for estimator est
should be used only for encoding general rules, not individual skips
individual skips should go on the EXCLUDED_TESTS list in _config
requires _generate_estimator_class and _generate_estimator_instance as is
_excluded_scenario: static method (test_name: str, scenario) -> bool
whether scenario should be skipped in test with test_name test_name
requires _generate_estimator_scenario as is
Fixtures parameterized
----------------------
estimator_class: estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
estimator_instance: instance of estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
instances are generated by create_test_instance class method
scenario: instance of TestScenario
ranges over all scenarios returned by retrieve_scenarios
"""
# class variables which can be overridden by descendants
# which estimator types are generated; None=all, or scitype string like "forecaster"
estimator_type_filter = None
# which sequence the conditional fixtures are generated in
fixture_sequence = ["estimator_class", "estimator_instance", "scenario"]
def pytest_generate_tests(self, metafunc):
"""Test parameterization routine for pytest.
This uses create_conditional_fixtures_and_names and generator_dict
to create the fixtures for a mark.parameterize decoration of all tests.
"""
# get name of the test
test_name = metafunc.function.__name__
fixture_sequence = self.fixture_sequence
fixture_vars = getfullargspec(metafunc.function)[0]
(
fixture_param_str,
fixture_prod,
fixture_names,
) = create_conditional_fixtures_and_names(
test_name=test_name,
fixture_vars=fixture_vars,
generator_dict=self.generator_dict(),
fixture_sequence=fixture_sequence,
)
metafunc.parametrize(fixture_param_str, fixture_prod, ids=fixture_names)
def _all_estimators(self):
"""Retrieve list of all estimator classes of type self.estimator_type_filter."""
return all_estimators(
estimator_types=getattr(self, "estimator_type_filter", None),
return_names=False,
exclude_estimators=EXCLUDE_ESTIMATORS,
)
def generator_dict(self):
"""Return dict with methods _generate_[variable] collected in a dict.
The returned dict is the one required by create_conditional_fixtures_and_names,
used in this _conditional_fixture plug-in to pytest_generate_tests, above.
Returns
-------
generator_dict : dict, with keys [variable], where
[variable] are all strings such that self has a static method
named _generate_[variable](test_name: str, **kwargs)
value at [variable] is a reference to _generate_[variable]
"""
gens = [attr for attr in dir(self) if attr.startswith("_generate_")]
vars = [gen.replace("_generate_", "") for gen in gens]
generator_dict = dict()
for var, gen in zip(vars, gens):
generator_dict[var] = getattr(self, gen)
return generator_dict
@staticmethod
def is_excluded(test_name, est):
"""Shorthand to check whether test test_name is excluded for estimator est."""
return test_name in EXCLUDED_TESTS.get(est.__name__, [])
# the following functions define fixture generation logic for pytest_generate_tests
# each function is of signature (test_name:str, **kwargs) -> List of fixtures
# function with name _generate_[fixture_var] returns list of values for fixture_var
# where fixture_var is a fixture variable used in tests
# the list is conditional on values of other fixtures which can be passed in kwargs
def _generate_estimator_class(self, test_name, **kwargs):
"""Return estimator class fixtures.
Fixtures parameterized
----------------------
estimator_class: estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
"""
estimator_classes_to_test = [
est
for est in self._all_estimators()
if not self.is_excluded(test_name, est)
]
estimator_names = [est.__name__ for est in estimator_classes_to_test]
return estimator_classes_to_test, estimator_names
def _generate_estimator_instance(self, test_name, **kwargs):
"""Return estimator instance fixtures.
Fixtures parameterized
----------------------
estimator_instance: instance of estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
instances are generated by create_test_instance class method
"""
# call _generate_estimator_class to get all the classes
estimator_classes_to_test, _ = self._generate_estimator_class(
test_name=test_name
)
# create instances from the classes
estimator_instances_to_test = []
estimator_instance_names = []
# retrieve all estimator parameters if multiple, construct instances
for est in estimator_classes_to_test:
all_instances_of_est, instance_names = est.create_test_instances_and_names()
estimator_instances_to_test += all_instances_of_est
estimator_instance_names += instance_names
return estimator_instances_to_test, estimator_instance_names
def _generate_scenario(self, test_name, **kwargs):
"""Return estimator test scenario.
Fixtures parameterized
----------------------
scenario: instance of TestScenario
ranges over all scenarios returned by retrieve_scenarios
"""
if "estimator_class" in kwargs.keys():
obj = kwargs["estimator_class"]
elif "estimator_instance" in kwargs.keys():
obj = kwargs["estimator_instance"]
else:
return []
scenarios = retrieve_scenarios(obj)
scenarios = [s for s in scenarios if not self._excluded_scenario(test_name, s)]
scenario_names = [type(scen).__name__ for scen in scenarios]
return scenarios, scenario_names
@staticmethod
def _excluded_scenario(test_name, scenario):
"""Skip list generator for scenarios to skip in test_name.
Arguments
---------
test_name : str, name of test
scenario : instance of TestScenario, to be used in test
Returns
-------
bool, whether scenario should be skipped in test_name
"""
# for forecasters tested in test_methods_do_not_change_state
# if fh is not passed in fit, then this test would fail
# since fh will be stored in predict through fh handling
# as there are scenarios which pass it early and everything else is the same
# we skip those scenarios
if test_name == "test_methods_do_not_change_state":
if not scenario.get_tag("fh_passed_in_fit", True, raise_error=False):
return True
# this line excludes all scenarios that are not 1:1 to the "pre-scenario" state
# pre-refactor, all tests pass, so all post-refactor tests should with below
# comment out to run the full test suite with new scenarios
if not scenario.get_tag("pre-refactor", False, raise_error=False):
return True
return False
class QuickTester:
"""Mixin class which adds the run_tests method to run tests on one estimator."""
def run_tests(
self, estimator, return_exceptions=True, tests_to_run=None, fixtures_to_run=None
):
"""Run all tests on one single estimator.
All tests in self are run on the following estimator type fixtures:
if est is a class, then estimator_class = est, and
estimator_instance loops over est.create_test_instance()
if est is an object, then estimator_class = est.__class__, and
estimator_instance = est
This is compatible with pytest.mark.parametrize decoration,
but currently only with multiple *single variable* annotations.
Parameters
----------
estimator : estimator class or estimator instance
        return_exceptions : bool, optional, default=True
whether to return exceptions/failures, or raise them
if True: returns exceptions in results
if False: raises exceptions as they occur
tests_to_run : str or list of str, names of tests to run. default = all tests
sub-sets tests that are run to the tests given here.
fixtures_to_run : str or list of str, pytest test-fixture combination codes.
which test-fixture combinations to run. Default = run all of them.
sub-sets tests and fixtures to run to the list given here.
If both tests_to_run and fixtures_to_run are provided, runs the *union*,
i.e., all test-fixture combinations for tests in tests_to_run,
plus all test-fixture combinations in fixtures_to_run.
Returns
-------
results : dict of results of the tests in self
keys are test/fixture strings, identical as in pytest, e.g., test[fixture]
entries are the string "PASSED" if the test passed,
or the exception raised if the test did not pass
returned only if all tests pass, or return_exceptions=True
Raises
------
        if return_exceptions=False, raises any exception produced by the tests directly
Examples
--------
>>> from sktime.forecasting.naive import NaiveForecaster
>>> from sktime.tests.test_all_estimators import TestAllEstimators
>>> TestAllEstimators().run_tests(
... NaiveForecaster,
... tests_to_run="test_required_params"
... )
{'test_required_params[NaiveForecaster]': 'PASSED'}
>>> TestAllEstimators().run_tests(
... NaiveForecaster, fixtures_to_run="test_repr[NaiveForecaster-2]"
... )
{'test_repr[NaiveForecaster-2]': 'PASSED'}
"""
tests_to_run = self._check_None_str_or_list_of_str(
tests_to_run, var_name="tests_to_run"
)
fixtures_to_run = self._check_None_str_or_list_of_str(
fixtures_to_run, var_name="fixtures_to_run"
)
# retrieve tests from self
test_names = [attr for attr in dir(self) if attr.startswith("test")]
# we override the generator_dict, by replacing it with temp_generator_dict:
# the only estimator (class or instance) is est, this is overridden
# the remaining fixtures are generated conditionally, without change
temp_generator_dict = deepcopy(self.generator_dict())
if isclass(estimator):
estimator_class = estimator
else:
estimator_class = type(estimator)
def _generate_estimator_class(test_name, **kwargs):
return [estimator_class], [estimator_class.__name__]
def _generate_estimator_instance(test_name, **kwargs):
return [estimator], [estimator_class.__name__]
def _generate_estimator_instance_cls(test_name, **kwargs):
return estimator_class.create_test_instances_and_names()
temp_generator_dict["estimator_class"] = _generate_estimator_class
if not isclass(estimator):
temp_generator_dict["estimator_instance"] = _generate_estimator_instance
else:
temp_generator_dict["estimator_instance"] = _generate_estimator_instance_cls
# override of generator_dict end, temp_generator_dict is now prepared
        # sub-setting to specific tests to run, if tests or fixtures were specified
if tests_to_run is None and fixtures_to_run is None:
test_names_subset = test_names
else:
test_names_subset = []
if tests_to_run is not None:
test_names_subset += list(set(test_names).intersection(tests_to_run))
if fixtures_to_run is not None:
# fixture codes contain the test as substring until the first "["
tests_from_fixt = [fixt.split("[")[0] for fixt in fixtures_to_run]
test_names_subset += list(set(test_names).intersection(tests_from_fixt))
test_names_subset = list(set(test_names_subset))
# the below loops run all the tests and collect the results here:
results = dict()
# loop A: we loop over all the tests
for test_name in test_names_subset:
test_fun = getattr(self, test_name)
fixture_sequence = self.fixture_sequence
# all arguments except the first one (self)
fixture_vars = getfullargspec(test_fun)[0][1:]
fixture_vars = [var for var in fixture_sequence if var in fixture_vars]
# this call retrieves the conditional fixtures
# for the test test_name, and the estimator
_, fixture_prod, fixture_names = create_conditional_fixtures_and_names(
test_name=test_name,
fixture_vars=fixture_vars,
generator_dict=temp_generator_dict,
fixture_sequence=fixture_sequence,
)
# if function is decorated with mark.parameterize, add variable settings
# NOTE: currently this works only with single-variable mark.parameterize
if hasattr(test_fun, "pytestmark"):
if len([x for x in test_fun.pytestmark if x.name == "parametrize"]) > 0:
# get the three lists from pytest
(
pytest_fixture_vars,
pytest_fixture_prod,
pytest_fixture_names,
) = self._get_pytest_mark_args(test_fun)
# add them to the three lists from conditional fixtures
fixture_vars, fixture_prod, fixture_names = self._product_fixtures(
fixture_vars,
fixture_prod,
fixture_names,
pytest_fixture_vars,
pytest_fixture_prod,
pytest_fixture_names,
)
# loop B: for each test, we loop over all fixtures
for params, fixt_name in zip(fixture_prod, fixture_names):
# this is needed because pytest unwraps 1-tuples automatically
# but subsequent code assumes params is k-tuple, no matter what k is
if len(fixture_vars) == 1:
params = (params,)
key = f"{test_name}[{fixt_name}]"
args = dict(zip(fixture_vars, params))
# we subset to test-fixtures to run by this, if given
# key is identical to the pytest test-fixture string identifier
if fixtures_to_run is not None and key not in fixtures_to_run:
continue
if return_exceptions:
try:
test_fun(**args)
results[key] = "PASSED"
except Exception as err:
results[key] = err
else:
test_fun(**args)
results[key] = "PASSED"
return results
@staticmethod
def _check_None_str_or_list_of_str(obj, var_name="obj"):
"""Check that obj is None, str, or list of str, and coerce to list of str."""
if obj is not None:
msg = f"{var_name} must be None, str, or list of str"
if isinstance(obj, str):
obj = [obj]
if not isinstance(obj, list):
raise ValueError(msg)
            if not all(isinstance(x, str) for x in obj):
raise ValueError(msg)
return obj
# todo: surely there is a pytest method that can be called instead of this?
# find and replace if it exists
@staticmethod
def _get_pytest_mark_args(fun):
"""Get args from pytest mark annotation of function.
Parameters
----------
fun: callable, any function
Returns
-------
pytest_fixture_vars: list of str
names of args participating in mark.parameterize marks, in pytest order
pytest_fixt_list: list of tuple
list of value tuples from the mark parameterization
i-th value in each tuple corresponds to i-th arg name in pytest_fixture_vars
pytest_fixt_names: list of str
i-th element is display name for i-th fixture setting in pytest_fixt_list
"""
from itertools import product
marks = [x for x in fun.pytestmark if x.name == "parametrize"]
def to_str(obj):
return [str(x) for x in obj]
def get_id(mark):
if "ids" in mark.kwargs.keys():
return mark.kwargs["ids"]
else:
return to_str(range(len(mark.args[1])))
pytest_fixture_vars = [x.args[0] for x in marks]
pytest_fixt_raw = [x.args[1] for x in marks]
pytest_fixt_list = product(*pytest_fixt_raw)
pytest_fixt_names_raw = [get_id(x) for x in marks]
pytest_fixt_names = product(*pytest_fixt_names_raw)
pytest_fixt_names = ["-".join(x) for x in pytest_fixt_names]
return pytest_fixture_vars, pytest_fixt_list, pytest_fixt_names
@staticmethod
def _product_fixtures(
fixture_vars,
fixture_prod,
fixture_names,
pytest_fixture_vars,
pytest_fixture_prod,
pytest_fixture_names,
):
"""Compute products of two sets of fixture vars, values, names."""
from itertools import product
# product of fixture variable names = concatenation
fixture_vars_return = fixture_vars + pytest_fixture_vars
# this is needed because pytest unwraps 1-tuples automatically
# but subsequent code assumes params is k-tuple, no matter what k is
if len(fixture_vars) == 1:
fixture_prod = [(x,) for x in fixture_prod]
# product of fixture products = Cartesian product plus append tuples
fixture_prod_return = product(fixture_prod, pytest_fixture_prod)
fixture_prod_return = [sum(x, ()) for x in fixture_prod_return]
# product of fixture names = Cartesian product plus concat
fixture_names_return = product(fixture_names, pytest_fixture_names)
fixture_names_return = ["-".join(x) for x in fixture_names_return]
return fixture_vars_return, fixture_prod_return, fixture_names_return
class TestAllEstimators(BaseFixtureGenerator, QuickTester):
"""Package level tests for all sktime estimators."""
def test_create_test_instance(self, estimator_class):
"""Check first that create_test_instance logic works."""
estimator = estimator_class.create_test_instance()
# Check that init does not construct object of other class than itself
assert isinstance(estimator, estimator_class), (
"object returned by create_test_instance must be an instance of the class, "
f"found {type(estimator)}"
)
def test_create_test_instances_and_names(self, estimator_class):
"""Check that create_test_instances_and_names works."""
estimators, names = estimator_class.create_test_instances_and_names()
assert isinstance(estimators, list), (
"first return of create_test_instances_and_names must be a list, "
f"found {type(estimators)}"
)
assert isinstance(names, list), (
"second return of create_test_instances_and_names must be a list, "
f"found {type(names)}"
)
        assert all(isinstance(est, estimator_class) for est in estimators), (
"list elements of first return returned by create_test_instances_and_names "
"all must be an instance of the class"
)
        assert all(isinstance(name, str) for name in names), (
"list elements of second return returned by create_test_instances_and_names"
" all must be strings"
)
assert len(estimators) == len(names), (
"the two lists returned by create_test_instances_and_names must have "
"equal length"
)
def test_required_params(self, estimator_class):
"""Check required parameter interface."""
Estimator = estimator_class
# Check common meta-estimator interface
if hasattr(Estimator, "_required_parameters"):
required_params = Estimator._required_parameters
assert isinstance(required_params, list), (
f"For estimator: {Estimator}, `_required_parameters` must be a "
f"tuple, but found type: {type(required_params)}"
)
assert all([isinstance(param, str) for param in required_params]), (
f"For estimator: {Estimator}, elements of `_required_parameters` "
f"list must be strings"
)
# check if needless parameters are in _required_parameters
init_params = [
par.name for par in signature(Estimator.__init__).parameters.values()
]
in_required_but_not_init = [
param for param in required_params if param not in init_params
]
if len(in_required_but_not_init) > 0:
raise ValueError(
f"Found parameters in `_required_parameters` which "
f"are not in `__init__`: "
f"{in_required_but_not_init}"
)
def test_estimator_tags(self, estimator_class):
"""Check conventions on estimator tags."""
Estimator = estimator_class
assert hasattr(Estimator, "get_class_tags")
all_tags = Estimator.get_class_tags()
assert isinstance(all_tags, dict)
assert all(isinstance(key, str) for key in all_tags.keys())
if hasattr(Estimator, "_tags"):
tags = Estimator._tags
msg = f"_tags must be a dict, but found {type(tags)}"
assert isinstance(tags, dict), msg
assert len(tags) > 0, "_tags is empty"
assert all(
tag in VALID_ESTIMATOR_TAGS for tag in tags.keys()
), "Some tags in _tags are invalid"
# Avoid ambiguous class attributes
ambiguous_attrs = ("tags", "tags_")
for attr in ambiguous_attrs:
assert not hasattr(Estimator, attr), (
f"Please avoid using the {attr} attribute to disambiguate it from "
f"estimator tags."
)
def test_inheritance(self, estimator_class):
"""Check that estimator inherits from BaseEstimator."""
assert issubclass(estimator_class, BaseEstimator), (
f"Estimator: {estimator_class} " f"is not a sub-class of " f"BaseEstimator."
)
Estimator = estimator_class
# Usually estimators inherit only from one BaseEstimator type, but in some cases
# they may be predictor and transformer at the same time (e.g. pipelines)
n_base_types = sum(
issubclass(Estimator, cls) for cls in VALID_ESTIMATOR_BASE_TYPES
)
assert 2 >= n_base_types >= 1
# If the estimator inherits from more than one base estimator type, we check if
# one of them is a transformer base type
if n_base_types > 1:
assert issubclass(Estimator, VALID_TRANSFORMER_TYPES)
def test_has_common_interface(self, estimator_class):
"""Check estimator implements the common interface."""
estimator = estimator_class
# Check class for type of attribute
assert isinstance(estimator.is_fitted, property)
required_methods = _list_required_methods(estimator_class)
for attr in required_methods:
assert hasattr(
estimator, attr
), f"Estimator: {estimator.__name__} does not implement attribute: {attr}"
if hasattr(estimator, "inverse_transform"):
assert hasattr(estimator, "transform")
if hasattr(estimator, "predict_proba"):
assert hasattr(estimator, "predict")
def test_get_params(self, estimator_instance):
"""Check that get_params works correctly."""
estimator = estimator_instance
params = estimator.get_params()
assert isinstance(params, dict)
_check_get_params_invariance(estimator.__class__.__name__, estimator)
def test_set_params(self, estimator_instance):
"""Check that set_params works correctly."""
estimator = estimator_instance
params = estimator.get_params()
assert estimator.set_params(**params) is estimator
_check_set_params(estimator.__class__.__name__, estimator)
def test_clone(self, estimator_instance):
"""Check we can call clone from scikit-learn."""
estimator = estimator_instance
clone(estimator)
def test_repr(self, estimator_instance):
"""Check we can call repr."""
estimator = estimator_instance
repr(estimator)
def check_constructor(self, estimator_class):
"""Check that the constructor behaves correctly."""
estimator = estimator_class.create_test_instance()
# Ensure that each parameter is set in init
init_params = _get_args(type(estimator).__init__)
invalid_attr = set(init_params) - set(vars(estimator)) - {"self"}
assert not invalid_attr, (
"Estimator %s should store all parameters"
" as an attribute during init. Did not find "
"attributes `%s`." % (estimator.__class__.__name__, sorted(invalid_attr))
)
# Ensure that init does nothing but set parameters
# No logic/interaction with other parameters
def param_filter(p):
"""Identify hyper parameters of an estimator."""
return p.name != "self" and p.kind not in [p.VAR_KEYWORD, p.VAR_POSITIONAL]
init_params = [
p
for p in signature(estimator.__init__).parameters.values()
if param_filter(p)
]
params = estimator.get_params()
# Filter out required parameters with no default value and parameters
# set for running tests
required_params = getattr(estimator, "_required_parameters", tuple())
test_params = estimator_class.get_test_params()
if isinstance(test_params, list):
test_params = test_params[0]
test_params = test_params.keys()
init_params = [
param
for param in init_params
if param.name not in required_params and param.name not in test_params
]
for param in init_params:
assert param.default != param.empty, (
"parameter `%s` for %s has no default value and is not "
"included in `_required_parameters`"
% (param.name, estimator.__class__.__name__)
)
if type(param.default) is type:
assert param.default in [np.float64, np.int64]
else:
assert type(param.default) in [
str,
int,
float,
bool,
tuple,
type(None),
np.float64,
types.FunctionType,
joblib.Memory,
]
param_value = params[param.name]
if isinstance(param_value, np.ndarray):
np.testing.assert_array_equal(param_value, param.default)
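# Illustrative sketch (not part of the checks above): a constructor that would
# satisfy check_constructor stores every argument verbatim as an attribute and
# uses only primitive defaults. The names below are hypothetical.
#
#     class MyForecaster(BaseEstimator):
#         def __init__(self, window_length=10, strategy="last"):
#             # no validation, no derived state: just store the parameters
#             self.window_length = window_length
#             self.strategy = strategy
#             super().__init__()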
"""
pyrad.util.radar_utils
======================
Miscellaneous functions dealing with radar data
.. autosummary::
:toctree: generated/
get_data_along_rng
get_data_along_azi
get_data_along_ele
get_ROI
rainfall_accumulation
time_series_statistics
find_contiguous_times
join_time_series
get_range_bins_to_avg
belongs_roi_indices
find_ray_index
find_rng_index
find_ang_index
find_nearest_gate
find_neighbour_gates
find_colocated_indexes
get_target_elevations
time_avg_range
get_closest_solar_flux
get_fixed_rng_data
create_sun_hits_field
create_sun_retrieval_field
compute_quantiles
compute_quantiles_from_hist
compute_quantiles_sweep
compute_histogram
compute_histogram_sweep
get_histogram_bins
compute_2d_stats
compute_1d_stats
compute_2d_hist
quantize_field
compute_profile_stats
compute_directional_stats
project_to_vertical
"""
from warnings import warn
from copy import deepcopy
import datetime
import numpy as np
import scipy
try:
import shapely
_SHAPELY_AVAILABLE = True
except ImportError:
warn('shapely not available')
_SHAPELY_AVAILABLE = False
try:
import pandas as pd
_PANDAS_AVAILABLE = True
except ImportError:
warn('Pandas not available')
_PANDAS_AVAILABLE = False
import pyart
from .stat_utils import quantiles_weighted
def get_data_along_rng(radar, field_name, fix_elevations, fix_azimuths,
ang_tol=1., rmin=None, rmax=None):
"""
Get data at particular (azimuths, elevations)
Parameters
----------
radar : radar object
the radar object where the data is
field_name : str
name of the field to filter
fix_elevations, fix_azimuths: list of floats
List of elevations, azimuths couples [deg]
ang_tol : float
Tolerance between the nominal angle and the radar angle [deg]
rmin, rmax: float
Min and Max range of the obtained data [m]
Returns
-------
xvals : list of float arrays
The ranges of each azi, ele pair
yvals : list of float arrays
The values
valid_azi, valid_ele : float arrays
The azi, ele pairs
"""
if rmin is None:
rmin = 0.
if rmax is None:
rmax = np.max(radar.range['data'])
rng_mask = np.logical_and(
radar.range['data'] >= rmin, radar.range['data'] <= rmax)
x = radar.range['data'][rng_mask]
xvals = []
yvals = []
valid_azi = []
valid_ele = []
if radar.scan_type == 'ppi':
for ele, azi in zip(fix_elevations, fix_azimuths):
ind_sweep = find_ang_index(
radar.fixed_angle['data'], ele, ang_tol=ang_tol)
if ind_sweep is None:
warn('No elevation angle found for fix_elevation '+str(ele))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
try:
dataset_line = pyart.util.cross_section_ppi(
new_dataset, [azi], az_tol=ang_tol)
except EnvironmentError:
warn(' No data found at azimuth '+str(azi) +
' and elevation '+str(ele))
continue
yvals.append(dataset_line.fields[field_name]['data'][0, rng_mask])
xvals.append(x)
valid_azi.append(dataset_line.azimuth['data'][0])
valid_ele.append(dataset_line.elevation['data'][0])
else:
for ele, azi in zip(fix_elevations, fix_azimuths):
ind_sweep = find_ang_index(
radar.fixed_angle['data'], azi, ang_tol=ang_tol)
if ind_sweep is None:
warn('No azimuth angle found for fix_azimuth '+str(azi))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
try:
dataset_line = pyart.util.cross_section_rhi(
new_dataset, [ele], el_tol=ang_tol)
except EnvironmentError:
warn(' No data found at azimuth '+str(azi) +
' and elevation '+str(ele))
continue
yvals.append(
dataset_line.fields[field_name]['data'][0, rng_mask])
xvals.append(x)
valid_azi.append(dataset_line.azimuth['data'][0])
valid_ele.append(dataset_line.elevation['data'][0])
return xvals, yvals, valid_azi, valid_ele
def get_data_along_azi(radar, field_name, fix_ranges, fix_elevations,
rng_tol=50., ang_tol=1., azi_start=None,
azi_stop=None):
"""
Get data at particular (ranges, elevations)
Parameters
----------
radar : radar object
the radar object where the data is
field_name : str
name of the field to filter
fix_ranges, fix_elevations: list of floats
List of ranges [m], elevations [deg] couples
rng_tol : float
Tolerance between the nominal range and the radar range [m]
ang_tol : float
Tolerance between the nominal angle and the radar angle [deg]
azi_start, azi_stop: float
Start and stop azimuth angle of the data [deg]
Returns
-------
xvals : list of float arrays
The ranges of each rng, ele pair
yvals : list of float arrays
The values
valid_rng, valid_ele : float arrays
The rng, ele pairs
"""
if azi_start is None:
azi_start = np.min(radar.azimuth['data'])
if azi_stop is None:
azi_stop = np.max(radar.azimuth['data'])
yvals = []
xvals = []
valid_rng = []
valid_ele = []
for rng, ele in zip(fix_ranges, fix_elevations):
ind_rng = find_rng_index(radar.range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No range gate found for fix_range '+str(rng))
continue
if radar.scan_type == 'ppi':
ind_sweep = find_ang_index(
radar.fixed_angle['data'], ele, ang_tol=ang_tol)
if ind_sweep is None:
warn('No elevation angle found for fix_elevation ' +
str(ele))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
else:
try:
new_dataset = pyart.util.cross_section_rhi(
radar, [ele], el_tol=ang_tol)
except EnvironmentError:
warn(
' No data found at range '+str(rng) +
' and elevation '+str(ele))
continue
if azi_start < azi_stop:
azi_mask = np.logical_and(
new_dataset.azimuth['data'] >= azi_start,
new_dataset.azimuth['data'] <= azi_stop)
else:
azi_mask = np.logical_or(
new_dataset.azimuth['data'] >= azi_start,
new_dataset.azimuth['data'] <= azi_stop)
yvals.append(
new_dataset.fields[field_name]['data'][azi_mask, ind_rng])
xvals.append(new_dataset.azimuth['data'][azi_mask])
valid_rng.append(new_dataset.range['data'][ind_rng])
valid_ele.append(new_dataset.elevation['data'][0])
return xvals, yvals, valid_rng, valid_ele
def get_data_along_ele(radar, field_name, fix_ranges, fix_azimuths,
rng_tol=50., ang_tol=1., ele_min=None,
ele_max=None):
"""
Get data at particular (ranges, azimuths)
Parameters
----------
radar : radar object
the radar object where the data is
field_name : str
name of the field to filter
fix_ranges, fix_azimuths: list of floats
List of ranges [m], azimuths [deg] couples
rng_tol : float
Tolerance between the nominal range and the radar range [m]
ang_tol : float
Tolerance between the nominal angle and the radar angle [deg]
ele_min, ele_max: float
Min and max elevation angle [deg]
Returns
-------
xvals : list of float arrays
The ranges of each rng, ele pair
yvals : list of float arrays
The values
valid_rng, valid_ele : float arrays
The rng, ele pairs
"""
if ele_min is None:
ele_min = np.min(radar.elevation['data'])
if ele_max is None:
ele_max = np.max(radar.elevation['data'])
yvals = []
xvals = []
valid_rng = []
valid_azi = []
for rng, azi in zip(fix_ranges, fix_azimuths):
ind_rng = find_rng_index(radar.range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No range gate found for fix_range '+str(rng))
continue
if radar.scan_type == 'ppi':
try:
new_dataset = pyart.util.cross_section_ppi(
radar, [azi], az_tol=ang_tol)
except EnvironmentError:
warn(
' No data found at range '+str(rng) +
' and elevation '+str(azi))
continue
else:
ind_sweep = find_ang_index(
radar.fixed_angle['data'], azi, ang_tol=ang_tol)
if ind_sweep is None:
warn('No azimuth angle found for fix_azimuth '+str(azi))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
ele_mask = np.logical_and(
new_dataset.elevation['data'] >= ele_min,
new_dataset.elevation['data'] <= ele_max)
yvals.append(
new_dataset.fields[field_name]['data'][ele_mask, ind_rng])
xvals.append(new_dataset.elevation['data'][ele_mask])
valid_rng.append(new_dataset.range['data'][ind_rng])
valid_azi.append(new_dataset.elevation['data'][0])
return xvals, yvals, valid_rng, valid_azi
def get_ROI(radar, fieldname, sector):
"""
filter out any data outside the region of interest defined by sector
Parameters
----------
radar : radar object
the radar object where the data is
fieldname : str
name of the field to filter
sector : dict
a dictionary defining the region of interest
Returns
-------
roi_flag : ndarray
a field array with ones in gates that are in the Region of Interest
"""
roi_flag = np.ma.ones((radar.nrays, radar.ngates), dtype=int)
# check for altitude limits
if sector['hmin'] is not None:
roi_flag[radar.gate_altitude['data'] < sector['hmin']] = 0
if sector['hmax'] is not None:
roi_flag[radar.gate_altitude['data'] > sector['hmax']] = 0
# check for range limits
if sector['rmin'] is not None:
roi_flag[:, radar.range['data'] < sector['rmin']] = 0
if sector['rmax'] is not None:
roi_flag[:, radar.range['data'] > sector['rmax']] = 0
# check elevation angle limits
if sector['elmin'] is not None:
roi_flag[radar.elevation['data'] < sector['elmin'], :] = 0
if sector['elmax'] is not None:
roi_flag[radar.elevation['data'] > sector['elmax'], :] = 0
# check min and max azimuth angle
if sector['azmin'] is not None and sector['azmax'] is not None:
if sector['azmin'] <= sector['azmax']:
roi_flag[radar.azimuth['data'] < sector['azmin'], :] = 0
roi_flag[radar.azimuth['data'] > sector['azmax'], :] = 0
if sector['azmin'] > sector['azmax']:
roi_flag[np.logical_and(
radar.azimuth['data'] < sector['azmin'],
radar.azimuth['data'] > sector['azmax']), :] = 0
elif sector['azmin'] is not None:
roi_flag[radar.azimuth['data'] < sector['azmin'], :] = 0
elif sector['azmax'] is not None:
roi_flag[radar.azimuth['data'] > sector['azmax'], :] = 0
return roi_flag
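# Illustrative sketch: get_ROI expects a sector dict carrying all of the keys
# checked above; limits that should not be applied are set to None. The radar
# object and field name below are assumed, not taken from real data.
#
#     sector = {'hmin': None, 'hmax': 4000.,    # altitude limits [m MSL]
#               'rmin': 5000., 'rmax': 50000.,  # range limits [m]
#               'elmin': None, 'elmax': 10.,    # elevation limits [deg]
#               'azmin': 300., 'azmax': 30.}    # azimuth limits [deg], wrapping over 0
#     roi_flag = get_ROI(radar, 'reflectivity', sector)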
def rainfall_accumulation(t_in_vec, val_in_vec, cum_time=3600.,
base_time=0., dropnan=False):
"""
Computes the rainfall accumulation of a time series over a given period
Parameters
----------
t_in_vec : datetime array
the input date and time array
val_in_vec : float array
the input values array [mm/h]
cum_time : int
accumulation time [s]
base_time : int
base time [s]
dropnan : boolean
if True remove NaN from the time series
Returns
-------
t_out_vec : datetime array
the output date and time array
val_out_vec : float array
the output values array
np_vec : int array
the number of samples at each period
"""
# get the number of samples per interval
t_out_vec, np_vec = time_series_statistics(
t_in_vec, np.ones(len(val_in_vec), dtype=float), avg_time=cum_time,
base_time=base_time, method='sum', dropnan=dropnan)
np_vec[np.isnan(np_vec)] = 0
np_vec = np_vec.astype(int)
t_out_vec, val_out_vec = time_series_statistics(
t_in_vec, val_in_vec, avg_time=cum_time, base_time=base_time,
method='sum', dropnan=dropnan)
t_sample = cum_time/np_vec # find accumulation time of each sample
val_out_vec *= (t_sample/3600.) # conversion to mm in cum_time period
val_out_vec = np.ma.asarray(val_out_vec)
val_out_vec[np.isnan(val_out_vec)] = np.ma.masked
return t_out_vec, val_out_vec, np_vec
def time_series_statistics(t_in_vec, val_in_vec, avg_time=3600,
base_time=1800, method='mean', dropnan=False):
"""
Computes statistics over a time-averaged series. Only available if the pandas
package is installed, otherwise returns None
available otherwise returns None
Parameters
----------
t_in_vec : datetime array
the input date and time array
val_in_vec : float array
the input values array
avg_time : int
averaging time [s]
base_time : int
base time [s]
method : str
statistical method
dropnan : boolean
if True remove NaN from the time series
Returns
-------
t_out_vec : datetime array
the output date and time array
val_out_vec : float array
the output values array
"""
if not _PANDAS_AVAILABLE:
warn('Pandas not available. Unable to compute time series statistics')
return None, None
df_in = pd.DataFrame(data=val_in_vec, index=pd.DatetimeIndex(t_in_vec))
df_out = getattr(df_in.resample(
str(avg_time)+'S', closed='right', label='right', base=base_time),
method)()
if dropnan is True:
df_out = df_out.dropna(how='any')
t_out_vec = df_out.index.to_pydatetime()
val_out_vec = df_out.values.flatten()
return t_out_vec, val_out_vec
def find_contiguous_times(times, step=600):
"""
Given an array of ordered times, find those contiguous according to
a maximum time step
Parameters
----------
times : array of datetimes
The array of times
step : float
The time step [s]
Returns
-------
start_times, end_times : array of date times
The start and end of each consecutive time period
"""
run = []
periods = []
expect = None
for time in times:
if expect is None:
run.append(time)
elif time <= expect:
run.append(time)
else:
run = [time]
periods.append(run)
expect = time+datetime.timedelta(seconds=step)
if not periods:
periods = [times]
elif periods[0][0] != times[0]:
periods.insert(0, [times[0]])
print('number of consecutive periods: '+str(len(periods)))
start_times = np.array([], dtype=datetime.datetime)
end_times = np.array([], dtype=datetime.datetime)
for period in periods:
start_times = np.append(
start_times, period[0]-datetime.timedelta(seconds=step))
end_times = np.append(end_times, period[-1])
return start_times, end_times
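# Illustrative call (sketch): with step=600 s, times closer than 10 minutes end
# up in the same period; each returned start time is padded back by one step
# with respect to the first time of its period.
#
#     times = np.array([datetime.datetime(2020, 1, 1, 10, 0),
#                       datetime.datetime(2020, 1, 1, 10, 5),
#                       datetime.datetime(2020, 1, 1, 11, 0),
#                       datetime.datetime(2020, 1, 1, 11, 5)])
#     start_times, end_times = find_contiguous_times(times, step=600)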
def join_time_series(t1, val1, t2, val2, dropnan=False):
"""
Joins two time series. Only available if the pandas package is installed,
otherwise returns None.
None.
Parameters
----------
t1 : datetime array
time of first series
val1 : float array
value of first series
t2 : datetime array
time of second series
val2 : float array
value of second series
dropnan : boolean
if True remove NaN from the time series
Returns
-------
t_out_vec : datetime array
the resultant date time after joining the series
val1_out_vec : float array
value of first series
val2_out_vec : float array
value of second series
"""
if not _PANDAS_AVAILABLE:
warn('Pandas not available. Unable to join time series')
return None, None, None
df1 = pd.DataFrame(data=val1, index=pd.DatetimeIndex(t1))
df2 = pd.DataFrame(data=val2, index=pd.DatetimeIndex(t2))
df_out = pd.concat([df1, df2], join='outer', axis=1)
if dropnan is True:
df_out = df_out.dropna(how='any')
t_out_vec = df_out.index.to_pydatetime()
val1_out_vec = df_out.values[:, 0].flatten()
val2_out_vec = df_out.values[:, 1].flatten()
return t_out_vec, val1_out_vec, val2_out_vec
def get_range_bins_to_avg(rad1_rng, rad2_rng):
"""
Compares the resolution of two radars and determines if and which radar
has to be averaged and the length of the averaging window
Parameters
----------
rad1_rng : array
the range of radar 1
rad2_rng : array
the range of radar 2
Returns
-------
avg_rad1, avg_rad2 : Boolean
Booleans specifying if the radar data has to be averaged in range
avg_rad_lim : array with two elements
the limits to the average (centered on each range gate)
"""
rad1_res = rad1_rng[1]-rad1_rng[0]
rad2_res = rad2_rng[1]-rad2_rng[0]
res_ratio = rad1_res/rad2_res
avg_rad1 = False
avg_rad2 = False
avg_rad_lim = None
if res_ratio > 1.5:
avg_rad2 = True
nbins = int(res_ratio)
if nbins % 2 == 0:
avg_rad_lim = [-int(nbins/2)-1, int(nbins/2)]
else:
avg_rad_lim = [-int((nbins-1)/2), int((nbins-1)/2)]
elif res_ratio < 1./1.5:
avg_rad1 = True
nbins = int(1./res_ratio)
if nbins % 2 == 0:
avg_rad_lim = [-int(nbins/2)-1, int(nbins/2)]
else:
avg_rad_lim = [-int((nbins-1)/2), int((nbins-1)/2)]
return avg_rad1, avg_rad2, avg_rad_lim
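# Illustrative example: radar 1 has 500 m gates and radar 2 has 100 m gates, so
# radar 2 has to be averaged over 5 bins centred on each of its gates.
#
#     rad1_rng = np.arange(0., 50000., 500.)
#     rad2_rng = np.arange(0., 50000., 100.)
#     get_range_bins_to_avg(rad1_rng, rad2_rng)
#     # -> (False, True, [-2, 2])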
def belongs_roi_indices(lat, lon, roi):
"""
Get the indices of points that belong to roi in a list of points
Parameters
----------
lat, lon : float arrays
latitudes and longitudes to check
roi : dict
Dictionary describing the region of interest
Returns
-------
inds : array of ints
list of indices of points belonging to ROI
is_roi : str
Whether the list of points is within the region of interest.
Can be 'All', 'None', 'Some'
"""
if not _SHAPELY_AVAILABLE:
warn('shapely package not available. ' +
'Unable to determine if points belong to Region Of Interest')
return np.asarray([]), 'None'
lon_list = lon.flatten()
lat_list = lat.flatten()
polygon = shapely.geometry.Polygon(list(zip(roi['lon'], roi['lat'])))
points = shapely.geometry.MultiPoint(list(zip(lon_list, lat_list)))
inds = []
if polygon.contains(points):
warn('All points in the region of interest')
is_roi = 'All'
inds = np.indices(np.shape(lon))
elif polygon.disjoint(points):
warn('No points in the region of interest')
is_roi = 'None'
else:
points_roi = points.intersection(polygon)
if points_roi.geom_type == 'Point':
ind = np.where(
np.logical_and(lon == points_roi.x, lat == points_roi.y))
if len(ind) == 1:
ind = ind[0]
inds.extend(ind)
else:
points_roi_list = list(points_roi)
for point in points_roi_list:
ind = np.where(np.logical_and(lon == point.x, lat == point.y))
if len(ind) == 1:
ind = ind[0]
inds.extend(ind)
nroi = len(lat[inds])
npoint = len(lat_list)
warn(str(nroi)+' points out of '+str(npoint) +
' in the region of interest')
is_roi = 'Some'
return np.asarray(inds), is_roi
def find_ray_index(ele_vec, azi_vec, ele, azi, ele_tol=0., azi_tol=0.,
nearest='azi'):
"""
Find the ray index corresponding to a particular elevation and azimuth
Parameters
----------
ele_vec, azi_vec : float arrays
The elevation and azimuth data arrays where to look for
ele, azi : floats
The elevation and azimuth to search
ele_tol, azi_tol : floats
Tolerances [deg]
nearest : str
criterion to define which ray to keep if multiple rays are within
tolerance. azi: nearest azimuth, ele: nearest elevation
Returns
-------
ind_ray : int
The ray index
"""
ind_ray = np.where(np.logical_and(
np.logical_and(ele_vec <= ele+ele_tol, ele_vec >= ele-ele_tol),
np.logical_and(azi_vec <= azi+azi_tol, azi_vec >= azi-azi_tol)))[0]
if ind_ray.size == 0:
return None
if ind_ray.size == 1:
return ind_ray[0]
if nearest == 'azi':
ind_min = np.argmin(np.abs(azi_vec[ind_ray]-azi))
else:
ind_min = np.argmin(np.abs(ele_vec[ind_ray]-ele))
return ind_ray[ind_min]
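# Illustrative example: with ele_tol=0.5 deg and azi_tol=5 deg only the ray at
# azimuth 20 deg matches the requested (ele=1., azi=21.) pair.
#
#     ele_vec = np.array([1., 1., 1.])
#     azi_vec = np.array([10., 20., 30.])
#     find_ray_index(ele_vec, azi_vec, 1., 21., ele_tol=0.5, azi_tol=5.)
#     # -> 1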
def find_rng_index(rng_vec, rng, rng_tol=0.):
"""
Find the range index corresponding to a particular range
Parameters
----------
rng_vec : float array
The range data array where to look for
rng : float
The range to search
rng_tol : float
Tolerance [m]
Returns
-------
ind_rng : int
The range index
"""
dist = np.abs(rng_vec-rng)
ind_rng = np.argmin(dist)
if dist[ind_rng] > rng_tol:
return None
return ind_rng
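# Illustrative example: the index of the closest range gate is returned only if
# that gate lies within rng_tol of the requested range, otherwise None.
#
#     rng_vec = np.array([0., 100., 200., 300.])
#     find_rng_index(rng_vec, 230., rng_tol=50.)   # -> 2
#     find_rng_index(rng_vec, 230., rng_tol=10.)   # -> None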
def find_ang_index(ang_vec, ang, ang_tol=0.):
"""
Find the angle index corresponding to a particular fixed angle
Parameters
----------
ang_vec : float array
The angle data array where to look for
ang : float
The angle to search
ang_tol : float
Tolerance [deg]
Returns
-------
ind_ang : int
The angle index
"""
dist = np.abs(ang_vec-ang)
ind_ang = np.argmin(dist)
if dist[ind_ang] > ang_tol:
return None
return ind_ang
def find_nearest_gate(radar, lat, lon, latlon_tol=0.0005):
"""
Find the radar gate closest to a lat,lon point
Parameters
----------
radar : radar object
the radar object
lat, lon : float
The position of the point
latlon_tol : float
The tolerance around this point
Returns
-------
ind_ray, ind_rng : int
The ray and range index
azi, rng : float
the range and azimuth position of the gate
"""
# find gates close to lat lon point
inds_ray_aux, inds_rng_aux = np.where(np.logical_and(
np.logical_and(
radar.gate_latitude['data'] < lat+latlon_tol,
radar.gate_latitude['data'] > lat-latlon_tol),
np.logical_and(
radar.gate_longitude['data'] < lon+latlon_tol,
radar.gate_longitude['data'] > lon-latlon_tol)))
if inds_ray_aux.size == 0:
warn('No data found at point lat '+str(lat)+' +- ' +
str(latlon_tol)+' lon '+str(lon)+' +- ' +
str(latlon_tol)+' deg')
return None, None, None, None
# find closest latitude
ind_min = np.argmin(np.abs(
radar.gate_latitude['data'][inds_ray_aux, inds_rng_aux]-lat))
ind_ray = inds_ray_aux[ind_min]
ind_rng = inds_rng_aux[ind_min]
azi = radar.azimuth['data'][ind_ray]
rng = radar.range['data'][ind_rng]
return ind_ray, ind_rng, azi, rng
def find_neighbour_gates(radar, azi, rng, delta_azi=None, delta_rng=None):
"""
Find the neighbouring gates within +-delta_azi and +-delta_rng
Parameters
----------
radar : radar object
the radar object
azi, rng : float
The azimuth [deg] and range [m] of the central gate
delta_azi, delta_rng : float
The extent within which to look (azimuth [deg], range [m])
Returns
-------
inds_ray, inds_rng : arrays of int
The indices (ray, rng) of the neighbouring gates
"""
# find gates close to lat lon point
if delta_azi is None:
inds_ray = np.ma.arange(radar.azimuth['data'].size)
else:
azi_max = azi+delta_azi
azi_min = azi-delta_azi
if azi_max > 360.:
azi_max -= 360.
if azi_min < 0.:
azi_min += 360.
if azi_max > azi_min:
inds_ray = np.where(np.logical_and(
radar.azimuth['data'] < azi_max,
radar.azimuth['data'] > azi_min))[0]
else:
inds_ray = np.where(np.logical_or(
radar.azimuth['data'] > azi_min,
radar.azimuth['data'] < azi_max))[0]
if delta_rng is None:
inds_rng = np.ma.arange(radar.range['data'].size)
else:
inds_rng = np.where(np.logical_and(
radar.range['data'] < rng+delta_rng,
radar.range['data'] > rng-delta_rng))[0]
return inds_ray, inds_rng
def find_colocated_indexes(radar1, radar2, rad1_ele, rad1_azi, rad1_rng,
rad2_ele, rad2_azi, rad2_rng, ele_tol=0.5,
azi_tol=0.5, rng_tol=50.):
"""
Given the theoretical elevation, azimuth and range of the co-located gates
of two radars and a given tolerance returns the indices of the gates for
the current radars
Parameters
----------
radar1, radar2 : radar objects
the two radar objects
rad1_ele, rad1_azi, rad1_rng : array of floats
the radar coordinates of the radar1 gates
rad2_ele, rad2_azi, rad2_rng : array of floats
the radar coordinates of the radar2 gates
ele_tol, azi_tol : floats
azimuth and elevation angle tolerance [deg]
rng_tol : float
range Tolerance [m]
Returns
-------
ind_ray_rad1, ind_rng_rad1, ind_ray_rad2, ind_rng_rad2 : array of ints
the ray and range indexes of each radar gate
"""
ngates = len(rad1_ele)
ind_ray_rad1 = np.ma.masked_all(ngates, dtype=int)
ind_rng_rad1 = np.ma.masked_all(ngates, dtype=int)
ind_ray_rad2 = np.ma.masked_all(ngates, dtype=int)
ind_rng_rad2 = np.ma.masked_all(ngates, dtype=int)
for i in range(ngates):
ind_ray_rad1_aux = find_ray_index(
radar1.elevation['data'], radar1.azimuth['data'], rad1_ele[i],
rad1_azi[i], ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray_rad1_aux is None:
continue
ind_rng_rad1_aux = find_rng_index(
radar1.range['data'], rad1_rng[i], rng_tol=rng_tol)
if ind_rng_rad1_aux is None:
continue
ind_ray_rad2_aux = find_ray_index(
radar2.elevation['data'], radar2.azimuth['data'], rad2_ele[i],
rad2_azi[i], ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray_rad2_aux is None:
continue
ind_rng_rad2_aux = find_rng_index(
radar2.range['data'], rad2_rng[i], rng_tol=rng_tol)
if ind_rng_rad2_aux is None:
continue
ind_ray_rad1[i] = ind_ray_rad1_aux
ind_rng_rad1[i] = ind_rng_rad1_aux
ind_ray_rad2[i] = ind_ray_rad2_aux
ind_rng_rad2[i] = ind_rng_rad2_aux
ind_ray_rad1 = ind_ray_rad1.compressed()
ind_rng_rad1 = ind_rng_rad1.compressed()
ind_ray_rad2 = ind_ray_rad2.compressed()
ind_rng_rad2 = ind_rng_rad2.compressed()
return ind_ray_rad1, ind_rng_rad1, ind_ray_rad2, ind_rng_rad2
def get_target_elevations(radar_in):
"""
Gets RHI target elevations
Parameters
----------
radar_in : Radar object
current radar object
Returns
-------
target_elevations : 1D-array
Elevation angles
el_tol : float
elevation tolerance
"""
sweep_start = radar_in.sweep_start_ray_index['data'][0]
sweep_end = radar_in.sweep_end_ray_index['data'][0]
target_elevations = np.sort(
radar_in.elevation['data'][sweep_start:sweep_end+1])
el_tol = np.median(target_elevations[1:]-target_elevations[:-1])
return target_elevations, el_tol
def time_avg_range(timeinfo, avg_starttime, avg_endtime, period):
"""
finds the new start and end time of an averaging
Parameters
----------
timeinfo : datetime
the current volume time
avg_starttime : datetime
the current average start time
avg_endtime: datetime
the current average end time
period: float
the averaging period
Returns
-------
new_starttime : datetime
the new average start time
new_endtime : datetime
the new average end time
"""
new_starttime = deepcopy(avg_starttime)
new_endtime = deepcopy(avg_endtime)
within_range = False
while not within_range:
if timeinfo > new_endtime:
new_starttime += datetime.timedelta(seconds=period)
new_endtime += datetime.timedelta(seconds=period)
else:
within_range = True
return new_starttime, new_endtime
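# Illustrative example: a 10:00-10:05 averaging window with a 300 s period is
# advanced until it contains a volume scanned at 10:12.
#
#     t_vol = datetime.datetime(2020, 1, 1, 10, 12)
#     t_start = datetime.datetime(2020, 1, 1, 10, 0)
#     t_end = datetime.datetime(2020, 1, 1, 10, 5)
#     time_avg_range(t_vol, t_start, t_end, 300.)
#     # -> (datetime(2020, 1, 1, 10, 10), datetime(2020, 1, 1, 10, 15))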
def get_closest_solar_flux(hit_datetime_list, flux_datetime_list,
flux_value_list):
"""
finds the solar flux measurement closest to the sun hit
Parameters
----------
hit_datetime_list : datetime array
the date and time of the sun hit
flux_datetime_list : datetime array
the date and time of the solar flux measurement
flux_value_list: ndarray 1D
the solar flux values
Returns
-------
flux_datetime_closest_list : datetime array
the date and time of the solar flux measurement closest to sun hit
flux_value_closest_list : ndarray 1D
the solar flux values closest to the sun hit time
"""
flux_datetime_closest_list = list()
flux_value_closest_list = np.ma.masked_all(len(hit_datetime_list))
i = 0
for hit_dt in hit_datetime_list:
flux_datetime_closest = min(
flux_datetime_list, key=lambda x: abs(x-hit_dt))
flux_datetime_closest_list.append(flux_datetime_closest)
# solar flux observation within 24h of sun hit
time_diff = abs(flux_datetime_closest-hit_dt).total_seconds()
if time_diff < 86400.:
ind = flux_datetime_list.index(flux_datetime_closest)
flux_value_closest_list[i] = flux_value_list[ind]
else:
warn('Nearest solar flux observation further than ' +
str(time_diff)+' s in time')
i += 1
return flux_datetime_closest_list, flux_value_closest_list
def get_fixed_rng_data(radar, field_names, fixed_rng, rng_tol=50.,
ele_min=None, ele_max=None, azi_min=None,
azi_max=None):
"""
Creates a 2D-grid with (azi, ele) data at a fixed range
Parameters
----------
radar : radar object
The radar object containing the data
field_names : list of str
The field names
fixed_rng : float
The fixed range [m]
rng_tol : float
The tolerance between the nominal range and the actual radar range [m]
ele_min, ele_max, azi_min, azi_max : float or None
The limits of the grid [deg]. If None the limits will be the limits
of the radar volume
Returns
-------
radar : radar object
The radar object containing only the desired data
"""
radar_aux = deepcopy(radar)
ind_rng = find_rng_index(
radar_aux.range['data'], fixed_rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No range bin at range '+str(fixed_rng)+' with tolerance ' +
str(rng_tol))
return None, None, None
# Determine angle limits
if radar_aux.scan_type == 'ppi':
if ele_min is None:
ele_min = np.min(radar_aux.fixed_angle['data'])
if ele_max is None:
ele_max = np.max(radar_aux.fixed_angle['data'])
if azi_min is None:
azi_min = np.min(radar_aux.azimuth['data'])
if azi_max is None:
azi_max = np.max(radar_aux.azimuth['data'])
else:
if ele_min is None:
ele_min = np.min(radar_aux.elevation['data'])
if ele_max is None:
ele_max = np.max(radar_aux.elevation['data'])
if azi_min is None:
azi_min = np.min(radar_aux.fixed_angle['data'])
if azi_max is None:
azi_max = np.max(radar_aux.fixed_angle['data'])
if radar_aux.scan_type == 'ppi':
# Get radar elevation angles within limits
ele_vec = np.sort(radar_aux.fixed_angle['data'])
ele_vec = ele_vec[
np.logical_and(ele_vec >= ele_min, ele_vec <= ele_max)]
if ele_vec.size == 0:
warn('No elevation angles between '+str(ele_min)+' and ' +
str(ele_max))
return None, None, None
# get sweeps corresponding to the desired elevation angles
ind_sweeps = []
for ele in ele_vec:
ind_sweeps.append(
np.where(radar_aux.fixed_angle['data'] == ele)[0][0])
radar_aux = radar_aux.extract_sweeps(ind_sweeps)
# Get indices of rays within limits
if azi_min < azi_max:
ind_rays = np.where(np.logical_and(
radar_aux.azimuth['data'] >= azi_min,
radar_aux.azimuth['data'] <= azi_max))[0]
else:
ind_rays = np.where(np.logical_or(
radar_aux.azimuth['data'] >= azi_min,
radar_aux.azimuth['data'] <= azi_max))[0]
else:
# Get radar azimuth angles within limits
azi_vec = radar_aux.fixed_angle['data']
if azi_min < azi_max:
azi_vec = np.sort(azi_vec[
np.logical_and(azi_vec >= azi_min, azi_vec <= azi_max)])
else:
azi_vec = azi_vec[
np.logical_or(azi_vec >= azi_min, azi_vec <= azi_max)]
azi_vec = np.append(
np.sort(azi_vec[azi_vec >= azi_min]),
np.sort(azi_vec[azi_vec < azi_min]))
if azi_vec.size == 0:
warn('No azimuth angles between '+str(azi_min)+' and ' +
str(azi_max))
return None, None, None
# get sweeps corresponding to the desired azimuth angles
ind_sweeps = []
for azi in azi_vec:
ind_sweeps.append(
np.where(radar_aux.fixed_angle['data'] == azi)[0][0])
radar_aux = radar_aux.extract_sweeps(ind_sweeps)
# Get indices of rays within limits
ind_rays = np.where(np.logical_and(
radar_aux.elevation['data'] >= ele_min,
radar_aux.elevation['data'] <= ele_max))[0]
# get new sweep start index and stop index
sweep_start_inds = deepcopy(radar_aux.sweep_start_ray_index['data'])
sweep_end_inds = deepcopy(radar_aux.sweep_end_ray_index['data'])
nrays = 0
for j in range(radar_aux.nsweeps):
# get azimuth indices for this elevation
rays_in_sweep = np.size(
ind_rays[np.logical_and(ind_rays >= sweep_start_inds[j],
ind_rays <= sweep_end_inds[j])])
radar_aux.rays_per_sweep['data'][j] = rays_in_sweep
if j == 0:
radar_aux.sweep_start_ray_index['data'][j] = 0
else:
radar_aux.sweep_start_ray_index['data'][j] = int(
radar_aux.sweep_end_ray_index['data'][j-1]+1)
radar_aux.sweep_end_ray_index['data'][j] = (
radar_aux.sweep_start_ray_index['data'][j]+rays_in_sweep-1)
nrays += rays_in_sweep
# Get new fields
for field_name in field_names:
if field_name not in radar_aux.fields:
warn('Field '+field_name+' not available')
continue
radar_aux.fields[field_name]['data'] = (
radar_aux.fields[field_name]['data'][:, ind_rng])
radar_aux.fields[field_name]['data'] = (
radar_aux.fields[field_name]['data'][ind_rays, np.newaxis])
# Update metadata
radar_aux.time['data'] = radar_aux.time['data'][ind_rays]
radar_aux.range['data'] = np.array([fixed_rng])
radar_aux.azimuth['data'] = radar_aux.azimuth['data'][ind_rays]
radar_aux.elevation['data'] = radar_aux.elevation['data'][ind_rays]
radar_aux.init_gate_x_y_z()
radar_aux.init_gate_longitude_latitude()
radar_aux.init_gate_altitude()
radar_aux.nrays = nrays
radar_aux.ngates = 1
return radar_aux
def create_sun_hits_field(rad_el, rad_az, sun_el, sun_az, data, imgcfg):
"""
creates a sun hits field from the position and power of the sun hits
Parameters
----------
rad_el, rad_az, sun_el, sun_az : ndarray 1D
elevation and azimuth of the radar and the sun respectively [deg]
data : masked ndarray 1D
the sun hit data
imgcfg: dict
a dictionary specifying the ranges and resolution of the field to
create
Returns
-------
field : masked ndarray 2D
the sun hit field
"""
if data.compressed().size == 0:
warn('No valid sun hits to plot.')
return None
azmin = imgcfg['azmin']
azmax = imgcfg['azmax']
elmin = imgcfg['elmin']
elmax = imgcfg['elmax']
azres = imgcfg['azres']
elres = imgcfg['elres']
mask = np.ma.getmaskarray(data)
rad_el = rad_el[~mask]
rad_az = rad_az[~mask]
sun_el = sun_el[~mask]
sun_az = sun_az[~mask]
data = data[~mask]
d_el = rad_el-sun_el
d_az = (rad_az-sun_az)*np.cos(sun_el*np.pi/180.)
npix_az = int((azmax-azmin)/azres)
npix_el = int((elmax-elmin)/elres)
field = np.ma.masked_all((npix_az, npix_el))
ind_az = ((d_az+azmin)/azres).astype(int)
ind_el = ((d_el+elmin)/elres).astype(int)
field[ind_az, ind_el] = data
return field
def create_sun_retrieval_field(par, field_name, imgcfg, lant=0.):
"""
creates a sun retrieval field from the retrieval parameters
Parameters
----------
par : ndarray 1D
the 5 retrieval parameters
field_name : str
the name of the field to create
imgcfg: dict
a dictionary specifying the ranges and resolution of the field to
create
lant : float
additional offset added to the estimated sun power fields
Returns
-------
field : masked ndarray 2D
the sun retrieval field
"""
azmin = imgcfg['azmin']
azmax = imgcfg['azmax']
elmin = imgcfg['elmin']
elmax = imgcfg['elmax']
azres = imgcfg['azres']
elres = imgcfg['elres']
npix_az = int((azmax-azmin)/azres)
npix_el = int((elmax-elmin)/elres)
field = np.ma.masked_all((npix_az, npix_el))
d_az = np.array(np.array(range(npix_az))*azres+azmin)
d_el = np.array(np.array(range(npix_el))*elres+elmin)
d_az_mat = np.broadcast_to(d_az.reshape(npix_az, 1), (npix_az, npix_el))
d_el_mat = np.broadcast_to(d_el.reshape(1, npix_el), (npix_az, npix_el))
field = (par[0]+par[1]*d_az_mat+par[2]*d_el_mat+par[3]*d_az_mat*d_az_mat +
par[4]*d_el_mat*d_el_mat)
if field_name in ('sun_est_power_h', 'sun_est_power_v'):
# account for polarization of the antenna and scanning losses
field += 3.+lant
return field
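# Illustrative sketch: both sun-field builders above expect an imgcfg dict with
# the angular extent and resolution of the output grid; par stands for the 5
# retrieval parameters and is assumed to be available.
#
#     imgcfg = {'azmin': -2., 'azmax': 2., 'azres': 0.1,   # azimuth axis [deg]
#               'elmin': -2., 'elmax': 2., 'elres': 0.1}   # elevation axis [deg]
#     field = create_sun_retrieval_field(par, 'sun_est_power_h', imgcfg, lant=0.)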
def compute_quantiles(field, quantiles=None):
"""
computes quantiles
Parameters
----------
field : ndarray 2D
the radar field
quantiles: float array
list of quantiles to compute
Returns
-------
quantiles : float array
list of quantiles
values : float array
values at each quantile
"""
if quantiles is None:
quantiles = [10., 20., 30., 40., 50., 60., 70., 80., 90., 95.]
warn('No quantiles have been defined. Default ' + str(quantiles) +
' will be used')
nquantiles = len(quantiles)
values = np.ma.masked_all(nquantiles)
data_valid = field.compressed()
if np.size(data_valid) < 10:
warn('Unable to compute quantiles. Not enough valid data')
return quantiles, values
for i in range(nquantiles):
values[i] = np.percentile(data_valid, quantiles[i])
return quantiles, values
def compute_quantiles_from_hist(bin_centers, hist, quantiles=None):
"""
computes quantiles from histograms
Parameters
----------
bin_centers : ndarray 1D
the bins
hist : ndarray 1D
the histogram
quantiles: float array
list of quantiles to compute
Returns
-------
quantiles : float array
list of quantiles
values : float array
values at each quantile
"""
if quantiles is None:
quantiles = [10., 20., 30., 40., 50., 60., 70., 80., 90.]
warn('No quantiles have been defined. Default ' + str(quantiles) +
' will be used')
nquantiles = len(quantiles)
values = np.ma.masked_all(nquantiles)
# check if all elements in histogram are masked values
mask = np.ma.getmaskarray(hist)
if mask.all():
return quantiles, values
np_t = np.ma.sum(hist)
if np_t < 10:
return quantiles, values
freq = hist/np_t
rel_freq = np.ma.cumsum(freq)
percentiles = quantiles/100.
for i in range(nquantiles):
values[i] = bin_centers[rel_freq >= percentiles[i]][0]
return quantiles, values
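# Illustrative example: with 100 counts distributed as below the cumulative
# frequencies are [0.1, 0.3, 0.7, 1.0], so the 25/50/90 % quantiles fall on the
# bin centres 1., 2. and 3.
#
#     bin_centers = np.array([0., 1., 2., 3.])
#     hist = np.array([10., 20., 40., 30.])
#     compute_quantiles_from_hist(bin_centers, hist,
#                                 quantiles=np.array([25., 50., 90.]))
#     # -> (array([25., 50., 90.]), values 1., 2., 3.)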
def compute_quantiles_sweep(field, ray_start, ray_end, quantiles=None):
"""
computes quantiles of a particular sweep
Parameters
----------
field : ndarray 2D
the radar field
ray_start, ray_end : int
starting and ending ray indexes
quantiles: float array
list of quantiles to compute
Returns
-------
quantiles : float array
list of quantiles
values : float array
values at each quantile
"""
if quantiles is None:
quantiles = [10., 20., 30., 40., 50., 60., 70., 80., 90.]
warn('No quantiles have been defined. Default ' + str(quantiles) +
' will be used')
nquantiles = len(quantiles)
values = np.ma.masked_all(nquantiles)
data_valid = field[ray_start:ray_end+1, :].compressed()
if np.size(data_valid) < 10:
warn('Unable to compute quantiles. Not enough valid data')
return quantiles, values
for i in range(nquantiles):
values[i] = np.percentile(data_valid, quantiles[i])
return quantiles, values
def compute_histogram(field, field_name, bin_edges=None, step=None,
vmin=None, vmax=None):
"""
computes histogram of the data
Parameters
----------
field : ndarray 2D
the radar field
field_name: str or none
name of the field
bin_edges : ndarray 1D
the bin edges
step : float
size of bin
vmin, vmax : float
The minimum and maximum value of the histogram
Returns
-------
bin_edges : float array
interval of each bin
values : float array
values at each bin
"""
if bin_edges is None:
if field_name is not None:
bin_edges = get_histogram_bins(
field_name, step=step, vmin=vmin, vmax=vmax)
else:
if vmin is None:
vmin = np.ma.min(field)
if vmax is None:
vmax = np.ma.max(field)
if step is None:
step = (vmax-vmin)/100.
bin_edges = np.arange(vmin, vmax+step, step)
step_aux = bin_edges[1]-bin_edges[0]
bin_centers = bin_edges[0:-1]+step_aux/2.
values = field.compressed()
values[values < bin_centers[0]] = bin_centers[0]
values[values > bin_centers[-1]] = bin_centers[-1]
return bin_edges, values
def compute_histogram_sweep(field, ray_start, ray_end, field_name, step=None,
vmin=None, vmax=None):
"""
computes histogram of the data in a particular sweep
Parameters
----------
field : ndarray 2D
the radar field
ray_start, ray_end : int
starting and ending ray indexes
field_name: str
name of the field
step : float
size of bin
vmin, vmax : float
minimum and maximum values
Returns
-------
bin_edges : float array
interval of each bin
values : float array
values at each bin
"""
bin_edges = get_histogram_bins(
field_name, step=step, vmin=vmin, vmax=vmax)
step_aux = bin_edges[1]-bin_edges[0]
bin_centers = bin_edges[:-1]+step_aux/2.
values = field[ray_start:ray_end+1, :].compressed()
values[values < bin_centers[0]] = bin_centers[0]
values[values > bin_centers[-1]] = bin_centers[-1]
return bin_edges, values
def get_histogram_bins(field_name, step=None, vmin=None, vmax=None):
"""
gets the histogram bins. If vmin or vmax are not defined, the range limits
of the field as defined in the Py-ART config file are used.
Parameters
----------
field_name: str
name of the field
step : float
size of bin
vmin, vmax : float
The minimum and maximum value of the histogram
Returns
-------
bin_edges : float array
The bin edges
"""
field_dict = pyart.config.get_metadata(field_name)
if 'boundaries' in field_dict:
return np.array(field_dict['boundaries'])
vmin_aux, vmax_aux = pyart.config.get_field_limits(field_name)
if vmin is None:
vmin = vmin_aux
if vmax is None:
vmax = vmax_aux
if step is None:
step = (vmax-vmin)/50.
warn('No step has been defined. Default '+str(step)+' will be used')
return np.linspace(
vmin-step/2., vmax+step/2., num=int((vmax-vmin)/step)+2)
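# Worked example (sketch): for a field without a 'boundaries' entry and limits
# vmin=0., vmax=10. with step=1., the returned edges are
# np.linspace(-0.5, 10.5, num=12), i.e. -0.5, 0.5, ..., 10.5, so every bin is
# centred on an integer value. With step=None a default step of
# (vmax-vmin)/50. is used and a warning is issued.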
def compute_2d_stats(field1, field2, field_name1, field_name2, step1=None,
step2=None):
"""
computes a 2D histogram and statistics of the data
Parameters
----------
field1, field2 : ndarray 2D
the two fields
field_name1, field_name2 : str
the name of the fields
step1, step2 : float
size of bin
Returns
-------
hist_2d : array
the histogram
bin_edges1, bin_edges2 : float array
The bin edges
stats : dict
a dictionary with statistics
"""
if field1.size == 0 or field2.size == 0:
warn('Unable to compute 2D histogram. Empty field')
stats = {
'npoints': 0,
'meanbias': np.ma.asarray(np.ma.masked),
'medianbias': np.ma.asarray(np.ma.masked),
'quant25bias': np.ma.asarray(np.ma.masked),
'quant75bias': np.ma.asarray(np.ma.masked),
'modebias': np.ma.asarray(np.ma.masked),
'corr': np.ma.asarray(np.ma.masked),
'slope': np.ma.asarray(np.ma.masked),
'intercep': np.ma.asarray(np.ma.masked),
'intercep_slope_1': np.ma.asarray(np.ma.masked)
}
return None, None, None, stats
hist_2d, bin_edges1, bin_edges2 = compute_2d_hist(
field1, field2, field_name1, field_name2, step1=step1, step2=step2)
step_aux1 = bin_edges1[1]-bin_edges1[0]
bin_centers1 = bin_edges1[:-1]+step_aux1/2.
step_aux2 = bin_edges2[1]-bin_edges2[0]
bin_centers2 = bin_edges2[:-1]+step_aux2/2.
npoints = len(field1)
meanbias = 10.*np.ma.log10(
np.ma.mean(np.ma.power(10., 0.1*field2)) /
np.ma.mean(np.ma.power(10., 0.1*field1)))
medianbias = np.ma.median(field2-field1)
quant25bias = np.percentile((field2-field1).compressed(), 25.)
quant75bias = np.percentile((field2-field1).compressed(), 75.)
ind_max_val1, ind_max_val2 = np.where(hist_2d == np.ma.amax(hist_2d))
modebias = bin_centers2[ind_max_val2[0]]-bin_centers1[ind_max_val1[0]]
slope, intercep, corr, _, _ = scipy.stats.linregress(
field1, y=field2)
intercep_slope_1 = np.ma.mean(field2-field1)
stats = {
'npoints': npoints,
'meanbias': np.ma.asarray(meanbias),
'medianbias': np.ma.asarray(medianbias),
'quant25bias': np.ma.asarray(quant25bias),
'quant75bias': np.ma.asarray(quant75bias),
'modebias': np.ma.asarray(modebias),
'corr': np.ma.asarray(corr),
'slope': np.ma.asarray(slope),
'intercep': np.ma.asarray(intercep),
'intercep_slope_1': np.ma.asarray(intercep_slope_1)
}
return hist_2d, bin_edges1, bin_edges2, stats
def compute_1d_stats(field1, field2):
"""
returns statistics of data
Parameters
----------
field1, field2 : ndarray 1D
the two fields to compare
Returns
-------
stats : dict
a dictionary with statistics
"""
if field1.size == 0 or field2.size == 0:
warn('Unable to compute statistics. Empty fields')
stats = {
'npoints': 0,
'NB': np.ma.asarray(np.ma.masked),
'corr': np.ma.asarray(np.ma.masked),
'RMS': np.ma.asarray(np.ma.masked),
'Nash': np.ma.asarray(np.ma.masked)
}
return stats
npoints = len(field1)
mean1 = np.ma.mean(field1)
mean2 = np.ma.mean(field2)
nb = mean2/mean1-1
_, _, corr, _, _ = scipy.stats.linregress(field1, y=field2)
rms = np.ma.sqrt(np.ma.sum(np.ma.power(field2-field1, 2.))/npoints)
nash = (1.-np.ma.sum(np.ma.power(field2-field1, 2.)) /
np.ma.sum(np.ma.power(field1-mean1, 2.)))
stats = {
'npoints': npoints,
'NB': nb,
'corr': corr,
'RMS': rms,
'Nash': nash
}
return stats
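# Illustrative example: a constant +1 offset between the two series gives a
# normalized bias of about 0.4, perfect correlation, an RMS of 1 and a Nash
# coefficient of about 0.2.
#
#     f1 = np.ma.array([1., 2., 3., 4.])
#     f2 = np.ma.array([2., 3., 4., 5.])
#     compute_1d_stats(f1, f2)
#     # -> {'npoints': 4, 'NB': ~0.4, 'corr': 1.0, 'RMS': 1.0, 'Nash': ~0.2}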
def compute_2d_hist(field1, field2, field_name1, field_name2, step1=None,
step2=None):
"""
computes a 2D histogram of the data
Parameters
----------
field1, field2 : ndarray 2D
the radar fields
field_name1, field_name2 : str
field names
step1, step2 : float
size of the bins
Returns
-------
H : float array 2D
The bi-dimensional histogram of samples x and y
xedges, yedges : float array
the bin edges along each dimension
"""
bin_edges1 = get_histogram_bins(field_name1, step=step1)
step_aux1 = bin_edges1[1]-bin_edges1[0]
bin_centers1 = bin_edges1[:-1]+step_aux1/2.
bin_edges2 = get_histogram_bins(field_name2, step=step2)
step_aux2 = bin_edges2[1]-bin_edges2[0]
bin_centers2 = bin_edges2[:-1]+step_aux2/2.
field1[field1 < bin_centers1[0]] = bin_centers1[0]
field1[field1 > bin_centers1[-1]] = bin_centers1[-1]
field2[field2 < bin_centers2[0]] = bin_centers2[0]
field2[field2 > bin_centers2[-1]] = bin_centers2[-1]
fill_value = pyart.config.get_fillvalue()
return np.histogram2d(
field1.filled(fill_value=fill_value),
field2.filled(fill_value=fill_value), bins=[bin_edges1, bin_edges2])
def quantize_field(field, field_name, step, vmin=None, vmax=None):
"""
quantizes data
Parameters
----------
field : ndarray 2D
the radar field
field_name: str
name of the field
step : float
size of bin
vmin, vmax : float
min and max values
Returns
-------
fieldq : ndarray 2D
The quantized field
values : float array
values at each bin
"""
vmin_aux, vmax_aux = pyart.config.get_field_limits(field_name)
if vmin is None:
vmin = vmin_aux
if vmax is None:
vmax = vmax_aux
field[field < vmin] = vmin
field[field > vmax] = vmax
fieldq = ((field+vmin)/step+1).astype(int)
return fieldq.filled(fill_value=0)
def compute_profile_stats(field, gate_altitude, h_vec, h_res,
quantity='quantiles',
quantiles=np.array([0.25, 0.50, 0.75]),
nvalid_min=4, std_field=None, np_field=None,
make_linear=False, include_nans=False):
"""
Compute statistics of vertical profile
Parameters
----------
field : ndarray
the radar field
gate_altitude: ndarray
the altitude at each radar gate [m MSL]
h_vec : 1D ndarray
height vector [m MSL]
h_res : float
height resolution [m]
quantity : str
The quantity to compute. Can be
['quantiles', 'mode', 'regression_mean', 'mean'].
If 'mean', the min, max, and average is computed.
quantiles : 1D ndarray
the quantiles to compute
nvalid_min : int
the minimum number of points to consider the stats valid
std_field : ndarray
the standard deviation of the regression at each range gate
np_field : ndarray
the number of points used to compute the regression at each range gate
make_linear : Boolean
If true the data is transformed into linear coordinates before taking
the mean
include_nans : Boolean
If true NaN will be considered as zeros
Returns
-------
vals : ndarray 2D
The resultant statistics
val_valid : ndarray 1D
The number of points to compute the stats used at each height level
"""
nh = h_vec.size
if quantity == 'mean':
vals = np.ma.empty((nh, 3), dtype=float)
vals[:] = np.ma.masked
elif quantity == 'mode':
vals = np.ma.empty((nh, 6), dtype=float)
vals[:, 0] = np.ma.masked
vals[:, 2] = np.ma.masked
vals[:, 4] = np.ma.masked
vals[:, 1] = 0
vals[:, 3] = 0
vals[:, 5] = 0
elif quantity == 'regression_mean':
vals = np.ma.masked_all((nh, 2), dtype=float)
else:
vals = np.ma.masked_all((nh, quantiles.size), dtype=float)
val_valid = np.zeros(nh, dtype=int)
for i, h in enumerate(h_vec):
data = field[np.logical_and(
gate_altitude >= h-h_res/2., gate_altitude < h+h_res/2.)]
if include_nans:
data[np.ma.getmaskarray(data)] = 0.
if data.size == 0:
continue
if quantity == 'mean':
mask = np.ma.getmaskarray(data)
nvalid = np.count_nonzero(np.logical_not(mask))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Equal Earth Projection
======================
This is a :mod:`matplotlib` add-on that adds the Equal Earth Projection
described by <NAME> (@BojanSavric), <NAME> and <NAME>:
Abstract:
"The Equal Earth map projection is a new equal-area pseudocylindrical
projection for world maps. It is inspired by the widely used Robinson
projection, but unlike the Robinson projection, retains the relative size
of areas. The projection equations are simple to implement and fast to
evaluate. Continental outlines are shown in a visually pleasing and
balanced way."
* https://doi.org/10.1080/13658816.2018.1504949
* https://www.researchgate.net/publication/326879978_The_Equal_Earth_map_projection
This projection is similar to the `Eckert IV equal area projection
<https://en.wikipedia.org/wiki/Eckert_IV_projection>`_, but is 2-5x
faster to calculate. It is based on code from:
* https://matplotlib.org/gallery/misc/custom_projection.html
as well as code from @mbostock:
* https://beta.observablehq.com/@mbostock/equal-earth-projection
Requirements
------------
shapefile (from pyshp) is required to read the map data. This is available
from Anaconda, but must be installed first, from the command line::
>>>conda install pyshp
Installation
------------
Only the `EqualEarth.py <https://github.com/dneuman/EqualEarth/blob/master/EqualEarth.py>`_
file is required. You can download the entire repository using the green "Clone
or download" button, or by clicking on the file link, then right-clicking on
the "Raw" tab to download the actual script. The script must be located in a
directory in your `PYTHONPATH <https://scipher.wordpress.com/2010/05/10/setting-
your-pythonpath-environment-variable-linuxunixosx/>`_ list to use it in
another program.
.. note:: Using the :func:`GeoAxes.DrawCoastlines` (new in 2.0) function will
create a ``maps`` folder in the same directory and download some maps
(500kb) for drawing, the first time it is called.
New in This Version (2.0)
-------------------------
:func:`GeoAxes.DrawCoastlines`:
World map data from `Natural Earth <https://www.naturalearthdata.com>`_
will download into the ``maps`` folder in the same directory as the
Equal Earth module, the first time this function is called. This is 500kb
on disk, but is downloaded in .zip format and unzipped automatically. Other
maps can be used if you supply the shape files. Once the axes is set up,
you can draw the continents::
>>>ax.DrawCoastlines(facecolor='grey', edgecolor='k', lw=.5)
:func:`GeoAxes.plot_geodesic` Great Circle (geodesic) lines:
Navigation lines can be plotted using the shortest path on the globe. These
lines take plot keywords and wrap around if necessary::
>>>pts = np.array([[-150, 45], [150, 45]])
>>>ax.plot_geodesic(pts, 'b:', linewidth=1, alpha=.8)
:func:`GeoAxes.DrawTissot`:
Draw the Tissot Indicatrix of Distortion on the projection. This is a set
of circles of equal size drawn on the projection, showing how the
projection distorts objects at various positions on the map::
>>>ax.DrawTissot(width=10.)
See `the Wikipedia article <https://en.m.wikipedia.org/wiki/Tissot%27s_indicatrix>`_
for more information.
Usage
-----
Importing the module causes the Equal Earth projection to be registered with
Matplotlib so that it can be used when creating a subplot::
import matplotlib.pyplot as plt
import EqualEarth
longs = [-200, 100, 100, -200]
lats = [40, 40, -40, 40]
fig = plt.figure('Equal Earth Projection')
ax = fig.add_subplot(111, projection='equal_earth', facecolor='lightblue')
ax.plot(longs, lats)
plt.grid(True)
plt.tight_layout()
plt.show()
.. figure:: _static/Equal_Earth_Projection.png
:align: center
.. note:: ax.plot():
Lines drawn by `ax.plot()` method are clipped by the projection if
any portions are outside it due to points being greater than +/- 180°
in longitude. If you want to show lines wrapping around, they must be
drawn twice. The second time will require the outside points put back
into the correct range by adding or subtracting 360 as required.
Note that the default behaviour is to take all data in degrees. If radians
are preferred, use the ``rad=True`` optional keyword in ``fig.add_subplot()``,
ie::
ax = fig.add_subplot(111, projection='equal_earth', rad=True)
All plots must be done in radians at this point.
This example creates a projection map with coastlines using the default
settings, and adds a few shortest-path lines that demonstrate the wrap-around
capabilities::
import matplotlib.pyplot as plt
import EqualEarth
fig = plt.figure('Equal Earth', figsize=(10., 6.))
fig.clear()
ax = fig.add_subplot(111, projection='equal_earth',
facecolor='#CEEAFD')
ax.tick_params(labelcolor=(0,0,0,.25)) # make alpha .25 to lighten
pts = np.array([[-75, 45],
[-123, 49],
[-158, 21],
[116, -32],
[32.5, -26],
[105, 30.5],
[-75, 45]])
ax.DrawCoastlines(zorder=0) # put land under grid
ax.plot(pts[:,0], pts[:,1], 'ro', markersize=4)
ax.plot_geodesic(pts, 'b:', lw=2)
ax.grid(color='grey', lw=.25)
ax.set_title('Equal Earth Projection with Great Circle Lines',
size='x-large')
plt.tight_layout() # make most use of available space
plt.show()
.. figure:: _static/Equal_Earth.png
:align: center
Future
------
Ultimately, the Equal Earth projection should be added to the :mod:`cartopy`
module, which provides a far greater range of features.
@Author: <NAME> (@dan613)
@Version: 2.0
@Date: 13 Sep 2018
EqualEarth API
==============
"""
from __future__ import unicode_literals
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.patches import Circle
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator, Formatter, FixedLocator
from matplotlib.transforms import Affine2D, BboxTransformTo, Transform
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
import matplotlib.axis as maxis
import numpy as np
# Mapping support
from zipfile import ZipFile
import pathlib
import io
from urllib.request import urlopen
import shapefile  # provided by the pyshp package: conda install pyshp
rcParams = matplotlib.rcParams
# This example projection class is rather long, but it is designed to
# illustrate many features, not all of which will be used every time.
# It is also common to factor out a lot of these methods into common
# code used by a number of projections with similar characteristics
# (see geo.py).
class GeoAxes(Axes):
"""
An abstract base class for geographic projections. Most of these functions
are used only by :mod:`matplotlib`, however :func:`DrawCoastlines` and
:func:`plot_geodesic` are useful for drawing the continents and navigation
lines, respectively.
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, rad, round_to=1.0):
self._round_to = round_to
self._rad = rad
def __call__(self, x, pos=None):
if self._rad: x = np.rad2deg(x)
degrees = np.round(x / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return "%0.0f\N{DEGREE SIGN}" % degrees
RESOLUTION = 75
def __init__(self, *args, rad=True, **kwargs):
self._rad = rad
if self._rad:
self._limit = np.pi * 0.5
else:
self._limit = 90.
super().__init__(*args, **kwargs)
def _init_axis(self):
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Do not register xaxis or yaxis with spines -- as done in
# Axes._init_axis() -- until GeoAxes.xaxis.cla() works.
# self.spines['geo'].register_axis(self.yaxis)
self._update_transScale()
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.grid(rcParams['axes.grid'])
lim = self._limit
Axes.set_xlim(self, -lim * 2., lim * 2.)
Axes.set_ylim(self, -lim, lim)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space (in this case longitude and latitude) to axes
# space. It is separated into a non-affine and affine part so
# that the non-affine part does not have to be recomputed when
# a simple affine change to the figure has been made (such as
# resizing the window or changing the dpi).
# 1) The core transformation from data space into
# rectilinear space defined in the EqualEarthTransform class.
self.transProjection = self._get_core_transform(self.RESOLUTION)
# 2) The above has an output range that is not in the unit
# rectangle, so scale and translate it so it fits correctly
# within the axes. The peculiar calculations of xscale and
# yscale are specific to an Equal Earth projection, so don't
# worry about them too much.
self.transAffine = self._get_affine_transform()
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# The main data transformation is set up. Now deal with
# gridlines and tick labels.
# Longitude gridlines and ticklabels. The input to these
# transforms are in display space in x and axes space in y.
# Therefore, the input values will be in range (-xmin, 0),
# (xmax, 1). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the equator.
lim = self._limit # (pi/2 or 90°)
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, lim * 2.0) \
.translate(0.0, -lim)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# Now set up the transforms for the latitude ticks. The input to
# these transforms are in axes space in x and display space in
# y. Therefore, the input values will be in range (0, -ymin),
# (1, ymax). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the edge of the axes ellipse.
yaxis_stretch = Affine2D().scale(lim * 4, 1).translate(-lim * 2, 0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space +
self.transAffine +
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
lim = self._limit
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((lim * 2, 0))
_, yscale = transform.transform_point((0, lim))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self, which='grid'):
"""
Override this method to provide a transformation for the
x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
if which not in ['tick1', 'tick2', 'grid']:
raise ValueError(
"'which' must be one of 'tick1', 'tick2', or 'grid'")
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
"""
Override this method to provide a transformation for the
secondary x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self, which='grid'):
"""
Override this method to provide a transformation for the
y-axis grid and ticks.
"""
if which not in ['tick1', 'tick2', 'grid']:
raise ValueError(
"'which' must be one of 'tick1', 'tick2', or 'grid'")
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
"""
Override this method to provide a transformation for the
y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
"""
Override this method to provide a transformation for the
secondary y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
"""
Override this method to define the shape that is used for the
background of the plot. It should be a subclass of Patch.
In this case, it is a Circle (that may be warped by the axes
transform into an ellipse). Any data and gridlines will be
clipped to this shape.
"""
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'geo': mspines.Spine.circular_spine(self, (0.5, 0.5), 0.5)}
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
set_xscale = set_yscale
# Prevent the user from changing the axes limits. In our case, we
# want to display the whole sphere all the time, so we override
# set_xlim and set_ylim to ignore any input. This also applies to
# interactive panning and zooming in the GUI interfaces.
def set_xlim(self, *args, **kwargs):
raise TypeError("It is not possible to change axes limits "
"for geographic projections. Please consider "
"using Basemap or Cartopy.")
set_ylim = set_xlim
def format_coord(self, lon, lat):
"""
Override this method to change how the values are displayed in
the status bar.
In this case, we want them to be displayed in degrees N/S/E/W.
"""
if self._rad:
lon, lat = np.rad2deg([lon, lat])
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if lon >= 0.0:
ew = 'E'
else:
ew = 'W'
return ('%f\N{DEGREE SIGN}%s, %f\N{DEGREE SIGN}%s'
% (abs(lat), ns, abs(lon), ew))
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface to set the
ticking than set_xticks would.
"""
# Skip -180 and 180, which are the fixed limits.
grid = np.arange(-180 + degrees, 180, degrees)
if self._rad: grid = np.deg2rad(grid)
self.xaxis.set_major_locator(FixedLocator(grid))
self.xaxis.set_major_formatter(self.ThetaFormatter(self._rad, degrees))
def set_latitude_grid(self, degrees):
"""
Set the number of degrees between each latitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface than
set_yticks would.
"""
# Skip -90 and 90, which are the fixed limits.
grid = np.arange(-90 + degrees, 90, degrees)
if self._rad: grid = np.deg2rad(grid)
self.yaxis.set_major_locator(FixedLocator(grid))
self.yaxis.set_major_formatter(self.ThetaFormatter(self._rad, degrees))
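# Usage sketch (assumed, for illustration only): on an axes created with this
# projection the grid spacing can be changed after creation, e.g.
#   ax.set_longitude_grid(60)   # longitude gridlines every 60 degrees
#   ax.set_latitude_grid(30)    # latitude gridlines every 30 degrees
# Each call installs a FixedLocator/ThetaFormatter pair as shown above.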
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
Often, in geographic projections, you wouldn't want to draw
longitude gridlines near the poles. This allows the user to
specify the degree at which to stop drawing longitude grids.
This is an example method that is specific to this projection
class -- it provides an interface to something that has no
analogy in the base Axes class.
"""
if self._rad:
self._longitude_cap = np.deg2rad(degrees)
else:
self._longitude_cap = degrees
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
"""
Return the aspect ratio of the data itself.
This method should be overridden by any Axes that have a
fixed data ratio.
"""
return 1.0
# Interactive panning and zooming is not supported with this projection,
# so we override all of the following methods to disable it.
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
This axes object does not support interactive zoom box.
"""
return False
def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
This axes object does not support interactive pan/zoom.
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
#=====================================================
# Mapping Functions
#=====================================================
# iPython label
# %% Mapping
# These mapping functions will work with any projection based on GeoAxes
_paths = ['maps/ne_110m_land/ne_110m_land',
'maps/ne_110m_coastline/ne_110m_coastline',
'maps/ne_110m_lakes/ne_110m_lakes']
_names = ['land', 'coastline', 'lakes']
def _CheckMaps(self, check_only=False):
"""
Check to see if the maps already exist, otherwise download them from
Natural Earth's content delivery network. They will be downloaded into the
same directory as the EqualEarth module, in the 'maps' subdirectory.
"""
url_template = ('http://naciscdn.org/naturalearth/110m'
'/physical/ne_110m_{name}.zip')
path_template = 'ne_110m_{name}'
p = pathlib.Path(__file__)
pdir = p.parent
print(pdir)
mdir = pdir / 'maps' # module maps directory
if mdir.exists(): return True
if check_only: return False
# Now get the zip files
mdir.mkdir()
for name in self._names:
url = url_template.format(name=name)
mapdir = mdir / path_template.format(name=name)
mapdir.mkdir()
# Use context managers so the zip handle is closed even if the download
# or extraction fails partway through.
with urlopen(url) as ne_file, ZipFile(io.BytesIO(ne_file.read()), 'r') as zfile:
zfile.extractall(mapdir)
return True
def _DrawEllipse(self, ll, width_deg, resolution=50):
"""
Draw an ellipse. Technically, a circle is drawn (an
ellipse with equal height and width), but this usually becomes
an ellipse on the projection axes.
Parameters
----------
ll : tuple of floats
longitude and latitude coordinates (in degrees) to draw the ellipse
width_deg : float
Width of ellipse in degrees
resolution : int, optional, default: 50
number of points to use in drawing the ellipse
"""
# expect ll in degrees, so must
# change ll to radians if that is the base unit
if self._rad: ll = np.deg2rad(ll)
long, lat = ll
# Width as longitude range gets smaller as you go to the poles, so this
# must be adjusted by the cosine of the latitude.
if self._rad:
height = np.deg2rad(width_deg)/2. # use as radius, not diameter
width = height/np.cos(lat)
else:
height = width_deg/2.
width = height/np.cos(np.deg2rad(lat))
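# Quick check of the cosine correction (illustrative only): at latitude
# 60 degrees, cos(60 deg) = 0.5, so a circle spanning 10 degrees of latitude
# must span 10 / 0.5 = 20 degrees of longitude to plot as a circle.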
# Use a path instead of the regular Ellipse patch to improve resolution
t = np.linspace(0., 2. * np.pi, resolution)
t = np.r_[t, [0]] # append starting point to close path
longs = width * np.cos(t) + long
lats = height * np.sin(t) + lat
verts = np.column_stack([longs, lats])
patch = patches.Polygon(verts,
facecolor='r', alpha=.4,
edgecolor='none', zorder=5.)
self.add_patch(patch)
def DrawTissot(self, width=10., resolution=50):
"""
Draw Tissot Indicatrices of Deformation over the map projection to show
how the projection deforms equally-sized circles at various points
on the map.
Parameters
----------
width : float, optional, default: 10.
width of circles in degrees of latitude
resolution : int, optional, default: 50
Number of points in circle
"""
degrees = 30
for lat in range(-degrees, degrees+1, degrees):
for long in range(-180, 181, degrees):
self._DrawEllipse([long, lat], width, resolution)
for lat in [-60, 60]:
for long in range(-180, 181, 2*degrees):
self._DrawEllipse([long, lat], width, resolution)
for lat in [-90, 90]:
self._DrawEllipse([0, lat], width, resolution)
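# Minimal usage sketch (the registered projection name is assumed here and
# may differ in the actual module):
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='equal_earth')
#   ax.DrawCoastlines()
#   ax.DrawTissot(width=10.)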
def DrawShapes(self, sf, **kwargs):
"""
Draw shapes from the supplied shapefile. At the moment, only polygon
and polyline shapefiles are supported, which are sufficient for
drawing land-masses and coastlines. Coastlines are drawn separately
from land-masses since the land-mass may have slices to allow internal
bodies of water (e.g. Caspian Sea).
Parameters
----------
sf : shapefile.Reader object
The shapefile containing the shapes to draw
kwargs : optional
Keyword arguments to send to the patch object. This will generally
be edge and face colors, line widths, alpha, etc.
"""
# Map points are in degrees, so must be converted if underlying
# projection is in radians. Use a null function that does nothing
# if the projection is in degrees.
def null_convert(vals):
return vals
if self._rad:
convert = np.deg2rad
else:
convert = null_convert
if sf.shapeType == shapefile.POLYGON:
for shape in sf.shapes():
verts = convert(shape.points)
patch = patches.Polygon(verts, **kwargs)
self.add_patch(patch)
elif sf.shapeType == shapefile.POLYLINE:
for shape in sf.shapes():
verts = convert(shape.points)
path = patches.mlines.Path(verts)
patch = patches.PathPatch(path, **kwargs)
self.add_patch(patch)
def DrawCoastlines(self, paths=None, edgecolor='k', facecolor='#FEFEE6',
linewidth=.25, **kwargs):
"""
Draw land masses, coastlines, and major lakes. Colors and linewidth
can be supplied. Coastlines are drawn separately from land-masses
since the land-mass may have slices to allow internal bodies of water
(e.g. Caspian Sea).
Parameters
----------
paths : list of str, optional, default: None
List of paths to map data, if they aren't in the default location. The
paths may be fully-specified or relative, and must be in order:
['land path', 'coastline path', 'lake path']
edgecolor, ec : color, optional, default: black
Color for coastlines and lake edges. ``ec`` can be used as a shortcut.
facecolor, fc : color, optional, default: yellow
Color for land. ``fc`` can be used as a shortcut.
linewidth, lw : float, optional, default: .25
Line width of coastlines and lake edges.
"""
# Check that maps exist and download if necessary
if not self._CheckMaps():
print('maps not available')
return
# Set up colors, overriding defaults if shortcuts given
bc = self.get_facecolor() # background color
ec = kwargs.pop('ec', edgecolor) # edge color
fc = kwargs.pop('fc', facecolor) # face color
lw = kwargs.pop('lw', linewidth) # line width
# land coast lakes
edges = ['none', ec, ec]
faces = [fc, 'none', bc]
if not paths:
paths = self._paths
for path, f, e in zip(paths, faces, edges):
sf = shapefile.Reader(path)
self.DrawShapes(sf, linewidth=lw,
edgecolor=e, facecolor=f, **kwargs)
# %% Geodesic
def Get_geodesic_heading_distance(self, ll1, ll2):
"""
Return the heading and angular distance between two points. Angular
distance is the angle subtended at the Earth's centre by the two points;
to get the actual distance, multiply the angle (in radians) by the Earth's
radius. Heading is the angle between the path and true North.
Math is found at http://en.wikipedia.org/wiki/Great-circle_navigation
Parameters
----------
ll1, ll2 : tuples of 2 floats
start and end points as (longitude, latitude) tuples or lists
"""
# Notation: *0 refers to node 0 where great circle intersects equator
# *1 refers to first point
# *01 refers to angle between node 0 and point 1
# Heading is the angle between the path and true North.
if not self._rad:
ll1, ll2 = np.deg2rad((ll1, ll2))
# simplify math notation
cos = np.cos
sin = np.sin
atan = np.arctan2 # handles quadrants better than np.arctan
# unpack longitudes and latitudes
lon1, lat1 = ll1
lon2, lat2 = ll2
lon12 = lon2 - lon1 # longitudinal angle between the two points
if lon12 > np.pi:
lon12 -= np.pi * 2.
elif lon12 < -np.pi:
lon12 += np.pi * 2.
y1 = cos(lat2) * sin(lon12)
x1 = (cos(lat1) * sin(lat2)) - (sin(lat1) * cos(lat2) * cos(lon12))
h1 = atan(y1, x1) # heading of path
y12 = np.sqrt((cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(lon12))**2 + \
(cos(lat2)*sin(lon12))**2)
x12 = sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon12)
d12 = atan(y12, x12) # angular distance in radians
if not self._rad:
ll1 = np.rad2deg(ll1)
h1, d12 = np.rad2deg((h1, d12))
return ll1, h1, d12
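# Worked example (illustrative): for two points on the equator, ll1 = (0, 0)
# and ll2 = (90, 0) in degrees, the formulas above give a heading of 90
# degrees (due east) and an angular distance of 90 degrees, i.e. a quarter of
# a great circle.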
def Get_geodesic_waypoints(self, ll1, h1, d12):
"""
Return an array of waypoints on the geodesic line given the start location,
the heading, and the distance. The array will be in the native units
(radians or degrees).
Math is found at http://en.wikipedia.org/wiki/Great-circle_navigation
Parameters
----------
ll1 : tuple or list of floats
The longitude and latitude of the start point
h1 : float
Heading (angle from North) from the start point
d12 : float
Angular distance to destination point
"""
# Notation: *0 refers to node 0 where great circle intersects equator
# *1 refers to first point
# *01 refers to angle between node 0 and point 1
# Angular distance is the angle subtended at the Earth's centre by the two
# points. To get the actual distance, multiply the angle (in radians) by
# the Earth's radius.
# Heading is the angle between the path and true North.
if not self._rad:
ll1 = np.deg2rad(ll1)
h1, d12 = np.deg2rad((h1, d12))
lon1, lat1 = ll1
# simplify math notation
cos = np.cos
sin = np.sin
tan = np.tan
atan = np.arctan2 # handles quadrants better than np.arctan
# calculate where great circle crosses equator (node 0)
y0 = sin(h1) * cos(lat1)
x0 = np.sqrt(cos(h1)**2 + (sin(h1) * sin(lat1))**2)
h0 = atan(y0, x0) # heading at crossing point
d01 = atan(tan(lat1), cos(h1)) # angular distance from node 0 to pt 1
lon01 = atan(sin(h0) * sin(d01), cos(d01))
lon0 = lon1 - lon01
# create array of angular distances from node 0 to use
ds = np.linspace(d01, d01+d12, self.RESOLUTION)
# now calculate the latitudes and longitudes
ys = cos(h0) * sin(ds)
xs = np.sqrt(cos(ds)**2 + (sin(h0) * sin(ds))**2)
lats = atan(ys, xs)
lons = atan(sin(h0) * sin(ds), cos(ds)) + lon0
if (np.abs(lons) > np.pi).any(): # check if any points outside map
lons = (lons + np.pi) % (2. * np.pi) - np.pi
result = np.column_stack([lons, lats]) # lons (x) go first
if not self._rad: result =
|
np.rad2deg(result)
|
numpy.rad2deg
|
from pathlib import Path
import math
import os
import subprocess
import shutil
import numpy as np
from .Shapes.shape2D import Shape2D, BoundingRectangle
from .point_list import PointList
from .cell_list import CellList
from .face_list import FaceList
from .boundary_editor import BoundaryEditor
from .printer import Printer
class Stack():
def __init__(self, cell_edge: float, verbose: bool = False):
"""Specifies a volume that is made of a stack of solids
Parameters
----------
cell_edge : float
Used as x and y dimensions for all cells of the mesh.
verbose : bool, optional
If True, outputs information about the progress of mesh construction, by default False.
"""
self.edge = cell_edge
self._print = Printer(verbose)
self.verbose = verbose
self.br = BoundingRectangle(0., 0., 0., 0.)
self.z_cell_coords = [0.]
self.shapes = []
self.n_layers = []
def add_solid(self, shape2d: Shape2D, height: float = None, n_layers: int = None):
if height is None and n_layers is None:
raise ValueError('Either height or n_layers must be specified')
if n_layers is not None and not np.issubdtype(type(n_layers), np.integer):
raise TypeError('n_layers must be an integer or None')
if n_layers is None:
n_layers = int(round(height / self.edge))
if height is None:
height = self.edge * n_layers
self.shapes.append(shape2d)
self.n_layers.append(n_layers)
# Append new z_cell_coords
current_top = self.z_cell_coords[-1]
new_top = current_top + height
vertical_spacing = np.linspace(current_top, new_top, n_layers + 1).tolist()
self.z_cell_coords.extend(vertical_spacing[1:])
# Adjust bounding rectangle
sbr = shape2d.bounding_rectangle
self.br = BoundingRectangle(
min_x=min(self.br.min_x, sbr.min_x),
max_x=max(self.br.max_x, sbr.max_x),
min_y=min(self.br.min_y, sbr.min_y),
max_y=max(self.br.max_y, sbr.max_y),
)
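# Usage sketch (illustrative; base_plate and column stand in for Shape2D
# instances from the package's Shapes module):
#   stack = Stack(cell_edge=0.1)
#   stack.add_solid(base_plate, height=0.5)   # -> 5 layers of 0.1
#   stack.add_solid(column, n_layers=20)      # -> height of 20 * 0.1
#   stack.build_mesh()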
def build_mesh(self):
self._print("Generating list of active cells")
self.isin = self._who_is_in()
self._print("Generating wireframe")
self.vertex = self._build_vertex()
self._print("Generating list of active points")
self.pointlist = PointList(self.isin, self.vertex)
self._print("Indexing active cells")
self.celllist = CellList(self.isin, self.pointlist)
self._print(f"Number of active cells: {len(self.celllist)} of {self.isin.flatten().shape[0]}")
self._print("Generating list of faces")
self.facelist = FaceList(
isin=self.isin,
celllist=self.celllist,
n_layers=self.n_layers,
verbose=self.verbose,
)
def get_boundary_editor(self):
return BoundaryEditor(bound_list=self.facelist.boundary_list, point_list=self.pointlist)
def export(self, mesh_dir: str, run_renumberMesh: bool = False):
if os.path.exists(mesh_dir):
shutil.rmtree(mesh_dir)
Path(mesh_dir).mkdir(parents=True, exist_ok=False)
self._print("Exporting point list")
self.pointlist.export(mesh_dir)
self._print("Exporting face list")
self.facelist.export(mesh_dir)
self._print("Done exporting")
if run_renumberMesh:
self._print("Running renumberMesh")
case_dir = os.path.join(mesh_dir, '..', '..')
# Had to add this chdir here, because running renumberMesh
# with -case was causing problems while reading csv files
os.chdir(case_dir)
process = subprocess.Popen(
['renumberMesh', '-overwrite'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = process.communicate()
if process.poll() != 0:
print(stdout)
raise RuntimeError(stderr)
self._print(stdout)
self._print("renumberMesh has finished")
@property
def n_patches(self):
"""
Number of patches
"""
return self.facelist.n_boundaries
def _who_is_in(self):
# Create the horizontal grid
x_cell_centers = self._get_vertical_cell_centers(self.br.min_x, self.br.max_x)
y_cell_centers = self._get_vertical_cell_centers(self.br.min_y, self.br.max_y)
cx, cy = np.meshgrid(x_cell_centers, y_cell_centers)
centers_2D = np.array([cx, cy])
centers_2D = np.swapaxes(centers_2D, 0, 2)
total_n_layers = sum(self.n_layers)
isin = np.zeros((centers_2D.shape[0], centers_2D.shape[1], total_n_layers), dtype=bool)
k = 0
for shape2d, n_layers in zip(self.shapes, self.n_layers):
shape2d_isin = shape2d.who_is_in(centers_2D)
isin[:, :, k:k+n_layers] = shape2d_isin[:, :, np.newaxis]
k += n_layers
return isin
def _get_vertical_cell_centers(self, min_c, max_c):
n_cells = math.ceil((max_c - min_c) / self.edge)
half_span = (n_cells - 1) * self.edge / 2.
cell_coords =
|
np.linspace(-half_span, half_span, n_cells)
|
numpy.linspace
|
import numpy as np
a =
|
np.arange(15)
|
numpy.arange
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Orthogonal Random Forest.
Orthogonal Random Forest (ORF) is an algorithm for heterogeneous treatment effect
estimation. Orthogonal Random Forest combines orthogonalization,
a technique that effectively removes the confounding effect in two-stage estimation,
with generalized random forests, a flexible method for estimating treatment
effect heterogeneity.
This file consists of classes that implement the following variants of the ORF method:
- The `ContinuousTreatmentOrthoForest`, a two-forest approach for learning continuous treatment effects
using kernel two stage estimation.
- The `DiscreteTreatmentOrthoForest`, a two-forest approach for learning discrete treatment effects
using kernel two stage estimation.
For more details on these methods, see our paper [Oprescu2018]_.
"""
import abc
import inspect
import numpy as np
import warnings
from joblib import Parallel, delayed
from sklearn import clone
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LassoCV, Lasso, LinearRegression, LogisticRegression, \
LogisticRegressionCV, ElasticNet
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, PolynomialFeatures
from sklearn.utils import check_random_state, check_array, column_or_1d
from .cate_estimator import LinearCateEstimator
from .causal_tree import CausalTree
from .utilities import reshape_Y_T, MAX_RAND_SEED, check_inputs, WeightedModelWrapper, cross_product
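# Hypothetical usage sketch (constructor arguments and method signatures are
# assumed for illustration and are not taken from this file):
#   est = ContinuousTreatmentOrthoForest(n_trees=100, min_leaf_size=10)
#   est.fit(Y, T, X, W)            # outcome, treatment, features, confounders
#   effects = est.effect(X_test)   # heterogeneous treatment effect estimates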
def _build_tree_in_parallel(Y, T, X, W,
nuisance_estimator,
parameter_estimator,
moment_and_mean_gradient_estimator,
min_leaf_size, max_depth, random_state):
tree = CausalTree(nuisance_estimator=nuisance_estimator,
parameter_estimator=parameter_estimator,
moment_and_mean_gradient_estimator=moment_and_mean_gradient_estimator,
min_leaf_size=min_leaf_size,
max_depth=max_depth,
random_state=random_state)
# Create splits of causal tree
tree.create_splits(Y, T, X, W)
return tree
def _fit_weighted_pipeline(model_instance, X, y, sample_weight):
if not isinstance(model_instance, Pipeline):
model_instance.fit(X, y, sample_weight)
else:
last_step_name = model_instance.steps[-1][0]
model_instance.fit(X, y, **{"{0}__sample_weight".format(last_step_name): sample_weight})
def _cross_fit(model_instance, X, y, split_indices, sample_weight=None, predict_func_name='predict'):
model_instance1 = clone(model_instance, safe=False)
model_instance2 = clone(model_instance, safe=False)
split_1, split_2 = split_indices
predict_func1 = getattr(model_instance1, predict_func_name)
predict_func2 = getattr(model_instance2, predict_func_name)
if sample_weight is None:
model_instance2.fit(X[split_2], y[split_2])
pred_1 = predict_func2(X[split_1])
model_instance1.fit(X[split_1], y[split_1])
pred_2 = predict_func1(X[split_2])
else:
_fit_weighted_pipeline(model_instance2, X[split_2], y[split_2], sample_weight[split_2])
pred_1 = predict_func2(X[split_1])
_fit_weighted_pipeline(model_instance1, X[split_1], y[split_1], sample_weight[split_1])
pred_2 = predict_func1(X[split_2])
# Must make sure indices are merged correctly
sorted_split_indices = np.argsort(np.concatenate(split_indices), kind='mergesort')
return np.concatenate((pred_1, pred_2))[sorted_split_indices]
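# Illustrative note (not part of the original module): _cross_fit returns
# out-of-fold predictions -- each half of the data is predicted by a clone
# fitted on the other half -- which is what orthogonalization needs to avoid
# overfitting bias. For example, with
#   split_indices = (np.arange(0, 50), np.arange(50, 100))
# the first 50 returned predictions come from a model trained on rows 50..99
# and the last 50 from a model trained on rows 0..49.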
def _group_cross_fit(model_instance, X, y, t, split_indices, sample_weight=None, predict_func_name='predict'):
# Require group assignment t to be one-hot-encoded
model_instance1 = clone(model_instance, safe=False)
model_instance2 = clone(model_instance, safe=False)
split_1, split_2 = split_indices
n_groups = t.shape[1]
predict_func1 = getattr(model_instance1, predict_func_name)
predict_func2 = getattr(model_instance2, predict_func_name)
Xt = np.concatenate((X, t), axis=1)
# Define an inner function that iterates over group predictions
def group_predict(split, predict_func):
group_pred = []
zero_t = np.zeros((len(split), n_groups - 1))
for i in range(n_groups):
pred_i = predict_func(
np.concatenate((X[split], np.insert(zero_t, i, 1, axis=1)), axis=1)
)
group_pred.append(pred_i)
# Convert rows to columns
return np.asarray(group_pred).T
# Get predictions for the 2 splits
if sample_weight is None:
model_instance2.fit(Xt[split_2], y[split_2])
pred_1 = group_predict(split_1, predict_func2)
model_instance1.fit(Xt[split_1], y[split_1])
pred_2 = group_predict(split_2, predict_func1)
else:
_fit_weighted_pipeline(model_instance2, Xt[split_2], y[split_2], sample_weight[split_2])
pred_1 = group_predict(split_1, predict_func2)
_fit_weighted_pipeline(model_instance1, Xt[split_1], y[split_1], sample_weight[split_1])
pred_2 = group_predict(split_2, predict_func1)
# Must make sure indices are merged correctly
sorted_split_indices = np.argsort(np.concatenate(split_indices), kind='mergesort')
return
|
np.concatenate((pred_1, pred_2))
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import tempfile
import shutil
import atexit
import numpy as np
from nose.tools import eq_ as eq, assert_is_none, assert_is_instance, \
assert_raises
from numpy.testing import assert_array_equal
from zarr.creation import array, empty, zeros, ones, full, open_array, \
empty_like, zeros_like, ones_like, full_like, open_like, create
from zarr.sync import ThreadSynchronizer
from zarr.core import Array
from zarr.storage import DirectoryStore
from zarr.hierarchy import open_group
from zarr.errors import PermissionError
from zarr.codecs import Zlib
# something bcolz-like
class MockBcolzArray(object):
def __init__(self, data, chunklen):
self.data = data
self.chunklen = chunklen
def __getattr__(self, item):
return getattr(self.data, item)
def __getitem__(self, item):
return self.data[item]
# something h5py-like
class MockH5pyDataset(object):
def __init__(self, data, chunks):
self.data = data
self.chunks = chunks
def __getattr__(self, item):
return getattr(self.data, item)
def __getitem__(self, item):
return self.data[item]
def test_array():
# with numpy array
a = np.arange(100)
z = array(a, chunks=10)
eq(a.shape, z.shape)
eq(a.dtype, z.dtype)
assert_array_equal(a, z[:])
# with array-like
a = list(range(100))
z = array(a, chunks=10)
eq((100,), z.shape)
eq(np.asarray(a).dtype, z.dtype)
assert_array_equal(np.asarray(a), z[:])
# with another zarr array
z2 = array(z)
eq(z.shape, z2.shape)
eq(z.chunks, z2.chunks)
eq(z.dtype, z2.dtype)
assert_array_equal(z[:], z2[:])
# with chunky array-likes
b = np.arange(1000).reshape(100, 10)
c = MockBcolzArray(b, 10)
z3 = array(c)
eq(c.shape, z3.shape)
eq((10, 10), z3.chunks)
b = np.arange(1000).reshape(100, 10)
c = MockH5pyDataset(b, chunks=(10, 2))
z4 = array(c)
eq(c.shape, z4.shape)
eq((10, 2), z4.chunks)
c = MockH5pyDataset(b, chunks=None)
z5 = array(c)
eq(c.shape, z5.shape)
assert_is_instance(z5.chunks, tuple)
# with dtype=None
a = np.arange(100, dtype='i4')
z = array(a, dtype=None)
assert_array_equal(a[:], z[:])
eq(a.dtype, z.dtype)
def test_empty():
z = empty(100, chunks=10)
eq((100,), z.shape)
eq((10,), z.chunks)
def test_zeros():
z = zeros(100, chunks=10)
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.zeros(100), z[:])
def test_ones():
z = ones(100, chunks=10)
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.ones(100), z[:])
def test_full():
z = full(100, chunks=10, fill_value=42, dtype='i4')
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.full(100, fill_value=42, dtype='i4'), z[:])
# nan
z = full(100, chunks=10, fill_value=np.nan, dtype='f8')
assert np.all(np.isnan(z[:]))
# "NaN"
z = full(100, chunks=10, fill_value='NaN', dtype='U3')
assert np.all(z[:] == 'NaN')
def test_open_array():
store = 'example'
# mode == 'w'
z = open_array(store, mode='w', shape=100, chunks=10)
z[:] = 42
assert_is_instance(z, Array)
assert_is_instance(z.store, DirectoryStore)
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.full(100, fill_value=42), z[:])
# mode in 'r', 'r+'
open_group('example_group', mode='w')
for mode in 'r', 'r+':
with assert_raises(KeyError):
open_array('doesnotexist', mode=mode)
with assert_raises(KeyError):
open_array('example_group', mode=mode)
z = open_array(store, mode='r')
assert_is_instance(z, Array)
assert_is_instance(z.store, DirectoryStore)
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.full(100, fill_value=42), z[:])
with assert_raises(PermissionError):
z[:] = 43
z = open_array(store, mode='r+')
assert_is_instance(z, Array)
assert_is_instance(z.store, DirectoryStore)
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.full(100, fill_value=42), z[:])
z[:] = 43
assert_array_equal(np.full(100, fill_value=43), z[:])
# mode == 'a'
shutil.rmtree(store)
z = open_array(store, mode='a', shape=100, chunks=10)
z[:] = 42
assert_is_instance(z, Array)
assert_is_instance(z.store, DirectoryStore)
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.full(100, fill_value=42), z[:])
with assert_raises(KeyError):
open_array('example_group', mode='a')
# mode in 'w-', 'x'
for mode in 'w-', 'x':
shutil.rmtree(store)
z = open_array(store, mode=mode, shape=100, chunks=10)
z[:] = 42
assert_is_instance(z, Array)
assert_is_instance(z.store, DirectoryStore)
eq((100,), z.shape)
eq((10,), z.chunks)
assert_array_equal(np.full(100, fill_value=42), z[:])
with assert_raises(KeyError):
open_array(store, mode=mode)
with assert_raises(KeyError):
open_array('example_group', mode=mode)
# with synchronizer
z = open_array(store, synchronizer=ThreadSynchronizer())
assert_is_instance(z, Array)
# with path
z = open_array(store, shape=100, path='foo/bar', mode='w')
assert_is_instance(z, Array)
eq('foo/bar', z.path)
def test_empty_like():
# zarr array
z = empty(100, chunks=10, dtype='f4', compressor=Zlib(5),
order='F')
z2 = empty_like(z)
eq(z.shape, z2.shape)
eq(z.chunks, z2.chunks)
eq(z.dtype, z2.dtype)
eq(z.compressor.get_config(), z2.compressor.get_config())
eq(z.fill_value, z2.fill_value)
eq(z.order, z2.order)
# numpy array
a = np.empty(100, dtype='f4')
z3 = empty_like(a)
eq(a.shape, z3.shape)
eq((100,), z3.chunks)
eq(a.dtype, z3.dtype)
assert_is_none(z3.fill_value)
# something slightly silly
a = [0] * 100
z3 = empty_like(a, shape=200)
eq((200,), z3.shape)
# other array-likes
b = np.arange(1000).reshape(100, 10)
c = MockBcolzArray(b, 10)
z = empty_like(c)
eq(b.shape, z.shape)
eq((10, 10), z.chunks)
c = MockH5pyDataset(b, chunks=(10, 2))
z = empty_like(c)
eq(b.shape, z.shape)
eq((10, 2), z.chunks)
c = MockH5pyDataset(b, chunks=None)
z = empty_like(c)
eq(b.shape, z.shape)
assert_is_instance(z.chunks, tuple)
def test_zeros_like():
# zarr array
z = zeros(100, chunks=10, dtype='f4', compressor=Zlib(5),
order='F')
z2 = zeros_like(z)
eq(z.shape, z2.shape)
eq(z.chunks, z2.chunks)
eq(z.dtype, z2.dtype)
eq(z.compressor.get_config(), z2.compressor.get_config())
eq(z.fill_value, z2.fill_value)
eq(z.order, z2.order)
# numpy array
a = np.empty(100, dtype='f4')
z3 = zeros_like(a, chunks=10)
eq(a.shape, z3.shape)
eq((10,), z3.chunks)
eq(a.dtype, z3.dtype)
eq(0, z3.fill_value)
def test_ones_like():
# zarr array
z = ones(100, chunks=10, dtype='f4', compressor=Zlib(5),
order='F')
z2 = ones_like(z)
eq(z.shape, z2.shape)
eq(z.chunks, z2.chunks)
eq(z.dtype, z2.dtype)
eq(z.compressor.get_config(), z2.compressor.get_config())
eq(z.fill_value, z2.fill_value)
eq(z.order, z2.order)
# numpy array
a = np.empty(100, dtype='f4')
z3 = ones_like(a, chunks=10)
eq(a.shape, z3.shape)
eq((10,), z3.chunks)
eq(a.dtype, z3.dtype)
eq(1, z3.fill_value)
def test_full_like():
z = full(100, chunks=10, dtype='f4', compressor=Zlib(5),
fill_value=42, order='F')
z2 = full_like(z)
eq(z.shape, z2.shape)
eq(z.chunks, z2.chunks)
eq(z.dtype, z2.dtype)
eq(z.compressor.get_config(), z2.compressor.get_config())
eq(z.fill_value, z2.fill_value)
eq(z.order, z2.order)
# numpy array
a =
|
np.empty(100, dtype='f4')
|
numpy.empty
|
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_sym, sym_storage=True)
_ok = nm.allclose(vt, _vt_sym, rtol=0.0, atol=1e-14)
self.report('volumetric tensor sym: %s' % _ok)
ok = ok and _ok
dev = tn.get_deviator(a_full, sym_storage=False)
_ok = nm.allclose(dev, _dev_full, rtol=0.0, atol=1e-14)
self.report('deviator full: %s' % _ok)
ok = ok and _ok
aux = (dev *
|
nm.transpose(dev, (0, 2, 1))
|
numpy.transpose
|
from skmultiflow.data.observer.result_observer import MetricsResultObserver
from mockito import kwargs, verify, when, mock
from mockito.matchers import any
import numpy as np
def test_metrics_result_observer():
metrics = mock()
reporter = mock()
when(metrics).get_accuracy().thenReturn(0.9)
when(reporter).report(any, any).thenReturn(metrics.get_accuracy())
result_observer = MetricsResultObserver(metrics, reporter)
y_true = np.concatenate((
|
np.ones(85)
|
numpy.ones
|
import numpy as np
from numpy.core.numeric import zeros_like
from scipy.ndimage.measurements import center_of_mass
import cv2 as cv
import scipy.ndimage as ndi
from .. import *
def get_morphology(tcfcell:TCFcell):
# get cellmask slice
cellmask = tcfcell['mask']
def _itr_or(array):
# boolean array
z = array.shape[0]
if z == 1:
return np.squeeze(array,0)
if z == 2:
return np.logical_or(array[0,:,:],array[1,:,:],out=array[0,:,:])
else:
zhalf = z//2
return np.logical_or(_itr_or(array[:zhalf]),_itr_or(array[zhalf:]))
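# Illustrative note: _itr_or collapses a (z, y, x) boolean stack into a single
# (y, x) mask by OR-ing the two halves recursively, so an 8-slice stack is
# reduced 8 -> 4 -> 2 -> 1.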
cellmask_slice = ndi.binary_fill_holes(_itr_or(cellmask)).astype(np.uint8)
cellmask_slice[cellmask_slice > 0] = 255
# find morphologies
contour, hierarchy = cv.findContours(cellmask_slice, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
cnt = contour[0]
center_rect,size_rect,angle_rect = cv.minAreaRect(cnt) # angle is in degree
tcfcell['centerR'] = center_rect
tcfcell['sizeR'] = size_rect
tcfcell['angleR'] = angle_rect
if len(cnt) > 5:
ellipse = cv.fitEllipse(cnt)
tcfcell['centerE'] = ellipse[0]
tcfcell['rotE'] = ellipse[2]
tcfcell['widthE'] = ellipse[1][0]
tcfcell['heightE'] = ellipse[1][1]
def get_ellipsoid(tcfcell:TCFcell):
cellmask = tcfcell['mask'].astype(np.uint8)
cellmask[cellmask > 0] = 255
# find contours
points = []
for z in range(cellmask.shape[0]):
cellmask_slice = cellmask[z,...]
contour, hierarchy = cv.findContours(cellmask_slice,cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
if len(contour) == 0:
continue
else:
points_slice = np.empty((contour[0].shape[0],3),dtype=np.uint16)
points_slice[:,0] = z
points_slice[:,1] = contour[0][:,0,1] #y
points_slice[:,2] = contour[0][:,0,0] #x
points.append(points_slice)
points = np.concatenate(points)
center, evecs, radii = ellipsoid_fit(points)
tcfcell['center_Ellipsoid'] = tuple(center)
tcfcell['evecs_Ellipsoid'] = tuple(evecs)
tcfcell['radii_Ellipsoid'] = tuple(radii)
# https://github.com/aleksandrbazhin/ellipsoid_fit_python
def ellipsoid_fit(X):
x = X[:, 0]
y = X[:, 1]
z = X[:, 2]
D = np.array([x * x + y * y - 2 * z * z,
x * x + z * z - 2 * y * y,
2 * x * y,
2 * x * z,
2 * y * z,
2 * x,
2 * y,
2 * z,
1 - 0 * x])
d2 = np.array(x * x + y * y + z * z).T # rhs for LLSQ
u = np.linalg.solve(D.dot(D.T), D.dot(d2))
a = np.array([u[0] + 1 * u[1] - 1])
b = np.array([u[0] - 2 * u[1] - 1])
c = np.array([u[1] - 2 * u[0] - 1])
v = np.concatenate([a, b, c, u[2:]], axis=0).flatten()
A = np.array([[v[0], v[3], v[4], v[6]],
[v[3], v[1], v[5], v[7]],
[v[4], v[5], v[2], v[8]],
[v[6], v[7], v[8], v[9]]])
center = np.linalg.solve(- A[:3, :3], v[6:9])
translation_matrix =
|
np.eye(4)
|
numpy.eye
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
import math
image = Image.open('image.jpeg', 'r') # read the image
image = image.convert('L') # convert the image to greyscale
data = np.asarray(image) # convert the image to an array
fig,ax = plt.subplots(1)
ax.imshow(data, cmap='gray')
n_rows = len(data)
n_columns = len(data[0])
left = []
top = []
for i in range(0,n_rows):
for j in range(0, n_columns):
if(data[i][j] > 120):
top.append(i)
left.append(j)
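# Equivalent vectorized form (for illustration only):
#   top, left = np.nonzero(data > 120)
# would give the same row/column indices without the Python loops.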
xmin = np.min(left)
xmax = np.max(left)
ymin = np.min(top)
ymax =
|
np.max(top)
|
numpy.max
|
import os
import numpy as np
import astropy.table as table
import astropy
import astropy.units as u
import orbitize
import orbitize.read_input as read_input
import orbitize.kepler as kepler
import orbitize.system as system
import orbitize.basis as basis
import orbitize.priors as priors
import orbitize.driver as driver
### Skip this test on Windows since REBOUND doesn't work on Windows ###
import sys
import pytest
if sys.platform.startswith("win"):
pytest.skip("Skipping REBOUND tests on Windows", allow_module_level=True)
try:
import rebound
except ImportError:
pytest.skip("Skipping REBOUND tests because REBOUND is not installed", allow_module_level=True)
def test_1planet():
"""
Sanity check that things agree for 1 planet case
"""
# generate a planet orbit
sma = 1
ecc = 0.1
inc = np.radians(45)
aop = np.radians(45)
pan = np.radians(45)
tau = 0.5
plx = 1
mtot = 1
tau_ref_epoch = 0
mjup = u.Mjup.to(u.Msun)
mass_b = 12 * mjup
epochs = np.linspace(0, 300, 100) + tau_ref_epoch # nearly the full period, MJD
ra_model, dec_model, vz_model = kepler.calc_orbit(
epochs, sma, ecc, inc, aop, pan, tau, plx, mtot,
tau_ref_epoch=tau_ref_epoch
)
# generate some fake measurements just to feed into system.py to test bookkeeping
t = table.Table([epochs, np.ones(epochs.shape, dtype=int), ra_model, np.zeros(ra_model.shape), dec_model, np.zeros(dec_model.shape)],
names=["epoch", "object" ,"raoff", "raoff_err","decoff","decoff_err"])
filename = os.path.join(orbitize.DATADIR, "rebound_1planet.csv")
t.write(filename)
# create the orbitize system and generate model predictions using the ground truth
astrom_dat = read_input.read_file(filename)
sys = system.System(1, astrom_dat, mtot, plx, tau_ref_epoch=tau_ref_epoch)
params = np.array([sma, ecc, inc, aop, pan, tau, plx, mtot])
radec_orbitize, _ = sys.compute_model(params)
ra_orb = radec_orbitize[:, 0]
dec_orb = radec_orbitize[:, 1]
# now project the orbit with rebound
manom = basis.tau_to_manom(epochs[0], sma, mtot, tau, tau_ref_epoch)
sim = rebound.Simulation()
sim.units = ('yr', 'AU', 'Msun')
# add star
sim.add(m=mtot - mass_b)
# add one planet
sim.add(m=mass_b, a=sma, e=ecc, M=manom, omega=aop, Omega=pan+np.pi/2, inc=inc)
ps = sim.particles
sim.move_to_com()
# Use the IAS15 integrator, with the timestep being < 5% of the inner planet's orbital period
sim.integrator = "ias15"
sim.dt = ps[1].P/1000.
# integrate and measure star/planet separation
ra_reb = []
dec_reb = []
for t in epochs:
sim.integrate(t/365.25)
ra_reb.append(-(ps[1].x - ps[0].x)) # ra is negative x
dec_reb.append(ps[1].y - ps[0].y)
ra_reb = np.array(ra_reb)
dec_reb = np.array(dec_reb)
diff_ra = ra_reb - ra_orb/plx
diff_dec = dec_reb - dec_orb/plx
assert np.all(np.abs(diff_ra) < 1e-9)
assert np.all(np.abs(diff_dec) < 1e-9)
# clean up
os.system('rm {}'.format(filename))
def test_2planet_massive():
"""
Compare multiplanet to rebound for planets with mass.
"""
# generate a planet orbit
mjup = u.Mjup.to(u.Msun)
mass_b = 12 * mjup
mass_c = 9 * mjup
params = np.array([10, 0.1, np.radians(45), np.radians(45), np.radians(45), 0.5,
3, 0.1, np.radians(45), np.radians(190), np.radians(45), 0.2,
50, mass_b, mass_c, 1.5 - mass_b - mass_c])
params_noc = np.array([10, 0.1, np.radians(45), np.radians(45), np.radians(45), 0.5,
3, 0.1, np.radians(45), np.radians(190), np.radians(45), 0.2,
50, mass_b, 0, 1.5 - mass_b])
tau_ref_epoch = 0
epochs = np.linspace(0, 365.25*10, 100) + tau_ref_epoch # nearly the full period, MJD
# doesn't matter that this is right, just needs to be the same size. below doesn't include effect of c
# just want to generate some measurements of planet b to test compute_model
b_ra_model, b_dec_model, b_vz_model = kepler.calc_orbit(
epochs, params[0], params[1], params[2], params[3], params[4], params[5],
params[-2], params[-1], tau_ref_epoch=tau_ref_epoch
)
# generate some fake measurements of planet b, just to feed into system.py to test bookkeeping
t = table.Table([epochs, np.ones(epochs.shape, dtype=int), b_ra_model, np.zeros(b_ra_model.shape), b_dec_model, np.zeros(b_dec_model.shape)],
names=["epoch", "object" ,"raoff", "raoff_err","decoff","decoff_err"])
filename = os.path.join(orbitize.DATADIR, "rebound_2planet_outer.csv")
t.write(filename)
#### TEST THE OUTER PLANET ####
# create the orbitize system and generate model predictions using the ground truth
astrom_dat = read_input.read_file(filename)
sys = system.System(2, astrom_dat, params[-1], params[-4], tau_ref_epoch=tau_ref_epoch, fit_secondary_mass=True)
# generate measurement
radec_orbitize, _ = sys.compute_model(params)
b_ra_orb = radec_orbitize[:, 0]
b_dec_orb = radec_orbitize[:, 1]
# debug, generate measurement without c having any mass
radec_orb_noc, _ = sys.compute_model(params_noc)
b_ra_orb_noc = radec_orb_noc[:,0]
b_dec_orb_noc = radec_orb_noc[:,1]
# check that planet c's perturbation is imprinted (nonzero)
assert np.all(b_ra_orb_noc != b_ra_orb)
# now project the orbit with rebound
b_manom = basis.tau_to_manom(epochs[0], params[0], params[-1]+params[-3], params[5], tau_ref_epoch)
c_manom = basis.tau_to_manom(epochs[0], params[0+6], params[-1]+params[-2], params[5+6], tau_ref_epoch)
sim = rebound.Simulation()
sim.units = ('yr', 'AU', 'Msun')
# add star
sim.add(m=params[-1])
# add two planets
sim.add(m=mass_c, a=params[0+6], e=params[1+6], M=c_manom, omega=params[3+6], Omega=params[4+6]+np.pi/2, inc=params[2+6])
sim.add(m=mass_b, a=params[0], e=params[1], M=b_manom, omega=params[3], Omega=params[4]+np.pi/2, inc=params[2])
ps = sim.particles
sim.move_to_com()
# Use the IAS15 integrator, with the timestep being < 5% of the inner planet's orbital period
sim.integrator = "ias15"
sim.dt = ps[1].P/1000.
# integrate and measure star/planet separation
b_ra_reb = []
b_dec_reb = []
for t in epochs:
sim.integrate(t/365.25)
b_ra_reb.append(-(ps[2].x - ps[0].x)) # ra is negative x
b_dec_reb.append(ps[2].y - ps[0].y)
b_ra_reb = np.array(b_ra_reb)
b_dec_reb = np.array(b_dec_reb)
diff_ra = b_ra_reb - b_ra_orb/params[6*2]
diff_dec = b_dec_reb - b_dec_orb/params[6*2]
# we placed the planets far apart to minimize secular interactions but there are still some, so relax precision
assert np.all(np.abs(diff_ra)/(params[0]) < 1e-3)
assert np.all(np.abs(diff_dec)/(params[0]) < 1e-3)
# clean up
os.system('rm {}'.format(filename))
###### NOW TEST THE INNER PLANET #######
# generate some fake measurements of planet c, just to feed into system.py to test bookkeeping
t = table.Table([epochs, np.ones(epochs.shape, dtype=int)*2, b_ra_model, np.zeros(b_ra_model.shape), b_dec_model, np.zeros(b_dec_model.shape)],
names=["epoch", "object" ,"raoff", "raoff_err","decoff","decoff_err"])
filename = os.path.join(orbitize.DATADIR, "rebound_2planet_inner.csv")
t.write(filename)
# create the orbitize system and generate model predictions using the ground truth
astrom_dat = read_input.read_file(filename)
sys = system.System(2, astrom_dat, params[-1], params[-2], tau_ref_epoch=tau_ref_epoch, fit_secondary_mass=True)
# generate measurement
radec_orbitize, _ = sys.compute_model(params)
c_ra_orb = radec_orbitize[:, 0]
c_dec_orb = radec_orbitize[:, 1]
# start the REBOUND sim again
sim = rebound.Simulation()
sim.units = ('yr', 'AU', 'Msun')
# add star
sim.add(m=params[-1])
# add two planets
sim.add(m=mass_c, a=params[0+6], e=params[1+6], M=c_manom, omega=params[3+6], Omega=params[4+6]+np.pi/2, inc=params[2+6])
sim.add(m=mass_b, a=params[0], e=params[1], M=b_manom, omega=params[3], Omega=params[4]+np.pi/2, inc=params[2])
ps = sim.particles
sim.move_to_com()
# Use the IAS15 integrator, with the timestep being < 5% of the inner planet's orbital period
sim.integrator = "ias15"
sim.dt = ps[1].P/1000.
# integrate and measure star/planet separation
c_ra_reb = []
c_dec_reb = []
for t in epochs:
sim.integrate(t/365.25)
c_ra_reb.append(-(ps[1].x - ps[0].x)) # ra is negative x
c_dec_reb.append(ps[1].y - ps[0].y)
c_ra_reb = np.array(c_ra_reb)
c_dec_reb = np.array(c_dec_reb)
diff_ra = c_ra_reb - c_ra_orb/params[6*2]
diff_dec = c_dec_reb - c_dec_orb/params[6*2]
# planet is 3 times closer, so roughly 3 times larger secular errors.
assert np.all(np.abs(diff_ra)/(params[0]) < 3e-3)
assert np.all(np.abs(diff_dec)/(params[0]) < 3e-3)
# clean up
os.system('rm {}'.format(filename))
def test_2planet_massive_reverse_order():
"""
Compare multiplanet to rebound for planets with mass. reverse planet indicies to check bookkeeping.
"""
# generate a planet orbit
mjup = u.Mjup.to(u.Msun)
mass_b = 12 * mjup
mass_c = 9 * mjup
params = np.array([3, 0.1, np.radians(45), np.radians(190), np.radians(45), 0.2,
10, 0.1, np.radians(45), np.radians(45), np.radians(45), 0.5,
50, mass_c, mass_b, 1.5 - mass_b - mass_c])
tau_ref_epoch = 0
epochs =
|
np.linspace(0, 365.25*10, 100)
|
numpy.linspace
|
# -*- coding: utf-8 -*-
"""
test_utils
~~~~~~~~~~
Test `utils` module of `crosswalk` package.
"""
import numpy as np
import pytest
import crosswalk.utils as utils
@pytest.mark.parametrize("x",
[[1]*3,
np.ones(3),
np.arange(3),
np.zeros(3) + 0j,
np.array([np.nan, 0.0, 0.0]),
np.array([np.inf, 0.0, 0.0]),
np.array(['a', 'b', 'c'])])
@pytest.mark.parametrize("shape", [None, (3,), (4,)])
def test_is_numerical_array(x, shape):
ok = utils.is_numerical_array(x, shape=shape)
if (
not isinstance(x, np.ndarray) or
not np.issubdtype(x.dtype, np.number) or
np.isnan(x).any() or
np.isinf(x).any() or
(shape is not None and shape != (3,))
):
assert not ok
else:
assert ok
@pytest.mark.parametrize("sizes", [np.array([1, 2, 3])])
@pytest.mark.parametrize("indices", [[range(0, 1), range(1, 3), range(3, 6)]])
def test_sizes_to_indices(sizes, indices):
my_indices = utils.sizes_to_indices(sizes)
assert all([my_indices[i] == indices[i] for i in range(len(sizes))])
@pytest.mark.parametrize("x", [np.array([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("num_x", [3])
@pytest.mark.parametrize("x_sizes", [np.array([2, 2, 1])])
@pytest.mark.parametrize("unique_x", [np.array([1, 2, 3])])
def test_array_structure(x, num_x, x_sizes, unique_x):
my_num_x, my_x_sizes, my_unique_x = utils.array_structure(x)
assert my_num_x == num_x
assert (my_x_sizes == x_sizes).all()
assert (my_unique_x == unique_x).all()
@pytest.mark.parametrize("input", [None, np.ones(1)])
@pytest.mark.parametrize("default", [np.zeros(1)])
def test_default_input(input, default):
my_input = utils.default_input(input, default=default)
if input is None:
assert (my_input == 0.0).all()
else:
assert (my_input == 1.0).all()
@pytest.mark.parametrize("log_mean", [np.random.randn(5)])
@pytest.mark.parametrize("log_sd", [np.random.rand(5)])
def test_log_linear(log_mean, log_sd):
linear_mean, linear_sd = utils.log_to_linear(log_mean, log_sd)
my_log_mean, my_log_sd = utils.linear_to_log(linear_mean, linear_sd)
assert np.allclose(log_mean, my_log_mean)
assert np.allclose(log_sd, my_log_sd)
@pytest.mark.parametrize("logit_mean", [np.random.randn(5)])
@pytest.mark.parametrize("logit_sd", [np.random.rand(5)])
def test_logit_linear(logit_mean, logit_sd):
linear_mean, linear_sd = utils.logit_to_linear(logit_mean, logit_sd)
my_logit_mean, my_logit_sd = utils.linear_to_logit(linear_mean, linear_sd)
assert np.allclose(logit_mean, my_logit_mean)
assert
|
np.allclose(logit_sd, my_logit_sd)
|
numpy.allclose
|