import cv2
import os
import sys
import pcl
from math import copysign, log10
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import ConvexHull
import random
import math
import itertools
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KDTree
SYMMETRY_MEASURE_CLOUD_NORMALS_TRADEOFF= 0.2 # scaling factor for difference in normals wrt. difference in position for points,
# when computing difference between two point clouds.
# 0 => Only look at difference between point and its closest point
# Higher value => matching normals are more important than point distances
SMOOTHNESS_MEASURE_NUMBINS = 8 # Number of bins in histogram. We found 8 to work best quite consistently.
NNRADIUS = 0.004 # Used in Local Convexity and Smoothness measure for local neighborhood finding
dir_name=os.path.dirname(__file__)
image_path = os.path.join(dir_name, "object")
def get_image(filename):
path=os.path.join(image_path,filename)
if "depthcrop" in filename or 'maskcrop' in filename:
im = cv2.imread(path,0)
else:
im = cv2.imread(path)
return im
def apply_mask(mask,image):
i=image.copy()
i[mask == 0]=0
return i
def depth_to_meter(depth):
depth=depth.astype(float)
try:
return 1/((depth * 4 * -0.0030711016) + 3.3309495161)
except:
return 0.0
# just return mean of distances from points in cloud1 to their nearest neighbors in cloud2
def cloudAlignmentScoreDense(cloud1, cloud2):
tree = KDTree(cloud2)
N=cloud1.shape[0]
accum=0.0
result = tree.query(cloud1, k=1)
for i,(dist, ind) in enumerate(zip(*result)):
accum += dist[0]
return accum/N
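# Illustrative sketch, not in the original module: sklearn's KDTree.query returns
# all nearest-neighbour distances at once, so the per-point loop above can be
# collapsed into a single vectorized mean.
def cloudAlignmentScoreDenseVectorized(cloud1, cloud2):
    dist, _ = KDTree(cloud2).query(cloud1, k=1)  # dist has shape (N, 1)
    return float(dist[:, 0].mean())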
def cloudAlignmentScoreDenseWithNormalsNormalized(cloud1, normals1, cloud2, normals2, relweight, dnormalize):
tree = KDTree(cloud2)
N=cloud1.shape[0]
accum=0.0
result = tree.query(cloud1, k=1)
for i,(dist, ind) in enumerate(zip(*result)):
accum += dist[0] / dnormalize
dot = np.dot(normals1[i],normals2[ind[0]])
accum += relweight*(1.0 - dot)
return accum/N
def calculate_compactness_3d(points):
max_length = np.max(points,axis=0)[0]
min_length = np.min(points,axis=0)[0]
return points.shape[0] / (max(max_length-min_length, 0.0000001)**2)
def calculate_symmetry_3d(points_np, normals, relweight=SYMMETRY_MEASURE_CLOUD_NORMALS_TRADEOFF):
mins=points_np.min(axis=0)
maxes=points_np.max(axis=0)
ranges = maxes - mins
ranges /= ranges.sum()
score=0.0
for i,vector in enumerate(np.array([[-1,1,1],[1,-1,1],[1,1,-1]])):
dest=points_np*vector
normdest=normals*vector
overlap = cloudAlignmentScoreDenseWithNormalsNormalized(points_np, normals, dest, normdest, relweight, ranges[i])\
+cloudAlignmentScoreDenseWithNormalsNormalized(dest, normdest, points_np, normals, relweight, ranges[i])
score += ranges[i]*overlap
return -score
def calculate_global_convexity_3d(points):
hull=ConvexHull(points)
overlap= cloudAlignmentScoreDense(points, hull.points[hull.vertices])
return -overlap
def calculate_local_convexity_and_smoothness_3d(points, normals, NNradius=NNRADIUS, NUMBINS=SMOOTHNESS_MEASURE_NUMBINS):
tree = KDTree(points)
N=points.shape[0]
score=0.0
Hs=0.0
bins=np.ones(NUMBINS)
neighbors = tree.query_radius(points, NNradius)
for i,(p1,n1,neighbors_current) in enumerate(zip(points,normals,neighbors)):
binsum = NUMBINS
n2=(np.random.rand(3)+1)/2
n2=n2-np.dot(n1,n2)*n1
d = np.linalg.norm(n2)
n2 /= d
n3 = np.cross(n1,n2)
dot=0.0
nc=0
for j in neighbors_current:
if j==i:
continue
v = p1-points[j]
d = np.linalg.norm(v)
v/=d
dot = np.dot(n1,v)
if dot > 0.0:
nc += 1
dot1 = np.dot(n2,v)/d
dot2 = np.dot(n3,v)/d
theta = ((np.arctan2(dot1, dot2)+np.pi)/2)/np.pi # angle in range 0->1
binid = int((theta-0.001)*NUMBINS)
bins[binid] += 1
binsum+=1
score += (1.0*nc)/len(neighbors_current)
bins/=binsum
H=-(bins*np.log(bins)).sum()
if not np.isnan(H):
Hs += H
return score/N,Hs/N
def calculate_local_convexity_3d(points, normals, NNradius=NNRADIUS):
tree = KDTree(points)
N=points.shape[0]
score=0.0
neighbors = tree.query_radius(points, NNradius)
for i,(p,normal,neighbors_current) in enumerate(zip(points,normals,neighbors)):
dot=0.0
nc=0
for j in neighbors_current:
if j==i:
continue
v = p-points[j]
d = np.linalg.norm(v)
v/=d
dot = np.dot(normal,v)
if dot > 0.0:
nc += 1
score += (1.0*nc)/len(neighbors_current)
return score/N
def calculate_smoothness_3d(points, normals, NNradius=NNRADIUS, NUMBINS=SMOOTHNESS_MEASURE_NUMBINS):
Hs=0.0
tree = KDTree(points)
N=points.shape[0]
bins=np.ones(NUMBINS)
neighbors = tree.query_radius(points, NNradius)
for i, (p1,n1,neighbors_current) in enumerate(zip(points,normals,neighbors)):
#print("{:.2f}%".format(i*100/len(points)))
binsum = NUMBINS
n2=(np.random.rand(3)+1)/2
dot=np.dot(n1,n2)
n2=n2-dot*n1
d = np.linalg.norm(n2)
n2 /= d
n3 = np.cross(n1,n2)
for j in neighbors_current:
if j==i:
continue
p2=points[j]
v = p1-p2
d = np.linalg.norm(v)
v/=d
dot1 = np.dot(n2,v)/d
dot2 = np.dot(n3,v)/d
theta = ((np.arctan2(dot1, dot2)+np.pi)/2)/np.pi # angle in range 0->1
binid = int((theta-0.001)*NUMBINS)
bins[binid] += 1
binsum+=1
bins/=binsum
H=-(bins*np.log(bins)).sum()
if not np.isnan(H):
Hs += H
return Hs/N # high entropy = good.
def pad_image(depth,result_shape=(480,640)):
top=int((result_shape[0]-depth.shape[0])/2)
bottom=result_shape[0]-top-depth.shape[0]
left=int((result_shape[1]-depth.shape[1])/2)
right=result_shape[1]-left-depth.shape[1]
return np.pad(depth, ((top, bottom), (left, right)), 'constant')
def cvtDepthColor2Cloud(depth):
cameraMatrix = np.array(
[[525., 0., 320.0],
[0., 525., 240.0],
[0., 0., 1.]])
inv_fx = 1.0 / cameraMatrix[0, 0]
inv_fy = 1.0 / cameraMatrix[1, 1]
ox = cameraMatrix[0, 2]
oy = cameraMatrix[1, 2]
rows, cols = depth.shape
cloud = np.zeros((depth.size, 3), dtype=np.float32)
for y in range(rows):
for x in range(cols):
x1 = float(x)
y1 = float(y)
dist = depth[y][x]
cloud[y * cols + x][0] = np.float32((x1 - ox) * dist * inv_fx)
cloud[y * cols + x][1] = np.float32((y1 - oy) * dist * inv_fy)
cloud[y * cols + x][2] = np.float32(dist)
return pcl.PointCloud().from_array(cloud)
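# Illustrative sketch, not in the original module: the same pinhole back-projection
# as cvtDepthColor2Cloud written with numpy broadcasting instead of the per-pixel
# loop, assuming the same fixed intrinsics (fx = fy = 525, cx = 320, cy = 240).
def cvtDepthColor2CloudVectorized(depth):
    fx = fy = 525.0
    cx, cy = 320.0, 240.0
    rows, cols = depth.shape
    xs, ys = np.meshgrid(np.arange(cols), np.arange(rows))  # pixel coordinate grids
    z = depth.astype(np.float32)
    x = (xs - cx) * z / fx
    y = (ys - cy) * z / fy
    cloud = np.stack([x, y, z], axis=-1).reshape(-1, 3).astype(np.float32)
    return pcl.PointCloud().from_array(cloud)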
def depth_to_cloud(depth_original):
depth=depth_to_meter(depth_original)
depth=pad_image(depth)
depth_original=pad_image(depth_original)
cameraMatrix = np.array(
[[525., 0., 320.0],
[0., 525., 240.0],
[0., 0., 1.]])
inv_fx = 1.0 / cameraMatrix[0, 0]
inv_fy = 1.0 / cameraMatrix[1, 1]
ox = cameraMatrix[0, 2]
oy = cameraMatrix[1, 2]
xyz_offset=[0,0,depth.min()]
array = []
xy=np.argwhere((depth_original<255) & (depth_original>0))
xy=xy.astype(float)
z=depth[np.where((depth_original<255) & (depth_original>0))]
z=z.astype(float)
a=((xy[:,0]-ox)*z*inv_fx)
b=((xy[:,1]-oy)*z*inv_fy)
xy[:,0]=b
xy[:,1]=a
xyz=np.insert(xy, 2, values=z, axis=1)
xyz = np.float32(xyz)
# -*- coding: utf-8 -*-
# Tests for module mosaic.immutable_model
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import unittest
import numpy as N
import immutable.np as IN
import mosaic.immutable_model as M
from mosaic.api import is_valid
def make_water_fragment(nsites=1):
return M.fragment("water", (),
(("H1", M.atom(M.element("H"), nsites)),
("H2", M.atom(M.element("H"), nsites)),
("O", M.atom(M.element("O"), nsites))),
(("H1", "O", "single"), ("H2", "O", "single")))
class AtomDescriptorTest(unittest.TestCase):
def test_singleton(self):
self.assertTrue(M.dummy() is M.dummy())
self.assertTrue(M.dummy('a') is M.dummy('a'))
self.assertTrue(M.dummy('a') is not M.dummy('b'))
self.assertTrue(M.dummy('a') is not M.unknown('a'))
self.assertTrue(M.dummy('C') is not M.element('C'))
self.assertTrue(M.element('C') is M.element('C'))
def test_name(self):
for name in ['a', 'b', 'c']:
self.assertEqual(M.unknown(name).name, name)
def test_type(self):
self.assertEqual(M.dummy().type, "dummy")
self.assertEqual(M.unknown().type, "")
self.assertEqual(M.element('O').type, "element")
self.assertEqual(M.cgparticle('ala').type, "cgparticle")
class WaterTest(unittest.TestCase):
def setUp(self):
self.mol = make_water_fragment()
def test_basics(self):
self.assertEqual(self.mol.number_of_atoms, 3)
self.assertEqual(self.mol.number_of_sites, 3)
self.assertEqual(self.mol.number_of_bonds, 2)
self.assertEqual(self.mol.species, "water")
def test_equality(self):
same_mol = make_water_fragment()
changed_bond_order = M.fragment("water", (),
(("H1", M.atom(M.element("H"))),
("H2", M.atom(M.element("H"))),
("O", M.atom(M.element("O")))),
(("O", "H2", "single"),
("O", "H1", "single")))
changed_atom_order = M.fragment("water", (),
(("O", M.atom(M.element("O"))),
("H1", M.atom(M.element("H"))),
("H2", M.atom(M.element("H")))),
(("O", "H1", "single"),
("O", "H2", "single")))
self.assertEqual(self.mol, self.mol)
self.assertEqual(self.mol, same_mol)
self.assertEqual(self.mol, changed_bond_order)
self.assertNotEqual(self.mol, changed_atom_order)
class PeptideTest(unittest.TestCase):
def _make_molecule(self):
C = M.element('C')
H = M.element('H')
N = M.element('N')
O = M.element('O')
peptide_group = M.fragment('peptide',
(),
(('CA', M.atom(C)),
('HA', M.atom(H)),
('H', M.atom(H)),
('N', M.atom(N)),
('C', M.atom(C)),
('O', M.atom(O))),
(('N', 'H', "single"),
('N', 'CA', "single"),
('CA', 'HA', "single"),
('CA', 'C', "single"),
('C', 'O', "double")))
ala_sidechain = M.fragment('ala_sidechain',
(),
(('CB', M.atom(C)),
('HB1', M.atom(H)),
('HB2', M.atom(H)),
('HB3', M.atom(H))),
(('CB', 'HB1', "single"),
('CB', 'HB2', "single"),
('CB', 'HB3', "single"),))
ala = M.fragment('alanine',
(('peptide', peptide_group),
('sidechain', ala_sidechain)),
(),
(('peptide.CA', 'sidechain.CB', "single"),))
return M.polymer('alanine_dipeptide',
(('ALA1', ala),
('ALA2', ala)),
(('ALA1.peptide.C', 'ALA2.peptide.N', "single"),),
'polypeptide')
def test_basic(self):
mol = self._make_molecule()
self.assertEqual(mol.number_of_atoms, 20)
self.assertEqual(mol.number_of_sites, 20)
self.assertEqual(mol.number_of_bonds, 19)
self.assertEqual(mol.polymer_type, "polypeptide")
def test_equality(self):
self.assertEqual(self._make_molecule(),
self._make_molecule())
def test_iterators(self):
mol = self._make_molecule()
mol_ref = M.FragmentRef('x', mol)
atoms = tuple(mol_ref.recursive_atom_iterator())
self.assertEqual(len(atoms), mol.number_of_atoms)
bonds = tuple(mol_ref.recursive_bond_iterator())
self.assertEqual(len(bonds), mol.number_of_bonds)
for a1, a2, order in bonds:
for a in a1, a2:
node = mol
for p in a.split('.'):
node = node[p]
self.assertTrue(isinstance(node, M.Atom))
paths = tuple(mol_ref.recursive_atom_path_iterator())
self.assertEqual(len(paths), mol.number_of_atoms)
for ap in paths:
node = mol
for p in ap.split('.'):
node = node[p]
self.assertTrue(isinstance(node, M.Atom))
class ErrorCheckingTest(unittest.TestCase):
def test_atom_descriptor(self):
self.assertRaises(TypeError, lambda: M.dummy(42))
self.assertRaises(ValueError, lambda: M.element(42))
self.assertRaises(ValueError, lambda: M.element("X"))
def test_atom(self):
carbon = M.element("C")
self.assertRaises(TypeError, lambda: M.atom('C', 1))
self.assertRaises(ValueError, lambda: M.atom(carbon, 0))
def test_fragment(self):
carbon = M.atom(M.element("C"))
# Illegal fragments
self.assertRaises(TypeError,
lambda: M.fragment('m', None, (("C", carbon),), ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', [1, 2], (("C", carbon),), ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (("C", carbon),), (), ()))
# Illegal atoms
self.assertRaises(TypeError,
lambda: M.fragment('m', (), None, ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), [1, 2], ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (carbon,), ()))
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(("C", carbon),
("C", carbon)),
()))
# Illegal bond lists
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),), None))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
[1, 2, 3]))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
(('X', 'X'))))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
(['X', 'X', 'single'])))
def test_bonds(self):
carbon = M.atom(M.element("C"))
# Bond specified by only one atom
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', ),)))
# Bond specified by two atoms but no bond order
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C2'),)))
# Bond specified by two identical atoms
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C1', ''),)))
# Bond specified by an atom name that is undefined
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C3', ''),)))
# Bond specified at the wrong fragment level
f = M.fragment('x', (), (('C1', carbon), ('C2', carbon)), ())
self.assertRaises(ValueError,
lambda: M.fragment('m', (('x', f),),
(('C3', carbon),),
(('x.C1', 'x.C2', ''),)))
def test_universe(self):
mol = M.fragment("water", (),
(("H1", M.atom(M.element("H"), 8)),
("H2", M.atom(M.element("H"), 8)),
("O", M.atom(M.element("O"), 2))),
(("H1", "O", "single"), ("H2", "O", "single")))
self.assertRaises(TypeError,
lambda: M.universe(0, [(mol, 'water', 10)]))
self.assertRaises(ValueError,
lambda: M.universe('strange', [(mol, 'water', 10)]))
self.assertRaises(ValueError,
lambda: M.universe('strange', [(mol, 10)]))
self.assertRaises(TypeError,
lambda: M.universe('infinite', mol))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [("water", 10)]))
self.assertRaises(TypeError,
lambda: M.universe('infinite', [mol]))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [(10, mol)]))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [(mol, 'water', 10)],
[(IN.zeros((3,3), N.float64),
IN.zeros((3,), N.float64))]))
def test_configuration(self):
mol = make_water_fragment()
universe = M.universe('cube', [(mol, 'water', 10)])
# Missing data
self.assertRaises(TypeError,
lambda: M.Configuration(universe))
# Positions but no cell parameters
self.assertRaises(TypeError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32)))
# Positions and cell parameters of different dtype
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32),
N.float64(10.)))
# Positions not an array
self.assertRaises(TypeError,
lambda: M.Configuration(universe,
list(IN.zeros((30, 3),
N.float32)),
N.float32(10.)))
# Positions of wrong shape
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((25, 3), N.float32),
N.float32(10.)))
# Cell parameters of wrong shape
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32),
IN.zeros((3,), N.float32)))
def test_selection(self):
mol = make_water_fragment(nsites=2)
universe = M.universe('cube', [(mol, 'water', 5)])
# Index occurs twice
self.assertRaises(ValueError,
lambda: M.AtomSelection(universe,
IN.zeros((2,), N.uint16)))
# Atom index too large
self.assertRaises(ValueError,
lambda: M.AtomSelection(universe,
IN.array([20], N.uint16)))
# Template atom index too large
self.assertRaises(ValueError,
lambda: M.TemplateAtomSelection(universe,
IN.array([3], N.uint8)))
# Site index too large
self.assertRaises(ValueError,
lambda: M.SiteSelection(universe,
IN.array([40], N.uint16)))
# Template site index too large
self.assertRaises(ValueError,
lambda: M.TemplateSiteSelection(universe,
IN.array([8], N.uint8)))
class UniverseTest(unittest.TestCase):
def setUp(self):
mol = make_water_fragment(2)
self.universe = M.universe('infinite', [(mol, 'water', 10)],
convention='my_own')
def test_basics(self):
self.assertTrue(is_valid(self.universe))
self.assertEqual(self.universe.number_of_molecules, 10)
self.assertEqual(self.universe.number_of_atoms, 30)
self.assertEqual(self.universe.number_of_sites, 60)
self.assertEqual(self.universe.number_of_bonds, 20)
self.assertEqual(self.universe.cell_shape, "infinite")
self.assertEqual(self.universe.convention, "my_own")
def test_properties(self):
masses = M.TemplateAtomProperty(self.universe,
"masses", "amu",
IN.array([1., 1., 16.], N.float32))
self.assertTrue(is_valid(masses))
self.assertEqual(masses.type, 'template_atom')
self.assertTrue(masses.universe == self.universe)
self.assertEqual(masses.element_shape, ())
self.assertEqual(masses.data.shape, (3,))
bead_masses = M.TemplateSiteProperty(self.universe,
"mass", "amu",
IN.array([1., 1.,
1., 1.,
8., 8.], N.float32))
self.assertTrue(is_valid(bead_masses))
self.assertEqual(bead_masses.type, 'template_site')
self.assertTrue(bead_masses.universe is self.universe)
self.assertEqual(bead_masses.element_shape, ())
self.assertEqual(bead_masses.data.shape, (6,))
velocities = M.SiteProperty(self.universe,
"velocity", "nm ps-1",
IN.zeros((60, 3), dtype=N.float64))
self.assertTrue(is_valid(velocities))
self.assertEqual(velocities.type, 'site')
self.assertTrue(velocities.universe is self.universe)
self.assertEqual(velocities.data.shape, (60, 3))
self.assertEqual(velocities.element_shape, (3,))
foo = M.AtomProperty(self.universe,
"foo", "",
IN.zeros((30, 2, 2), dtype=N.int16))
self.assertTrue(is_valid(foo))
self.assertEqual(foo.type, 'atom')
self.assertTrue(foo.universe is self.universe)
self.assertEqual(foo.data.shape, (30, 2, 2))
self.assertEqual(foo.element_shape, (2, 2))
def test_labels(self):
labels = tuple(a.name
for f, n in self.universe.molecules
for a in f.recursive_atom_iterator())
el = M.TemplateAtomLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_template_atoms)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for _ in range(n)
for a in f.recursive_atom_iterator())
el = M.AtomLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_atoms)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for a in f.recursive_atom_iterator()
for _ in range(a.number_of_sites))
el = M.TemplateSiteLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_template_sites)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for _ in range(n)
for a in f.recursive_atom_iterator()
for __ in range(a.number_of_sites))
el = M.SiteLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_sites)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
def test_bonds(self):
bonds = self.universe.bond_index_array()
self.assertEqual(len(bonds), self.universe.number_of_bonds)
self.assertTrue((bonds >= 0).all())
self.assertTrue((bonds < self.universe.number_of_atoms).all())
for i in range(10):
self.assertEqual(bonds[2*i, 0], 3*i)
self.assertEqual(bonds[2*i, 1], 3*i+2)
self.assertEqual(bonds[2*i+1, 0], 3*i+1)
self.assertEqual(bonds[2*i+1, 1], 3*i+2)
def test_index_mappings(self):
mol = self.universe.molecules[0][0]
s2a = mol.site_to_atom_index_mapping()
self.assertTrue((s2a == N.array([0, 0, 1, 1, 2, 2])).all())
#!/usr/bin/env python
""" This file is a Python translation of the MATLAB file acm.m
Python version by RDL 29 Mar 2012
Copyright notice from acm.m:
copyright 1996, by <NAME>. For use with the book
"Statistical Digital Signal Processing and Modeling"
(John Wiley & Sons, 1996).
"""
from __future__ import print_function,division
import numpy as np
from convm import convm
def acm(x,p):
""" Find an all-pole model using the autocorrelation method
Usage: a,err = acm(x,p)
The input sequence x is modeled as the unit sample response of
a filter having a system function of the form
H(z) = b(0)/A(z)
where the coefficients of A(z) are contained in the vector
a=[1, a(1), ... a(p)]
The input p defines the number of poles in the model.
The modeling error is returned in err.
The numerator b(0) is typically set equal to the square
root of err.
"""
x = x.flatten()
N = len(x)
if p > N:
print('ERROR: model order too large')
else:
X = convm(x, p+1)
Xq = X[0:N+p-1,0:p]
xq1 = -X[1:N+p, 0]
a = np.linalg.lstsq(Xq, xq1)
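# Illustrative usage sketch (assuming the remainder of acm follows acm.m and
# returns the coefficient vector `a` together with the modeling error `err`):
#   x = np.cos(0.2 * np.pi * np.arange(64))   # test signal
#   a, err = acm(x, 4)                        # 4-pole all-pole model
#   b0 = np.sqrt(err)                         # numerator gain, as described above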
# coding: utf-8
# In[ ]:
import numpy as np
'''Handles analytically deconvoving two Gaussians and finding of a common beam.'''
def quadratic2elliptic(A,B,C,D=0,E=0,F=-np.log(2)):
"""Invert:
(A0 cos^2 phi + c0 sin^2 phi)k = A
(A0-C0)sin 2phi k = B
(a0 sin^2 phi + c0 cos^2 phi) k = C
returns bmaj,bmin,bpa[,xc,y if D,E != 0]"""
if (np.isinf(A) and np.isinf(B) and np.isinf(C)):#delta function
return 0., 0., 0.
assert (B**2 - 4*A*C) != 0, "It is parabolic, not elliptic or hyperbolic"
if A!=C:#not a circle so should be able to solve second equation
#A-C = A0 cos^2 phi + c0 sin^2 phi - a0 sin^2 phi - c0 cos^2 phi
#A-C = A0 (cos^2 phi - sin^2 phi)- c0 (-sin^2 phi + c0 cos^2 phi) = (A0 - C0) cos 2phi
#(cos^2 phi - sin^2 phi) = cos 2 phi
phi = np.arctan2(B,(A-C))/2.#choose your own modulus
else:#circle
phi = np.pi/4.#arbitrary, nonzero cos and sin for the rest
phi = 0.
#rotate x,y to phi to x',y' = xcos - ysin, xsin + ycos
#then expand A(x' - xc)^2 + B(x'-xc)(y'-yc) + C(y' - yc)^2 + D(x' - xc) + E(y' - yc) + F = 0
#then now the coord sys is unrotated relative to the quadratic paramters
c = np.cos(phi)
s = np.sin(phi)
c2 = c*c
s2 = s*s
A1 = A*c2 + B*c*s + C*s2
B1 = 2.*(C-A)*s*c+B*(c2-s2)#should be zero since there's no cross term in the unroated
#print "Rotated cross term: {0} =? 0".format(B1)
C1 = A*s2 - B*c*s + C*c2
D1 = D*c + E*s
E1 = -D*s + E*c
assert (A1 != 0) and (C1 != 0), "degenerate between ellipse and hyperbola"
#complete square for x's and y's
#A1(x-xc)^2 + C1(y-yc)^2 + F = A1xx - 2A1xxc + A1xcxc + C1yy - 2C1yyc + C1ycyc = A1() + D1() + C1() + E1() + A1xcxc + C1ycyc + F =0
xc1 = D1/(-2.*A1)
yc1 = E1/(-2.*C1)#solutions (in rotated frame)
#now unrotate them
xc = xc1*c - yc1*s#might be xc1*c + yc1*s
yc = xc1*s + yc1*c#might be -xc1*s + yc1*c
#bring the remainder of completed squares to rhs with F
rhs = -F + D1**2/(4.*A1) + E1**2/(4.*C1)
#(x-xc)^2/(bmaj/2)^2 + (y-yc)^2/(bmin/2)^2 = 1
#A1(x-xc)^2 + C1*y-yc)^2 = rhs
A0 = A1/rhs
C0 = C1/rhs
bmaj = np.sign(A0)*2.*np.sqrt(1./np.abs(A0))
bmin = np.sign(C0)*2.*np.sqrt(1./np.abs(C0))
assert bmaj*bmin > 0, "Hyperbolic solution ;) inversion success but not physical."
#return None,None,None
if bmin > bmaj:
temp = bmin
bmin = bmaj
bmaj = temp
bpa = phi# - np.pi/2.#starts at y
if E==0 and D==0:
return bmaj,bmin,bpa*180./np.pi
return bmaj,bmin,bpa*180./np.pi,xc,yc
def elliptic2quadratic(bmaj,bmin,pa,xc=0,yc=0,k=np.log(2)):
'''a*x**2 + b*x*y + c*y**2 + d*x + e*y + f = 0
pa in deg
return A,B,C[,D,E,F if xc,yc!=0]'''
#unrotated solution
a0 = k/(bmaj/2.)**2
c0 = k/(bmin/2.)**2
theta = (pa + 90.)*np.pi/180.
#Rotated Solution
cos2 = np.cos(theta)**2
sin2 = np.sin(theta)**2
A = (a0*cos2 + c0*sin2)
C = (c0*cos2 + a0*sin2)
B = (a0 - c0 )*np.sin(2.*theta)
#Now move center
D = -2.*A*xc - B*yc
E = -2.*C*yc - B*xc
F = A*xc**2 + B*xc*yc + C*yc**2 - 1./k
if xc==0 and yc==0:
return A,B,C
return A,B,C,D,E,F
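# Illustrative sketch, not in the original module: the elliptic <-> quadratic
# conversion is a round trip (this is what test_elliptic2quadratic below checks).
def _example_elliptic_roundtrip():
    A, B, C = elliptic2quadratic(0.8, 0.3, 25.0)  # bmaj, bmin, bpa in degrees
    bmaj, bmin, bpa = quadratic2elliptic(A, B, C)
    return bmaj, bmin, bpa  # approximately (0.8, 0.3, 25.0)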
def deconvolve(A1,B1,C1,A2,B2,C2):
'''Solves analytically G(A1,B1,C1) = convolution(G(A2,B2,C2), G(Ak,Bk,Ck))
Returns Ak,Bk,Ck
A,B,C are quadratic parametrization.
If you have bmaj,bmin,bpa, then get A,B,C = elliptic2quadratic(bmaj,bmin,bpa)
Returns (np.inf,np.inf,np.inf) if solution is delta function'''
D = B1**2 - 2*B1*B2 + B2**2 - 4*A1*C1 + 4* A2* C1 + 4* A1* C2 - 4* A2* C2
if (np.abs(D) < 10*(1-2./3.-1./3.)):
return np.inf,np.inf,np.inf#delta function
if (D<0.):
#print "Inverse Gaussian, discriminant D:",D
#ie. hyperbolic solution, still valid but elliptic representation is impossible instead you get hyperbolic parameters: negative bmaj/bmin
pass
Ak = (-A2* B1**2 + A1* B2**2 + 4* A1* A2* C1 - 4* A1* A2* C2)/D
Bk = (-B1**2 *B2 + B1* B2**2 + 4* A1* B2* C1 - 4* A2* B1* C2)/D
Ck = (B2**2 *C1 - B1**2 *C2 + 4* A1* C1* C2 - 4* A2* C1* C2)/D
assert (Bk*Bk - 4*Ak*Ck) != 0, "Indifinite deconvolution det = 0"
return Ak,Bk,Ck
def convolve(A1,B1,C1,A2,B2,C2):
'''
Convolves two gaussians with quadratic parametrization:
A,B,C are quadratic parametrization.
If you have bmaj,bmin,bpa, then get A,B,C = elliptic2quadratic(bmaj,bmin,bpa)
Where g = factor*Exp(-A*X**2 - B*X*Y - C*Y**2)
'''
D1 = 4.*A1*C1 - B1**2
D2 = 4.*A2*C2 - B2**2
D3 = -2.*B1 * B2 + 4.*A2*C1 + 4.*A1*C2 + D1+D2
D4 = C2*D1+C1*D2
#Non-solvable cases
if (D1*D2*D3*D4 == 0):
print ("Can't convolve...")
return (None,None,None)
if (D3 < 0):#always imaginary
print ("D3 < 0, Imaginary solution",D3)
return (None,None,None)
factor = 2.*np.pi*np.sqrt(D1 + 0j)*np.sqrt(D2 + 0j)/np.sqrt(D3/D4 + 0j)/np.sqrt(D4/(D1*D2) + 0j)
if np.abs(np.imag(factor)) > 10.*(7./3 - 4./3 - 1.):
print ("Imaginary result somehow...")
return (None,None,None)
factor = np.real(factor)
A = (A2*D1 + A1 * D2)/D3
B = (B2*D1+B1*D2)/D3
C = D4/D3
k = np.log(factor*2.)
return A,B,C#,factor
def findCommonBeam(beams, debugplots=False,confidence=0.005):
'''Given a list `beams`, where each element is a set of elliptic beam parameters (bmaj_i, bmin_i, bpa_i)
with bpa in degrees,
return the parameters of the common beam of minimal area.
Common beam means that every beam in the list can be convolved up to the common beam.
The `confidence` parameter sets how precisely the solution is resolved, e.g. 0.01 means the solution is known to about 1%.
Specifically, it controls how long the sampler runs so that the likelihood is sampled with the required accuracy.
The default is 0.005. Computation time scales inversely with it.'''
def beamArea(bmaj,bmin,bpa=None):
return bmaj*bmin*np.pi/4./np.log(2.)
def isCommonBeam(beamCandQuad,beamsQuad):
for beamQuad in beamsQuad:
try:
Ak,Bk,Ck = deconvolve(*beamCandQuad,*beamQuad)
bmaj,bmin,bpa = quadratic2elliptic(Ak,Bk,Ck)
except:
return False
return True
def samplePrior(beamLast, beamsQuad):
iter = 0
while True:
std = 1.5
beam = [beamLast[0]*np.exp(np.log(std)*np.random.uniform(low=-1,high=1.)),
beamLast[1]*np.exp(np.log(std)*np.random.uniform(low=-1,high=1.)),
beamLast[2] + np.random.uniform(low=-5,high=5)]
#beam[0] = np.abs(beam[0])
#beam[1] = np.abs(beam[1])
if beam[1] > beam[0]:
temp = beam[1]
beam[1] = beam[0]
beam[0] = temp
while beam[2] > 90.:
beam[2] -= 180.
while beam[2] < -90.:
beam[2] += 180.
A,B,C = elliptic2quadratic(*beam)
if isCommonBeam((A,B,C),beamsQuad):
return beam
iter += 1
def misfit(beam,areaLargest):
area = beamArea(*beam)
L2 = (area - areaLargest)**2/2.
return L2
#Get beam areas
N = len(beams)
areas = []
beamsQuad = []
i = 0
while i < N:
areas.append(beamArea(*beams[i]))
beamsQuad.append(elliptic2quadratic(*beams[i]))
i += 1
beam0 = beams[np.argmax(areas)]
areaLargest = np.max(areas)
beam0Quad = elliptic2quadratic(*beam0)
if isCommonBeam(beam0Quad,beamsQuad):
return beam0
else:
bmajMax = np.max(beams,axis=0)[0]
beam0 = [bmajMax,bmajMax,0.]
#MC search, 1/binning = confidence
binning = int(1./confidence)
Nmax = 1e6
beamsMH = np.zeros([binning*binning,3],dtype=np.double)
beamsMul = np.zeros(binning*binning,dtype=np.double)
beamsMH[0,:] = beam0
beamsMul[0] = 1
accepted = 1
Si = misfit(beam0,areaLargest)
Li = np.exp(-Si)
maxL = Li
maxLBeam = beam0
iter = 0
while accepted < binning**2 and iter < Nmax:
beam_j = samplePrior(beamsMH[accepted-1], beamsQuad)
Sj = misfit(beam_j,areaLargest)
Lj = np.exp(-Sj)
#print("Sj = {}".format(Sj))
if Sj < Si or np.log(np.random.uniform()) < Si - Sj:
Si = Sj
beamsMH[accepted,:] = beam_j
beamsMul[accepted] += 1
#print("Accepted")
accepted += 1
else:
beamsMul[accepted-1] += 1
if Lj > maxL:
maxL = Lj
maxLBeam = beam_j
iter += 1
if accepted == binning**2:
pass
#print("Converged in {} steps with an acceptance rate of {}".format(iter,float(accepted)/iter))
else:
beamsMH = beamsMH[:iter,:]
beamsMul = beamsMul[:iter]
if debugplots:
import pylab as plt
# plt.hist(beamsMH[:,0],bins=binning)
# plt.show()
# plt.hist(beamsMH[:,1],bins=binning)
# plt.show()
# plt.hist(beamsMH[:,2],bins=binning)
# plt.show()
from matplotlib.patches import Ellipse
ax = plt.subplot(1,1,1)
ax.add_artist(Ellipse(xy=(0,0), width=maxLBeam[0], height=maxLBeam[1], angle=maxLBeam[2], facecolor="none",edgecolor='red',alpha=1,label='common beam'))
for beam in beams:
ax.add_artist(Ellipse(xy=(0,0), width=beam[0], height=beam[1], angle=beam[2], facecolor="none",edgecolor='black',ls='--',alpha=1))
ax.set_xlim(-0.5,0.5)
ax.set_ylim(-0.5,0.5)
plt.legend(frameon=False)
plt.show()
# meanBeam = np.sum(beamsMH.T*beamsMul,axis=1)/np.sum(beamsMul)
# stdBeam = np.sqrt(np.sum(beamsMH.T**2*beamsMul,axis=1)/np.sum(beamsMul) - meanBeam**2)
# print ("(Gaussian) beam is {} +- {}".format(meanBeam,stdBeam))
# logmeanBmajBmin = np.sum(np.log(beamsMH[:,:2]).T*beamsMul,axis=1)/np.sum(beamsMul)
# logstdBmajBmin = np.sqrt(np.sum(np.log(beamsMH[:,:2]).T**2*beamsMul,axis=1)/np.sum(beamsMul) - logmeanBmajBmin**2)
# logstdBmajBminu = np.exp(logmeanBmajBmin + logstdBmajBmin) - np.exp(logmeanBmajBmin)
# logstdBmajBminl = np.exp(logmeanBmajBmin) - np.exp(logmeanBmajBmin - logstdBmajBmin)
# logmeanBmajBmin = np.exp(logmeanBmajBmin)
# print("(Lognormal) bmaj/bmin is {} + {} - {}".format(logmeanBmajBmin,logstdBmajBminu,logstdBmajBminl))
# print ("Max Likelihood beam is {}".format(maxLBeam))
return maxLBeam
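# Illustrative usage sketch, not in the original module: the beams below are only
# a hypothetical example input; the returned common beam can be deconvolved by
# every input beam, as described in the docstring above.
def _example_find_common_beam():
    beams = [(0.30, 0.10, 10.0), (0.25, 0.15, 80.0)]  # (bmaj, bmin, bpa in degrees)
    return findCommonBeam(beams)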
def fftGaussian(A,B,C,X,Y):
D = 4*A*C-B**2
return 2*np.pi/np.sqrt(D)*np.exp(-4*np.pi/D*(-C*X**2 +B*X*Y -A*Y**2))
def gaussian(A,B,C,X,Y):
return np.exp(-A*X**2 - B*X*Y - C*Y**2)
def psfTGSS1(dec):
'''input declination in degrees
return bmaj(arcsec), bmin(arcsec), bpa(degrees)'''
if dec > 19.0836824:
return 25.,25.,0.
else:
return 25.,25./np.cos(np.pi*(dec-19.0836824)/180.),0.
def test_elliptic2quadratic():
for i in range(100):
bpa = np.random.uniform()*180.-90.#deg
bmaj = np.random.uniform()
bmin = np.random.uniform()*bmaj
A,B,C = elliptic2quadratic(bmaj,bmin,bpa)
bmaj2,bmin2,bpa2 = quadratic2elliptic(A,B,C)
assert np.isclose(bmaj,bmaj2) and np.isclose(bmin,bmin2) and np.isclose(bpa,bpa2), "Failed to pass {},{},{} != {},{},{}".format(bmaj,bmin,bpa,bmaj2,bmin2,bpa2)
return True
def test_convolvedeconvolve(N=100):
for i in range(N):
bpa = np.random.uniform()*180.-90.#deg
bmaj = np.random.uniform()
bmin = np.random.uniform()*bmaj
A1,B1,C1 = elliptic2quadratic(bmaj,bmin,bpa)
bpa2 = np.random.uniform()*180.-90.#deg
bmaj2 = np.random.uniform()
bmin2 = np.random.uniform()*bmaj2
A2,B2,C2 = elliptic2quadratic(bmaj2,bmin2,bpa2)
Ac,Bc,Cc = convolve(A1,B1,C1,A2,B2,C2)
Ak,Bk,Ck = deconvolve(Ac,Bc,Cc,A1,B1,C1)
bmaj2_,bmin2_,bpa2_ = quadratic2elliptic(Ak,Bk,Ck)
assert np.isclose(bmaj2_,bmaj2) and np.isclose(bmin2_,bmin2) and np.isclose(bpa2_,bpa2), "Failed to pass {},{},{} != {},{},{}".format(bmaj2_,bmin2_,bpa2_,bmaj2,bmin2,bpa2)
return True
def test_deltaFunctionDeconvolve():
bpa = np.random.uniform()*180.-90.#deg
bmaj = np.random.uniform()
bmin = np.random.uniform()*bmaj
A1,B1,C1 = elliptic2quadratic(bmaj,bmin,bpa)
#deconv same beam
Ak,Bk,Ck = deconvolve(A1,B1,C1,A1,B1,C1)
bmaj_d, bmin_d, bpa_d = quadratic2elliptic(Ak,Bk,Ck)
assert bmaj_d==0 and bmin_d==0 and bpa_d==0,"Supposed to be the delta"
return True
def test_timing():
from time import perf_counter as clock  # time.clock was removed in Python 3.8
i = 0
t1 = clock()
for i in range(10000):
bpa = np.random.uniform()*180.-90.#deg
bmaj = np.random.uniform()
bmin = np.random.uniform()*bmaj
A1,B1,C1 = elliptic2quadratic(bmaj,bmin,bpa)
bpa2 = np.random.uniform()*180.-90.#deg
bmaj2 = np.random.uniform()
bmin2 = np.random.uniform()*bmaj2
A2,B2,C2 = elliptic2quadratic(bmaj2,bmin2,bpa2)
Ac,Bc,Cc = convolve(A1,B1,C1,A2,B2,C2)
Ak,Bk,Ck = deconvolve(Ac,Bc,Cc,A1,B1,C1)
bmaj2_,bmin2_,bpa2_ = quadratic2elliptic(Ak,Bk,Ck)
print("Time avg. ~ {} seconds".format((clock()-t1)/10000))
def test_findCommonBeam():
np.random.seed(1234)
for i in range(10):
beams = []
for i in range(3):
bpa = np.random.uniform()*180.-90.#deg
# -*- coding: utf-8 -*-
"""
scripts to test the repair strategy
"""
__version__ = '1.0'
__author__ = '<NAME>'
import numpy as np
import pandas as pd
import time
import sys
sys.path.append(r'C:\RELAY')
from src.constraints import Constraints
from src.materials import Material
from src.parameters import Parameters
from src.ABD import A_from_lampam, B_from_lampam, D_from_lampam, filter_ABD
from src.excel import autofit_column_widths
from src.excel import delete_file
from src.save_set_up import save_constraints_LAYLA
from src.save_set_up import save_materials
from src.repair_diso_contig import repair_diso_contig
from src.repair_flexural import repair_flexural
from src.lampam_functions import calc_lampam
from src.repair_10_bal import repair_10_bal
from src.repair_10_bal import calc_mini_10
from src.repair_10_bal import is_equal
from src.repair_membrane import repair_membrane
from src.repair_membrane_1_no_ipo import calc_lampamA_ply_queue
from src.pretty_print import print_lampam, print_ss
#==============================================================================
# Input file
#==============================================================================
guidelines = 'none'
n_plies = 150
fibre_angles = 'trad'
fibre_angles = '3060'
fibre_angles = '15'
file_to_open = '/RELAY/pop/'\
+ fibre_angles + '-' + guidelines + '-' + str(n_plies) + 'plies.xlsx'
result_filename = 'repair-' + fibre_angles + '-' + guidelines \
+ '-' + str(n_plies) + 'plies.xlsx'
delete_file(result_filename)
#==============================================================================
# Material properties
#==============================================================================
data = pd.read_excel(
file_to_open, sheet_name='Materials', index_col=0, header=None)
data = data.transpose()
E11 = data.loc[1, 'E11']
E22 = data.loc[1, 'E22']
nu12 = data.loc[1, 'nu12']
G12 = data.loc[1, 'G12']
ply_t = data.loc[1, 'ply thickness']
mat = Material(E11=E11, E22=E22, G12=G12, nu12=nu12, ply_t=ply_t)
#print(data)
#==============================================================================
# Design & manufacturing constraints
#==============================================================================
data = pd.read_excel(
file_to_open, sheet_name='Constraints', index_col=0, header=None)
data = data.transpose()
#print(data)
sym = data.loc[1, 'symmetry']
bal = True
ipo = True
oopo = data.loc[1, 'out-of-plane orthotropy']
dam_tol = data.loc[1, 'damage tolerance']
rule_10_percent = True
percent_0 = 10
percent_90 = 10
percent_45 = 0
percent_135 = 0
percent_45_135 = 10
diso = True
delta_angle = 45
contig = True
n_contig = 4
set_of_angles = np.array(data.loc[1, 'fibre orientations'].split()).astype(int)
constraints = Constraints(
sym=sym,
bal=bal,
ipo=ipo,
oopo=oopo,
dam_tol=dam_tol,
rule_10_percent=rule_10_percent,
percent_0=percent_0,
percent_45=percent_45,
percent_90=percent_90,
percent_135=percent_135,
percent_45_135=percent_45_135,
diso=diso,
contig=contig,
n_contig=n_contig,
delta_angle=delta_angle,
set_of_angles=set_of_angles)
#==============================================================================
# Parameters
#==============================================================================
# lamination parameter weightings during membrane property refinement
in_plane_coeffs = np.array([1, 1, 0, 0])
# percentage of laminate thickness for plies that can be modified during
# the refinement of membrane properties
p_A = 80
# number of plies in the last permutation during repair for disorientation
# and/or contiguity
n_D1 = 6
# number of ply shifts tested at each step of the re-designing process during
# refinement of flexural properties
n_D2 = 10
# number of times the algorithms 1 and 2 are repeated during the flexural
# property refinement
n_D3 = 2
# lamination parameter weightings during flexural property refinement
out_of_plane_coeffs = np.array([1, 1, 1, 0])
table_param = pd.DataFrame()
table_param.loc[0, 'in_plane_coeffs'] \
= ' '.join(np.array(in_plane_coeffs, dtype=str))
table_param.loc[0, 'out_of_plane_coeffs'] \
= ' '.join(np.array(out_of_plane_coeffs, dtype=str))
table_param.loc[0, 'p_A'] = p_A
table_param.loc[0, 'n_D1'] = n_D1
table_param.loc[0, 'n_D2'] = n_D2
table_param.loc[0, 'n_D3'] = n_D3
table_param = table_param.transpose()
parameters = Parameters(
constraints=constraints,
p_A=p_A,
n_D1=n_D1,
n_D2=n_D2,
n_D3=n_D3,
repair_membrane_switch=True,
repair_flexural_switch=True)
#==============================================================================
# Tests
#==============================================================================
table_10_bal = pd.DataFrame()
table_membrane = pd.DataFrame()
table_diso_contig = pd.DataFrame()
table_flexural = pd.DataFrame()
data = pd.read_excel(file_to_open, sheet_name='stacks', index_col=0)
#print(data)
t_cummul_10_bal = 0
t_cummul_membrane = 0
t_cummul_diso_contig = 0
t_cummul_flexural = 0
table_10_bal.loc[0, 'average time repair-10-bal (s)'] = 0
table_membrane.loc[0, 'average time repair-membrane (s)'] = 0
table_diso_contig.loc[0, 'average time repair diso-contig (s)'] = 0
table_flexural.loc[0, 'average time repair flexural (s)'] = 0
table_diso_contig.loc[0, 'success rate inward repair diso contig'] = 0
table_diso_contig.loc[0, 'success rate overall repair diso contig'] = 0
n_success_inward_repair_diso_contig = 0
n_success_outward_repair_diso_contig = 0
for ind in range(0, 50):
print('ind', ind)
#==========================================================================
# Read inputs
#==========================================================================
n_plies = data.loc[ind, 'ply_count']
lampam_ini = np.empty((12,), float)
lampam_ini[0] = data.loc[ind, 'lampam[1]']
lampam_ini[1] = data.loc[ind, 'lampam[2]']
lampam_ini[2] = data.loc[ind, 'lampam[3]']
lampam_ini[3] = data.loc[ind, 'lampam[4]']
lampam_ini[4] = data.loc[ind, 'lampam[5]']
lampam_ini[5] = data.loc[ind, 'lampam[6]']
lampam_ini[6] = data.loc[ind, 'lampam[7]']
lampam_ini[7] = data.loc[ind, 'lampam[8]']
lampam_ini[8] = data.loc[ind, 'lampam[9]']
lampam_ini[9] = data.loc[ind, 'lampam[10]']
lampam_ini[10] = data.loc[ind, 'lampam[11]']
lampam_ini[11] = data.loc[ind, 'lampam[12]']
lampam_target = lampam_ini
ss_ini = np.array(data.loc[ind, 'ss'].split()).astype(int)
# print('ss_ini')
# print_ss(ss_ini, 200)
A11_ini = data.loc[ind, 'A11']
A22_ini = data.loc[ind, 'A22']
A12_ini = data.loc[ind, 'A12']
A66_ini = data.loc[ind, 'A66']
A16_ini = data.loc[ind, 'A16']
A26_ini = data.loc[ind, 'A26']
D11_ini = data.loc[ind, 'D11']
D22_ini = data.loc[ind, 'D22']
D12_ini = data.loc[ind, 'D12']
D66_ini = data.loc[ind, 'D66']
D16_ini = data.loc[ind, 'D16']
D26_ini = data.loc[ind, 'D26']
#==========================================================================
# Repair for balance and 10% rule
#==========================================================================
t = time.time()
mini_10 = calc_mini_10(constraints, ss_ini.size)
ss, ply_queue = repair_10_bal(ss_ini, mini_10, constraints)
# print('ss after repair balance/10')
# print_ss(ss)
# print(ply_queue)
elapsed_10_bal = time.time() - t
t_cummul_10_bal += elapsed_10_bal
lampamA = calc_lampamA_ply_queue(ss, n_plies, ply_queue, constraints)
table_10_bal.loc[ind, 'ply_count'] = n_plies
table_10_bal.loc[ind, 'time (s)'] = elapsed_10_bal
table_10_bal.loc[ind, 'no change in ss'] = is_equal(
ss, ply_queue, ss_ini, constraints.sym)
table_10_bal.loc[ind, 'f_A ini'] = sum(
in_plane_coeffs * ((lampam_ini[0:4] - lampam_target[0:4]) ** 2))
table_10_bal.loc[ind, 'f_A solution'] = sum(
in_plane_coeffs * ((lampamA - lampam_target[0:4]) ** 2))
table_10_bal.loc[ind, 'diff lampam 1'] = abs(lampam_ini[0]-lampamA[0])
table_10_bal.loc[ind, 'diff lampam 2'] = abs(lampam_ini[1]-lampamA[1])
table_10_bal.loc[ind, 'diff lampam 3'] = abs(lampam_ini[2]-lampamA[2])
table_10_bal.loc[ind, 'diff lampam 4'] = abs(lampam_ini[3]-lampamA[3])
table_10_bal.loc[ind, 'lampam[1]'] = lampamA[0]
table_10_bal.loc[ind, 'lampam[2]'] = lampamA[1]
table_10_bal.loc[ind, 'lampam[3]'] = lampamA[2]
table_10_bal.loc[ind, 'lampam[4]'] = lampamA[3]
table_10_bal.loc[ind, 'lampam_ini[1]'] = lampam_ini[0]
table_10_bal.loc[ind, 'lampam_ini[2]'] = lampam_ini[1]
table_10_bal.loc[ind, 'lampam_ini[3]'] = lampam_ini[2]
table_10_bal.loc[ind, 'lampam_ini[4]'] = lampam_ini[3]
ss_flatten = ' '.join(np.array(ss, dtype=str))
table_10_bal.loc[ind, 'ss'] = ss_flatten
ply_queue_flatten = ' '.join(np.array(ply_queue, dtype=str))
table_10_bal.loc[ind, 'ply_queue'] = ply_queue_flatten
ss_ini_flatten = ' '.join(np.array(ss_ini, dtype=str))
table_10_bal.loc[ind, 'ss_ini'] = ss_ini_flatten
A = A_from_lampam(lampamA, mat)
filter_ABD(A=A)
A11 = A[0, 0]
A22 = A[1, 1]
A12 = A[0, 1]
A66 = A[2, 2]
A16 = A[0, 2]
A26 = A[1, 2]
if A11_ini:
table_10_bal.loc[ind, 'diff A11 percentage'] \
= 100 * abs((A11 - A11_ini)/A11_ini)
else:
table_10_bal.loc[ind, 'diff A11 percentage'] = 0
if A22_ini:
table_10_bal.loc[ind, 'diff A22 percentage'] \
= 100 * abs((A22 - A22_ini)/A22_ini)
else:
table_10_bal.loc[ind, 'diff A22 percentage'] = 0
if A12_ini:
table_10_bal.loc[ind, 'diff A12 percentage'] \
= 100 * abs((A12 - A12_ini)/A12_ini)
else:
table_10_bal.loc[ind, 'diff A12 percentage'] = 0
if A66_ini:
table_10_bal.loc[ind, 'diff A66 percentage'] \
= 100 * abs((A66 - A66_ini)/A66_ini)
else:
table_10_bal.loc[ind, 'diff A66 percentage'] = 0
if A16_ini:
table_10_bal.loc[ind, 'diff A16 percentage'] \
= 100 * abs((A16 - A16_ini)/A16_ini)
else:
table_10_bal.loc[ind, 'diff A16 percentage'] = 0
if A26_ini:
table_10_bal.loc[ind, 'diff A26 percentage'] \
= 100 * abs((A26 - A26_ini)/A26_ini)
else:
table_10_bal.loc[ind, 'diff A26 percentage'] = 0
#==========================================================================
# Refinement for membrane properties
#==========================================================================
t = time.time()
ss_list, ply_queue_list, lampamA2_list = repair_membrane(
ss=ss,
ply_queue=ply_queue,
mini_10=mini_10,
in_plane_coeffs=in_plane_coeffs,
parameters=parameters,
constraints=constraints,
lampam_target=lampam_target)
ss2 = ss_list[0]
ply_queue2 = ply_queue_list[0]
lampamA2 = lampamA2_list[0]
# print('ss after repair membrane')
# print_ss(ss2)
# print(ply_queue2)
elapsed_membrane = time.time() - t
t_cummul_membrane += elapsed_membrane
lampamA2_check = calc_lampamA_ply_queue(
ss2, n_plies, ply_queue2, constraints)
if not (abs(lampamA2_check - lampamA2) < 1e-10).all():
raise Exception('This should not happen')
table_membrane.loc[ind, 'ply_count'] = n_plies
table_membrane.loc[ind, 'time (s)'] = elapsed_membrane
table_membrane.loc[ind, 'no change in ss'] \
= (abs(lampamA - lampamA2) < 1e-10).all()
f_A_ini = sum(in_plane_coeffs * ((lampamA - lampam_target[0:4]) ** 2))
table_membrane.loc[ind, 'f_A ini'] = f_A_ini
f_A_sol = sum(in_plane_coeffs * ((lampamA2 - lampam_target[0:4]) ** 2))
table_membrane.loc[ind, 'f_A solution'] = f_A_sol
table_membrane.loc[ind, 'diff lampam 1 solution'] = abs(
lampamA2[0]-lampam_target[0])
table_membrane.loc[ind, 'diff lampam 2 solution'] = abs(
lampamA2[1]-lampam_target[1])
table_membrane.loc[ind, 'diff lampam 3 solution'] = abs(
lampamA2[2]-lampam_target[2])
table_membrane.loc[ind, 'diff lampam 4 solution'] = abs(
lampamA2[3]-lampam_target[3])
table_membrane.loc[ind, 'diff lampam 1 before'] = abs(
lampam_target[0]-lampamA[0])
table_membrane.loc[ind, 'diff lampam 2 before'] = abs(
lampam_target[1]-lampamA[1])
table_membrane.loc[ind, 'diff lampam 3 before'] = abs(
lampam_target[2]-lampamA[2])
table_membrane.loc[ind, 'diff lampam 4 before'] = abs(
lampam_target[3]-lampamA[3])
table_membrane.loc[ind, 'lampam[1]'] = lampamA2[0]
table_membrane.loc[ind, 'lampam[2]'] = lampamA2[1]
table_membrane.loc[ind, 'lampam[3]'] = lampamA2[2]
table_membrane.loc[ind, 'lampam[4]'] = lampamA2[3]
table_membrane.loc[ind, 'lampam_target[1]'] = lampam_target[0]
table_membrane.loc[ind, 'lampam_target[2]'] = lampam_target[1]
table_membrane.loc[ind, 'lampam_target[3]'] = lampam_target[2]
table_membrane.loc[ind, 'lampam_target[4]'] = lampam_target[3]
table_membrane.loc[ind, 'lampam_before[1]'] = lampamA[0]
table_membrane.loc[ind, 'lampam_before[2]'] = lampamA[1]
table_membrane.loc[ind, 'lampam_before[3]'] = lampamA[2]
table_membrane.loc[ind, 'lampam_before[4]'] = lampamA[3]
ss_flatten = ' '.join(np.array(ss2, dtype=str))
table_membrane.loc[ind, 'ss'] = ss_flatten
ply_queue_flatten = ' '.join(np.array(ply_queue2, dtype=str))
table_membrane.loc[ind, 'ply_queue'] = ply_queue_flatten
ss_flatten = ' '.join(np.array(ss, dtype=str))
table_membrane.loc[ind, 'ss_ini'] = ss_flatten
ply_queue_flatten = ' '.join(np.array(ply_queue, dtype=str))
table_membrane.loc[ind, 'ply_queue_ini'] = ply_queue_flatten
A2 = A_from_lampam(lampamA2, mat)
filter_ABD(A=A2)
A11_2 = A2[0, 0]
A22_2 = A2[1, 1]
A12_2 = A2[0, 1]
A66_2 = A2[2, 2]
A16_2 = A2[0, 2]
A26_2 = A2[1, 2]
if A11_ini:
table_membrane.loc[ind, 'diff A11 percentage'] \
= 100 * abs((A11_2 - A11_ini)/A11_ini)
else:
table_membrane.loc[ind, 'diff A11 percentage'] = 0
if A22_ini:
table_membrane.loc[ind, 'diff A22 percentage'] \
= 100 * abs((A22_2 - A22_ini)/A22_ini)
else:
table_membrane.loc[ind, 'diff A22 percentage'] = 0
if A12_ini:
table_membrane.loc[ind, 'diff A12 percentage'] \
= 100 * abs((A12_2 - A12_ini)/A12_ini)
else:
table_membrane.loc[ind, 'diff A12 percentage'] = 0
if A66_ini:
table_membrane.loc[ind, 'diff A66 percentage'] \
= 100 * abs((A66_2 - A66_ini)/A66_ini)
else:
table_membrane.loc[ind, 'diff A66 percentage'] = 0
if A16_ini:
table_membrane.loc[ind, 'diff A16 percentage'] \
= 100 * abs((A16_2 - A16_ini)/A16_ini)
else:
table_membrane.loc[ind, 'diff A16 percentage'] = 0
if A26_ini:
table_membrane.loc[ind, 'diff A26 percentage'] \
= 100 * abs((A26_2 - A26_ini)/A26_ini)
else:
table_membrane.loc[ind, 'diff A26 percentage'] = 0
#==========================================================================
# Repair for disorientation and contiguity
#==========================================================================
t = time.time()
ss, inward, outward = repair_diso_contig(
ss2, ply_queue2, constraints, n_D1)
elapsed_diso_contig = time.time() - t
if inward:
n_success_inward_repair_diso_contig += 1
table_diso_contig.loc[ind, 'inward_repair_succes'] = True
else:
table_diso_contig.loc[ind, 'inward_repair_succes'] = False
if not outward:
table_diso_contig.loc[ind, 'outward_repair_succes'] = False
table_diso_contig.loc[ind, 'ply_count'] = n_plies
table_diso_contig.loc[ind, 'time (s)'] = elapsed_diso_contig
table_diso_contig.loc[ind, 'lampam_ini[9]'] = lampam_ini[8]
table_diso_contig.loc[ind, 'lampam_ini[10]'] = lampam_ini[9]
table_diso_contig.loc[ind, 'lampam_ini[11]'] = lampam_ini[10]
table_diso_contig.loc[ind, 'lampam_ini[12]'] = lampam_ini[11]
ss_ini_flatten = ' '.join(np.array(ss_ini, dtype=str))
table_diso_contig.loc[ind, 'ss_ini'] = ss_ini_flatten
else:
n_success_outward_repair_diso_contig += 1
table_diso_contig.loc[ind, 'outward_repair_succes'] = True
t_cummul_diso_contig += elapsed_diso_contig
lampam = calc_lampam(ss, constraints)
table_diso_contig.loc[ind, 'ply_count'] = n_plies
table_diso_contig.loc[ind, 'time (s)'] = elapsed_diso_contig
table_diso_contig.loc[ind, 'no change in ss'] \
= (abs(lampam - lampam_ini) < 1e-10).all()
f_D_ini = sum(
out_of_plane_coeffs*((lampam_ini[8:12] - lampam_target[8:12])**2))
table_diso_contig.loc[ind, 'f_D ini'] = f_D_ini
f_D_sol = sum(
out_of_plane_coeffs * ((lampam[8:12] - lampam_target[8:12]) ** 2))
table_diso_contig.loc[ind, 'f_D solution'] = f_D_sol
table_diso_contig.loc[ind, 'diff lampam 9'] = abs(
lampam_ini[8]-lampam[8])
table_diso_contig.loc[ind, 'diff lampam 10'] = abs(
lampam_ini[9]-lampam[9])
table_diso_contig.loc[ind, 'diff lampam 11'] = abs(
lampam_ini[10]-lampam[10])
table_diso_contig.loc[ind, 'diff lampam 12'] = abs(
lampam_ini[11]-lampam[11])
table_diso_contig.loc[ind, 'lampam[9]'] = lampam[8]
table_diso_contig.loc[ind, 'lampam[10]'] = lampam[9]
table_diso_contig.loc[ind, 'lampam[11]'] = lampam[10]
table_diso_contig.loc[ind, 'lampam[12]'] = lampam[11]
table_diso_contig.loc[ind, 'lampam_ini[9]'] = lampam_ini[8]
table_diso_contig.loc[ind, 'lampam_ini[10]'] = lampam_ini[9]
table_diso_contig.loc[ind, 'lampam_ini[11]'] = lampam_ini[10]
table_diso_contig.loc[ind, 'lampam_ini[12]'] = lampam_ini[11]
ss_flatten = ' '.join(np.array(ss, dtype=str))
table_diso_contig.loc[ind, 'ss'] = ss_flatten
ss_ini_flatten = ' '.join(np.array(ss_ini, dtype=str))
'''predefined patches.'''
from matplotlib import patches, transforms
from matplotlib.path import Path
import matplotlib.pyplot as plt
from functools import reduce
import numpy as np
import pdb
from numpy.linalg import norm
from .utils import rotate
from .setting import node_setting
_basic_prop_list = ['ls', 'facecolor', 'edgecolor', 'lw', 'zorder']
def affine(pp, offset=(0,0), scale=1, angle=0):
'''rotate path/patch by angle'''
if isinstance(pp, (np.ndarray, list, tuple)):
return rotate(pp, angle)*scale + offset
# define the transformation
_affine = transforms.Affine2D()
if angle!=0: _affine.rotate(angle)
if scale!=1: _affine.scale(scale)
if not np.allclose(offset, 0): _affine.translate(*offset)
if hasattr(pp, 'vertices'):
# for path
pp = pp.transformed(_affine)
else:
# for patch
pp.set_transform(_affine+plt.gca().transData)
return pp
def rounded_path(vertices, roundness, close=False):
'''make rounded path from vertices.'''
vertices = np.asarray(vertices)
if roundness == 0:
vertices = vertices if not close else np.concatenate([vertices, vertices[:1]],axis=0)
return Path(vertices, codes=[Path.MOVETO]+[Path.LINETO]*(len(vertices)-1))
if close:
vertices = np.concatenate([vertices, vertices[:2]], axis=0)
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 15:45:56 2020
@author: ZongSing_NB
Main reference:
http://www.ejournal.org.cn/EN/10.3969/j.issn.0372-2112.2019.10.020
"""
import numpy as np
import matplotlib.pyplot as plt
class EGolden_SWOA():
def __init__(self, fitness, D=30, P=20, G=500, ub=1, lb=0,
b=1, a_max=2, a_min=0, a2_max=-1, a2_min=-2, l_max=1, l_min=-1):
self.fitness = fitness
self.D = D
self.P = P
self.G = G
self.ub = ub
self.lb = lb
self.a_max = a_max
self.a_min = a_min
self.a2_max = a2_max
self.a2_min = a2_min
self.l_max = l_max
self.l_min = l_min
self.b = b
self.gbest_X = np.zeros([self.D])
import numpy as np
from copy import deepcopy
def golden_section(A, C, x, delta, taudecimal, function, tol):
iters = 0
dict = {}
diff = 1e9
opt_score = None
opt_tau = None
B = A + delta * np.abs(C - A)
D = C - delta * np.abs(C - A)
while np.abs(diff) > tol and iters < 200:
iters += 1
B = np.round(B, taudecimal)
D = np.round(D, taudecimal)
if B in dict:
score_B = dict[B]
else:
score_B = function(x, B)
dict[B] = score_B
if D in dict:
score_D = dict[D]
else:
score_D = function(x, D)
dict[D] = score_D
if score_B <= score_D:
opt_score = score_B
opt_tau = B
C = D
D = B
B = A + delta * np.abs(C - A)
else:
opt_score = score_D
opt_tau = D
A = B
B = D
D = C - delta * np.abs(C - A)
diff = score_B - score_D
return opt_tau, opt_score
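# Illustrative usage sketch, not in the original module: minimising a simple
# hypothetical objective in tau with the routine above. delta is the golden-section
# fraction (about 0.382, i.e. 1 - 1/phi) and taudecimal sets the rounding used for
# memoisation.
def _example_golden_section():
    objective = lambda x, tau: (tau - 2.0) ** 2 + x  # hypothetical objective, minimum at tau = 2
    opt_tau, opt_score = golden_section(0.0, 5.0, 0.0, 0.382, 3, objective, 1e-6)
    return opt_tau, opt_score  # opt_tau close to 2.0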
def twod_golden_section(a, c, A, C, delta, function,
tol, max_iter, bwdecimal, taudecimal, verbose = False):
b = a + delta * np.abs(c - a)
d = c - delta * np.abs(c - a)
"""
__author__: <NAME>
"""
import datetime
import pytorch_lightning as pl
import segmentation_models_pytorch as smp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.modules import BCEWithLogitsLoss
from tqdm import tqdm
from wtfml.engine.image.embedder.utils import (
get_mean_prediction_from_mask,
get_recall_from_mask,
hflip_batch,
l2_norm,
)
class SegmentationPLEngine(pl.LightningModule):
def __init__(
self,
face_encoder,
location_decoder,
is_train_encoder=False,
lr=0.001,
image_loss_method="MSE",
class_loss_fn=nn.BCEWithLogitsLoss(),
lamb=1,
F_score_metrix=smp.utils.metrics.Fscore(threshold=0.5),
normalize = False,
):
super(SegmentationPLEngine, self).__init__()
self.face_encoder = face_encoder
if not is_train_encoder:
for name, module in self.face_encoder._modules.items():
module.requires_grad = False  # freeze all layers
self.location_decoder = location_decoder
self.scaler = None
if image_loss_method == "MSE":
self.image_loss_fn = nn.MSELoss()
elif image_loss_method == "BCE":
self.image_loss_fn = nn.BCEWithLogitsLoss()
elif image_loss_method == "L1":
self.image_loss_fn = nn.L1Loss()
else:
raise ValueError("image_loss_method should be MSE or L1 or BCE")
self.class_loss_fn = class_loss_fn
self.F_score_metrix = F_score_metrix
self.lamb = lamb
self.lr = lr
self.normalize = normalize
def forward(self, x):
self.face_encoder.eval()
with torch.no_grad():
fliped = hflip_batch(x)
emb_batch = self.face_encoder(x) + self.face_encoder(fliped)
if self.normalize:
representations = l2_norm(emb_batch)
if not self.normalize:
representations = emb_batch /2
x = self.location_decoder(representations)
return x
def training_step(self, batch, batch_idx):
# REQUIRED
image, mask, target = batch
# target = main_target + sub_target * self.sub_adjustment
pred_batch_train = self.forward(image)
train_loss = self.image_loss_fn(pred_batch_train, mask)
F_score_image = self.F_score_metrix(pred_batch_train, mask)
# pred_class, _ = get_mean_prediction_from_mask(pred_batch_train)
# class_loss = self.class_loss_fn(pred_class, target)
self.log(
"train_batch_F_score_image",
F_score_image,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"train_batch_loss",
train_loss,
prog_bar=True,
on_epoch=True,
on_step=True,
logger=True,
)
return {"loss": train_loss}
def validation_step(self, batch, batch_idx):
image, mask, target = batch
# target = main_target + sub_target * self.sub_adjustment
pred_batch_valid = self.forward(image)
recall_list_just_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=1, metrix="max")
recall_list_wide_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=2, metrix="max")
if self.current_epoch < 35:
recall_list_just = 0
loss = np.inf
recall_list_wide = 0
acc_just = 0
acc_wide = 0
else:
recall_list_just = sum(recall_list_just_list) / len(recall_list_just_list)
loss = self.image_loss_fn(pred_batch_valid, mask)
recall_list_wide = sum(recall_list_wide_list) / len(recall_list_wide_list)
acc_just = len(np.where(np.array(recall_list_just_list) > 0)[0]) / len(recall_list_just_list)
acc_wide = len(np.where(np.array(recall_list_wide_list) > 0)[0]) / len(recall_list_wide_list)
self.log(
"recall_list_just",
recall_list_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"valid_loss",
loss,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"recall_list_wide",
recall_list_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_wide",
acc_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_just",
acc_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
return {
"val_loss": loss,
# "acc": acc,
}
def configure_optimizers(self):
# REQUIRED
opt = optim.Adam(
self.location_decoder.parameters(),
lr=self.lr,
)
sch = optim.lr_scheduler.CosineAnnealingLR(opt, T_max= 20)
return [opt], [sch]
class SegmentationPLEngineWithEye(pl.LightningModule):
def __init__(
self,
face_encoder,
location_decoder,
eye_encoder,
is_train_encoder=False,
lr=0.001,
image_loss_method="MSE",
class_loss_fn=nn.BCEWithLogitsLoss(),
lamb=1,
F_score_metrix=smp.utils.metrics.Fscore(threshold=0.5),
normalize = False,
):
super(SegmentationPLEngineWithEye, self).__init__()
self.face_encoder = face_encoder
if not is_train_encoder:
for name, module in self.face_encoder._modules.items():
module.requires_grad = False # 全ての層を凍結
self.location_decoder = location_decoder
self.scaler = None
if image_loss_method == "MSE":
self.image_loss_fn = nn.MSELoss()
elif image_loss_method == "BCE":
self.image_loss_fn = nn.BCEWithLogitsLoss()
elif image_loss_method == "L1":
self.image_loss_fn = nn.L1Loss()
else:
raise ValueError("image_loss_method should be MSE or L1 or BCE")
self.eye_encoder = eye_encoder
self.class_loss_fn = class_loss_fn
self.F_score_metrix = F_score_metrix
self.lamb = lamb
self.lr = lr
self.normalize = normalize
def forward(self, face_image : torch.Tensor, right_eye_image : torch.Tensor, left_eye_image : torch.Tensor):
self.face_encoder.eval()
with torch.no_grad():
fliped = hflip_batch(face_image,)
emb_batch = self.face_encoder(face_image,) + self.face_encoder(fliped)
        if self.normalize:
            representations = l2_norm(emb_batch)
        else:
            representations = emb_batch / 2
right_vector = self.eye_encoder(right_eye_image)
left_vector = self.eye_encoder(left_eye_image)
eye_vector = (right_vector + left_vector)/2
representations = torch.cat([representations, eye_vector], dim = 1)
x = self.location_decoder(representations)
return x
def training_step(self, batch, batch_idx):
# REQUIRED
image,right_eye_image, left_eye_image, mask, target = batch
pred_batch_train = self.forward(image, right_eye_image, left_eye_image)
# target = main_target + sub_target * self.sub_adjustment
train_loss = self.image_loss_fn(pred_batch_train, mask)
F_score_image = self.F_score_metrix(pred_batch_train, mask)
self.log(
"train_batch_F_score_image",
F_score_image,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"train_batch_loss",
train_loss,
prog_bar=True,
on_epoch=True,
on_step=True,
logger=True,
)
return {"loss": train_loss}
def validation_step(self, batch, batch_idx):
image,right_eye_image, left_eye_image, mask, target = batch
pred_batch_valid = self.forward(image, right_eye_image, left_eye_image)
recall_list_just_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=1, metrix="max")
recall_list_wide_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=2, metrix="max")
if self.current_epoch < 35:
recall_list_just = 0
loss = np.inf
recall_list_wide = 0
acc_just = 0
acc_wide = 0
else:
recall_list_just = sum(recall_list_just_list) / len(recall_list_just_list)
loss = self.image_loss_fn(pred_batch_valid, mask)
recall_list_wide = sum(recall_list_wide_list) / len(recall_list_wide_list)
acc_just = len(np.where(np.array(recall_list_just_list) > 0)[0]) / len(recall_list_just_list)
            acc_wide = len(np.where(np.array(recall_list_wide_list) > 0)[0]) / len(recall_list_wide_list)
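# Illustrative sketch (standalone, not part of the engine above): the flip-and-average
# embedding trick used in SegmentationPLEngineWithEye.forward. Here hflip_batch is
# assumed to be a horizontal flip along the width dimension and l2_norm is assumed to
# be an L2 normalisation over the feature dimension; the tiny encoders are dummies.
import torch
from torch import nn
import torch.nn.functional as F
def _flip_averaged_representation(face_image, right_eye, left_eye,
                                  face_encoder, eye_encoder, normalize=False):
    with torch.no_grad():
        flipped = torch.flip(face_image, dims=[-1])           # assumed hflip_batch
        emb = face_encoder(face_image) + face_encoder(flipped)
    emb = F.normalize(emb, dim=1) if normalize else emb / 2    # assumed l2_norm
    eye = (eye_encoder(right_eye) + eye_encoder(left_eye)) / 2
    return torch.cat([emb, eye], dim=1)
def _demo_flip_averaged_representation():
    face_encoder = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 16))
    eye_encoder = nn.Sequential(nn.Flatten(), nn.Linear(3 * 4 * 4, 4))
    face = torch.randn(2, 3, 8, 8)
    right_eye, left_eye = torch.randn(2, 3, 4, 4), torch.randn(2, 3, 4, 4)
    rep = _flip_averaged_representation(face, right_eye, left_eye, face_encoder, eye_encoder)
    return rep.shape  # expected: torch.Size([2, 20])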
"""Module dedicated to extraction of Concept Metafeatures."""
import typing as t
import numpy as np
import scipy.spatial
import sklearn
class MFEConcept:
"""Keep methods for metafeatures of ``Concept`` group.
The convention adopted for metafeature extraction related methods is to
always start with ``ft_`` prefix to allow automatic method detection. This
prefix is predefined within ``_internal`` module.
    All method signatures follow the conventions and restrictions listed below:
1. For independent attribute data, ``X`` means ``every type of attribute``,
``N`` means ``Numeric attributes only`` and ``C`` stands for
``Categorical attributes only``. It is important to note that the
categorical attribute sets between ``X`` and ``C`` and the numerical
attribute sets between ``X`` and ``N`` may differ due to data
transformations, performed while fitting data into MFE model,
enabled by, respectively, ``transform_num`` and ``transform_cat``
arguments from ``fit`` (MFE method).
2. Only arguments in MFE ``_custom_args_ft`` attribute (set up inside
``fit`` method) are allowed to be required method arguments. All other
arguments must be strictly optional (i.e., has a predefined default
value).
3. The initial assumption is that the user can change any optional
argument, without any previous verification of argument value or its
type, via kwargs argument of ``extract`` method of MFE class.
4. The return value of all feature extraction methods should be a single
value or a generic List (preferably a :obj:`np.ndarray`) type with
numeric values.
    There is another type of method adopted for automatic detection. These
    methods use the ``precompute_`` prefix instead. They run while fitting
    some data into an MFE model
automatically, and their objective is to precompute some common value
shared between more than one feature extraction method. This strategy is a
trade-off between more system memory consumption and speeds up of feature
extraction. Their return value must always be a dictionary whose keys are
possible extra arguments for both feature extraction methods and other
precomputation methods. Note that there is a share of precomputed values
between all valid feature-extraction modules (e.g., ``class_freqs``
computed in module ``statistical`` can freely be used for any
precomputation or feature extraction method of module ``landmarking``).
"""
@classmethod
def precompute_concept_dist(
cls, N: np.ndarray, concept_dist_metric: str = "euclidean", **kwargs
) -> t.Dict[str, t.Any]:
"""Precompute some useful things to support complexity measures.
Parameters
----------
N : :obj:`np.ndarray`, optional
Numerical fitted data.
concept_dist_metric : str, optional
Metric used to compute distance between each pair of examples. See
cdist from scipy for more options.
**kwargs
Additional arguments. May have previously precomputed before this
method from other precomputed methods, so they can help speed up
this precomputation.
Returns
-------
:obj:`dict`
With following precomputed items:
- ``concept_distances`` (:obj:`np.ndarray`): Distance matrix of
examples from N.
"""
precomp_vals = {}
if N is not None and "concept_distances" not in kwargs:
# 0-1 scaling
N = sklearn.preprocessing.MinMaxScaler(
feature_range=(0, 1)
).fit_transform(N)
# distance matrix
concept_distances = scipy.spatial.distance.cdist(
N, N, metric=concept_dist_metric
)
precomp_vals["concept_distances"] = concept_distances
return precomp_vals
@classmethod
def ft_conceptvar(
cls,
N: np.ndarray,
y: np.ndarray,
conceptvar_alpha: float = 2.0,
concept_dist_metric: str = "euclidean",
concept_minimum: float = 10e-10,
concept_distances: t.Optional[np.ndarray] = None,
) -> np.ndarray:
"""Compute the concept variation that estimates the variability of
class labels among examples.
Parameters
----------
N : :obj:`np.ndarray`
Numerical fitted data.
y : :obj:`np.ndarray`
Target attribute.
conceptvar_alpha : float, optional
The alpha value to adjust the weight. The higher the alpha less
is the effect of the weight in the computation.
concept_dist_metric : str, optional
Metric used to compute distance between each pair of examples. See
cdist from scipy for more options. Used only if the argument
``concept_distances`` is None.
concept_minimum: float, optional
This variable is the minimum value considered in the computation.
            It will be added when necessary to avoid division by zero.
concept_distances : :obj:`np.ndarray`, optional
Distance matrix of examples from N. Argument used to take
advantage of precomputations.
Returns
-------
:obj:`np.ndarray`
An array with the concept variation for each example.
References
----------
.. [1] <NAME>. (1999). Understanding accuracy performance through
concept characterization and algorithm analysis. In Proceedings of
the ICML-99 workshop on recent advances in meta-learning and future
work (pp. 3-9).
"""
if concept_distances is None:
sub_dic = cls.precompute_concept_dist(N, concept_dist_metric)
concept_distances = sub_dic["concept_distances"]
n_col = N.shape[1]
div = np.sqrt(n_col) - concept_distances
        div[div <= 0] = concept_minimum  # guarantee the denominator is never zero
weights = np.power(2, -conceptvar_alpha * (concept_distances / div))
np.fill_diagonal(weights, 0.0)
rep_class_matrix = np.repeat(np.expand_dims(y, 0), y.shape[0], axis=0)
# check if class is different
class_diff = np.not_equal(rep_class_matrix.T, rep_class_matrix).astype(
int
)
conceptvar_by_example = np.sum(weights * class_diff, axis=0) / np.sum(
weights, axis=0
)
# The original meta-feature is the mean of the return.
# It will be done by the summary functions.
return conceptvar_by_example
@classmethod
def ft_wg_dist(
cls,
N: np.ndarray,
wg_dist_alpha: float = 2.0,
concept_dist_metric: str = "euclidean",
concept_minimum: float = 10e-10,
concept_distances: t.Optional[np.ndarray] = None,
) -> np.ndarray:
"""Compute the weighted distance, that captures how dense or sparse
is the example distribution.
Parameters
----------
N : :obj:`np.ndarray`
Numerical fitted data.
wg_dist_alpha : float, optional
The alpha value to adjust the weight. The higher the alpha less
is the effect of the weight in the computation.
concept_dist_metric : str, optional
Metric used to compute distance between each pair of examples. See
cdist from scipy for more options. Used only if the argument
``concept_distances`` is None.
concept_minimum : float, optional
This variable is the minimum value considered in the computation.
            It will be added when necessary to avoid division by zero.
concept_distances : :obj:`np.ndarray`, optional
Distance matrix of examples from N. Argument used to take
advantage of precomputations.
Returns
-------
:obj:`np.ndarray`
An array with the weighted distance for each example.
References
----------
.. [1] <NAME>. (1999). Understanding accuracy performance through
concept characterization and algorithm analysis. In Proceedings of
the ICML-99 workshop on recent advances in meta-learning and future
work (pp. 3-9).
"""
if concept_distances is None:
sub_dic = cls.precompute_concept_dist(N, concept_dist_metric)
concept_distances = sub_dic["concept_distances"]
n_col = N.shape[1]
div = np.sqrt(n_col) - concept_distances
        div[div <= 0] = concept_minimum  # guarantee the denominator is never zero
weights = np.power(2, -wg_dist_alpha * (concept_distances / div))
np.fill_diagonal(weights, 0.0)
wg_dist_example = np.sum(weights * concept_distances, axis=0) / np.sum(
weights, axis=0
)
# The original meta-feature is the mean of the return.
# It will be done by summary functions.
return wg_dist_example
@classmethod
def ft_impconceptvar(
cls,
N: np.ndarray,
y: np.ndarray,
impconceptvar_alpha: float = 1.0,
concept_dist_metric: str = "euclidean",
concept_distances: t.Optional[np.ndarray] = None,
) -> np.ndarray:
"""Compute the improved concept variation that estimates the
variability of class labels among examples.
Parameters
----------
N : :obj:`np.ndarray`
Numerical fitted data.
y : :obj:`np.ndarray`
Target attribute.
impconceptvar_alpha : float, optional
The alpha value to adjust the weight. The higher the alpha less
is the effect of the weight in the computation.
concept_dist_metric : str, optional
Metric used to compute distance between each pair of examples. See
cdist from scipy for more options. Used only if the argument
``concept_distances`` is None.
concept_distances : :obj:`np.ndarray`, optional
Distance matrix of examples from ``N``. Argument used to take
advantage of precomputations.
Returns
-------
:obj:`np.ndarray`
An array with the improved concept variation for each example.
References
----------
.. [1] <NAME> and <NAME> (2002). A Characterization of Difficult
Problems in Classification. Proceedings of the 2002 International
Conference on Machine Learning and Applications (pp. 133-138).
"""
if concept_distances is None:
sub_dic = cls.precompute_concept_dist(N, concept_dist_metric)
concept_distances = sub_dic["concept_distances"]
radius = np.ceil(concept_distances).astype(int)
radius[radius == 0] = 1
weights = np.power(2, -impconceptvar_alpha * radius)
np.fill_diagonal(weights, 0.0)
rep_class_matrix = np.repeat(np.expand_dims(y, 0), y.shape[0], axis=0)
# check if class is different
class_diff = np.not_equal(rep_class_matrix.T, rep_class_matrix).astype(
int
)
impconceptvar_by_example = np.sum(weights * class_diff, axis=0)
# The original meta-feature is the mean of the return.
# It will be done by summary functions.
return impconceptvar_by_example
@classmethod
def ft_cohesiveness(
cls,
N: np.ndarray,
cohesiveness_alpha: float = 1.0,
concept_dist_metric: str = "euclidean",
concept_distances: t.Optional[np.ndarray] = None,
) -> np.ndarray:
"""Compute the improved version of the weighted distance, that
captures how dense or sparse is the example distribution.
Parameters
----------
N : :obj:`np.ndarray`
Numerical fitted data.
cohesiveness_alpha : float, optional
The alpha value to adjust the weight. The higher the alpha less
is the effect of the weight in the computation.
concept_dist_metric : str, optional
Metric used to compute distance between each pair of examples. See
cdist from scipy for more options. Used only if the argument
``concept_distances`` is None.
concept_distances : :obj:`np.ndarray`, optional
Distance matrix of examples from ``N``. Argument used to take
advantage of precomputations.
Returns
-------
:obj:`np.ndarray`
An array with the cohesiveness for each example.
References
----------
.. [1] <NAME> and <NAME> (2002). A Characterization of Difficult
Problems in Classification. Proceedings of the 2002 International
Conference on Machine Learning and Applications (pp. 133-138).
"""
if concept_distances is None:
sub_dic = cls.precompute_concept_dist(N, concept_dist_metric)
concept_distances = sub_dic["concept_distances"]
        radius = np.ceil(concept_distances).astype(int)
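# Illustrative usage sketch (assumes the MFEConcept class defined above is available in
# the current scope): precompute the pairwise distance matrix once and reuse it for the
# per-example concept metafeatures. The synthetic data below is only for demonstration.
def _demo_concept_metafeatures():
    import numpy as np
    rng = np.random.RandomState(0)
    N = rng.rand(30, 4)                      # 30 examples, 4 numeric attributes
    y = rng.randint(0, 2, size=30)           # binary target
    precomp = MFEConcept.precompute_concept_dist(N)
    dists = precomp["concept_distances"]
    conceptvar = MFEConcept.ft_conceptvar(N, y, concept_distances=dists)
    wg_dist = MFEConcept.ft_wg_dist(N, concept_distances=dists)
    # The scalar metafeatures are typically summaries (e.g., the mean) of these arrays.
    return conceptvar.mean(), wg_dist.mean()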
# PyVot Python Variational Optimal Transportation
# Author: <NAME> <<EMAIL>>
# Date: April 28th 2020
# Licence: MIT
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from vot_numpy import VOT
import utils
np.random.seed(19)
"""
Implementation of DOGRE / WOGRE approach.
Notice projection==embedding.
"""
import numpy as np
from sklearn.linear_model import Ridge
import time
from state_of_the_art.state_of_the_art_embedding import final
from math import pow
from utils import get_initial_proj_nodes_by_k_core, get_initial_proj_nodes_by_degrees
import heapq
import networkx as nx
def user_print(item, user_wish):
"""
A function to show the user the state of the our_embeddings_methods. If you want a live update of the current state
of code and some details: set user wish to True else False
"""
if user_wish is True:
print(item, sep=' ', end='', flush=True)
time.sleep(3)
print(" ", end='\r')
def create_sub_G(proj_nodes, G):
"""
Creating a new graph from the nodes in the initial embedding so we can do the initial embedding on it
:param proj_nodes: The nodes in the initial embedding
:param G: Our graph
:return: A sub graph of G that its nodes are the nodes in the initial embedding.
"""
sub_G = G.subgraph(list(proj_nodes))
return sub_G
def create_dict_neighbors(G):
"""
Create a dictionary of neighbors.
:param G: Our graph
:return: neighbors_dict where key==node and value==set_of_neighbors (both incoming and outgoing)
"""
G_nodes = list(G.nodes())
neighbors_dict = {}
for i in range(len(G_nodes)):
node = G_nodes[i]
neighbors_dict.update({node: set(G[node])})
return neighbors_dict
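# Illustrative sketch: what create_dict_neighbors returns for a tiny directed graph.
# The three-node cycle below is an assumption used purely for demonstration.
def _demo_create_dict_neighbors():
    G = nx.DiGraph()
    G.add_edges_from([(0, 1), (1, 2), (2, 0)])
    neighbors = create_dict_neighbors(G)
    # For a DiGraph, set(G[node]) yields the successors of node, so the result here is
    # {0: {1}, 1: {2}, 2: {0}}.
    return neighbors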
def create_dicts_same_nodes(my_set, neighbors_dict, node, dict_out, dict_in):
"""
A function to create useful dictionaries to represent connections between nodes that have the same type, i.e between
nodes that are in the embedding and between nodes that aren't in the embedding. It depends on the input.
:param my_set: Set of the nodes that aren't currently in the embedding OR Set of the nodes that are currently in
the embedding
:param neighbors_dict: Dictionary of all nodes and neighbors (both incoming and outgoing)
:param node: Current node
:param dict_out: explained below
:param dict_in: explained below
:return: There are 4 possibilities (2 versions, 2 to every version):
A) 1. dict_node_node_out: key == nodes not in embedding, value == set of outgoing nodes not in embedding
(i.e there is a directed edge (i,j) when i is the key node and j isn't in the embedding)
2. dict_node_node_in: key == nodes not in embedding , value == set of incoming nodes not in embedding
(i.e there is a directed edge (j,i) when i is the key node and j isn't in the embedding)
B) 1. dict_enode_enode_out: key == nodes in embedding , value == set of outgoing nodes in embedding
(i.e there is a directed edge (i,j) when i is the key node and j is in the embedding)
2. dict_enode_enode_in: key == nodes in embedding , value == set of incoming nodes in embedding
(i.e there is a directed edge (j,i) when i is the key node and j is in the embedding)
"""
set1 = neighbors_dict[node].intersection(my_set)
count_1 = 0
count_2 = 0
if (len(set1)) > 0:
count_1 += 1
dict_out.update({node: set1})
neigh = list(set1)
for j in range(len(neigh)):
if dict_in.get(neigh[j]) is None:
dict_in.update({neigh[j]: set([node])})
else:
dict_in[neigh[j]].update(set([node]))
else:
count_2 += 1
return dict_out, dict_in
def create_dict_node_enode(set_proj_nodes, neighbors_dict, H, node, dict_node_enode, dict_enode_node):
"""
A function to create useful dictionaries to represent connections between nodes that are in the embedding and
nodes that are not in the embedding.
:param set_proj_nodes: Set of the nodes that are in the embedding
:param neighbors_dict: Dictionary of all nodes and neighbors (both incoming and outgoing)
:param H: H is the undirected version of our graph
:param node: Current node
:param dict_node_enode: explained below
:param dict_enode_node: explained below
    :return: 1. dict_node_enode: key == nodes not in embedding, value == set of outgoing nodes in embedding (i.e
there is a directed edge (i,j) when i is the key node and j is in the embedding)
2. dict_enode_node: key == nodes not in embedding, value == set of incoming nodes in embedding (i.e
there is a directed edge (j,i) when i is the key node and j is in the embedding)
"""
set2 = neighbors_dict[node].intersection(set_proj_nodes)
set_all = set(H[node]).intersection(set_proj_nodes)
set_in = set_all - set2
if len(set2) > 0:
dict_node_enode.update({node: set2})
if len(set_in) > 0:
dict_enode_node.update({node: set_in})
return dict_node_enode, dict_enode_node
def create_dicts_of_connections(set_proj_nodes, set_no_proj_nodes, neighbors_dict, G):
"""
A function that creates 6 dictionaries of connections between different types of nodes.
:param set_proj_nodes: Set of the nodes that are in the projection
:param set_no_proj_nodes: Set of the nodes that aren't in the projection
:param neighbors_dict: Dictionary of neighbours
:return: 6 dictionaries, explained above (in the two former functions)
"""
dict_node_node_out = {}
dict_node_node_in = {}
dict_node_enode = {}
dict_enode_node = {}
dict_enode_enode_out = {}
dict_enode_enode_in = {}
list_no_proj = list(set_no_proj_nodes)
list_proj = list(set_proj_nodes)
H = G.to_undirected()
for i in range(len(list_no_proj)):
node = list_no_proj[i]
dict_node_node_out, dict_node_node_in = create_dicts_same_nodes(set_no_proj_nodes, neighbors_dict, node,
dict_node_node_out, dict_node_node_in)
dict_node_enode, dict_enode_node = create_dict_node_enode(set_proj_nodes, neighbors_dict, H, node,
dict_node_enode, dict_enode_node)
for i in range(len(list_proj)):
node = list_proj[i]
dict_enode_enode_out, dict_enode_enode_in = create_dicts_same_nodes(set_proj_nodes, neighbors_dict, node,
dict_enode_enode_out, dict_enode_enode_in)
return dict_node_node_out, dict_node_node_in, dict_node_enode, dict_enode_node, dict_enode_enode_out, dict_enode_enode_in
def calculate_average_projection_second_order(dict_proj, node, dict_enode_enode, average_two_order_proj, dim, G):
"""
A function to calculate the average embeddings of the second order neighbors, both outgoing and incoming,
depends on the input.
:param dict_proj: Dict of embeddings (key==node, value==its embedding)
:param node: Current node we're dealing with
:param dict_enode_enode: key==node in embedding , value==set of neighbors that are in the embedding. Direction
(i.e outgoing or incoming depends on the input)
:param average_two_order_proj: explained below
:return: Average embedding of second order neighbours, outgoing or incoming.
"""
two_order_neighs = dict_enode_enode.get(node)
k2 = 0
# if the neighbors in the projection also have neighbors in the projection calculate the average projection
if two_order_neighs is not None:
two_order_neighs_in = list(two_order_neighs)
k2 += len(two_order_neighs_in)
two_order_projs = []
for i in range(len(two_order_neighs_in)):
if G[node].get(two_order_neighs_in[i]) is not None:
two_order_proj = G[node][two_order_neighs_in[i]]["weight"]*dict_proj[two_order_neighs_in[i]]
else:
two_order_proj = G[two_order_neighs_in[i]][node]["weight"]*dict_proj[two_order_neighs_in[i]]
two_order_projs.append(two_order_proj)
two_order_projs = np.array(two_order_projs)
two_order_projs = np.mean(two_order_projs, axis=0)
# else, the average projection is 0
else:
two_order_projs = np.zeros(dim)
average_two_order_proj.append(two_order_projs)
return average_two_order_proj, k2
def calculate_projection_of_neighbors(current_node, proj_nodes, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G):
"""
    A function to calculate the average projection of first order neighbors and second order neighbors; the
    direction (outgoing or incoming) depends on the input.
    :param proj_nodes: Neighbors that are in the embedding, direction depends on the input.
    :param dict_proj: Dict of embeddings (key==node, value==embedding)
    :return: Average projection of first order neighbors, average projections of second order neighbors
        (incoming and outgoing), and the neighbor counts k1, k2_in, k2_out
"""
proj = []
# average projections of the two order neighbors, both incoming and outgoing
average_two_order_proj_in = []
average_two_order_proj_out = []
list_proj_nodes = list(proj_nodes)
# the number of first order neighbors
k1 = len(proj_nodes)
# to calculate the number of the second order neighbors
k2_in = 0
k2_out = 0
# to calculate the average projection of the second order neighbors
for k in range(len(list_proj_nodes)):
node = list_proj_nodes[k]
average_two_order_proj_in, a = calculate_average_projection_second_order(dict_proj, node, dict_enode_enode_in,
average_two_order_proj_in, dim, G)
k2_in += a
average_two_order_proj_out, b = calculate_average_projection_second_order(dict_proj, node, dict_enode_enode_out,
average_two_order_proj_out, dim, G)
k2_out += b
proj.append(dict_proj[node])
if G[current_node].get(node) is not None:
proj.append(G[current_node][node]["weight"] * dict_proj[node])
else:
proj.append(G[node][current_node]["weight"] * dict_proj[node])
# for every neighbor we have the average projection of its neighbors, so now do average on all of them
average_two_order_proj_in = np.array(average_two_order_proj_in)
average_two_order_proj_in = np.mean(average_two_order_proj_in, axis=0)
average_two_order_proj_out = np.array(average_two_order_proj_out)
average_two_order_proj_out = np.mean(average_two_order_proj_out, axis=0)
proj = np.array(proj)
# find the average proj
proj = np.mean(proj, axis=0)
return proj, average_two_order_proj_in, average_two_order_proj_out, k1, k2_in, k2_out
def calculate_projection(current_node, G, proj_nodes_in, proj_nodes_out, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim,
alpha1, alpha2, beta_11, beta_12, beta_21, beta_22):
"""
A function to calculate the final embedding of the node by D-VERSE method as explained in the pdf file in github.
:param proj_nodes_in: embeddings of first order incoming neighbors.
:param proj_nodes_out: embeddings of first order outgoing neighbors.
:param dict_proj: Dict of embeddings (key==node, value==projection)
:param dim: Dimension of the embedding
:param alpha1, alpha2, beta_11, beta_12, beta_21, beta_22: Parameters to calculate the final projection.
:return: The final projection of our node.
"""
if len(proj_nodes_in) > 0:
x_1, z_11, z_12, k1, k2_in_in, k2_in_out = calculate_projection_of_neighbors(current_node,
proj_nodes_in, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G)
else:
x_1, z_11, z_12 = np.zeros(dim), np.zeros(dim), np.zeros(dim)
if len(proj_nodes_out) > 0:
x_2, z_21, z_22, k1, k2_in_out, k2_out_out = calculate_projection_of_neighbors(current_node,
proj_nodes_out, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G)
else:
x_2, z_21, z_22 = np.zeros(dim), np.zeros(dim), np.zeros(dim)
# the final projection of the node
final_proj = alpha1*x_1+alpha2*x_2 - beta_11*z_11 - beta_12*z_12 - \
beta_21*z_21 - beta_22*z_22
return final_proj
def calculate_projection_weighted(current_node, G, proj_nodes_in, proj_nodes_out, dict_proj, dict_enode_enode_in, dict_enode_enode_out,
dim, params):
"""
A function to calculate the final embedding of the node by We-VERSE method as explained in the pdf file in github.
:param proj_nodes_in: embeddings of first order incoming neighbors.
:param proj_nodes_out: embeddings of first order outgoing neighbors.
:param dict_proj: Dict of embeddings (key==node, value==projection)
:param dict_enode_enode_in: key == nodes in embedding , value == set of incoming nodes in embedding
(i.e there is a directed edge (j,i) when i is the key node and j is in the embedding)
:param dict_enode_enode_out: key == nodes in embedding , value == set of outgoing nodes in embedding
(i.e there is a directed edge (i,j) when i is the key node and j is in the embedding)
:param dim: Dimension of the embedding space
:param params: Optimal parameters to calculate the embedding
:return: The final projection of our node.
"""
if len(proj_nodes_in) > 0:
x_1, z_11, z_12, k1_in, k2_in_in, k2_in_out = calculate_projection_of_neighbors(current_node,
proj_nodes_in, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G)
else:
        x_1, z_11, z_12 = np.zeros(dim), np.zeros(dim), np.zeros(dim)
import numpy as np
class Planeta(object):
"""
    The Planeta class represents planets.
    A planet has the following associated values:
        y_actual (np.array) the current position and velocity conditions
        t_actual (float) the current time
        alpha (float) the value of alpha
    It also provides methods to advance "one step" (by solving the associated
    ODE) with 3 different numerical methods: 4th-order Runge-Kutta,
    Beeman, and Verlet.
"""
def __init__(self, condicion_inicial, alpha=0):
"""
        __init__ is a special method used to initialize the instances of a
        class.
        It receives an initial condition (a vector of 4 values) and, optionally,
        the value of alpha (if not provided, the default is 0).
        Usage example:
>> mercurio = Planeta([x0, y0, vx0, vy0])
>> print(mercurio.alpha)
>> 0.
"""
self.y_actual = np.array(condicion_inicial)
self.t_actual = 0.
self.alpha = alpha
def ecuacion_de_movimiento(self):
"""
        Implements the equation of motion as a system of first-order
        equations.
"""
x, y, vx, vy = self.y_actual
r = np.sqrt(x**2 + y**2)
aux = - r**(-3) + 2*self.alpha*r**(-4)
fx = x * aux
fy = y * aux
return np.array([vx, vy, fx, fy])
def avanza_rk4(self, dt):
"""
        Takes the planet's current condition and advances its position and
        velocity over a time interval dt using the RK4 method. The method does
        not return anything, but it modifies the values of self.y_actual and
        self.t_actual.
"""
k1_n = dt * self.ecuacion_de_movimiento()
self.t_actual += dt/2
self.y_actual += k1_n/2
k2_n = dt * self.ecuacion_de_movimiento()
self.y_actual -= k1_n/2
self.y_actual += k2_n/2
k3_n = dt * self.ecuacion_de_movimiento()
self.t_actual += dt/2
self.y_actual += k3_n
self.y_actual -= k2_n/2
        k4_n = dt * self.ecuacion_de_movimiento()
        self.y_actual -= k3_n  # restore the original state before applying the combined RK4 update
        ans = self.y_actual + (k1_n + 2*k2_n + 2*k3_n + k4_n)/6
        self.y_actual = ans
def avanza_verlet(self, dt):
"""
        Similar to avanza_rk4, but using Verlet (Velocity Verlet).
"""
x, y, vx, vy = self.y_actual
x_n = np.array([x, y])
v_n = np.array([vx, vy])
mov = self.ecuacion_de_movimiento()
a_n = np.array([mov[2], mov[3]])
x_n1 = x_n + v_n*dt + a_n*(dt**2)/2
self.y_actual[0] = x_n1[0]
self.y_actual[1] = x_n1[1]
mov1 = self.ecuacion_de_movimiento()
a_n1 = np.array([mov1[2], mov1[3]])
v_n1 = v_n + (a_n + a_n1)*dt/2
self.y_actual[2] = v_n1[0]
self.y_actual[3] = v_n1[1]
self.t_actual += dt
def avanza_beeman(self, dt):
"""
        Similar to avanza_rk4, but using Beeman.
"""
if self.t_actual == 0:
mov = self.ecuacion_de_movimiento()
            self.a_previo = np.array([mov[2], mov[3]])
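# Illustrative usage sketch (assumes the Planeta class above): integrate a near-circular
# orbit for a few RK4 steps. The initial condition and step size are assumptions chosen
# only for demonstration.
def _demo_planeta_rk4():
    p = Planeta([1.0, 0.0, 0.0, 1.0], alpha=0.0)  # r = 1 with unit tangential speed
    dt = 0.01
    trajectory = []
    for _ in range(100):
        p.avanza_rk4(dt)
        trajectory.append(p.y_actual.copy())
    return np.array(trajectory)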
from rllab.misc import ext
from rllab.misc import logger
from rllab.core.serializable import Serializable
import theano.tensor as TT
import theano
import numpy as np
import multiprocessing as mp
from rllab.misc.ext import sliced_fun
from sandbox.ex2.parallel_trpo.simple_container import SimpleContainer
import sandbox.ex2.parallel_trpo.krylov as krylov
class ParallelPerlmutterHvp(Serializable):
"""
Only difference from serial is the function defined within build_eval().
"""
def __init__(self, num_slices=1):
Serializable.quick_init(self, locals())
self.target = None
self.reg_coeff = None
self.opt_fun = None
self._num_slices = num_slices
self.pd = None # relies on ParallelCGOpt class to set parallel values.
self._par_objs = None
def update_opt(self, f, target, inputs, reg_coeff):
self.target = target
self.reg_coeff = reg_coeff
params = target.get_params(trainable=True)
constraint_grads = theano.grad(
f, wrt=params, disconnected_inputs='warn')
xs = tuple([ext.new_tensor_like("%s x" % p.name, p) for p in params])
def Hx_plain():
Hx_plain_splits = TT.grad(
TT.sum([TT.sum(g * x)
for g, x in zip(constraint_grads, xs)]),
wrt=params,
disconnected_inputs='warn'
)
return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])
self.opt_fun = ext.lazydict(
f_Hx_plain=lambda: ext.compile_function(
inputs=inputs + xs,
outputs=Hx_plain(),
log_name="f_Hx_plain",
),
)
def build_eval(self, inputs):
def parallel_eval(x):
"""
Parallelized.
"""
shareds, barriers = self._par_objs
xs = tuple(self.target.flat_to_params(x, trainable=True))
shareds.grads_2d[:, self.pd.rank] = self.pd.avg_fac * \
sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(inputs, xs)
barriers.Hx[0].wait()
shareds.Hx[self.pd.vb[0]:self.pd.vb[1]] = \
self.reg_coeff * x[self.pd.vb[0]:self.pd.vb[1]] + \
np.sum(shareds.grads_2d[self.pd.vb[0]:self.pd.vb[1], :], axis=1)
barriers.Hx[1].wait()
return shareds.Hx # (or can just access this persistent var elsewhere)
return parallel_eval
# class FiniteDifferenceHvp(Serializable):
# def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):
# Serializable.quick_init(self, locals())
# self.base_eps = base_eps
# self.symmetric = symmetric
# self.grad_clip = grad_clip
# self._num_slices = num_slices
# def update_opt(self, f, target, inputs, reg_coeff):
# self.target = target
# self.reg_coeff = reg_coeff
# params = target.get_params(trainable=True)
# constraint_grads = theano.grad(
# f, wrt=params, disconnected_inputs='warn')
# flat_grad = ext.flatten_tensor_variables(constraint_grads)
# def f_Hx_plain(*args):
# inputs_ = args[:len(inputs)]
# xs = args[len(inputs):]
# flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])
# param_val = self.target.get_param_values(trainable=True)
# eps = np.cast['float32'](
# self.base_eps / (np.linalg.norm(param_val) + 1e-8))
# self.target.set_param_values(
# param_val + eps * flat_xs, trainable=True)
# flat_grad_dvplus = self.opt_fun["f_grad"](*inputs_)
# if self.symmetric:
# self.target.set_param_values(
# param_val - eps * flat_xs, trainable=True)
# flat_grad_dvminus = self.opt_fun["f_grad"](*inputs_)
# hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
# self.target.set_param_values(param_val, trainable=True)
# else:
# self.target.set_param_values(param_val, trainable=True)
# flat_grad = self.opt_fun["f_grad"](*inputs_)
# hx = (flat_grad_dvplus - flat_grad) / eps
# return hx
# self.opt_fun = ext.lazydict(
# f_grad=lambda: ext.compile_function(
# inputs=inputs,
# outputs=flat_grad,
# log_name="f_grad",
# ),
# f_Hx_plain=lambda: f_Hx_plain,
# )
# def build_eval(self, inputs):
# def eval(x):
# xs = tuple(self.target.flat_to_params(x, trainable=True))
# ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
# inputs, xs) + self.reg_coeff * x
# return ret
# return eval
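# Illustrative sketches for the two numerical building blocks used here. First, the
# Pearlmutter Hessian-vector product trick behind ParallelPerlmutterHvp:
# Hx = grad(grad(f) . x). This standalone version uses PyTorch autograd instead of
# Theano purely for demonstration; the quadratic objective and the damping coefficient
# are assumptions.
def _demo_pearlmutter_hvp():
    import torch
    torch.manual_seed(0)
    A = torch.randn(4, 4)
    A = A @ A.t() + 4 * torch.eye(4)           # symmetric positive definite
    w = torch.randn(4, requires_grad=True)
    x = torch.randn(4)
    reg_coeff = 1e-5
    f = 0.5 * w @ A @ w                        # toy objective whose Hessian is exactly A
    (g,) = torch.autograd.grad(f, w, create_graph=True)
    (hx,) = torch.autograd.grad(g @ x, w)      # grad of (grad . x) gives H x
    hx = hx + reg_coeff * x                    # damping, as in parallel_eval above
    assert torch.allclose(hx, A @ x + reg_coeff * x, atol=1e-4)
    return hx
# Second, the conjugate gradient iteration that krylov.cg is built around, solving
# A x = g for symmetric positive definite A (the "x = A^{-1} g" step mentioned in the
# optimizer docstring below). This plain numpy version is only for demonstration; the
# real code never forms A and instead evaluates products through an Hx callable.
def _demo_conjugate_gradient(A, g, cg_iters=10, residual_tol=1e-10):
    import numpy as np
    x = np.zeros_like(g)
    r = g.copy()                 # residual of A x = g at x = 0
    p = g.copy()                 # initial search direction
    rdotr = r.dot(r)
    for _ in range(cg_iters):
        Ap = A.dot(p)
        alpha = rdotr / p.dot(Ap)
        x += alpha * p
        r -= alpha * Ap
        new_rdotr = r.dot(r)
        if new_rdotr < residual_tol:
            break
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
    return x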
class ParallelConjugateGradientOptimizer(Serializable):
"""
Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
of the loss function.
Only the optimize() method changes for parallel implementation, but some
options of serial implementation may not be available.
"""
def __init__(
self,
cg_iters=10,
reg_coeff=1e-5,
subsample_factor=1.,
backtrack_ratio=0.8,
max_backtracks=15,
accept_violation=False,
hvp_approach=None,
num_slices=1,
name=None,
verbose=False,
):
"""
:param cg_iters: The number of CG iterations used to calculate A^-1 g
:param reg_coeff: A small value so that A -> A + reg*I
:param subsample_factor: Subsampling factor to reduce samples when using "conjugate gradient. Since the
computation time for the descent direction dominates, this can greatly reduce the overall computation time.
:param accept_violation: whether to accept the descent step if it violates the line search condition after
exhausting all backtracking budgets
:return:
"""
Serializable.quick_init(self, locals())
self._cg_iters = cg_iters
self._reg_coeff = reg_coeff
self._subsample_factor = subsample_factor
self._backtrack_ratio = backtrack_ratio
self._max_backtracks = max_backtracks
self._num_slices = num_slices
self._opt_fun = None
self._target = None
self._max_constraint_val = None
self._constraint_name = None
self._accept_violation = accept_violation
# if hvp_approach is None:
# hvp_approach = PerlmutterHvp(num_slices)
hvp_approach = ParallelPerlmutterHvp(num_slices) # Only option supported.
self._hvp_approach = hvp_approach
self._name = name
self._verbose = verbose
def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name="constraint", *args,
**kwargs):
"""
:param loss: Symbolic expression for the loss function.
:param target: A parameterized object to optimize over. It should implement methods of the
:class:`rllab.core.paramerized.Parameterized` class.
:param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
:param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. It is assumed
that the first dimension of these inputs should correspond to the number of data points
:param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled
:return: No return value.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
else:
extra_inputs = tuple(extra_inputs)
constraint_term, constraint_value = leq_constraint
params = target.get_params(trainable=True)
grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')
flat_grad = ext.flatten_tensor_variables(grads)
self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,
reg_coeff=self._reg_coeff)
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
self._opt_fun = ext.lazydict(
f_loss=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=loss,
log_name="f_loss",
),
f_grad=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=flat_grad,
log_name="f_grad",
),
f_constraint=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=constraint_term,
log_name="constraint",
),
f_loss_constraint=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=[loss, constraint_term],
log_name="f_loss_constraint",
),
)
def init_par_objs(self, n_parallel, size_grad):
"""
These objects will be inherited by forked subprocesses.
(Also possible to return these and attach them explicitly within
subprocess--needed in Windows.)
"""
n_grad_elm_worker = -(-size_grad // n_parallel) # ceiling div
vb_idx = [n_grad_elm_worker * i for i in range(n_parallel + 1)]
vb_idx[-1] = size_grad
# Build container to share with hvp_approach.
par_data = SimpleContainer(
rank=None,
avg_fac=1.0 / n_parallel,
vb=[(vb_idx[i], vb_idx[i + 1]) for i in range(n_parallel)],
)
self.pd = par_data
self._hvp_approach.pd = par_data
shareds = SimpleContainer(
flat_g=np.frombuffer(mp.RawArray('d', size_grad)),
grads_2d=np.reshape(
np.frombuffer(mp.RawArray('d', size_grad * n_parallel)),
(size_grad, n_parallel)),
Hx=np.frombuffer(mp.RawArray('d', size_grad)),
loss=mp.RawArray('d', n_parallel),
constraint_val=mp.RawArray('d', n_parallel),
descent=np.frombuffer(mp.RawArray('d', size_grad)),
prev_param=np.frombuffer(mp.RawArray('d', size_grad)),
cur_param=np.frombuffer(mp.RawArray('d', size_grad)),
cg_p=np.frombuffer(mp.RawArray('d', size_grad)),
n_steps_collected=mp.RawArray('i', n_parallel),
)
barriers = SimpleContainer(
avg_fac=mp.Barrier(n_parallel),
flat_g=[mp.Barrier(n_parallel) for _ in range(2)],
loss=mp.Barrier(n_parallel),
cnstr=mp.Barrier(n_parallel),
loss_cnstr=mp.Barrier(n_parallel),
bktrk=mp.Barrier(n_parallel),
)
self._par_objs = (shareds, barriers)
shareds_hvp = SimpleContainer(
grads_2d=shareds.grads_2d, # OK to use the same memory.
Hx=shareds.Hx,
)
barriers_hvp = SimpleContainer(
Hx=[mp.Barrier(n_parallel) for _ in range(2)],
)
self._hvp_approach._par_objs = (shareds_hvp, barriers_hvp)
# For passing into krylov.cg
cg_par_objs = {
'z': shareds.Hx, # (location of result of Hx)
'p': np.frombuffer(mp.RawArray('d', size_grad)),
'x': shareds.descent, # (location to write krylov.cg result)
'brk': mp.RawValue('i'),
'barrier': mp.Barrier(n_parallel),
}
self._cg_par_objs = cg_par_objs
def init_rank(self, rank):
self.pd.rank = rank
self.rank = rank
self.pd.vb = self.pd.vb[rank] # (assign gradient vector boundaries)
self.vb = self.pd.vb
def set_avg_fac(self, n_steps_collected):
shareds, barriers = self._par_objs
shareds.n_steps_collected[self.rank] = n_steps_collected
barriers.avg_fac.wait()
self.pd.avg_fac = 1.0 * n_steps_collected / sum(shareds.n_steps_collected)
self.avg_fac = self.pd.avg_fac
def _loss(self, inputs, extra_inputs):
"""
Parallelized: returns the same value in all workers.
"""
shareds, barriers = self._par_objs
shareds.loss[self.rank] = self.avg_fac * sliced_fun(
self._opt_fun["f_loss"], self._num_slices)(inputs, extra_inputs)
barriers.loss.wait()
return sum(shareds.loss)
def _constraint_val(self, inputs, extra_inputs):
"""
Parallelized: returns the same value in all workers.
"""
shareds, barriers = self._par_objs
shareds.constraint_val[self.rank] = self.avg_fac * sliced_fun(
self._opt_fun["f_constraint"], self._num_slices)(inputs, extra_inputs)
barriers.cnstr.wait()
return sum(shareds.constraint_val)
def _loss_constraint(self, inputs, extra_inputs):
"""
Parallelized: returns the same values in all workers.
"""
shareds, barriers = self._par_objs
loss, constraint_val = sliced_fun(self._opt_fun["f_loss_constraint"],
self._num_slices)(inputs, extra_inputs)
shareds.loss[self.rank] = self.avg_fac * loss
shareds.constraint_val[self.rank] = self.avg_fac * constraint_val
barriers.loss_cnstr.wait()
return sum(shareds.loss), sum(shareds.constraint_val)
def _flat_g(self, inputs, extra_inputs):
"""
Parallelized: returns the same values in all workers.
"""
shareds, barriers = self._par_objs
# Each worker records result available to all.
shareds.grads_2d[:, self.rank] = self.avg_fac * \
sliced_fun(self._opt_fun["f_grad"], self._num_slices)(inputs, extra_inputs)
barriers.flat_g[0].wait()
# Each worker sums over an equal share of the grad elements across
# workers (row major storage--sum along rows).
shareds.flat_g[self.vb[0]:self.vb[1]] = \
np.sum(shareds.grads_2d[self.vb[0]:self.vb[1], :], axis=1)
barriers.flat_g[1].wait()
# No return (elsewhere, access shareds.flat_g)
def force_compile(self, inputs, extra_inputs=None):
"""
Serial - force compiling of Theano functions.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
self._opt_fun["f_loss"](*(inputs + extra_inputs))
x = self._opt_fun["f_grad"](*(inputs + extra_inputs))
xs = tuple(self._target.flat_to_params(x, trainable=True))
self._hvp_approach.opt_fun["f_Hx_plain"](*(inputs + extra_inputs + xs))
self._opt_fun["f_loss_constraint"](*(inputs + extra_inputs))
# self._opt_fun["f_constraint"] # not used!
def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):
"""
Parallelized: all workers get the same parameter update.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
if self._subsample_factor < 1:
if subsample_grouped_inputs is None:
subsample_grouped_inputs = [inputs]
subsample_inputs = tuple()
for inputs_grouped in subsample_grouped_inputs:
n_samples = len(inputs_grouped[0])
inds = np.random.choice(
n_samples,
max(1,int(n_samples * self._subsample_factor)),
replace=False
)
subsample_inputs += tuple([x[inds] for x in inputs_grouped])
else:
subsample_inputs = inputs
if self.rank == 0:
info = self._optimize_master(inputs, extra_inputs, subsample_inputs)
else:
info = self._optimize(inputs, extra_inputs, subsample_inputs)
return info
def log(self,message):
if self._verbose:
if self._name is not None:
logger.log(self._name + ": " + message)
else:
logger.log(message)
def _optimize_master(self, inputs, extra_inputs, subsample_inputs):
shareds, barriers = self._par_objs
self.log("computing loss before")
loss_before = self._loss(inputs, extra_inputs) # (parallel)
self.log("performing update")
self.log("computing descent direction")
self._flat_g(inputs, extra_inputs) # (parallel, writes shareds.flat_g)
# Hx is parallelized.
Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
# Krylov is parallelized, but only to save on memory (could run serial).
krylov.cg(Hx, shareds.flat_g, self._cg_par_objs, self.rank,
cg_iters=self._cg_iters)
# # Serial call version:
# shareds.descent[:] = krylov.cg(Hx, shareds.flat_g, cg_iters=self._cg_iters)
initial_step_size = np.sqrt(
2.0 * self._max_constraint_val *
(1. / (shareds.descent.dot(shareds.flat_g) + 1e-8))
)
if np.isnan(initial_step_size):
initial_step_size = 1.
shareds.descent *= initial_step_size
self.log("descent direction computed")
shareds.prev_param[:] = self._target.get_param_values(trainable=True)
for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
shareds.cur_param[:] = shareds.prev_param - ratio * shareds.descent
barriers.bktrk.wait()
self._target.set_param_values(shareds.cur_param, trainable=True)
loss, constraint_val = self._loss_constraint(inputs, extra_inputs) # (parallel)
if loss < loss_before and constraint_val <= self._max_constraint_val:
break
if ((np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or
constraint_val >= self._max_constraint_val) and not self._accept_violation):
self._target.set_param_values(shareds.prev_param, trainable=True)
self.log("Line search condition violated. Rejecting the step!")
if np.isnan(loss):
self.log("Violated because loss is NaN")
if np.isnan(constraint_val):
self.log("Violated because constraint %s is NaN" %
self._constraint_name)
if loss >= loss_before:
self.log("Violated because loss not improving")
if constraint_val >= self._max_constraint_val:
self.log(
"Violated because constraint %s is violated" % self._constraint_name)
loss = loss_before
constraint_val = 0.
self.log("backtrack iters: %d" % n_iter)
self.log("optimization finished")
if self._name is not None:
log_prefix = self._name + "_"
else:
log_prefix = ""
# logger.record_tabular(log_prefix + 'LossBefore', loss_before)
# logger.record_tabular(log_prefix + 'LossAfter', loss)
# logger.record_tabular(log_prefix + 'MeanKLBefore', mean_kl_before) # zero!
# logger.record_tabular(log_prefix + 'MeanKL', constraint_val)
# logger.record_tabular(log_prefix + 'dLoss', loss_before - loss)
# logger.record_tabular(log_prefix + 'bktrk_iters', n_iter)
return dict(
loss_before=loss_before,
loss_after=loss,
constraint_val=constraint_val,
)
def _optimize(self, inputs, extra_inputs, subsample_inputs):
shareds, barriers = self._par_objs
loss_before = self._loss(inputs, extra_inputs) # (parallel)
self._flat_g(inputs, extra_inputs) # (parallel, writes shareds.flat_g)
Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
# Krylov is parallelized, but only to save on memory (could run serial).
krylov.cg(Hx, shareds.flat_g, self._cg_par_objs, self.rank,
cg_iters=self._cg_iters)
# Serial call version:
# _ = krylov.cg(Hx, shareds.flat_g, cg_iters=self._cg_iters)
# master writes shareds.descent and shareds.prev_param
        for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
# -*- coding: utf-8 -*-
"""Optimal Control Pulse Design functions.
"""
from sigpy import backend
from sigpy.mri.rf import slr
import numpy as np
__all__ = ['optcont1d', 'blochsim', 'deriv']
def optcont1d(dthick, N, os, tb, stepsize=0.001, max_iters=1000, d1=0.01,
d2=0.01, dt=4e-6, conv_tolerance=1e-5):
r"""1D optimal control pulse designer
Args:
dthick: thickness of the slice (cm)
N: number of points in pulse
os: matrix scaling factor
tb: time bandwidth product, unitless
stepsize: optimization step size
max_iters: max number of iterations
d1: ripple level in passband
d2: ripple level in stopband
dt: dwell time (s)
conv_tolerance: max change between iterations, convergence tolerance
Returns:
gamgdt: scaled gradient
pulse: pulse of interest, complex RF waveform
"""
# set mag of gamgdt according to tb + dthick
gambar = 4257 # gamma/2/pi, Hz/g
gmag = tb / (N * dt) / dthick / gambar
# get spatial locations + gradient
    x = np.arange(0, N * os, 1)
# -*- coding: utf-8 -*-
# Copyright 2018-2020 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from collections import OrderedDict
from io import StringIO
from numbers import Number
import os
import sys
from diffpy.structure import Lattice, Structure
from diffpy.structure.spacegroups import GetSpaceGroup
from h5py import File
import pytest
import numpy as np
from orix import __version__ as orix_version
from orix.crystal_map import CrystalMap, Phase, PhaseList
from orix.io import (
load,
save,
loadang,
loadctf,
_plugin_from_footprints,
_overwrite_or_not,
)
from orix.io.plugins.ang import (
_get_header,
_get_phases_from_header,
_get_vendor_columns,
)
from orix.io.plugins.orix_hdf5 import (
hdf5group2dict,
dict2crystalmap,
dict2phaselist,
dict2phase,
dict2structure,
dict2lattice,
dict2atom,
dict2hdf5group,
crystalmap2dict,
phaselist2dict,
phase2dict,
structure2dict,
lattice2dict,
atom2dict,
)
from orix.io.plugins import ang, emsoft_h5ebsd, orix_hdf5
from orix.quaternion.rotation import Rotation
from orix.tests.conftest import (
ANGFILE_TSL_HEADER,
ANGFILE_ASTAR_HEADER,
ANGFILE_EMSOFT_HEADER,
)
plugin_list = [ang, emsoft_h5ebsd, orix_hdf5]
@contextmanager
def replace_stdin(target):
orig = sys.stdin
sys.stdin = target
yield
sys.stdin = orig
def assert_dictionaries_are_equal(input_dict, output_dict):
for key in output_dict.keys():
output_value = output_dict[key]
input_value = input_dict[key]
if isinstance(output_value, (dict, OrderedDict)):
assert_dictionaries_are_equal(input_value, output_value)
else:
if isinstance(output_value, (np.ndarray, Number)):
assert np.allclose(input_value, output_value)
elif isinstance(output_value, Rotation):
assert np.allclose(input_value.to_euler(), output_value.to_euler())
elif isinstance(output_value, Phase):
assert_dictionaries_are_equal(
input_value.__dict__, output_value.__dict__
)
elif isinstance(output_value, PhaseList):
assert_dictionaries_are_equal(input_value._dict, output_value._dict)
elif isinstance(output_value, Structure):
assert np.allclose(output_value.xyz, input_value.xyz)
assert str(output_value.element) == str(input_value.element)
assert np.allclose(output_value.occupancy, input_value.occupancy)
else:
assert input_value == output_value
class TestGeneralIO:
def test_load_no_filename_match(self):
fname = "what_is_hip.ang"
with pytest.raises(IOError, match=f"No filename matches '{fname}'."):
_ = load(fname)
@pytest.mark.parametrize("temp_file_path", ["ctf"], indirect=["temp_file_path"])
def test_load_unsupported_format(self, temp_file_path):
np.savetxt(temp_file_path, X=np.random.rand(100, 8))
with pytest.raises(IOError, match=f"Could not read "):
_ = load(temp_file_path)
@pytest.mark.parametrize(
"top_group, expected_plugin",
[("Scan 1", emsoft_h5ebsd), ("crystal_map", orix_hdf5), ("Scan 2", None)],
)
def test_plugin_from_footprints(self, temp_file_path, top_group, expected_plugin):
with File(temp_file_path, mode="w") as f:
f.create_group(top_group)
assert (
_plugin_from_footprints(
temp_file_path, plugins=[emsoft_h5ebsd, orix_hdf5]
)
is expected_plugin
)
def test_overwrite_or_not(self, crystal_map, temp_file_path):
save(temp_file_path, crystal_map)
with pytest.warns(UserWarning, match="Not overwriting, since your terminal "):
_overwrite_or_not(temp_file_path)
@pytest.mark.parametrize(
"answer, expected", [("y", True), ("n", False), ("m", None)]
)
def test_overwrite_or_not_input(
self, crystal_map, temp_file_path, answer, expected
):
save(temp_file_path, crystal_map)
if answer == "m":
with replace_stdin(StringIO(answer)):
with pytest.raises(EOFError):
_overwrite_or_not(temp_file_path)
else:
with replace_stdin(StringIO(answer)):
assert _overwrite_or_not(temp_file_path) is expected
@pytest.mark.parametrize("temp_file_path", ["angs", "hdf4", "h6"])
def test_save_unsupported_raises(self, temp_file_path, crystal_map):
_, ext = os.path.splitext(temp_file_path)
with pytest.raises(IOError, match=f"'{ext}' does not correspond to any "):
save(temp_file_path, crystal_map)
def test_save_overwrite_raises(self, temp_file_path, crystal_map):
with pytest.raises(ValueError, match="`overwrite` parameter can only be "):
save(temp_file_path, crystal_map, overwrite=1)
@pytest.mark.parametrize(
"overwrite, expected_phase_name", [(True, "hepp"), (False, "")]
)
def test_save_overwrite(
self, temp_file_path, crystal_map, overwrite, expected_phase_name
):
assert crystal_map.phases[0].name == ""
save(temp_file_path, crystal_map)
assert os.path.isfile(temp_file_path) is True
crystal_map.phases[0].name = "hepp"
save(temp_file_path, crystal_map, overwrite=overwrite)
crystal_map2 = load(temp_file_path)
assert crystal_map2.phases[0].name == expected_phase_name
@pytest.mark.parametrize(
"angfile_astar, expected_data",
[
(
(
(2, 5),
(1, 1),
np.ones(2 * 5, dtype=int),
np.array(
[
[4.485496, 0.952426, 0.791507],
[1.343904, 0.276111, 0.825890],
[1.343904, 0.276111, 0.825890],
[1.343904, 0.276111, 0.825890],
[4.555309, 2.895152, 3.972020],
[1.361357, 0.276111, 0.825890],
[4.485496, 0.220784, 0.810182],
[0.959931, 2.369110, 4.058938],
[0.959931, 2.369110, 4.058938],
[4.485496, 0.220784, 0.810182],
],
),
),
np.array(
[
[0.77861956, -0.12501022, 0.44104243, 0.42849224],
[0.46256046, -0.13302712, -0.03524667, -0.87584204],
[0.46256046, -0.13302712, -0.03524667, -0.87584204],
[0.46256046, -0.13302712, -0.03524667, -0.87584204],
[0.05331986, 0.95051048, 0.28534763, -0.11074093],
[0.45489991, -0.13271448, -0.03640618, -0.87984517],
[0.8752001, -0.02905178, 0.10626836, 0.47104969],
[0.3039118, 0.01972273, -0.92612154, 0.22259272],
[0.3039118, 0.01972273, -0.92612154, 0.22259272],
[0.8752001, -0.02905178, 0.10626836, 0.47104969],
]
),
),
],
indirect=["angfile_astar"],
)
def test_loadang(angfile_astar, expected_data):
loaded_data = loadang(angfile_astar)
assert np.allclose(loaded_data.data, expected_data)
def test_loadctf():
""" Crude test of the ctf loader """
z = np.random.rand(100, 8)
fname = "temp.ctf"
np.savetxt(fname, z)
_ = loadctf(fname)
os.remove(fname)
class TestAngPlugin:
@pytest.mark.parametrize(
"angfile_tsl, map_shape, step_sizes, phase_id, n_unknown_columns, example_rot",
[
(
# Read by angfile_tsl() via request.param (passed via `indirect` below)
(
(5, 3), # map_shape
(0.1, 0.1), # step_sizes
np.zeros(5 * 3, dtype=int), # phase_id
5, # n_unknown_columns
np.array(
[[1.59942, 2.37748, 4.53419], [1.59331, 2.37417, 4.53628]]
), # rotations as rows of Euler angle triplets
),
(5, 3),
(0.1, 0.1),
np.zeros(5 * 3, dtype=int),
5,
np.array(
[[1.59942, 2.37748, -1.74690], [1.59331, 2.37417, -1.74899]]
), # rotations as rows of Euler angle triplets
),
(
(
(8, 4), # map_shape
(1.5, 1.5), # step_sizes
np.zeros(8 * 4, dtype=int), # phase_id
5, # n_unknown_columns
np.array(
[[5.81107, 2.34188, 4.47345], [6.16205, 0.79936, 1.31702]]
), # rotations as rows of Euler angle triplets
),
(8, 4),
(1.5, 1.5),
np.zeros(8 * 4, dtype=int),
5,
np.array(
[[-0.12113, 2.34188, 1.31702], [-0.47211, 0.79936, -1.80973]]
), # rotations as rows of Euler angle triplets
),
],
indirect=["angfile_tsl"],
)
def test_load_ang_tsl(
self,
angfile_tsl,
map_shape,
step_sizes,
phase_id,
n_unknown_columns,
example_rot,
):
cm = load(angfile_tsl)
# Fraction of non-indexed points
non_indexed_fraction = int(np.prod(map_shape) * 0.1)
assert non_indexed_fraction == np.sum(~cm.is_indexed)
# Properties
assert list(cm.prop.keys()) == [
"iq",
"ci",
"unknown1",
"fit",
"unknown2",
"unknown3",
"unknown4",
"unknown5",
]
# Coordinates
ny, nx = map_shape
dy, dx = step_sizes
assert np.allclose(cm.x, np.tile(np.arange(nx) * dx, ny))
assert np.allclose(cm.y, np.sort(np.tile(np.arange(ny) * dy, nx)))
# Map shape and size
assert cm.shape == map_shape
assert cm.size == np.prod(map_shape)
# Attributes are within expected ranges or have a certain value
assert cm.prop["ci"].max() <= 1
assert cm["indexed"].fit.max() <= 3
assert all(cm["not_indexed"].fit == 180)
assert all(cm["not_indexed"].ci == -1)
# Phase IDs (accounting for non-indexed points)
phase_id[cm["not_indexed"].id] = -1
assert np.allclose(cm.phase_id, phase_id)
# Rotations
rot_unique = np.unique(cm["indexed"].rotations.to_euler(), axis=0)
assert np.allclose(
np.sort(rot_unique, axis=0), np.sort(example_rot, axis=0), atol=1e-5
)
assert np.allclose(
cm["not_indexed"].rotations.to_euler()[0],
            np.array([np.pi, 0, np.pi]),
        )
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
from mindspore import Tensor, Parameter
from mindspore.nn import Cell
from mindspore.nn._graph_kernels import LambUpdateWithLR, LambNextMV
class LambNet(Cell):
def __init__(self, i2, i5, x6):
super(LambNet, self).__init__()
self.i2 = Parameter(i2, name='i2')
self.i5 = Parameter(i5, name='i5')
self.x6 = Parameter(x6, name='x6')
self.lamb_next = LambNextMV()
self.lamb_update = LambUpdateWithLR()
def construct(self, i1, i3, i4, i6, i7, i8, i9, ix0, ix1, ix2, ix3,
x1, x2, x3, x4, x5, gy, se, my):
i1_ = i1 + i3
return self.lamb_next(i1_, self.i2, i3, i4, self.i5, i6, i7, i8, i9, ix0,
ix1, ix2, ix3), \
self.lamb_update(x1, x2, x3, x4, x5, self.x6, gy, se, my)
def LambUpdateNumpy(x1, x2, x3, x4, x5, x6, gy, se, my):
trust_ratio = np.where(np.greater(x2, gy),
np.where(np.greater(x1, gy), np.divide(x2, x3), se),
se)
trust_ratio = np.maximum(np.minimum(trust_ratio, my), gy)
update_with_lr = trust_ratio * x4 * x5
next_param = x6 - np.reshape(update_with_lr, x6.shape)
return next_param
def LambNextMVNumpy(i1, i2, i3, i4, i5, i6, i7, i8, i9, x0, x1, x2, x3):
m_fp32 = i5.astype(np.float32)
v_fp32 = i2.astype(np.float32)
next_m = i8 * m_fp32 + i9 * i4
next_v = x0 * v_fp32 + x1 * i1
next_mm = next_m / i6
next_vv = next_v / i3
update = next_mm / (np.sqrt(next_vv) + x3)
add3 = next_mm / np.sqrt(next_vv + x3) + x2 * i7
return add3, next_m, next_v, update
def tensor_all(*args):
res = [Tensor(a) for a in args]
return res
def test_graph_kernel_lamb():
shape = [1, 16]
oshape = [1]
np.random.seed(0)
x1 = np.random.normal(0, 1, oshape).astype(np.float32)
x2 = np.random.normal(0, 1, oshape).astype(np.float32)
x3 = np.random.normal(0, 1, oshape).astype(np.float32)
x4 = np.random.normal(0, 1, oshape).astype(np.float32)
x5 = np.random.normal(0, 1, shape).astype(np.float32)
x6 = np.random.normal(0, 1, shape).astype(np.float32)
gy = np.random.normal(0, 1, oshape).astype(np.float32)
se = np.random.normal(0, 1, oshape).astype(np.float32)
my = np.random.normal(0, 1, oshape).astype(np.float32)
tx1, tx2, tx3, tx4, tx5, tx6, tgy, tse, tmy = tensor_all(
x1, x2, x3, x4, x5, x6, gy, se, my)
np.random.seed(1)
i1 = np.abs(np.random.normal(0, 1, shape)).astype(np.float32)
i2 = np.abs(np.random.normal(0, 1, shape)).astype(np.float32)
    i3 = np.abs(np.random.normal(0, 1, shape)).astype(np.float32)
# This module contains some convenience function from vcs2vtk
from __future__ import division
import vcs
import vtk
import numpy
import json
import os
import math
from . import meshfill
from vtk.util import numpy_support as VN
import cdms2
import warnings
from .projection import round_projections, no_over_proj4_parameter_projections
from .vcsvtk import fillareautils
import sys
import numbers
DEBUG_MODE = False
def debugWriteGrid(grid, name):
if DEBUG_MODE:
writer = vtk.vtkXMLDataSetWriter()
gridType = grid.GetDataObjectType()
if (gridType == vtk.VTK_STRUCTURED_GRID):
ext = ".vts"
elif (gridType == vtk.VTK_UNSTRUCTURED_GRID):
ext = ".vtu"
elif (gridType == vtk.VTK_POLY_DATA):
ext = ".vtp"
else:
print("Unknown grid type: {0}".format(gridType))
ext = ".vtk"
writer.SetFileName(name + ext)
writer.SetInputData(grid)
writer.Write()
def debugMsg(msg):
if DEBUG_MODE:
print(msg)
f = open(os.path.join(vcs.vcs_egg_path, "wmo_symbols.json"))
wmo = json.load(f)
projNames = [
"linear",
"utm",
"state",
"aea",
"lcc",
"merc",
"stere",
"poly",
"eqdc",
"tmerc",
"stere",
"laea",
"azi",
"gnom",
"ortho",
"vertnearper",
"sinu",
"eqc",
"mill",
"vandg",
"omerc",
"robin",
"somerc",
"alsk",
"goode",
"moll",
"imoll",
"hammer",
"wag4",
"wag7",
"oea"]
projDict = {"polar stereographic": "stere",
-3: "aeqd",
}
for i in range(len(projNames)):
projDict[i] = projNames[i]
def computeDrawAreaBounds(bounds, flipX=False, flipY=False):
if flipX:
lowerLeftX = bounds[1]
width = bounds[0] - bounds[1]
else:
lowerLeftX = bounds[0]
width = bounds[1] - bounds[0]
if flipY:
lowerLeftY = bounds[3]
height = bounds[2] - bounds[3]
else:
lowerLeftY = bounds[2]
height = bounds[3] - bounds[2]
return vtk.vtkRectd(lowerLeftX, lowerLeftY, width, height)
def adjustBounds(bounds, xScale, yScale):
# Scale the bounds, keeping the same center point
centerX = (bounds[1] + bounds[0]) / 2.0
centerY = (bounds[3] + bounds[2]) / 2.0
width = bounds[1] - bounds[0]
height = bounds[3] - bounds[2]
newWidth = width * xScale
newHeight = height * yScale
newBounds = [bounds[i] for i in range(len(bounds))]
halfWidth = newWidth / 2.0
halfHeight = newHeight / 2.0
newBounds[0] = centerX - halfWidth
newBounds[1] = centerX + halfWidth
newBounds[2] = centerY - halfHeight
newBounds[3] = centerY + halfHeight
return newBounds
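# Worked example: adjustBounds([0., 10., 0., 10.], 2.0, 0.5) keeps the center
# at (5, 5) and returns [-5.0, 15.0, 2.5, 7.5] (width doubled, height halved).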
def configureContextArea(area, dataBounds, screenGeom):
area.SetDrawAreaBounds(dataBounds)
area.SetGeometry(screenGeom)
area.SetFillViewport(False)
area.SetShowGrid(False)
axisLeft = area.GetAxis(vtk.vtkAxis.LEFT)
axisRight = area.GetAxis(vtk.vtkAxis.RIGHT)
axisBottom = area.GetAxis(vtk.vtkAxis.BOTTOM)
axisTop = area.GetAxis(vtk.vtkAxis.TOP)
axisLeft.SetVisible(False)
axisRight.SetVisible(False)
axisBottom.SetVisible(False)
axisTop.SetVisible(False)
axisLeft.SetMargins(0, 0)
axisRight.SetMargins(0, 0)
axisBottom.SetMargins(0, 0)
axisTop.SetMargins(0, 0)
def growBounds(previousBounds, newBounds):
nextBounds = [i for i in previousBounds]
if newBounds[0] < previousBounds[0]:
nextBounds[0] = newBounds[0]
if newBounds[1] > previousBounds[1]:
nextBounds[1] = newBounds[1]
if newBounds[2] < previousBounds[2]:
nextBounds[2] = newBounds[2]
if newBounds[3] > previousBounds[3]:
nextBounds[3] = newBounds[3]
return nextBounds
def applyTransformationToDataset(T, data):
vectors = data.GetPointData().GetVectors()
data.GetPointData().SetActiveVectors(None)
transformFilter = vtk.vtkTransformFilter()
transformFilter.SetInputData(data)
transformFilter.SetTransform(T)
transformFilter.Update()
outputData = transformFilter.GetOutput()
data.GetPointData().SetVectors(vectors)
outputData.GetPointData().SetVectors(vectors)
return outputData
def generateSolidColorArray(numTuples, color):
np_colors = numpy.empty([numTuples, 4])
np_colors[:, 0] = color[0]
np_colors[:, 1] = color[1]
np_colors[:, 2] = color[2]
np_colors[:, 3] = color[3]
return VN.numpy_to_vtk(num_array=np_colors,
deep=True,
array_type=vtk.VTK_UNSIGNED_CHAR)
def applyAttributesFromVCStmpl(tmpl, tmplattribute, txtobj=None):
tatt = getattr(tmpl, tmplattribute)
if txtobj is None:
txtobj = vcs.createtext(
To_source=tatt.textorientation,
Tt_source=tatt.texttable)
for att in ["x", "y", "priority"]:
setattr(txtobj, att, getattr(tatt, att))
return txtobj
def numpy_to_vtk_wrapper(numpyArray, deep=False, array_type=None):
result = VN.numpy_to_vtk(numpyArray, deep, array_type)
# Prevent garbage collection on shallow copied data:
if not deep:
result.numpyArray = numpyArray
return result
# Adds 'array' to 'grid' as cell or point attribute based on 'isCellData'
# It also sets it as the active scalar if 'isScalars'.
# If the grid has pedigree ids (it was wrapped) we use them to set the array.
def setArray(grid, array, arrayName, isCellData, isScalars):
attributes = grid.GetCellData() if isCellData else grid.GetPointData()
pedigreeId = attributes.GetPedigreeIds()
if (pedigreeId):
vtkarray = attributes.GetArray(arrayName)
for i in range(0, vtkarray.GetNumberOfTuples()):
vtkarray.SetValue(i, array[pedigreeId.GetValue(i)])
else:
vtkarray = numpy_to_vtk_wrapper(array, deep=False)
vtkarray.SetName(arrayName)
attributes.AddArray(vtkarray)
if (isScalars):
attributes.SetActiveScalars(arrayName)
def putMaskOnVTKGrid(data, grid, actorColor=None, cellData=True, deep=True):
msk = data.mask
mapper = None
if msk is not numpy.ma.nomask and not numpy.allclose(msk, False):
if actorColor is not None:
flatIMask = msk.astype(numpy.double).flat
grid2 = grid.NewInstance()
if grid.IsA("vtkStructuredGrid"):
vtkmask = numpy_to_vtk_wrapper(flatIMask, deep=deep, array_type=vtk.VTK_DOUBLE)
attributes2 = grid2.GetCellData() if cellData else grid2.GetPointData()
else:
if (cellData):
attributes2 = grid2.GetCellData()
attributes = grid.GetCellData()
else:
attributes2 = grid2.GetPointData()
attributes = grid.GetPointData()
if (attributes.GetPedigreeIds()):
attributes2.SetPedigreeIds(attributes.GetPedigreeIds())
pedigreeId = attributes2.GetPedigreeIds()
vtkmask = vtk.vtkDoubleArray()
vtkmask.SetNumberOfTuples(attributes2.GetPedigreeIds().GetNumberOfTuples())
for i in range(0, vtkmask.GetNumberOfTuples()):
vtkmask.SetValue(i, flatIMask[pedigreeId.GetValue(i)])
else:
# the unstructured grid is not wrapped
vtkmask = numpy_to_vtk_wrapper(flatIMask, deep=deep, array_type=vtk.VTK_DOUBLE)
vtkmask.SetName("scalar")
attributes2.RemoveArray(vtk.vtkDataSetAttributes.GhostArrayName())
attributes2.SetScalars(vtkmask)
grid2.CopyStructure(grid)
geoFilter = vtk.vtkDataSetSurfaceFilter()
lut = vtk.vtkLookupTable()
r, g, b, a = actorColor
geoFilter.SetInputData(grid2)
if not cellData:
pointToCell = vtk.vtkPointDataToCellData()
pointToCell.SetInputConnection(geoFilter.GetOutputPort())
geoFilter = pointToCell
lut.SetNumberOfTableValues(256)
lut.SetTableValue(0, 1., 1., 1., 0.)
for i in range(1, 256):
lut.SetTableValue(i, r / 100., g / 100., b / 100., a / 100.)
else:
lut.SetNumberOfTableValues(2)
lut.SetTableValue(0, r / 100., g / 100., b / 100., 0.)
lut.SetTableValue(1, r / 100., g / 100., b / 100., a / 100.)
geoFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(geoFilter.GetOutputPort())
mapper.SetLookupTable(lut)
mapper.SetScalarRange(0, 1)
if cellData:
mapper.SetScalarModeToUseCellData()
# The ghost array now stores information about hidden (blanked)
# points/cells. Setting an array entry to the bitwise value
# `vtkDataSetAttributes.HIDDEN(CELL|POINT)` will blank the cell/point.
flatMask = msk.flat
ghost = numpy.zeros(len(flatMask), dtype=numpy.uint8)
invalidMaskValue = vtk.vtkDataSetAttributes.HIDDENCELL if cellData else \
vtk.vtkDataSetAttributes.HIDDENPOINT
for i, isInvalid in enumerate(flatMask):
if isInvalid:
ghost[i] = invalidMaskValue
attributes = grid.GetCellData() if cellData else grid.GetPointData()
pedigreeIds = attributes.GetPedigreeIds()
if (pedigreeIds):
# we need to create the ghost array because setArray does not create it in this case
vtkghost = vtk.vtkUnsignedCharArray()
vtkghost.SetNumberOfTuples(pedigreeIds.GetNumberOfTuples())
vtkghost.SetName(vtk.vtkDataSetAttributes.GhostArrayName())
attributes.AddArray(vtkghost)
setArray(grid, ghost, vtk.vtkDataSetAttributes.GhostArrayName(),
cellData, isScalars=False)
if (grid.GetExtentType() == vtk.VTK_PIECES_EXTENT):
removeHiddenPointsOrCells(grid, celldata=cellData)
return mapper
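# Usage note (sketch): when the variable is masked and actorColor is given,
# putMaskOnVTKGrid returns a vtkPolyDataMapper that renders the masked region
# in that color; otherwise it returns None. In either case, masked cells or
# points are blanked through the ghost array added to 'grid'.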
def getBoundsList(axis, hasCellData, dualGrid):
'''
Returns the bounds list for 'axis'. If axis has n elements, the
bounds list will have n+1 elements.
If there are no explicit bounds in the file we return None.
'''
needsCellData = (hasCellData != dualGrid)
axisBounds = axis.getBoundsForDualGrid(dualGrid)
# we still have to generate bounds for non lon-lat axes, because
# the default in axis.py is 2 (generate bounds only for lat/lon axis)
# this is used for non lon-lat plots - by default numpy arrays are POINT data
if (not axis.isLatitude() and not axis.isLongitude() and needsCellData):
axisBounds = axis.genGenericBounds()
if (axisBounds is not None):
bounds = numpy.zeros(len(axis) + 1)
if (axis[0] < axis[-1]):
# axis is increasing
if (axisBounds[0][0] < axisBounds[0][1]):
# interval is increasing
bounds[:len(axis)] = axisBounds[:, 0]
bounds[len(axis)] = axisBounds[-1, 1]
else:
# interval is decreasing
bounds[:len(axis)] = axisBounds[:, 1]
bounds[len(axis)] = axisBounds[-1, 0]
else:
# axis is decreasing
if (axisBounds[0][0] < axisBounds[0][1]):
# interval is increasing
bounds[:len(axis)] = axisBounds[:, 1]
bounds[len(axis)] = axisBounds[-1, 0]
else:
# interval is decreasing
bounds[:len(axis)] = axisBounds[:, 0]
bounds[len(axis)] = axisBounds[-1, 1]
return bounds
else:
return None
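# Illustrative example: for an increasing 3-element axis with explicit bounds
# [[0, 1], [1, 2], [2, 3]], getBoundsList returns [0., 1., 2., 3.] (n + 1 values).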
def setInfToValid(geoPoints, ghost):
'''
Set infinity points to a point that already exists in the list.
We also hide infinity points in the ghost array.
We return true if any points are infinity
'''
anyInfinity = False
validPoint = [0, 0, 0]
for i in range(geoPoints.GetNumberOfPoints()):
point = geoPoints.GetPoint(i)
if (not math.isinf(point[0]) and not math.isinf(point[1])):
validPoint[0] = point[0]
validPoint[1] = point[1]
break
for i in range(geoPoints.GetNumberOfPoints()):
point = geoPoints.GetPoint(i)
if (math.isinf(point[0]) or math.isinf(point[1])):
anyInfinity = True
newPoint = list(point)
if (math.isinf(point[0])):
newPoint[0] = validPoint[0]
if (math.isinf(point[1])):
newPoint[1] = validPoint[1]
geoPoints.SetPoint(i, newPoint)
ghost.SetValue(i, vtk.vtkDataSetAttributes.HIDDENPOINT)
return anyInfinity
def removeHiddenPointsOrCells(grid, celldata=False):
"""Remove hidden points or cells from the input VTK polydata.
Note that, at a time, this method removes only one hidden entity - either
points or cells from the input dataset. To remove both, hidden points and
cells, call the function twice, toggling the celldata flag for each call.
Keyword arguments:
grid -- The input dataset
celldata -- If True, this method will remove cells, else points
"""
# Since this method involves deleting points or cells from the polydata, the
# first step is to build "upward" links from points to cells.
grid.BuildLinks()
ghost = grid.GetCellGhostArray() if celldata else grid.GetPointGhostArray()
if (not ghost):
return
minScalar = sys.float_info.max
minVector = [0, 0, 0]
minVectorNorm = sys.float_info.max
num = grid.GetNumberOfCells() if celldata else grid.GetNumberOfPoints()
hidden = vtk.vtkDataSetAttributes.HIDDENCELL if celldata else vtk.vtkDataSetAttributes.HIDDENPOINT
if not celldata:
scalars = grid.GetPointData().GetScalars()
vectors = grid.GetPointData().GetVectors()
else:
scalars = grid.GetCellData().GetScalars()
vectors = grid.GetCellData().GetVectors()
if (scalars or vectors):
vector = [0, 0, 0]
for i in range(num):
if (not (ghost.GetValue(i) & hidden)):
if (scalars):
scalar = scalars.GetValue(i)
if (scalar < minScalar):
minScalar = scalar
if (vectors):
vectors.GetTypedTuple(i, vector)
vectorNorm = numpy.linalg.norm(vector)
if (vectorNorm < minVectorNorm):
minVector = vector
minVectorNorm = vectorNorm
hiddenScalars = False
hiddenVectors = False
for i in range(num):
if (ghost.GetValue(i) & hidden):
if not celldata:
cells = vtk.vtkIdList()
# point hidden, remove all cells used by this point
grid.GetPointCells(i, cells)
for j in range(cells.GetNumberOfIds()):
grid.DeleteCell(cells.GetId(j))
# hidden points are not removed. This causes problems
# because it changes the scalar range.
if(scalars):
hiddenScalars = True
scalars.SetValue(i, minScalar)
if(vectors):
hiddenVectors = True
vectors.SetTypedTuple(i, minVector)
else:
grid.DeleteCell(i)
# SetValue does not call modified - we'll have to call it after all calls.
if (hiddenScalars):
scalars.Modified()
if (hiddenVectors):
vectors.Modified()
# ensure that GLOBALIDS are copied
attributes = grid.GetCellData()
attributes.SetActiveAttribute(-1, attributes.GLOBALIDS)
grid.RemoveDeletedCells()
# set GLOBALIDS attribute
attributes = grid.GetCellData()
globalIdsIndex = vtk.mutable(-1)
attributes.GetArray("GlobalIds", globalIdsIndex)
attributes.SetActiveAttribute(globalIdsIndex, attributes.GLOBALIDS)
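# Typical usage (sketch): to drop both hidden points and hidden cells from a
# wrapped polydata, call the function twice, toggling the celldata flag:
# removeHiddenPointsOrCells(poly, celldata=False)
# removeHiddenPointsOrCells(poly, celldata=True)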
def genGrid(data1, data2, gm, deep=True, grid=None, geo=None, genVectors=False,
dualGrid=False):
continents = False
wrap = None
m3 = None
g = None
cellData = True
xm, xM, ym, yM = None, None, None, None
projection = vcs.elements["projection"][gm.projection]
try: # First try to see if we can get a mesh out of this
g = data1.getGrid()
# Ok need unstructured grid
if isinstance(g, cdms2.gengrid.AbstractGenericGrid):
continents = True
wrap = [0., 360.]
if grid is None:
m = g.getMesh()
xm = m[:, 1].min()
xM = m[:, 1].max()
ym = m[:, 0].min()
yM = m[:, 0].max()
numberOfCells = m.shape[0]
# For vtk we need to reorder things
m2 = numpy.ascontiguousarray(numpy.transpose(m, (0, 2, 1)))
m2.resize((m2.shape[0] * m2.shape[1], m2.shape[2]))
m2 = m2[..., ::-1]
# here we add dummy levels, might want to reconsider converting
# "trimData" to "reOrderData" and use actual levels?
m3 = numpy.concatenate(
(m2, numpy.zeros(
(m2.shape[0], 1))), axis=1)
# Could still be meshfill with mesh data
# Ok probably should do a test for hgrid before sending data2
if isinstance(gm, meshfill.Gfm) and data2 is not None:
wrap = gm.wrap
if gm.wrap[1] == 360.:
continents = True
if grid is None:
xm = data2[:, 1].min()
xM = data2[:, 1].max()
ym = data2[:, 0].min()
yM = data2[:, 0].max()
numberOfCells = data2.shape[0]
data2 = data2.filled(numpy.nan)
m2 = numpy.ascontiguousarray(numpy.transpose(data2, (0, 2, 1)))
nVertices = m2.shape[-2]
m2.resize((m2.shape[0] * m2.shape[1], m2.shape[2]))
m2 = m2[..., ::-1]
# here we add dummy levels, might want to reconsider converting
# "trimData" to "reOrderData" and use actual levels?
m3 = numpy.concatenate(
(m2, numpy.zeros(
(m2.shape[0], 1))), axis=1)
except Exception: # Ok no mesh on file, will do with lat/lon
pass
if m3 is not None:
# Create unstructured grid points
vg = vtk.vtkUnstructuredGrid()
for i in range(numberOfCells):
pt_ids = []
for j in range(nVertices):
indx = i * nVertices + j
if not numpy.isnan(m3[indx][0]): # missing value means skip vertex
pt_ids.append(indx)
vg.InsertNextCell(vtk.VTK_POLYGON,
len(pt_ids),
pt_ids)
else:
# Ok a simple structured grid is enough
if grid is None:
vg = vtk.vtkStructuredGrid()
hasCellData = data1.hasCellData()
if g is not None:
# Ok we have grid
continents = True
wrap = [0., 360.]
if grid is None:
lat = g.getLatitude()
lon = g.getLongitude()
if isinstance(g, cdms2.hgrid.AbstractCurveGrid):
# Ok in this case it is a structured grid we
# have no bounds, using pointData
# ??? We should probably revisit and see if we
# can use the "bounds" attribute
# to generate cell instead of points
cellData = False
elif grid is None:
lon = data1.getAxis(-1)
lat = data1.getAxis(-2)
# Ok let's try to get the bounds
lon2 = getBoundsList(lon, hasCellData, dualGrid)
lat2 = getBoundsList(lat, hasCellData, dualGrid)
if (lon2 is not None and lat2 is not None):
lon3 = lon2
lat3 = lat2
else:
lon3 = lon
lat3 = lat
cellData = False
# Note that m,M is min,max for an increasing list
# and max,min for a decreasing list
xm = lon3[0]
xM = lon3[-1]
ym = lat3[0]
yM = lat3[-1]
lat = lat3[:, numpy.newaxis] * numpy.ones(lon3.shape)[numpy.newaxis, :]
lon = lon3[numpy.newaxis, :] * numpy.ones(lat3.shape)[:, numpy.newaxis]
elif grid is None:
# No grid info from data, making one up
data1 = cdms2.asVariable(data1)
lon = data1.getAxis(-1)
lat = data1.getAxis(-2)
# Ok let's try to get the bounds
lon2 = getBoundsList(lon, hasCellData, dualGrid)
lat2 = getBoundsList(lat, hasCellData, dualGrid)
if (lon2 is not None and lat2 is not None):
lon3 = lon2
lat3 = lat2
else:
lon3 = lon
lat3 = lat
cellData = False
# Note that m,M is min,max for an increasing list
# and max,min for a decreasing list
xm = lon3[0]
xM = lon3[-1]
ym = lat3[0]
yM = lat3[-1]
lat = lat3[:, numpy.newaxis] * \
numpy.ones(lon3.shape)[numpy.newaxis, :]
lon = lon3[numpy.newaxis, :] * \
numpy.ones(lat3.shape)[:, numpy.newaxis]
if grid is None:
vg.SetDimensions(lat.shape[1], lat.shape[0], 1)
lon = numpy.ma.ravel(lon)
lat = numpy.ma.ravel(lat)
sh = list(lat.shape)
sh.append(1)
lon = numpy.ma.reshape(lon, sh)
lat = numpy.ma.reshape(lat, sh)
z = numpy.zeros(lon.shape)
m3 = numpy.concatenate((lon, lat), axis=1)
m3 = numpy.concatenate((m3, z), axis=1)
if xm is None:
try:
xm = lon[0]
xM = lon[-1]
ym = lat[0]
yM = lat[-1]
except Exception:
xm = lon.min()
xM = lon.max()
ym = lat.min()
yM = lat.max()
# attribute data
gridForAttribute = grid if grid else vg
if genVectors:
attribute = generateVectorArray(data1, data2, gridForAttribute)
else:
attribute = numpy_to_vtk_wrapper(data1.filled(0.).flat,
deep=False)
attribute.SetName("scalar")
if cellData:
attributes = gridForAttribute.GetCellData()
else:
attributes = gridForAttribute.GetPointData()
if genVectors:
attributes.SetVectors(attribute)
else:
attributes.SetScalars(attribute)
if grid is None:
# First create the points/vertices (in vcs terms)
pts = vtk.vtkPoints()
# Convert numpy arrays to VTK ones
ppV = numpy_to_vtk_wrapper(m3, deep=deep)
pts.SetData(ppV)
ptsBounds = pts.GetBounds()
xRange = ptsBounds[1] - ptsBounds[0]
xm, xM, ym, yM, tmp, tmp2 = pts.GetBounds()
# We use the zooming feature for linear and polar projections
# We use plotting coordinates for doing the projection
# such that parameters such that central meridian are set correctly
if (gm.g_name == 'Gfm'):
# axes are not lon/lat for meshfill
wc = [gm.datawc_x1, gm.datawc_x2, gm.datawc_y1, gm.datawc_y2]
else:
wc = vcs.utils.getworldcoordinates(gm,
data1.getAxis(-1),
data1.getAxis(-2))
vg.SetPoints(pts)
# index into the scalar array. Used for upgrading
# the scalar after wrapping. Note this will work
# correctly only for cell data. For point data
# the indexes for points on the border will be incorrect after
# wrapping
pedigreeId = vtk.vtkIntArray()
pedigreeId.SetName("PedigreeIds")
pedigreeId.SetNumberOfTuples(attribute.GetNumberOfTuples())
for i in range(0, attribute.GetNumberOfTuples()):
pedigreeId.SetValue(i, i)
if cellData:
vg.GetCellData().SetPedigreeIds(pedigreeId)
else:
vg.GetPointData().SetPedigreeIds(pedigreeId)
if (isinstance(g, cdms2.hgrid.TransientCurveGrid) and
xRange > 360 and not numpy.isclose(xRange, 360)):
vg = wrapDataSetX(vg)
pts = vg.GetPoints()
xm, xM, ym, yM, tmp, tmp2 = vg.GetPoints().GetBounds()
debugMsg('did wrapDataSetX, [xm, xM, ym, yM] = [{0}, {1}, {2}, {3}]'.format(xm, xM, ym, yM))
vg = doWrapData(vg, wc, wrap)
pts = vg.GetPoints()
xm, xM, ym, yM, tmp, tmp2 = vg.GetPoints().GetBounds()
debugMsg('did doWrapData, [xm, xM, ym, yM] = [{0}, {1}, {2}, {3}]'.format(xm, xM, ym, yM))
projection = vcs.elements["projection"][gm.projection]
vg.SetPoints(pts)
wrb = getWrappedBounds(wc, [xm, xM, ym, yM], wrap)
debugMsg('wrapped bounds = [xm, xM, ym, yM] = [{0}, {1}, {2}, {3}]'.format(xm, xM, ym, yM))
geo, geopts = project(pts, projection, wrb)
# proj4 returns inf for points that are not visible. Set those to a valid point
# and hide them.
ghost = vg.AllocatePointGhostArray()
if (setInfToValid(geopts, ghost)):
# if there are hidden points, we recompute the bounds
xm = ym = sys.float_info.max
xM = yM = - sys.float_info.max
for i in range(pts.GetNumberOfPoints()):
if (ghost.GetValue(i) & vtk.vtkDataSetAttributes.HIDDENPOINT == 0):
# point not hidden
p = pts.GetPoint(i)
if (p[0] < xm):
xm = p[0]
if (p[0] > xM):
xM = p[0]
if (p[1] < ym):
ym = p[1]
if (p[1] > yM):
yM = p[1]
debugMsg('bounds after removing infs = [xm, xM, ym, yM] = [{0}, {1}, {2}, {3}]'.format(xm, xM, ym, yM))
# hidden points don't work for polys or unstructured grids.
# We remove the cells in this case.
if (vg.GetExtentType() == vtk.VTK_PIECES_EXTENT):
removeHiddenPointsOrCells(vg, celldata=False)
# Sets the vertices into the grid
vg.SetPoints(geopts)
else:
xm, xM, ym, yM, tmp, tmp2 = grid.GetPoints().GetBounds()
vg = grid
# Add a GlobalIds array to keep track of cell ids throughout the pipeline
globalIds = numpy_to_vtk_wrapper(numpy.arange(0, vg.GetNumberOfCells()), deep=True)
globalIds.SetName('GlobalIds')
vg.GetCellData().SetGlobalIds(globalIds)
out = {"vtk_backend_grid": vg,
"xm": xm,
"xM": xM,
"ym": ym,
"yM": yM,
"continents": continents,
"wrap": wrap,
"geo": geo,
"cellData": cellData,
"data": data1,
"data2": data2
}
return out
# Continents first
# Try to save time and memorize these continents
vcsContinents = {}
def prepContinents(fnm, xConvertFunction=lambda x: x, yConvertFunction=lambda y: y):
""" This converts vcs continents files to vtkpolydata
Author: <NAME>
Input: vcs continent file name
"""
poly = vtk.vtkPolyData()
cells = vtk.vtkCellArray()
pts = vtk.vtkPoints()
f = open(fnm)
ln = f.readline()
while ln.strip().split() != ["-99", "-99"]:
# Many lines, need to know number of points
N = int(ln.split()[0])
# Now create and store these points
n = 0
npts = pts.GetNumberOfPoints()
while n < N:
ln = str(f.readline())
sp = ln.split()
sn = len(sp)
didIt = False
if sn % 2 == 0:
try:
spts = []
for i in range(sn // 2):
l, L = float(sp[i * 2]), float(sp[i * 2 + 1])
spts.append([l, L])
for p in spts:
x = xConvertFunction(p[1])
y = yConvertFunction(p[0])
pts.InsertNextPoint(x, y, 0.)
n += sn
didIt = True
except Exception:
didIt = False
if didIt is False:
while len(ln) > 2:
l, L = float(ln[:8]), float(ln[8:16])
x = xConvertFunction(L)
y = yConvertFunction(l)
pts.InsertNextPoint(x, y, 0.)
ln = ln[16:]
n += 2
ln = vtk.vtkPolyLine()
ln.GetPointIds().SetNumberOfIds(N // 2)
for i in range(N // 2):
ln.GetPointIds().SetId(i, i + npts)
cells.InsertNextCell(ln)
ln = f.readline()
poly.SetPoints(pts)
poly.SetLines(cells)
# The dataset has some duplicate lines that extend
# outside of x=[-180, 180],
# which will cause wrapping artifacts for
# certain projections (e.g.
# Robinson). Clip out the duplicate data:
box = vtk.vtkBox()
box.SetXMin(-180., -90., 0.)
box.SetXMax(180., 90., 1.)
clipper = vtk.vtkClipPolyData()
clipper.SetInputData(poly)
clipper.InsideOutOn()
clipper.SetClipFunction(box)
clipper.Update()
poly = clipper.GetOutput()
return poly
def apply_proj_parameters(pd, projection, x1, x2, y1, y2):
pname = projDict.get(projection._type, projection.type)
projName = pname
pd.SetName(projName)
if projection.type == "polar (non gctp)":
if (y1 < y2):
pd.SetOptionalParameter("lat_0", "-90.")
pd.SetCentralMeridian(x1)
else:
pd.SetOptionalParameter("lat_0", "90.")
pd.SetCentralMeridian(x1 + 180.)
else:
if projection.type not in no_over_proj4_parameter_projections:
pd.SetOptionalParameter("over", "true")
else:
pd.SetOptionalParameter("over", "false")
setProjectionParameters(pd, projection)
if (hasattr(projection, 'centralmeridian')):
if (numpy.allclose(projection.centralmeridian, 1e+20)):
centralmeridian = float(x1 + x2) / 2.0
else:
centralmeridian = projection.centralmeridian
pd.SetCentralMeridian(centralmeridian)
if (hasattr(projection, 'centerlongitude')):
if (numpy.allclose(projection.centerlongitude, 1e+20)):
centerlongitude = float(x1 + x2) / 2.0
else:
centerlongitude = projection.centerlongitude
pd.SetCentralMeridian(centerlongitude)
if (hasattr(projection, 'originlatitude')):
if (numpy.allclose(projection.originlatitude, 1e+20)):
originlatitude = float(y1 + y2) / 2.0
else:
originlatitude = projection.originlatitude
pd.SetOptionalParameter("lat_0", str(originlatitude))
if (hasattr(projection, 'centerlatitude')):
if (numpy.allclose(projection.centerlatitude, 1e+20)):
centerlatitude = float(y1 + y2) / 2.0
else:
centerlatitude = projection.centerlatitude
pd.SetOptionalParameter("lat_0", str(centerlatitude))
if (hasattr(projection, 'standardparallel1')):
if (numpy.allclose(projection.standardparallel1, 1.e20)):
standardparallel1 = min(y1, y2)
else:
standardparallel1 = projection.standardparallel1
pd.SetOptionalParameter('lat_1', str(standardparallel1))
if (hasattr(projection, 'standardparallel2')):
if (numpy.allclose(projection.standardparallel2, 1.e20)):
standardparallel2 = max(y1, y2)
else:
standardparallel2 = projection.standardparallel2
pd.SetOptionalParameter('lat_2', str(standardparallel2))
def projectArray(w, projection, wc, geo=None):
x1, x2, y1, y2 = wc
if isinstance(projection, str):
projection = vcs.elements["projection"][projection]
if projection.type == "linear":
return None, w
if geo is None:
geo = vtk.vtkGeoTransform()
ps = vtk.vtkGeoProjection()
pd = vtk.vtkGeoProjection()
apply_proj_parameters(pd, projection, x1, x2, y1, y2)
geo.SetSourceProjection(ps)
geo.SetDestinationProjection(pd)
for i in range(0, w.GetNumberOfTuples()):
tuple = [0, 0, 0]
w.GetTypedTuple(i, tuple)
geo.TransformPoint(tuple, tuple)
w.SetTypedTuple(i, tuple)
# Geo projection
def project(pts, projection, wc, geo=None):
x1, x2, y1, y2 = wc
if isinstance(projection, str):
projection = vcs.elements["projection"][projection]
if projection.type == "linear":
return None, pts
if geo is None:
geo = vtk.vtkGeoTransform()
ps = vtk.vtkGeoProjection()
pd = vtk.vtkGeoProjection()
apply_proj_parameters(pd, projection, x1, x2, y1, y2)
geo.SetSourceProjection(ps)
geo.SetDestinationProjection(pd)
geopts = vtk.vtkPoints()
geo.TransformPoints(pts, geopts)
return geo, geopts
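# Minimal usage sketch (assumes a non-linear VCS projection named "robinson"
# is available in vcs.elements["projection"]):
# geo, geopts = project(pts, "robinson", [-180., 180., -90., 90.])
# The returned 'geo' transform can be passed back in to reuse the same mapping.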
def setProjectionParameters(pd, proj):
if proj._type > 200:
proj4 = proj.parameters
if numpy.allclose(proj4, 1.e20):
proj4 = {}
else:
p = proj.parameters
proj4 = {}
if proj._type in [3, 4]:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["lat_1"] = proj.standardparallel1
proj4["lat_2"] = proj.standardparallel2
proj4["lon_0"] = proj.centralmeridian
proj4["lat_0"] = proj.originlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 5:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["lon_0"] = proj.centralmeridian
proj4["lat_ts"] = proj.truescale
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 6:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["lon_wrap"] = proj.centerlongitude # MAP NAME ?????
proj4["lat_ts"] = proj.truescale
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 7:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["lon_0"] = proj.centralmeridian
proj4["lat_0"] = proj.originlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 8:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["lon_0"] = proj.centralmeridian
proj4["lat_0"] = proj.originlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
if (p[8] == 0 or p[8] > 9.9E19):
proj4["subtype"] = proj.subtype = 0
proj4["lat_1"] = proj.standardparallel # MAP NAME ?????
proj4["lat_2"] = proj.standardparallel # MAP NAME ?????
proj4["lat_ts"] = proj.standardparallel # MAP NAME ?????
else:
proj4["subtype"] = proj.subtype = 1
proj4["lat_1"] = proj.standardparallel1
proj4["lat_2"] = proj.standardparallel2
elif proj._type == 9:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["k_0"] = proj.factor
proj4["lon_0"] = proj.centralmeridian
proj4["lat_0"] = proj.originlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type in [10, 11, 12, 13, 14]:
proj4["a"] = proj.sphere # MAP NAME ?????
proj4["b"] = proj.sphere # MAP NAME ?????
proj4["lon_0"] = proj.centerlongitude
proj4["lat_0"] = proj.centerlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 15:
proj4["a"] = proj.sphere # MAP NAME ?????
proj4["b"] = proj.sphere # MAP NAME ?????
proj4["height"] = proj.height # MAP NAME ?????
proj4["lon_0"] = proj.centerlongitude
proj4["lat_0"] = proj.centerlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type in [16, 18, 21, 25, 27, 28, 29]:
proj4["a"] = proj.sphere # MAP NAME ?????
proj4["b"] = proj.sphere # MAP NAME ?????
proj4["lon_0"] = proj.centralmeridian
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 17:
proj4["a"] = proj.sphere
proj4["b"] = proj.sphere
proj4["lon_0"] = proj.centralmeridian
proj4["lat_ts"] = proj.truescale
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 19:
proj4["a"] = proj.sphere # MAP NAME ?????
proj4["b"] = proj.sphere # MAP NAME ?????
proj4["lon_0"] = proj.centralmeridian
proj4["lat_0"] = proj.originlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type == 20:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["k_0"] = proj.factor
proj4["lat_0"] = proj.originlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
if (p[12] == 0 or p[12] > 9.9E19):
proj4["subtype"] = proj.subtype
proj4["lon_0"] = proj.longitude1 # MAP NAME ?????
proj4["lat_1"] = proj.latitude1
proj4["lonc"] = proj.longitude2 # MAP NAME ?????
proj4["lat_2"] = proj.latitude2
else:
proj4["subtype"] = proj.subtype
proj4["azi"] = proj.azimuthalangle # MAP NAME ?????
proj4["lon_0"] = proj.azimuthallongitude # MAP NAME ?????
elif proj._type == 22:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
if (p[12] == 0 or p[12] > 9.9E19):
proj4["subtype"] = proj.subtype
proj4["???"] = proj.orbitinclination # MAP NAME ?????
proj4["???"] = proj.orbitlongitude # MAP NAME ?????
proj4["???"] = proj.satelliterevolutionperiod # MAP NAME ?????
proj4["???"] = proj.landsatcompensationratio # MAP NAME ?????
proj4["???"] = proj.pathflag # MAP NAME ?????
else:
proj4["subtype"] = proj.subtype
proj4["???"] = proj.satellite # MAP NAME ?????
proj4["???"] = proj.path # MAP NAME ?????
elif proj._type == 23:
proj4["a"] = proj.smajor
proj4["b"] = proj.sminor
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
elif proj._type in [24, 26]:
proj4["a"] = proj.sphere # MAP NAME ?????
proj4["b"] = proj.sphere # MAP NAME ?????
elif proj._type == 30:
proj4["a"] = proj.sphere # MAP NAME ?????
proj4["b"] = proj.sphere # MAP NAME ?????
proj4["???"] = proj.shapem # MAP NAME ?????
proj4["???"] = proj.shapen # MAP NAME ?????
proj4["lon_0"] = proj.centerlongitude
proj4["lat_0"] = proj.centerlatitude
proj4["x_0"] = proj.falseeasting
proj4["y_0"] = proj.falsenorthing
if proj._type == 6:
pd.SetOptionalParameter("lat_0", "90")
for k in proj4:
if not numpy.allclose(proj4[k], 1.e20):
if k == "lon_0":
pd.SetCentralMeridian(proj4[k])
elif k != "???":
pd.SetOptionalParameter(k, str(proj4[k]))
# Vtk dump
dumps = {}
def dump2VTK(obj, fnm=None):
global dumps
if fnm is None:
fnm = "foo.vtk" % dumps
if fnm[:-4].lower() != ".vtk":
fnm += ".vtk"
if fnm in dumps:
dumps[fnm] += 1
fnm = fnm[:-4] + "%.3i.vtk" % dumps[fnm]
else:
dumps[fnm] = 0
dsw = vtk.vtkDataSetWriter()
dsw.SetFileName(fnm)
try:
dsw.SetInputData(obj)
except Exception:
dsw.SetInputConnection(obj.GetOutputPort())
dsw.Write()
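# Debug helper usage (sketch): the first dump2VTK(grid, "wrapped") call writes
# wrapped.vtk; repeated calls with the same name write wrapped001.vtk,
# wrapped002.vtk, ... so successive pipeline stages can be compared.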
def doWrapData(data, wc, wrap=[0., 360], fastClip=True):
'''
Wrapping around and 'wrap' modulo' and clipping.
wrap contains YWrap, XWrap modulo in degrees, 0 means no wrap
'''
if wrap is None:
return data
debugMsg('Doing actual wrapping')
debugMsg(' wc = {0}'.format(wc))
debugMsg(' wrap = {0}'.format(wrap))
# convert to poly data
surface = vtk.vtkDataSetSurfaceFilter()
surface.SetInputData(data)
surface.Update()
data = surface.GetOutput()
bounds = data.GetBounds()
# ensure that GLOBALIDS are not removed by the append filter
attributes = data.GetCellData()
globalIds = attributes.GetGlobalIds()
globalIdsName = None
if (globalIds):
globalIdsName = globalIds.GetName()
attributes.SetActiveAttribute(-1, vtk.vtkDataSetAttributes.GLOBALIDS)
# ensure that vtkTransformPolyData does not change the VECTORS attribute
pointAttributes = data.GetPointData()
vectors = pointAttributes.GetVectors()
vectorsName = None
if (vectors):
vectorsName = vectors.GetName()
pointAttributes.SetActiveAttribute(-1, vtk.vtkDataSetAttributes.VECTORS)
xmn = min(wc[0], wc[1])
xmx = max(wc[0], wc[1])
if (numpy.allclose(xmn, 1.e20) or numpy.allclose(xmx, 1.e20)):
xmn = bounds[0]
if (wrap[1] > 0. and (bounds[1] - bounds[0] > wrap[1])):
xmx = bounds[0] + abs(wrap[1])
else:
xmx = bounds[1]
ymn = min(wc[2], wc[3])
ymx = max(wc[2], wc[3])
if numpy.allclose(ymn, 1.e20) or numpy.allclose(ymx, 1.e20):
ymn = bounds[2]
if (wrap[0] > 0. and (bounds[3] - bounds[2] > wrap[0])):
ymx = bounds[2] + abs(wrap[0])
else:
ymx = bounds[3]
appendFilter = vtk.vtkAppendPolyData()
appendFilter.AddInputData(data)
appendFilter.Update()
# X axis wrappping
Amn, Amx = bounds[0], bounds[1]
nX = [0, 0] # number of translations needed (neg and pos)
if wrap[1] != 0.:
while Amn > xmn:
nX[0] += 1
Amn -= wrap[1]
while Amx < xmx:
nX[1] += 1
Amx += wrap[1]
Amn, Amx = bounds[2], bounds[3]
nY = [0, 0] # number of translations needed (neg and pos)
if wrap[0] != 0.:
while Amn > ymn:
nY[0] += 1
Amn -= wrap[0]
while Amx < ymx:
nY[1] += 1
Amx += wrap[0]
nNeg = -max(nX[0], nY[0]) # Number of negative translations needed
nPos = max(nX[1], nY[1]) + 1 # Number of positive translations needed
# Apply every translation (the identity copy is already in the append filter)
for i in range(nNeg, nPos):
for j in range(nNeg, nPos):
if i == 0 and j == 0:
continue
Tpf = vtk.vtkTransformPolyDataFilter()
Tpf.SetInputData(data)
T = vtk.vtkTransform()
T.Translate(i * wrap[1], j * wrap[0], 0)
Tpf.SetTransform(T)
Tpf.Update()
appendFilter.AddInputData(Tpf.GetOutput())
appendFilter.Update()
# Clip the data to the final window:
clipBox = vtk.vtkBox()
clipBox.SetXMin(xmn, ymn, -1.0)
clipBox.SetXMax(xmx, ymx, 1.0)
if fastClip:
clipper = vtk.vtkExtractPolyDataGeometry()
clipper.ExtractInsideOn()
clipper.SetImplicitFunction(clipBox)
clipper.ExtractBoundaryCellsOn()
clipper.PassPointsOff()
else:
clipper = vtk.vtkClipPolyData()
clipper.InsideOutOn()
clipper.SetClipFunction(clipBox)
clipper.SetInputConnection(appendFilter.GetOutputPort())
clipper.Update()
result = clipper.GetOutput()
if (globalIdsName):
attributes = result.GetCellData()
index = vtk.mutable(-1)
attributes.GetArray(globalIdsName, index)
attributes.SetActiveAttribute(index, vtk.vtkDataSetAttributes.GLOBALIDS)
if (vectorsName):
index = vtk.mutable(-1)
pointAttributes = result.GetPointData()
pointAttributes.GetArray(vectorsName, index)
pointAttributes.SetActiveAttribute(index, vtk.vtkDataSetAttributes.VECTORS)
debugMsg(' bounds after wrap = {0}'.format(result.GetBounds()))
return result
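# Example (sketch): wrap=[0., 360.] replicates the polydata along X modulo 360
# degrees (no Y wrap) and clips the result to the world-coordinate window:
# wrapped = doWrapData(polydata, [-180., 180., -90., 90.], wrap=[0., 360.])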
# Wrap grid in interval minX, minX + 360
# minX is the minimum x value for 'grid'
def wrapDataSetX(grid):
# Clip the dataset into 2 pieces: left and right
bounds = grid.GetBounds()
minX = bounds[0]
intervalX = 360
maxX = minX + intervalX
plane = vtk.vtkPlane()
plane.SetOrigin(maxX, 0, 0)
plane.SetNormal(1, 0, 0)
clipRight = vtk.vtkClipDataSet()
clipRight.SetClipFunction(plane)
clipRight.SetInputData(grid)
clipRight.Update()
right = clipRight.GetOutputDataObject(0)
plane.SetNormal(-1, 0, 0)
clipLeft = vtk.vtkClipDataSet()
clipLeft.SetClipFunction(plane)
clipLeft.SetInputData(grid)
clipLeft.Update()
left = clipLeft.GetOutputDataObject(0)
# translate the right piece
tl = vtk.vtkTransform()
tl.Translate(-intervalX, 0, 0)
translateLeft = vtk.vtkTransformFilter()
translateLeft.SetTransform(tl)
translateLeft.SetInputData(right)
translateLeft.Update()
right = translateLeft.GetOutput()
# append the pieces together
append = vtk.vtkAppendFilter()
append.AddInputData(left)
append.AddInputData(right)
append.MergePointsOn()
append.Update()
whole = append.GetOutput()
return whole
def setClipPlanes(mapper, xmin, xmax, ymin, ymax):
clipPlaneCollection = vtk.vtkPlaneCollection()
if xmin != xmax:
clipPlaneXMin = vtk.vtkPlane()
clipPlaneXMin.SetOrigin(xmin, 0.0, 0.0)
clipPlaneXMin.SetNormal(1.0, 0.0, 0.0)
clipPlaneXMax = vtk.vtkPlane()
clipPlaneXMax.SetOrigin(xmax, 0.0, 0.0)
clipPlaneXMax.SetNormal(-1.0, 0.0, 0.0)
clipPlaneCollection.AddItem(clipPlaneXMin)
clipPlaneCollection.AddItem(clipPlaneXMax)
if ymin != ymax:
clipPlaneYMin = vtk.vtkPlane()
clipPlaneYMin.SetOrigin(0.0, ymin, 0.0)
clipPlaneYMin.SetNormal(0.0, 1.0, 0.0)
clipPlaneYMax = vtk.vtkPlane()
clipPlaneYMax.SetOrigin(0.0, ymax, 0.0)
clipPlaneYMax.SetNormal(0.0, -1.0, 0.0)
clipPlaneCollection.AddItem(clipPlaneYMin)
clipPlaneCollection.AddItem(clipPlaneYMax)
if clipPlaneCollection.GetNumberOfItems() > 0:
mapper.SetClippingPlanes(clipPlaneCollection)
# The code is replaced by the setClipPlanes above
# def doClip(data, xmin,xmax,ymin,ymax):
# if xmin!=xmax:
# xminClip = doClip1(data,xmin,1,0)
# xfullClip = doClip1(xminClip,xmax,-1,0)
# else:
# xfullClip = data
# if ymin!=ymax:
# yminClip = doClip1(xfullClip,ymin,1,1)
# xyClip = doClip1(yminClip,ymax,-1,1)
# else:
# xyClip = xfullClip
# return xyClip
# def doClip1(data,value,normal,axis=0):
# return data
# We have the actor, do clipping
# clpf = vtk.vtkPlane()
# if axis == 0:
# clpf.SetOrigin(value,0,0)
# clpf.SetNormal(normal,0,0)
# else:
# clpf.SetOrigin(0,value,0)
# clpf.SetNormal(0,normal,0)
# clp = vtk.vtkClipPolyData()
# clp.SetClipFunction(clpf)
# clp.SetInputData(data)
# clp.Update()
# return clp.GetOutput()
def prepTextProperty(p, winSize, to="default", tt="default", cmap=None,
overrideColorIndex=None):
if isinstance(to, str):
to = vcs.elements["textorientation"][to]
if isinstance(tt, str):
tt = vcs.elements["texttable"][tt]
if tt.colormap is not None:
cmap = tt.colormap
elif cmap is None:
cmap = vcs._colorMap
if isinstance(cmap, str):
cmap = vcs.elements["colormap"][cmap]
colorIndex = overrideColorIndex if overrideColorIndex else tt.color
if isinstance(colorIndex, int):
c = cmap.index[colorIndex]
else:
c = colorIndex
p.SetColor([C / 100. for C in c[:3]])
p.SetOpacity(c[-1] / 100.)
bcolorIndex = tt.backgroundcolor if tt.backgroundcolor else 255
if isinstance(bcolorIndex, int):
bc = cmap.index[bcolorIndex]
else:
bc = bcolorIndex
p.SetBackgroundColor([C / 100. for C in bc[:3]])
bopacity = (tt.backgroundopacity / 100.) if tt.backgroundopacity else 0
p.SetBackgroundOpacity(bopacity)
if to.halign in [0, 'left']:
p.SetJustificationToLeft()
elif to.halign in [2, 'right']:
p.SetJustificationToRight()
elif to.halign in [1, 'center']:
p.SetJustificationToCentered()
p.SetOrientation(-to.angle)
if to.valign in [0, 'top']:
p.SetVerticalJustificationToTop()
elif to.valign in [2, 'half']:
p.SetVerticalJustificationToCentered()
elif to.valign in [4, 'bottom']:
p.SetVerticalJustificationToBottom()
elif to.valign in [1, 'cap']:
warnings.warn("VTK does not support 'cap' align, using 'top'")
p.SetVerticalJustificationToTop()
elif to.valign in [3, 'base']:
warnings.warn("VTK does not support 'base' align, using 'bottom'")
p.SetVerticalJustificationToBottom()
p.SetFontFamily(vtk.VTK_FONT_FILE)
p.SetFontFile(vcs.elements["font"][vcs.elements["fontNumber"][tt.font]])
p.SetFontSize(int(to.height * winSize[1] / 800.))
class TextActorWrapperItem(object):
def __init__(self, textActor):
self.textActor = textActor
def Initialize(self, vtkSelf):
return True
def Paint(self, vtkSelf, context2D):
pos = self.textActor.GetPosition()
text = self.textActor.GetInput()
textProp = self.textActor.GetTextProperty()
context2D.ApplyTextProp(textProp)
context2D.DrawString(pos[0], pos[1], text)
return False
# def genTextActor(renderer, string=None, x=None, y=None,
def genTextActor(contextArea, string=None, x=None, y=None,
to='default', tt='default', cmap=None, geoBounds=None, geo=None):
renderer = contextArea.GetDrawAreaItem().GetScene().GetRenderer()
if isinstance(to, str):
to = vcs.elements["textorientation"][to]
if isinstance(tt, str):
tt = vcs.elements["texttable"][tt]
if tt.priority == 0:
return []
if string is None:
string = tt.string
if x is None:
x = tt.x
if y is None:
y = tt.y
if x is None or y is None or string in [['', ], []]:
return []
n = max(len(x), len(y), len(string))
for a in [x, y, string]:
while len(a) < n:
a.append(a[-1])
sz = renderer.GetRenderWindow().GetSize()
actors = []
pts = vtk.vtkPoints()
if vcs.elements["projection"][tt.projection].type != "linear":
if geoBounds is not None:
wc = geoBounds[:4]
else:
wc = None
for i in range(n):
t = vtk.vtkTextActor()
p = t.GetTextProperty()
prepTextProperty(p, sz, to, tt, cmap)
pts = vtk.vtkPoints()
pts.InsertNextPoint(x[i], y[i], 0.)
if vcs.elements["projection"][tt.projection].type != "linear":
_, pts = project(pts, tt.projection, tt.worldcoordinate, geo=geo)
X, Y, tz = pts.GetPoint(0)
if wc is None:
wc = tt.worldcoordinate
pts_wc = vtk.vtkPoints()
# Scan a bunch of points within wc
# In case the projection deformation brings origins close
# to each other
for wx in numpy.arange(wc[0], wc[1], (wc[1] - wc[0]) / 25.):
for wy in numpy.arange(wc[2], wc[3], (wc[3] - wc[2]) / 25.):
pts_wc.InsertNextPoint(wx, wy, 0.)
_, pts_wc = project(pts_wc, tt.projection, tt.worldcoordinate, geo=geo)
as_numpy = VN.vtk_to_numpy(pts_wc.GetData())
wx = as_numpy[:, 0]
wy = as_numpy[:, 1]
wc = [wx.min(), wx.max(), wy.min(), wy.max()]
X, Y = world2Renderer(renderer, X, Y, tt.viewport, wc)
else:
X, Y = world2Renderer(
renderer, x[i], y[i], tt.viewport, tt.worldcoordinate)
t.SetPosition(X, Y)
t.SetInput(string[i])
item = vtk.vtkPythonItem()
item.SetPythonObject(TextActorWrapperItem(t))
contextArea.GetDrawAreaItem().AddItem(item)
actors.append(t)
return actors
def prepPrimitive(prim):
if prim.x is None or prim.y is None:
return 0
if not isinstance(prim.x[0], (list, tuple)):
prim.x = [prim.x, ]
if not isinstance(prim.y[0], (list, tuple)):
prim.y = [prim.y, ]
if vcs.isfillarea(prim):
atts = ["x", "y", "color", "style", "index"]
elif vcs.ismarker(prim):
atts = ["x", "y", "color", "size", "type"]
elif vcs.isline(prim):
atts = ["x", "y", "color", "width", "type"]
n = 0
for a in atts:
n = max(n, len(getattr(prim, a)))
for a in atts:
v = getattr(prim, a)
while len(v) < n:
v.append(v[-1])
setattr(prim, a, v)
# Handle fillarea opacity case, where the default will depend on the style
if vcs.isfillarea(prim):
o = getattr(prim, "opacity")
s = getattr(prim, "style")
assert(len(s) == n)
if not o:
o = []
while len(o) < n:
lastind = len(o) - 1
if s[lastind] == "pattern":
o.append(0)
else:
o.append(100)
return n
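# Illustrative example: for a marker with x=[[1, 2], [3, 4]] and y=[[0, 0]],
# prepPrimitive pads every attribute to the longest length, so y becomes
# [[0, 0], [0, 0]], color/size/type are repeated, and n == 2 is returned.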
def __build_pd__():
pts = vtk.vtkPoints()
polygons = vtk.vtkCellArray()
polygonPolyData = vtk.vtkPolyData()
polygonPolyData.SetPoints(pts)
polygonPolyData.SetPolys(polygons)
return pts, polygons, polygonPolyData
def prepFillarea(context, renWin, farea, cmap=None):
vp = farea.viewport
# view and interactive area
view = context.contextView
area = vtk.vtkContextArea()
view.GetScene().AddItem(area)
wc = farea.worldcoordinate
rect = vtk.vtkRectd(wc[0], wc[2], wc[1] - wc[0], wc[3] - wc[2])
[renWinWidth, renWinHeight] = renWin.GetSize()
geom = vtk.vtkRecti(int(round(vp[0] * renWinWidth)),
int(round(vp[2] * renWinHeight)),
int(round((vp[1] - vp[0]) * renWinWidth)),
int(round((vp[3] - vp[2]) * renWinHeight)))
area.SetDrawAreaBounds(rect)
area.SetGeometry(geom)
area.SetFillViewport(False)
area.SetShowGrid(False)
axisLeft = area.GetAxis(vtk.vtkAxis.LEFT)
axisRight = area.GetAxis(vtk.vtkAxis.RIGHT)
axisBottom = area.GetAxis(vtk.vtkAxis.BOTTOM)
axisTop = area.GetAxis(vtk.vtkAxis.TOP)
axisTop.SetVisible(False)
axisRight.SetVisible(False)
axisLeft.SetVisible(False)
axisBottom.SetVisible(False)
axisTop.SetMargins(0, 0)
axisRight.SetMargins(0, 0)
axisLeft.SetMargins(0, 0)
axisBottom.SetMargins(0, 0)
n = prepPrimitive(farea)
if n == 0:
return []
actors = []
# Find color map:
if farea.colormap is not None:
cmap = farea.colormap
elif cmap is None:
cmap = vcs._colorMap
if isinstance(cmap, str):
cmap = vcs.elements["colormap"][cmap]
# Create data structures
pts, polygons, polygonPolyData = __build_pd__()
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(4)
colors.SetNumberOfTuples(n)
colors.SetName("Colors")
polygonPolyData.GetCellData().SetScalars(colors)
pattern_polydatas = []
# Iterate through polygons:
for i in range(n):
x = farea.x[i]
y = farea.y[i]
st = farea.style[i]
if st == "pattern":
c = (0., 0., 0., 100.)
else:
c = farea.color[i]
if st == "solid":
points, polys, pd, color_arr = pts, polygons, polygonPolyData, colors
else:
points, polys, pd = __build_pd__()
color_arr = vtk.vtkUnsignedCharArray()
color_arr.SetNumberOfComponents(4)
color_arr.SetNumberOfTuples(1)
color_arr.SetName("BackgroundColors")
pd.GetCellData().SetScalars(color_arr)
pattern_polydatas.append([i, pd])
N = max(len(x), len(y))
for a in [x, y]:
assert(len(a) == N)
polygon = vtk.vtkPolygon()
# Add current polygon
pid = polygon.GetPointIds()
pid.SetNumberOfIds(N)
for j in range(N):
pid.SetId(j, points.InsertNextPoint(x[j], y[j], 0.))
cellId = polys.InsertNextCell(polygon)
if isinstance(c, int):
color = [C for C in cmap.index[c]]
else:
color = [C for C in c]
if len(farea.opacity) > i:
opacity = farea.opacity[i]
if opacity is not None:
opacity = farea.opacity[i]
else:
opacity = None
# Draw colored background for solid
# transparent/white background for hatches/patterns
# Add the color to the color array:
if opacity is not None:
color[-1] = opacity
color = [int(C / 100. * 255) for C in color]
if st == 'solid':
# In this case, colors is our scalar array
# so, add the color at the cell index
color_arr.SetTypedTuple(cellId, color)
else:
# In this case, colors is a backup array that represents colors
# for the pattern in each polygon. Each tuple in the colors array
# should represent the pattern color for the indexed polygon
colors.SetTypedTuple(i, color)
# color_arr is our scalar array
color_arr.SetTypedTuple(cellId, [255, 255, 255, 0])
# Transform points
geo, pts = project(pts, farea.projection, farea.worldcoordinate)
polygonPolyData.SetPoints(pts)
# for concave polygons
tris = vtk.vtkTriangleFilter()
tris.SetInputData(polygonPolyData)
# If we had at least one "solid" style area
if pts.GetNumberOfPoints() > 0:
tris.Update()
solidPoly = tris.GetOutput()
item = vtk.vtkPolyDataItem()
item.SetPolyData(solidPoly)
item.SetScalarMode(vtk.VTK_SCALAR_MODE_USE_CELL_DATA)
colorArray = solidPoly.GetCellData().GetArray('Colors')
item.SetMappedColors(colorArray)
area.GetDrawAreaItem().AddItem(item)
# Patterns/hatches support
for i, pd in pattern_polydatas:
st = farea.style[i]
if st != "solid":
geo, proj_points = project(
pd.GetPoints(), farea.projection, farea.worldcoordinate)
pd.SetPoints(proj_points)
cellcolor = [0, 0, 0, 0]
colors.GetTypedTuple(i, cellcolor)
pcolor = [indC * 100. / 255.0 for indC in cellcolor]
if len(farea.x[i]) >= 3:
screenGeom = [
(farea.x[i][1] - farea.x[i][0]) * renWinWidth,
(farea.y[i][2] - farea.y[i][1]) * renWinHeight
]
else:
screenGeom = [
(farea.viewport[1] - farea.viewport[0]) * renWinWidth,
(farea.viewport[3] - farea.viewport[2]) * renWinHeight
]
act = fillareautils.make_patterned_polydata(pd,
st,
fillareaindex=farea.index[i],
fillareacolors=pcolor,
fillareaopacity=pcolor[3],
fillareapixelspacing=farea.pixelspacing,
fillareapixelscale=farea.pixelscale,
size=[renWinWidth, renWinHeight],
screenGeom=screenGeom)
if act is not None:
patMapper = act.GetMapper()
patMapper.Update()
patPoly = patMapper.GetInput()
item = vtk.vtkPolyDataItem()
item.SetPolyData(patPoly)
item.SetScalarMode(vtk.VTK_SCALAR_MODE_USE_CELL_DATA)
colorArray = patPoly.GetCellData().GetArray('Colors')
item.SetMappedColors(colorArray)
area.GetDrawAreaItem().AddItem(item)
return actors
def genPoly(coords, pts, filled=True, scale=1.):
N = pts.GetNumberOfPoints()
if filled:
poly = vtk.vtkPolygon()
else:
poly = vtk.vtkPolyLine()
pid = poly.GetPointIds()
if scale != 1.:
coords = numpy.array(coords) * scale
coords = coords.tolist()
n = len(coords)
pid.SetNumberOfIds(n)
for j in range(n):
c = list(coords[j])
if len(c) == 2:
c.append(0)
pts.InsertNextPoint(*c)
pid.SetId(j, j + N)
return poly
def prepGlyph(g, marker, screenGeom, index=0):
t, s = marker.type[index], marker.size[index]
gs = vtk.vtkGlyphSource2D()
pd = None
dx = marker.worldcoordinate[1] - marker.worldcoordinate[0]
dy = marker.worldcoordinate[3] - marker.worldcoordinate[2]
scale = fillareautils.computeMarkerScale([dx, dy], screenGeom, (s * 10))
finalScale = scale
if t == 'dot':
gs.SetGlyphTypeToCircle()
gs.FilledOn()
gs.SetResolution(25)
elif t == 'circle':
gs.SetGlyphTypeToCircle()
gs.FilledOff()
gs.SetResolution(25)
elif t == 'plus':
gs.SetGlyphTypeToCross()
gs.CrossOn()
gs.FilledOff()
elif t == 'cross':
gs.SetGlyphTypeToCross()
gs.CrossOn()
gs.SetRotationAngle(45)
gs.FilledOff()
elif t[:6] == 'square':
gs.SetGlyphTypeToSquare()
gs.FilledOff()
elif t[:7] == 'diamond':
gs.SetGlyphTypeToDiamond()
gs.FilledOff()
elif t[:8] == 'triangle':
gs.SetGlyphTypeToTriangle()
gs.FilledOff()
if t[9] == "d":
gs.SetRotationAngle(180)
elif t[9] == "l":
gs.SetRotationAngle(90)
elif t[9] == "r":
gs.SetRotationAngle(-90)
elif t[9] == "u":
gs.SetRotationAngle(0)
elif t == "hurricane":
scale_factor = finalScale / 2 # Hurricane appears bigger than others
ds = vtk.vtkDiskSource()
ds.SetInnerRadius(.55 * scale_factor)
ds.SetOuterRadius(1.01 * scale_factor)
ds.SetCircumferentialResolution(90)
ds.SetRadialResolution(30)
gf = vtk.vtkGeometryFilter()
gf.SetInputConnection(ds.GetOutputPort())
gf.Update()
scale_factor *= 2. # we need to add a factor 2 for the "arms"
pd1 = gf.GetOutput()
apd = vtk.vtkAppendPolyData()
apd.AddInputData(pd1)
pts = vtk.vtkPoints()
pd = vtk.vtkPolyData()
polygons = vtk.vtkCellArray()
add_angle = numpy.pi / 360.
coords = []
angle1 = .6 * numpy.pi
angle2 = .88 * numpy.pi
while angle1 <= angle2:
coords.append(
[1 + numpy.cos(angle1), numpy.sin(angle1)])
angle1 += add_angle
angle1 = .79 * numpy.pi
angle2 = .6 * numpy.pi
while angle1 >= angle2:
coords.append([1.125 + 2 * numpy.cos(angle1),
-1 + 2 * numpy.sin(angle1)])
angle1 -= add_angle
poly = genPoly(coords, pts, filled=True, scale=scale_factor)
polygons.InsertNextCell(poly)
coords = []
angle1 = 1.6 * numpy.pi
angle2 = 1.9 * numpy.pi
while angle1 <= angle2:
coords.append([-1 + numpy.cos(angle1), numpy.sin(angle1)])
angle1 += add_angle
angle1 = 1.8 * numpy.pi
angle2 = 1.6 * numpy.pi
while angle1 >= angle2:
coords.append([-1.135 + 2 * numpy.cos(angle1),
1 + 2 * numpy.sin(angle1)])
angle1 -= add_angle
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from scipy.signal import fftconvolve
import argparse
# turn off runtime warnings (lots from logic on nans)
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
def size_limit(x,y,image):
yy,xx = image.shape
ind = ((y > 0) & (y < yy-1) & (x > 0) & (x < xx-1))
return ind
def region_cut(table,wcs):
ra = table.ra.values
dec = table.dec.values
foot = wcs.calc_footprint()
minra = min(foot[:,0])
maxra = max(foot[:,0])
mindec = min(foot[:,1])
maxdec = max(foot[:,1])
inddec = (dec < maxdec) & (dec> mindec)
indra = (ra < maxra) & (ra> minra)
ind = indra * inddec
tab = table.iloc[ind]
return tab
def circle_app(rad):
"""
makes a kinda circular aperture, probably not worth using.
"""
mask = np.zeros((int(rad*2+.5)+1,int(rad*2+.5)+1))
c = rad
x,y =np.where(mask==0)
dist = np.sqrt((x-c)**2 + (y-c)**2)
ind = (dist) < rad + .2
mask[y[ind],x[ind]]= 1
return mask
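# Example: circle_app(2) returns a 5x5 array with ones inside radius ~2.2 of
# the central pixel, suitable as a convolution kernel for round spot masks.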
def check_table_format(table):
try:
temp = table.x
temp = table.y
temp = table.ra
temp = table.dec
temp = table.radius
temp = table.mag
temp = table.mjd_start
temp = table.mjd_end
except:
message = ("mask_table must be a csv with the following columns:\nx\ny\nra\ndec\nradius\nmag\nmjd_start\nmjd_end\n"
+ "Only a position (x,y) or (ra,dec) and size (radius) or (mag) is needed to run.")
raise ValueError(message)
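# Illustrative mask_table layout (values are made up); each row needs either
# x/y or ra/dec plus either radius or mag, the remaining columns may be blank:
# x,y,ra,dec,radius,mag,mjd_start,mjd_end
# 1024.0,512.0,,,10,,,
# ,,150.1,2.3,,18.5,58000,58100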
def Spot_mask(fits_file,mask_table,ext=0):
table = pd.read_csv(mask_table)
check_table_format(table)
hdu = fits.open(fits_file)[ext]
# uses the file name to set the time, not versatile
t = float(fits_file.split('/')[-1].split('_')[1])
image = hdu.data
wcs = WCS(hdu.header)
spotmask = np.zeros_like(image,dtype=float)
for i in range(len(table)):
row = table.iloc[i]
start = row.mjd_start
end = row.mjd_end
cont = True
if np.isfinite(start):
if t < start:
cont = False
if np.isfinite(end):
if t > end:
cont = False
if cont:
if np.isfinite(row.x) & np.isfinite(row.y):
x = int(row.x + 0.5)
y = int(row.y + 0.5)
elif np.isfinite(row.ra) & np.isfinite(row.dec):
x,y = wcs.all_world2pix(row.ra,row.dec,0)
if size_limit(x,y,image):
pass
else:
x = np.nan
y = np.nan
print('coordinates ra={}, dec={} not in range'.format(np.round(row.ra,2),np.round(row.dec,2)))
pass
else:
print('no position provided')
# make aperture time
rad = row.radius
mag = row.mag
if np.isfinite(rad):
ap = circle_app(rad)
temp = np.zeros_like(image)
temp[y,x] = 1
conv = fftconvolve(temp, ap,mode='same')#.astype(int)
temp = (conv > 0.9) * 1.
spotmask += conv
elif np.isfinite(mag):
import numpy as np
import pandas as pd
from astropy.modeling import models,fitting
from astropy.io import fits
from astropy.table import Table
class OptimalExtraction:
"""
OptimalExtraction is a python class to perform optimal extraction of a spectrum from an image. Originally, the algorithm was proposed by [1]. In this adaptation, we followed [2].
#####
+ Inputs:
- image = an image in 2D array.
Assumptions:
i) image was processed, cleaned, and calibrated in a standard CCD processing steps (i.e., bias subtraction, flatfield, cosmic-ray removal/repair, and background subtracted).
ii) image was resampled from the original image so that
a) dispersion is along the x-axis and cross-dispersion along the y-axis. (See hstgrism.aperturedirection and hstgrism.resampling if you are working with HST images).
b) Each x column is assumed to be one wavelength bin with line spreading along y-axis.
c) Peak of each wavelength bin is aligned at the center row in the image. (See hstgrism.extractcompoundmodel1d for fitting the trace centers).
- var = 2D array of variance corresponding to the image
- bin_number = 1D array parallel to dispersion axis (i.e., x-axis) specifying bin numbers. Number 0 = skip the extraction on that column.
- bin_kernel_initial = initial extraction kernel. Only astropy.modeling.models.Gaussian1D is supported for this version.
- fitter = astropy.modeling.fitting.LevMarLSQFitter() recommended.
- do_fit_bin = True to re-fit using the bin_initial_kernel, and new parameters will be used in the final step of extraction. If False, bin_initial_kernel would be used for extracting in the final step.
- kernel_mode = 'Gaussian1D' only in this version.
#####
+ Compute:
- self.compute() to start the optimal extraction after properly instantiate.
i) self.bin_image = 2D array of binned image, i.e., columns with the same bin_number are added. x-axis of bin_image corresponds to bin_number.
ii) self.bin_var = 2D array of binned variance. Simply add columns of variance with the same bin_number.
iii) self.bin_kernel_fit = dict of key:bin_number, value:extraction kernel. If do_fit_bin = False, bin_kernel_fit = bin_kernel_initial.
iv) self.bin_image_kernel = 2D array of binned image using bin_kernel_fit for estimation.
v) self.kernel_fit = 1D array of extraction kernel runs parallel to dispersion axis (unbinned), and re-normalized. This is simply prepared from bin_kernel_fit and bin_numer. Re-normalization is done for each dispersion column given bin_kernel_fit, but fixed the shape profile. For example, in Gaussian1D kernel, the re-normalization fixes mean and stddev of each column, and re-fit for the amplitude.
vi) self.image_kernel = 2D array of unbinned image using kernel_fit for estimation.
vii) self.optimal1d = 1D array of optimally extracted profile
#####
+ Save:
- container = optimalextraction.container.Container
- wavelength = 1D array of wavelengths parallel to dispersion columns. (Use trace.csv if working with HSTGRISM environment).
- optimalextraction.fits = multiextension fits file (See more in astropy.io.fits)
EXT0: PrimaryHDU
EXT1: ImageHDU as self.image_kernel
EXT2: BinTableHDU with columns as bin_number (i.e., 1D array parallel to dispersion columns specifying bin numbers), and self.kernel_fit parameters (i.e., amplitude, mean, stddev for Gaussian1D kernel_mode).
- optimalextraction.csv = csv table with columns as column_index, wavelength (if specified), spectrum. Spectrum is optimally extracted.
#####
+ References:
[1] Horne 1986, 'An optimal extraction algorithm for CCD spectroscopy': https://ui.adsabs.harvard.edu/abs/1986PASP...98..609H/abstract
[2] Space Telescope Science Institute, 'NIRSpec MOS Optimal Spectral Extraction': https://github.com/spacetelescope/dat_pyinthesky/blob/master/jdat_notebooks/optimal_extraction/Spectral%20Extraction.ipynb
[3] Astropy, 'Models and Fitting': https://docs.astropy.org/en/stable/modeling/index.html
"""
def __init__(self,image,var,bin_number,bin_kernel_initial,fitter,do_fit_bin=True,kernel_mode='Gaussian1D'):
self.image = image
self.var = var
self.bin_number = bin_number
self.bin_kernel_initial = bin_kernel_initial
self.fitter = fitter
self.do_fit_bin = do_fit_bin
self.kernel_mode = kernel_mode
def compute(self):
self.bin_image = self._make_bin_image(mode='image')
self.bin_var = self._make_bin_image(mode='var')
self.bin_kernel_fit = self.bin_kernel_initial
if self.do_fit_bin:
self.bin_kernel_fit = self._fit_bin()
self.bin_image_kernel = self._compute_bin_image_kernel()
self.kernel_fit,self.image_kernel = self._compute_kernel_fit()
self.optimal1d = self._compute_optimal()
def save(self,container=None,wavelength=None):
if container is None:
raise ValueError('container must be specified. See optimalextraction.container.Container.')
#####
# bin_number,kernel_fit,image_kernel >>> optimalextraction.fits
string = './{0}/{1}_optimalextraction.fits'.format(container.data['savefolder'],container.data['saveprefix'])
if self.kernel_mode=='Gaussian1D':
amplitude,mean,stddev = [],[],[]
for ii,i in enumerate(self.kernel_fit):
amplitude.append(self.kernel_fit[ii].amplitude[0])
mean.append(self.kernel_fit[ii].mean[0])
stddev.append(self.kernel_fit[ii].stddev[0])
t = {'bin_number':self.bin_number,'amplitude':amplitude,'mean':mean,'stddev':stddev}
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU(self.image_kernel)
bhdu = fits.BinTableHDU(Table(t))
hdul = fits.HDUList([phdu,ihdu,bhdu])
hdul.writeto(string,overwrite=True)
print('Save {0}'.format(string))
#####
# optimal1d >>> optimalextraction.csv
string = './{0}/{1}_optimalextraction.csv'.format(container.data['savefolder'],container.data['saveprefix'])
ty,ty_name = self.optimal1d,'spectrum'
tx,tx_name = np.arange(len(ty)),'column_index'
t = {tx_name:tx, ty_name:ty}
if wavelength is not None:
tw,tw_name = wavelength,'wavelength'
t = {tx_name:tx, tw_name:tw, ty_name:ty}
pd.DataFrame(t).to_csv(string)
print('Save {0}'.format(string))
def _compute_optimal(self):
ny,nx = self.image.shape
cross_dispersion_array = np.arange(ny)
dispersion_array = np.arange(nx)
optimal1d = np.full_like(dispersion_array,0.,dtype=float)
for i in dispersion_array:
bin_number = self.bin_number[i]
if bin_number==0:
continue
val = self.image[:,i]
var = self.var[:,i]
kernel = self.bin_kernel_fit[bin_number]
if self.kernel_mode=='Gaussian1D':
norm = np.sqrt(2. * np.pi) * kernel.amplitude * kernel.stddev
else:
raise ValueError('kernel_mode = Gaussian1D only available')
kernel_normalized = kernel(cross_dispersion_array) / norm
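# Horne (1986) optimal weighting: flux = sum(P * D / V) / sum(P**2 / V), where P is the
# unit-normalized spatial profile, D the data column, and V its variance.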
A = val * kernel_normalized / var
B = np.power(kernel_normalized,2) / var
C = A.sum(axis=0) / B.sum(axis=0)
optimal1d[i] = C.copy()
return optimal1d
def _compute_kernel_fit(self):
kernel_fit = []
image_kernel = np.full_like(self.image,0.,dtype=float)
ny,nx = self.image.shape
cross_dispersion_array = np.arange(ny)
for i in np.arange(nx):
#!/usr/bin/env python
import rospy
import os
import numpy as np
import torch
import message_filters
import cv_bridge
from pathlib import Path
import open3d as o3d
from utils import *
from adet.config import get_cfg
from adet.utils.visualizer import visualize_pred_amoda_occ
from adet.utils.post_process import detector_postprocess, DefaultPredictor
from foreground_segmentation.model import Context_Guided_Network
from std_msgs.msg import String, Header
from sensor_msgs.msg import Image, CameraInfo, RegionOfInterest
from uoais.msg import UOAISResults
from uoais.srv import UOAISRequest, UOAISRequestResponse
class UOAIS():
def __init__(self):
rospy.init_node("uoais")
self.mode = rospy.get_param("~mode", "topic")
rospy.loginfo("Starting uoais node with {} mode".format(self.mode))
self.rgb_topic = rospy.get_param("~rgb", "/camera/color/image_raw")
self.depth_topic = rospy.get_param("~depth", "/camera/aligned_depth_to_color/image_raw")
camera_info_topic = rospy.get_param("~camera_info", "/camera/color/camera_info")
# UOAIS-Net
self.det2_config_file = rospy.get_param("~config_file",
"configs/R50_rgbdconcat_mlc_occatmask_hom_concat.yaml")
self.confidence_threshold = rospy.get_param("~confidence_threshold", 0.5)
# CG-Net foreground segmentation
self.use_cgnet = rospy.get_param("~use_cgnet", False)
self.cgnet_weight = rospy.get_param("~cgnet_weight",
"foreground_segmentation/rgbd_fg.pth")
# RANSAC plane segmentation
self.use_planeseg = rospy.get_param("~use_planeseg", False)
self.ransac_threshold = rospy.get_param("~ransac_threshold", 0.003)
self.ransac_n = rospy.get_param("~ransac_n", 3)
self.ransac_iter = rospy.get_param("~ransac_iter", 10)
self.cv_bridge = cv_bridge.CvBridge()
# initialize UOAIS-Net and CG-Net
self.load_models()
if self.use_planeseg:
camera_info = rospy.wait_for_message(camera_info_topic, CameraInfo)
self.K = camera_info.K
self.o3d_camera_intrinsic = None
if self.mode == "topic":
rgb_sub = message_filters.Subscriber(self.rgb_topic, Image)
depth_sub = message_filters.Subscriber(self.depth_topic, Image)
self.ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub], queue_size=1, slop=2)
self.ts.registerCallback(self.topic_callback)
self.result_pub = rospy.Publisher("/uoais/results", UOAISResults, queue_size=10)
rospy.loginfo("uoais results at topic: /uoais/results")
elif self.mode == "service":
self.srv = rospy.Service('/get_uoais_results', UOAISRequest, self.service_callback)
rospy.loginfo("uoais results at service: /get_uoais_results")
else:
raise NotImplementedError
self.vis_pub = rospy.Publisher("/uoais/vis_img", Image, queue_size=10)
rospy.loginfo("visualization results at topic: /uoais/vis_img")
def load_models(self):
# UOAIS-Net
self.det2_config_file = os.path.join(Path(__file__).parent.parent, self.det2_config_file)
rospy.loginfo("Loading UOAIS-Net with config_file: {}".format(self.det2_config_file))
self.cfg = get_cfg()
self.cfg.merge_from_file(self.det2_config_file)
self.cfg.defrost()
self.cfg.MODEL.WEIGHTS = os.path.join(Path(__file__).parent.parent, self.cfg.OUTPUT_DIR, "model_final.pth")
self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.confidence_threshold
self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = self.confidence_threshold
self.predictor = DefaultPredictor(self.cfg)
self.W, self.H = self.cfg.INPUT.IMG_SIZE
# CG-Net (foreground segmentation)
if self.use_cgnet:
checkpoint = torch.load(os.path.join(Path(__file__).parent.parent, self.cgnet_weight))
self.fg_model = Context_Guided_Network(classes=2, in_channel=4)
self.fg_model.load_state_dict(checkpoint['model'])
self.fg_model.cuda()
self.fg_model.eval()
def topic_callback(self, rgb_msg, depth_msg):
results = self.inference(rgb_msg, depth_msg)
self.result_pub.publish(results)
def service_callback(self, msg):
rgb_msg = rospy.wait_for_message(self.rgb_topic, Image)
depth_msg = rospy.wait_for_message(self.depth_topic, Image)
results = self.inference(rgb_msg, depth_msg)
return UOAISRequestResponse(results)
def inference(self, rgb_msg, depth_msg):
rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb_msg, desired_encoding='bgr8')
ori_H, ori_W, _ = rgb_img.shape
rgb_img = cv2.resize(rgb_img, (self.W, self.H))
depth = self.cv_bridge.imgmsg_to_cv2(depth_msg, desired_encoding='32FC1')
depth_img = normalize_depth(depth)
depth_img = cv2.resize(depth_img, (self.W, self.H), interpolation=cv2.INTER_NEAREST)
depth_img = inpaint_depth(depth_img)
# UOAIS-Net inference
if self.cfg.INPUT.DEPTH and self.cfg.INPUT.DEPTH_ONLY:
uoais_input = depth_img
elif self.cfg.INPUT.DEPTH and not self.cfg.INPUT.DEPTH_ONLY:
uoais_input = np.concatenate([rgb_img, depth_img], -1)
outputs = self.predictor(uoais_input)
instances = detector_postprocess(outputs['instances'], self.H, self.W).to('cpu')
preds = instances.pred_masks.detach().cpu().numpy()
pred_visibles = instances.pred_visible_masks.detach().cpu().numpy()
pred_bboxes = instances.pred_boxes.tensor.detach().cpu().numpy()
pred_occs = instances.pred_occlusions.detach().cpu().numpy()
# filter out the background instances
# CG-Net
if self.use_cgnet:
rospy.loginfo_once("Using foreground segmentation model (CG-Net) to filter out background instances")
fg_rgb_input = standardize_image(cv2.resize(rgb_img, (320, 240)))
fg_rgb_input = array_to_tensor(fg_rgb_input).unsqueeze(0)
fg_depth_input = cv2.resize(depth_img, (320, 240))
fg_depth_input = array_to_tensor(fg_depth_input[:,:,0:1]).unsqueeze(0) / 255
fg_input = torch.cat([fg_rgb_input, fg_depth_input], 1)
fg_output = self.fg_model(fg_input.cuda())
fg_output = fg_output.cpu().data[0].numpy().transpose(1, 2, 0)
fg_output = np.asarray(np.argmax(fg_output, axis=2), dtype=np.uint8)
fg_output = cv2.resize(fg_output, (self.W, self.H), interpolation=cv2.INTER_NEAREST)
# RANSAC
if self.use_planeseg:
rospy.loginfo_once("Using RANSAC plane segmentation to filter out background instances")
o3d_rgb_img = o3d.geometry.Image(rgb_img)
o3d_depth_img = o3d.geometry.Image(unnormalize_depth(depth_img))
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(o3d_rgb_img, o3d_depth_img)
if self.o3d_camera_intrinsic is None:
self.o3d_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
self.W, self.H,
self.K[0]*self.W/ori_W,
self.K[4]*self.H/ori_H,
self.K[2], self.K[5])
o3d_pc = o3d.geometry.PointCloud.create_from_rgbd_image(
rgbd_image, self.o3d_camera_intrinsic)
plane_model, inliers = o3d_pc.segment_plane(distance_threshold=self.ransac_threshold,
ransac_n=self.ransac_n,
num_iterations=self.ransac_iter)
fg_output = np.ones(self.H * self.W)
fg_output[inliers] = 0
fg_output = np.resize(fg_output, (self.H, self.W))
fg_output = np.uint8(fg_output)
if self.use_cgnet or self.use_planeseg:
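# Keep only instances whose predicted visible mask mostly overlaps the estimated
# foreground (`iou` below is the fraction of the visible mask inside fg_output;
# instances under 0.5 are treated as background and dropped).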
remove_idxs = []
for i, pred_visible in enumerate(pred_visibles):
iou = np.sum(np.bitwise_and(pred_visible, fg_output)) / np.sum(pred_visible)
if iou < 0.5:
remove_idxs.append(i)
preds = np.delete(preds, remove_idxs, 0)
pred_visibles = np.delete(pred_visibles, remove_idxs, 0)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('../custom.mplstyle')
import sys
sys.path.append('..')
from lib import *
agemin, agemax = 0, 110
for dataset, bins in [('britanova', np.arange(1.7, 2.7, 0.05)),
('emerson', np.arange(1.7, 2.7, 0.05))
]:
if dataset == 'britanova':
meta = pd.read_csv('data/alpha_britanova_mincount16.csv')
else:
meta = pd.read_csv('data/alpha_emerson_mincount16.csv')
mask = meta['Subject ID'].isin(set(pd.read_csv(data_directory+'emerson-counts.csv')['Subject ID']))
meta = meta[mask]
if dataset == 'britanova':
mask = meta['age'] > 0
meta = meta[mask]
y = meta['alpha']-1.0
print(dataset, np.mean(y))
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from scipy import fft
from vlapy.core import field, vlasov, collisions, vlasov_poisson
def get_vlasov_poisson_step(all_params, stuff_for_time_loop):
"""
This is the highest level function for getting a Vlasov-Poisson timestep method
It reads the chosen inputs where the different solvers to be used for the simulation are specified
and returns the appropriately constructed Vlasov-Poisson stepper based on the choice of
time-integrator
vdfdx-integrator
edfdv-integrator
Poisson-solver
:param all_params: dictionary containing input parameters for the simulation
:param stuff_for_time_loop: dictionary containing derived parameters for the simulation
:return: function for taking a Vlasov-Poisson timestep
"""
vdfdx = vlasov.get_vdfdx(
stuff_for_time_loop=stuff_for_time_loop,
vdfdx_implementation=all_params["vlasov-poisson"]["vdfdx"],
)
edfdv = vlasov.get_edfdv(
stuff_for_time_loop=stuff_for_time_loop,
edfdv_implementation=all_params["vlasov-poisson"]["edfdv"],
)
field_solver = field.get_field_solver(
stuff_for_time_loop=stuff_for_time_loop,
field_solver_implementation=all_params["vlasov-poisson"]["poisson"],
)
vp_step = vlasov_poisson.get_time_integrator(
time_integrator_name=all_params["vlasov-poisson"]["time"],
vdfdx=vdfdx,
edfdv=edfdv,
field_solver=field_solver,
stuff_for_time_loop=stuff_for_time_loop,
)
return vp_step
def get_collision_step(stuff_for_time_loop, all_params):
"""
This returns the function to be used for performing a collision step based on the input parameters and
derived parameters
:param stuff_for_time_loop: dictionary containing derived parameters for the simulation
:param all_params: dictionary containing input parameters for the simulation
:return: function for taking a Fokker-Planck timestep
"""
if all_params["nu"] == 0.0:
def take_collision_step(f):
return f
elif all_params["nu"] > 0.0:
solver = collisions.get_matrix_solver(
nx=stuff_for_time_loop["nx"],
nv=stuff_for_time_loop["nv"],
solver_name=all_params["fokker-planck"]["solver"],
)
get_collision_matrix_for_all_x = collisions.get_batched_array_maker(
vax=stuff_for_time_loop["v"],
nv=stuff_for_time_loop["nv"],
nx=stuff_for_time_loop["nx"],
nu=stuff_for_time_loop["nu"],
dt=stuff_for_time_loop["dt"],
dv=stuff_for_time_loop["dv"],
operator=all_params["fokker-planck"]["type"],
)
def take_collision_step(f):
# The three diagonals representing collision operator for all x
cee_a, cee_b, cee_c = get_collision_matrix_for_all_x(f_xv=f)
# Solve over all x
return solver(cee_a, cee_b, cee_c, f)
else:
raise NotImplementedError
return take_collision_step
def get_f_update(store_f_rule):
"""
This function returns the function used in the stepper for storing f in a batch
It performs the necessary transformations if we're just storing Fourier modes.
:param store_f_rule: (dictionary) the rules to store the distribution function as
dictated by the diagnostics routine for the simulation
:return:
"""
if store_f_rule["space"] == "all":
def get_f_to_store(f):
return f
elif store_f_rule["space"][0] == "k0":
def get_f_to_store(f):
return fft.fft(f, axis=0)[
: len(store_f_rule),
]
else:
raise NotImplementedError
return get_f_to_store
def get_fields_update(dv, v):
"""
This returns a function that updates the moment-based quantities that are stored at every time-step in the
simulation
:param dv: (float) the grid-spacing in v
:param v: (1D float array) the velocity-grid
:return: function with the above values initialized as static variables
"""
def update_fields(temp_storage_fields, e, de, f, i):
"""
This function updates the temporary storage dictionary with a number of moments of the distribution function
:param temp_storage_fields: (dictionary) for storage
:param e: (1D float array (nx,)) the electric field from the simulation
:param de: (1D float array (nx,)) the driver for this timestep
:param f: (2D float array (nx,nv)) Distribution function
:param i: (int) Loop index for storage
:return:
"""
temp_storage_fields["e"][i] = e
temp_storage_fields["driver"][i] = de
temp_storage_fields["n"][i] = np.trapz(f, dx=dv, axis=1)
temp_storage_fields["j"][i] = | np.trapz(f * v, dx=dv, axis=1) | numpy.trapz |
ENABLE_MULTIPROCESSING = True
from dsl import cpp_trace_param_automata
def generate_public_submission():
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from xgboost import XGBClassifier
import pdb
# data_path = Path('.')
data_path = Path('.')
if not (data_path / 'test').exists():
data_path = Path('../input/abstraction-and-reasoning-challenge')
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
def plot_result(test_input, test_prediction,
input_shape):
"""
Plots the actual target and the model prediction side by side,
using the same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
test_input = test_input.reshape(input_shape[0], input_shape[1])
axs[0].imshow(test_input, cmap=cmap, norm=norm)
axs[0].axis('off')
axs[0].set_title('Actual Target')
test_prediction = test_prediction.reshape(input_shape[0], input_shape[1])
axs[1].imshow(test_prediction, cmap=cmap, norm=norm)
axs[1].axis('off')
axs[1].set_title('Model Prediction')
plt.tight_layout()
plt.show()
def plot_test(test_prediction, task_name):
"""
Plots a single test prediction,
using the same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 1, figsize=(15, 15))
axs.imshow(test_prediction, cmap=cmap, norm=norm)
axs.axis('off')
axs.set_title(f'Test Prediction {task_name}')
plt.tight_layout()
plt.show()
# https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
sample_sub1 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub1 = sample_sub1.set_index('output_id')
sample_sub1.head()
def get_moore_neighbours(color, cur_row, cur_col, nrows, ncols):
if cur_row <= 0:
top = -1
else:
top = color[cur_row - 1][cur_col]
if cur_row >= nrows - 1:
bottom = -1
else:
bottom = color[cur_row + 1][cur_col]
if cur_col <= 0:
left = -1
else:
left = color[cur_row][cur_col - 1]
if cur_col >= ncols - 1:
right = -1
else:
right = color[cur_row][cur_col + 1]
return top, bottom, left, right
def get_tl_tr(color, cur_row, cur_col, nrows, ncols):
if cur_row == 0:
top_left = -1
top_right = -1
else:
if cur_col == 0:
top_left = -1
else:
top_left = color[cur_row - 1][cur_col - 1]
if cur_col == ncols - 1:
top_right = -1
else:
top_right = color[cur_row - 1][cur_col + 1]
return top_left, top_right
def make_features(input_color, nfeat):
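# Per-cell feature vector: [0] row, [1] col, [2] cell color, [3:7] direct neighbours
# (top/bottom/left/right), [7:9] top-left/top-right, [9] unique colors in the row,
# [10] unique colors in the column, [11] i + j, [12] unique colors in the local
# (2 * local_neighb) window around the cell.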
nrows, ncols = input_color.shape
feat = np.zeros((nrows * ncols, nfeat))
cur_idx = 0
for i in range(nrows):
for j in range(ncols):
feat[cur_idx, 0] = i
feat[cur_idx, 1] = j
feat[cur_idx, 2] = input_color[i][j]
feat[cur_idx, 3:7] = get_moore_neighbours(input_color, i, j, nrows, ncols)
feat[cur_idx, 7:9] = get_tl_tr(input_color, i, j, nrows, ncols)
feat[cur_idx, 9] = len(np.unique(input_color[i, :]))
feat[cur_idx, 10] = len(np.unique(input_color[:, j]))
feat[cur_idx, 11] = (i + j)
feat[cur_idx, 12] = len(np.unique(input_color[i - local_neighb:i + local_neighb,
j - local_neighb:j + local_neighb]))
cur_idx += 1
return feat
def features(task, mode='train'):
num_train_pairs = len(task[mode])
feat, target = [], []
global local_neighb
for task_num in range(num_train_pairs):
input_color = np.array(task[mode][task_num]['input'])
target_color = task[mode][task_num]['output']
nrows, ncols = len(task[mode][task_num]['input']), len(task[mode][task_num]['input'][0])
target_rows, target_cols = len(task[mode][task_num]['output']), len(task[mode][task_num]['output'][0])
if (target_rows != nrows) or (target_cols != ncols):
print('Number of input rows:', nrows, 'cols:', ncols)
print('Number of target rows:', target_rows, 'cols:', target_cols)
not_valid = 1
return None, None, 1
imsize = nrows * ncols
# offset = imsize*task_num*3 #since we are using three types of aug
feat.extend(make_features(input_color, nfeat))
target.extend(np.array(target_color).reshape(-1, ))
return np.array(feat), np.array(target), 0
# mode = 'eval'
mode = 'test'
if mode == 'eval':
task_path = evaluation_path
elif mode == 'train':
task_path = training_path
elif mode == 'test':
task_path = test_path
all_task_ids = sorted(os.listdir(task_path))
nfeat = 13
local_neighb = 5
valid_scores = {}
model_accuracies = {'ens': []}
pred_taskids = []
for task_id in all_task_ids:
task_file = str(task_path / task_id)
with open(task_file, 'r') as f:
task = json.load(f)
feat, target, not_valid = features(task)
if not_valid:
print('ignoring task', task_file)
print()
not_valid = 0
continue
xgb = XGBClassifier(n_estimators=10, n_jobs=-1)
xgb.fit(feat, target, verbose=-1)
# training on input pairs is done.
# test predictions begins here
num_test_pairs = len(task['test'])
for task_num in range(num_test_pairs):
cur_idx = 0
input_color = np.array(task['test'][task_num]['input'])
nrows, ncols = len(task['test'][task_num]['input']), len(
task['test'][task_num]['input'][0])
feat = make_features(input_color, nfeat)
print('Made predictions for ', task_id[:-5])
preds = xgb.predict(feat).reshape(nrows, ncols)
if (mode == 'train') or (mode == 'eval'):
ens_acc = (np.array(task['test'][task_num]['output']) == preds).sum() / (nrows * ncols)
model_accuracies['ens'].append(ens_acc)
pred_taskids.append(f'{task_id[:-5]}_{task_num}')
# print('ensemble accuracy',(np.array(task['test'][task_num]['output'])==preds).sum()/(nrows*ncols))
# print()
preds = preds.astype(int).tolist()
# plot_test(preds, task_id)
sample_sub1.loc[f'{task_id[:-5]}_{task_num}',
'output'] = flattener(preds)
if (mode == 'train') or (mode == 'eval'):
df = pd.DataFrame(model_accuracies, index=pred_taskids)
print(df.head(10))
print(df.describe())
for c in df.columns:
print(f'for {c} no. of complete tasks is', (df.loc[:, c] == 1).sum())
df.to_csv('ens_acc.csv')
sample_sub1.head()
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
eval_tasks = sorted(os.listdir(evaluation_path))
T = training_tasks
Trains = []
for i in range(400):
task_file = str(training_path / T[i])
task = json.load(open(task_file, 'r'))
Trains.append(task)
E = eval_tasks
Evals = []
for i in range(400):
task_file = str(evaluation_path / E[i])
task = json.load(open(task_file, 'r'))
Evals.append(task)
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
# 0:black, 1:blue, 2:red, 3:greed, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
# plt.show()
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(4 * n, 8), dpi=50)
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Train-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Train-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Test-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Test-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
plt.tight_layout()
plt.show()
def plot_picture(x):
plt.imshow(np.array(x), cmap=cmap, norm=norm)
plt.show()
def Defensive_Copy(A):
n = len(A)
k = len(A[0])
L = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
L[i, j] = 0 + A[i][j]
return L.tolist()
def Create(task, task_id=0):
n = len(task['train'])
Input = [Defensive_Copy(task['train'][i]['input']) for i in range(n)]
Output = [Defensive_Copy(task['train'][i]['output']) for i in range(n)]
Input.append(Defensive_Copy(task['test'][task_id]['input']))
return Input, Output
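# Recolor searches for a purely periodic recoloring rule: colors are remapped based on the
# cell's position modulo (Q1, Q2), with the phase measured from one of the four corners
# (v = 0..3). A rule is accepted only if it reproduces every training output exactly; the
# learned rule is then applied to the test input, and -1 is returned when no rule fits.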
def Recolor(task):
Input = task[0]
Output = task[1]
Test_Picture = Input[-1]
Input = Input[:-1]
N = len(Input)
for x, y in zip(Input, Output):
if len(x) != len(y) or len(x[0]) != len(y[0]):
return -1
Best_Dict = -1
Best_Q1 = -1
Best_Q2 = -1
Best_v = -1
# v ranges from 0 to 3. This gives an extra flexibility of measuring distance from any of the 4 corners
Pairs = []
for t in range(15):
for Q1 in range(1, 8):
for Q2 in range(1, 8):
if Q1 + Q2 == t:
Pairs.append((Q1, Q2))
for Q1, Q2 in Pairs:
for v in range(4):
if Best_Dict != -1:
continue
possible = True
Dict = {}
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
color2 = y[i][j]
if color1 != color2:
rule = (p1, p2, color1)
if rule not in Dict:
Dict[rule] = color2
elif Dict[rule] != color2:
possible = False
if possible:
# Let's see if we actually solve the problem
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
rule = (p1, p2, color1)
if rule in Dict:
color2 = 0 + Dict[rule]
else:
color2 = 0 + y[i][j]
if color2 != y[i][j]:
possible = False
if possible:
Best_Dict = Dict
Best_Q1 = Q1
Best_Q2 = Q2
Best_v = v
if Best_Dict == -1:
return -1  # meaning that we didn't find a rule that works for the training cases
# Otherwise there is a rule: so let's use it:
n = len(Test_Picture)
k = len(Test_Picture[0])
answer = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
if Best_v == 0 or Best_v == 2:
p1 = i % Best_Q1
else:
p1 = (n - 1 - i) % Best_Q1
if Best_v == 0 or Best_v == 3:
p2 = j % Best_Q2
else:
p2 = (k - 1 - j) % Best_Q2
color1 = Test_Picture[i][j]
rule = (p1, p2, color1)
if (p1, p2, color1) in Best_Dict:
answer[i][j] = 0 + Best_Dict[rule]
else:
answer[i][j] = 0 + color1
return answer.tolist()
sample_sub2 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub2.head()
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
example_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# display(example_grid)
print(flattener(example_grid))
Solved = []
Problems = sample_sub2['output_id'].values
Proposed_Answers = []
test_paths_my = {task.stem: json.load(task.open()) for task in test_path.iterdir()}
test_task_ids = np.sort(list(test_paths_my.keys()))
print(Problems, len(Problems))
task_number_my = dict(zip(test_task_ids, np.arange(100)))
for i in range(len(Problems)):
output_id = Problems[i]
task_id = output_id.split('_')[0]
pair_id = int(output_id.split('_')[1])
f = str(test_path / str(task_id + '.json'))
with open(f, 'r') as read_file:
task = json.load(read_file)
n = len(task['train'])
Input = [Defensive_Copy(task['train'][j]['input']) for j in range(n)]
Output = [Defensive_Copy(task['train'][j]['output']) for j in range(n)]
Input.append(Defensive_Copy(task['test'][pair_id]['input']))
solution = Recolor([Input, Output])
pred = ''
if solution != -1:
Solved.append(i)
pred1 = flattener(solution)
pred = pred + pred1 + ' '
if pred == '':
pred = flattener(example_grid)
Proposed_Answers.append(pred)
sample_sub2['output'] = Proposed_Answers
sample_sub1 = sample_sub1.reset_index()
sample_sub1 = sample_sub1.sort_values(by="output_id")
sample_sub2 = sample_sub2.sort_values(by="output_id")
out1 = sample_sub1["output"].astype(str).values
out2 = sample_sub2["output"].astype(str).values
merge_output = []
for o1, o2 in zip(out1, out2):
o = o1.strip().split(" ")[:1] + o2.strip().split(" ")[:2]
o = " ".join(o[:3])
merge_output.append(o)
sample_sub1["output"] = merge_output
sample_sub1["output"] = sample_sub1["output"].astype(str)
# test_paths_my = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
# test_task_ids = np.sort(list(test_paths_my.keys()))
# task_number_my = dict(zip(test_task_ids, np.arange(100)))
submission = sample_sub1.copy()
submission.to_csv("public_submission.csv", index=False)
#generate_public_submission()
import numpy as np
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
import time
from collections import defaultdict
import os
import json
import random
import copy
import networkx as nx
from pathlib import Path
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from itertools import product
import pandas as pd
import multiprocessing
import subprocess
# from moviepy.editor import ImageSequenceClip
# from moviepy.editor import clips_array, CompositeVideoClip
# from moviepy.video.io.html_tools import html_embed, HTML2
# def display_vid(vid, verbose=False, **html_kw):
# """
# Display a moviepy video clip, useful for removing loadbars
# """
# rd_kwargs = {
# 'fps': 10, 'verbose': verbose
# }
# if not verbose:
# rd_kwargs['logger'] = None
# return HTML2(html_embed(vid, filetype=None, maxduration=60,
# center=True, rd_kwargs=rd_kwargs, **html_kw))
data_path = Path('../input/abstraction-and-reasoning-challenge/')
# data_path = Path('.') # Artyom: it's better use symlinks locally
cmap_lookup = [
'#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'
]
cmap_lookup = [np.array([int(x[1:3], 16), int(x[3:5], 16), int(x[5:], 16)]) for x in cmap_lookup]
def cmap(x):
"""
Translate a task matrix to a color coded version
arguments
x : a h x w task matrix
returns
a h x w x 3 matrix with colors instead of numbers
"""
y = np.zeros((*x.shape, 3))
y[x < 0, :] = np.array([112, 128, 144])
y[x > 9, :] = np.array([255, 248, 220])
for i, c in enumerate(cmap_lookup):
y[x == i, :] = c
return y
def draw_one(x, k=20):
"""
Create a PIL image from a task matrix, the task will be
drawn using the default color coding with grid lines
arguments
x : a task matrix
k = 20 : an up scaling factor
returns
a PIL image
"""
img = Image.fromarray(cmap(x).astype(np.uint8)).resize((x.shape[1] * k, x.shape[0] * k), Image.NEAREST)
draw = ImageDraw.Draw(img)
for i in range(x.shape[0]):
draw.line((0, i * k, img.width, i * k), fill=(80, 80, 80), width=1)
for j in range(x.shape[1]):
draw.line((j * k, 0, j * k, img.height), fill=(80, 80, 80), width=1)
return img
def vcat_imgs(imgs, border=10):
"""
Concatenate images horizontally (side by side)
arguments:
imgs : an array of PIL images
border = 10 : the size of space between images
returns:
a PIL image
"""
h = max(img.height for img in imgs)
w = sum(img.width for img in imgs)
res_img = Image.new('RGB', (w + border * (len(imgs) - 1), h), color=(255, 255, 255))
offset = 0
for img in imgs:
res_img.paste(img, (offset, 0))
offset += img.width + border
return res_img
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(n * 4, 8))
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
def go(ax, title, x):
ax.imshow(draw_one(x), interpolation='nearest')
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for i, t in enumerate(task["train"]):
go(axs[0][fig_num], f'Train-{i} in', t["input"])
go(axs[1][fig_num], f'Train-{i} out', t["output"])
fig_num += 1
for i, t in enumerate(task["test"]):
go(axs[0][fig_num], f'Test-{i} in', t["input"])
try:
go(axs[1][fig_num], f'Test-{i} out', t["output"])
except:
go(axs[1][fig_num], f'Test-{i} out', np.zeros_like(t["input"]))
fig_num += 1
plt.tight_layout()
plt.show()
def real_trace_param_automata(input, params, n_iter, n_hidden):
"""
Execute the parametrized automata and return the resulting output grid
arguments:
input : the input grid
params : tuple (global_rules, ca_rules, split_rule, merge_rule) describing the program to run
n_iter : number of automata iterations to perform
n_hidden : number of hidden grids; if set to 0, the hidden grid is set to None
returns:
the output grid obtained after applying the split rule, the global rules, the CA rules, and the merge rule
"""
# hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
#
# global_rules, ca_rules = params
#
# trace = [(input, hidden)]
#
# for rule in global_rules:
#
# output, hidden = apply_rule(input, hidden, rule)
# trace.append((output, hidden))
# input = output
#
# its = range(n_iter)
#
# for i_it in its:
# output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
# trace.append((output, hidden))
#
# if (input.shape == output.shape) and (output == input).all():
# break
# input = output
hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
global_rules, ca_rules, split_rule, merge_rule = params
grids = apply_split_rule(input, hidden, split_rule)
#print(grids[0][0])
for rule in global_rules:
for i, (inp, hid) in enumerate(grids):
if rule['macro_type'] == 'global_rule':
if rule['apply_to'] == 'all' or \
(rule['apply_to'] == 'index' and i == rule['apply_to_index']%len(grids) or
(rule['apply_to'] == 'last' and i == len(grids) - 1)):
grids[i] = apply_rule(inp, hid, rule)
elif rule['macro_type'] == 'global_interaction_rule':
grids = apply_interaction_rule(grids, rule)
#print(grids[0][0])
#1/0
for i, (input, hidden) in enumerate(grids):
for _ in range(n_iter):
output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
if np.array_equal(input, output):
break
input = output
grids[i] = (output, hidden)
output = apply_merge_rule(grids, merge_rule, split_rule)
return output
def apply_interaction_rule(grids, rule):
if rule['type'] == 'align_pattern':
# index_from = rule['index_from'] % len(grids)
# index_to = rule['index_to'] % len(grids)
# allow_rotation = rule['allow_rotation']
if len(grids) > 5:
return grids
for index_from in range(len(grids)):
for index_to in range(index_from+1, len(grids)):
input_i = grids[index_from][0]
input_j = grids[index_to][0]
# print(np.max(input_i>0, axis=1))
# print(np.max(input_i>0, axis=1).shape)
# print(np.arange(input_i.shape[0]).shape)
#1/0
i_nonzero_rows = np.arange(input_i.shape[0])[np.max(input_i>0, axis=1)]
i_nonzero_columns = np.arange(input_i.shape[1])[np.max(input_i>0, axis=0)]
j_nonzero_rows = np.arange(input_j.shape[0])[np.max(input_j>0, axis=1)]
j_nonzero_columns = np.arange(input_j.shape[1])[np.max(input_j>0, axis=0)]
if i_nonzero_rows.shape[0] == 0 or i_nonzero_columns.shape[0] == 0 or \
j_nonzero_rows.shape[0] == 0 or j_nonzero_columns.shape[0] == 0:
continue
i_minrow = np.min(i_nonzero_rows)
i_mincol = np.min(i_nonzero_columns)
i_maxrow = np.max(i_nonzero_rows) + 1
i_maxcol = np.max(i_nonzero_columns) + 1
j_minrow = np.min(j_nonzero_rows)
j_mincol = np.min(j_nonzero_columns)
j_maxrow = np.max(j_nonzero_rows) + 1
j_maxcol = np.max(j_nonzero_columns) + 1
figure_to_align = input_i[i_minrow:i_maxrow, i_mincol:i_maxcol]
figure_target = input_j[j_minrow:j_maxrow, j_mincol:j_maxcol]
best_fit = 0
best_i_fit, best_j_fit = -1, -1
#print(figure_to_align)
#print(figure_target)
if figure_to_align.shape[0] < figure_target.shape[0] or figure_to_align.shape[1] < figure_target.shape[1]:
continue
#1/0
else:
for i_start in range((figure_to_align.shape[0] - figure_target.shape[0])+1):
for j_start in range((figure_to_align.shape[1] - figure_target.shape[1])+1):
fig_1 = figure_to_align[i_start:(i_start + figure_target.shape[0]), j_start:(j_start + figure_target.shape[1])]
if np.logical_and(np.logical_and(figure_target > 0, figure_target!=rule['allow_color']), figure_target != fig_1).any():
continue
fit = np.sum(figure_target==fig_1)
if fit > best_fit:
best_i_fit, best_j_fit = i_start, j_start
best_fit = fit
if best_fit == 0:
continue
imin = j_minrow-best_i_fit
imax = j_minrow-best_i_fit + figure_to_align.shape[0]
jmin = j_mincol - best_j_fit
jmax = j_mincol - best_j_fit + figure_to_align.shape[1]
begin_i = max(imin, 0)
begin_j = max(jmin, 0)
end_i = min(imax, input_j.shape[0])
end_j = min(jmax, input_j.shape[1])
i_fig_begin = (begin_i-imin)
i_fig_end = figure_to_align.shape[0]-(imax-end_i)
j_fig_begin = (begin_j-jmin)
j_fig_end = figure_to_align.shape[1]-(jmax-end_j)
if rule['fill_with_color'] == 0:
input_j[begin_i:end_i, begin_j:end_j] = figure_to_align[i_fig_begin:i_fig_end, j_fig_begin:j_fig_end]
else:
for i, j in product(range(end_i-begin_i + 1), range(end_j-begin_j + 1)):
if input_j[begin_i + i, begin_j + j] == 0:
input_j[begin_i + i, begin_j + j] = rule['fill_with_color'] * (figure_to_align[i_fig_begin + i, j_fig_begin + j])
return grids
def trace_param_automata(input, params, n_iter, n_hidden):
# expected = real_trace_param_automata(input, params, n_iter, n_hidden)
#
# testcase = {'input': input, 'params': params}
# print(str(testcase).replace('\'', '"').replace('array(', '').replace(')', ''))
output = cpp_trace_param_automata(input, params, n_iter)
# if not np.array_equal(expected, output):
# print('cpp result is wrong')
# print('input:')
# print(input)
# print('expected:')
# print(expected)
# print('got:')
# print(output)
#
# diff = [[str(g) if e != g else '-' for e, g in zip(exp_row, got_row)]
# for exp_row, got_row in zip(expected, output)]
# diff_lines = [' '.join(line) for line in diff]
# diff_str = '[[' + ']\n ['.join(diff_lines)
#
# print('diff:')
# print(diff_str)
# print('rules')
# print(params)
#
# assert False
return [[output]]
# def vis_automata_trace(states, loadbar=False, prefix_image=None):
# """
# Create a video from an array of automata states
#
# arguments:
# states : array of automata steps, returned by `trace_automata()`
# loadbar = True: weather display loadbars
# prefix_image = None: image to add to the beginning of each frame
# returns
# a moviepy ImageSequenceClip
# """
# frames = []
# if loadbar:
# states = tqdm(states, desc='Frame')
# for i, (canvas, hidden) in enumerate(states):
#
# frame = []
# if prefix_image is not None:
# frame.append(prefix_image)
# frame.append(draw_one(canvas))
# frames.append(vcat_imgs(frame))
#
# return ImageSequenceClip(list(map(np.array, frames)), fps=10)
# def vis_automata_paramed_task(tasks, parameters, n_iter, n_hidden, vis_only_ix=None):
# """
# Visualize the automata steps during the task solution
# arguments:
# tasks : the task to be solved by the automata
# step_fn : automata transition function as passed to `trace_automata()`
# n_iter : number of iterations to perform
# n_hidden : number of hidden girds
# """
#
# n_vis = 0
#
# def go(task, n_vis, test=False):
#
# if vis_only_ix is not None and vis_only_ix != n_vis:
# return
# trace = trace_param_automata(task['input'], parameters, n_iter, n_hidden)
# if not test:
# vid = vis_automata_trace(trace, prefix_image=draw_one(task['output']))
# else:
# vid = vis_automata_trace(trace, prefix_image=draw_one(np.zeros_like(task['input'])))
#
# # display(display_vid(vid))
#
# for task in (tasks['train']):
# n_vis += 1
# go(task, n_vis)
#
# for task in (tasks['test']):
# n_vis += 1
# go(task, n_vis, True)
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
evaluation_tasks = sorted(os.listdir(evaluation_path))
test_tasks = sorted(os.listdir(test_path))
def load_data(p, phase=None):
"""
Load task data
"""
if phase in {'training', 'test', 'evaluation'}:
p = data_path / phase / p
task = json.loads(Path(p).read_text())
dict_vals_to_np = lambda x: {k: np.array(v) for k, v in x.items()}
assert set(task) == {'test', 'train'}
res = dict(test=[], train=[])
for t in task['train']:
assert set(t) == {'input', 'output'}
res['train'].append(dict_vals_to_np(t))
for t in task['test']:
if phase == 'test':
assert set(t) == {'input'}
else:
assert set(t) == {'input', 'output'}
res['test'].append(dict_vals_to_np(t))
return res
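# nbh(x, i, j): dict mapping each in-bounds offset (di, dj) of the 8-connected (Moore)
# neighbourhood to the value of that neighbouring cell, excluding the cell itself.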
nbh = lambda x, i, j: {
(ip, jp) : x[i+ip, j+jp]
for ip, jp in product([1, -1, 0], repeat=2)
if 0 <= i+ip < x.shape[0] and 0 <= j+jp < x.shape[1] and (not (ip==0 and jp==0))
}
def get_random_split_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['nothing', 'color_figures', 'figures', 'macro_multiply'])
if rule['type'] in ['color_figures', 'figures']:
rule['sort'] = random.choice(['biggest', 'smallest'])
if rule['type'] == 'macro_multiply':
rule['k1'] = np.random.randint(config['mink1'], config['maxk1']+1)
rule['k2'] = np.random.randint(config['mink2'], config['maxk2']+1)
return rule
def get_random_merge_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['cellwise_or', 'output_first', 'output_last'])
return rule
def apply_split_rule(input, hidden, split_rule):
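# Split modes: 'nothing' keeps a single grid, 'macro_multiply' makes k1*k2 copies of the
# input, and 'figures'/'color_figures' split the grid into connected components (figures),
# ordered by size according to split_rule['sort'].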
if split_rule['type'] == 'nothing':
return [(input, hidden)]
if split_rule['type'] == 'macro_multiply':
ks = split_rule['k1'] * split_rule['k2']
grids = [(np.copy(input), np.copy(hidden)) for _ in range(ks)]
return grids
#split_rule['type'] = 'figures'
dif_c_edge = split_rule['type'] == 'figures'
communities = get_connectivity_info(input, ignore_black=True, edge_for_difcolors=dif_c_edge)
if len(communities) > 0:
if split_rule['sort'] == 'biggest':
communities = communities[::-1]
grids = [(np.zeros_like(input), np.zeros_like(hidden)) for _ in range(len(communities))]
for i in range(len(communities)):
for point in communities[i]:
grids[i][0][point] = input[point]
else:
grids = [(input, hidden)]
return grids
def apply_merge_rule(grids, merge_rule, split_rule):
if split_rule['type'] == 'macro_multiply':
shape_base = grids[0][0].shape
shapes = [arr[0].shape for arr in grids]
if not np.array([shape_base == sh for sh in shapes]).all():
return np.zeros((1, 1), dtype=int)
ks_1 = split_rule['k1']
ks_2 = split_rule['k2']
output = np.zeros((shape_base[0] * ks_1, shape_base[1] * ks_2), dtype=np.int8)
for k1 in range(ks_1):
for k2 in range(ks_2):
output[(k1*shape_base[0]):((k1+1) * shape_base[0]), (k2*shape_base[1]):((k2+1) * shape_base[1])] = grids[k1*ks_2 + k2][0]
return output
if merge_rule['type'] == 'cellwise_or':
output = np.zeros_like(grids[0][0])
for i in np.arange(len(grids))[::-1]:
if grids[i][0].shape == output.shape:
output[grids[i][0]>0] = grids[i][0][grids[i][0]>0]
return output
elif merge_rule['type'] == 'output_first':
output = grids[0][0]
elif merge_rule['type'] == 'output_last':
output = grids[-1][0]
return output
def get_random_ca_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
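# Rule-type sampling is biased toward types that already appear in the best candidates:
# probs = temp * (empirical type frequency) + (1 - temp) * (uniform distribution).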
types_possible = \
[
'copy_color_by_direction',
'direct_check',
'indirect_check',
'nbh_check',
'corner_check',
'color_distribution',
]
ca_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
ca_rules += [c['type'] for c in ca]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(ca_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
def get_random_ignore_colors():
if config['possible_ignore_colors'].shape[0] > 0:
possible_colors = config['possible_ignore_colors']
return possible_colors[np.random.randint(2, size=possible_colors.shape[0]) == 1]
else:
return []
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return get_random_all_colors()
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'ca_rule'
rule['ignore_colors'] = list(config['ignore_colors'])
if np.random.rand() < 0.5 and config['possible_ignore_colors'].shape[0]:
rule['ignore_colors'] += [random.choice(config['possible_ignore_colors'])]
if random_type == 'copy_color_by_direction':
rule['direction'] = random.choice(['everywhere'])
rule['copy_color'] = [get_random_out_color()]
rule['look_back_color'] = rule['copy_color'][0]
elif random_type == 'corner_check':
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'direct_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'indirect_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'nbh_check':
rule['nbh_check_sum'] = np.random.randint(8)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'color_distribution':
rule['direction'] = random.choice(
['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['check_in_empty'] = np.random.randint(2)
rule['color_out'] = get_random_out_color()
if rule['check_in_empty'] == 0:
rule['color_in'] = rule['color_out']
else:
rule['color_in'] = get_random_all_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['color_out']]))
return rule
def get_random_global_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'distribute_colors',
'unity',
'color_for_inners',
'map_color',
'draw_lines',
'draw_line_to',
'gravity',
'make_holes',
'distribute_from_border',
'align_pattern',
'rotate',
'flip'
]
if config['allow_make_smaller']:
types_possible += \
[
'crop_empty',
'crop_figure',
'split_by_H',
'split_by_W',
'reduce'
]
# if config['allow_make_bigger']:
# types_possible += \
# [
# 'macro_multiply_by',
# 'micro_multiply_by',
# 'macro_multiply_k',
# ]
gl_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
gl_rules += [c['type'] for c in gl]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(gl_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'global_rule'
rule['apply_to'] = random.choice(['all', 'index'])
if np.random.rand()<0.2:
rule['apply_to'] = 'last'
if rule['apply_to'] == 'index':
rule['apply_to_index'] = np.random.choice(10)
if random_type == 'macro_multiply_k':
rule['k'] = (np.random.randint(1, 4), np.random.randint(1, 4))
elif random_type == 'flip':
rule['how'] = random.choice(['ver', 'hor'])
elif random_type == 'rotate':
rule['rotations_count'] = np.random.randint(1, 4)
elif random_type == 'micro_multiply_by':
rule['how_many'] = random.choice([2, 3, 4, 5, 'size'])
elif random_type == 'macro_multiply_by':
rule['how_many'] = random.choice(['both', 'hor', 'ver'])
rule['rotates'] = [np.random.randint(1) for _ in range(4)]
rule['flips'] = [random.choice(['hor', 'ver', 'horver', 'no']) for _ in range(4)]
elif random_type == 'distribute_from_border':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
elif random_type == 'draw_lines':
rule['direction'] = random.choice(['everywhere', 'horizontal', 'vertical', 'horver', 'diagonal'])
# 'top', 'bottom', 'left', 'right',
# 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['not_stop_by_color'] = 0 # get_random_all_color()
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'reduce':
rule['skip_color'] = get_random_all_color()
elif random_type == 'draw_line_to':
#rule['direction_type'] = random.choice(['border'])
rule['direction_color'] = get_random_all_color()
rule['not_stop_by_color'] = 0
if np.random.rand() < 0.5:
rule['not_stop_by_color_and_skip'] = get_random_all_color()
else:
rule['not_stop_by_color_and_skip'] = 0
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'distribute_colors':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
rule['horizontally'] = np.random.randint(2)
rule['vertically'] = np.random.randint(2)
rule['intersect'] = get_random_out_color()
elif random_type == 'color_for_inners':
rule['color_out'] = get_random_out_color()
elif random_type == 'crop_figure':
rule['mode'] = random.choice(['smallest', 'biggest'])
rule['dif_c_edge'] = random.choice([True, False])
elif random_type == 'unity':
rule['mode'] = random.choice(['diagonal', 'horizontal', 'vertical', 'horver'])
# rule['inner'] = np.random.choice(2)
rule['ignore_colors'] = [0]
if np.random.rand() < 0.5:
rule['ignore_colors'] += [get_random_all_color()]
rule['with_color'] = random.choice([get_random_out_color(), 0])
elif random_type == 'map_color':
rule['color_in'] = get_random_all_color()
rule['color_out'] = get_random_out_color()
elif random_type == 'gravity':
rule['gravity_type'] = random.choice(['figures', 'cells'])
rule['steps_limit'] = np.random.choice(2)
rule['look_at_what_to_move'] = np.random.choice(2)
if rule['look_at_what_to_move'] == 1:
rule['color_what'] = get_random_out_color()
rule['direction_type'] = random.choice(['border', 'color'])
if rule['direction_type'] == 'border':
rule['direction_border'] = random.choice(['top', 'bottom', 'left', 'right'])
else:
rule['direction_color'] = get_random_color()
elif random_type == 'split_by_H' or random_type == 'split_by_W':
rule['merge_rule'] = random.choice(['and', 'equal', 'or', 'xor'])
elif random_type == 'align_pattern':
rule['macro_type'] = 'global_interaction_rule'
# rule['allow_rotation'] = False
rule['allow_color'] = get_random_all_color()
rule['fill_with_color'] = 0 #random.choice([0, get_random_all_color()])
return rule
def get_task_metadata(task):
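# Scans the training pairs to collect: the full color palette, the allowed output/input
# size-scaling range (mink1/2, maxk1/2), whether outputs may be smaller or bigger than
# the inputs, which colors ever change, and the set of possible output colors.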
colors = []
shapes_input = [[], []]
shapes_output = [[], []]
for part in ['train']:
for uni_task in task[part]:
inp = uni_task['input']
colors += list(np.unique(inp))
out = uni_task['output']
colors += list(np.unique(out))
shapes_input[0].append(inp.shape[0])
shapes_input[1].append(inp.shape[1])
shapes_output[0].append(out.shape[0])
shapes_output[1].append(out.shape[1])
all_colors = np.unique(colors)
min_k1 = int(np.floor(np.min(np.array(shapes_output[0])/np.array(shapes_input[0]))))
min_k2 = int(np.floor(np.min(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_k1 = int(np.ceil(np.max(np.array(shapes_output[0])/np.array(shapes_input[0]))))
max_k2 = int(np.ceil(np.max(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_shape = np.max([shapes_input])
config = {}
config['mink1'] = max(1, min(min(min_k1, 30//max_shape), 3))
config['mink2'] = max(1, min(min(min_k2, 30//max_shape), 3))
config['maxk1'] = max(1, min(min(max_k1, 30//max_shape), 3))
config['maxk2'] = max(1, min(min(max_k2, 30//max_shape), 3))
config['allow_make_smaller'] = False
config['allow_make_bigger'] = False
for uni_task in task['train']:
if uni_task['input'].shape[0] > uni_task['output'].shape[0] or \
uni_task['input'].shape[1] > uni_task['output'].shape[1]:
config['allow_make_smaller'] = True
if uni_task['input'].shape[0] < uni_task['output'].shape[0] or \
uni_task['input'].shape[1] < uni_task['output'].shape[1]:
config['allow_make_bigger'] = True
colors_out = []
changed_colors = []
inp_colors = []
for uni_task in task['train']:
inp = uni_task['input']
out = uni_task['output']
for i in range(min(inp.shape[0], out.shape[0])):
for j in range(min(inp.shape[1], out.shape[1])):
inp_colors.append(inp[i, j])
if out[i, j] != inp[i, j]:
colors_out.append(out[i, j])
changed_colors.append(inp[i, j])
inp_colors = np.unique(inp_colors)
changed_colors = np.unique(changed_colors)
config['ignore_colors'] = [c for c in inp_colors if not c in changed_colors]
config['possible_ignore_colors'] = np.array([c for c in all_colors if not c in config['ignore_colors']])
if len(colors_out) == 0:
colors_out = [0]
config['possible_colors_out'] = np.unique(colors_out)
return all_colors, config
def compute_parametrized_automata(input, hidden_i, rules):
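# One cellular-automaton sweep: each cell first keeps its own non-zero color, then the rules
# are tried in order (copy-by-direction, corner/nbh/direct/indirect checks, color distribution)
# and the first rule whose condition fires overwrites the output color for that cell; rules
# whose ignore_colors contain the cell's color are skipped.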
output = np.zeros_like(input, dtype=int)
hidden_o = np.copy(hidden_i)
for i, j in product(range(input.shape[0]), range(input.shape[1])):
i_c = input[i, j]
i_nbh = nbh(input, i, j)
# cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
i_indirect_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (-1, -1), (-1, 1), (1, -1)}}
is_top_b, is_bottom_b = i == 0, i == input.shape[0] - 1
is_left_b, is_right_b = j == 0, j == input.shape[1] - 1
is_b = is_top_b or is_bottom_b or is_left_b or is_right_b
if i_c > 0:
output[i, j] = i_c
for rule in rules:
if i_c in rule['ignore_colors']:
continue
if rule['type'] == 'copy_color_by_direction':
if rule['direction'] == 'bottom' or rule['direction'] == 'everywhere':
if not is_top_b and input[i - 1, j] in rule['copy_color'] and \
(i == 1 or input[i - 2, j] == rule['look_back_color']):
output[i, j] = input[i - 1, j]
break
if rule['direction'] == 'top' or rule['direction'] == 'everywhere':
if not is_bottom_b and input[i + 1, j] in rule['copy_color'] and \
(i == input.shape[0] - 2 or input[i + 2, j] == rule['look_back_color']):
output[i, j] = input[i + 1, j]
break
if rule['direction'] == 'right' or rule['direction'] == 'everywhere':
if not is_left_b and input[i, j - 1] in rule['copy_color'] and \
(j == 1 or input[i, j - 2] == rule['look_back_color']):
output[i, j] = input[i, j - 1]
break
if rule['direction'] == 'left' or rule['direction'] == 'everywhere':
if not is_right_b and input[i, j + 1] in rule['copy_color'] and \
(j == input.shape[1] - 2 or input[i, j + 2] == rule['look_back_color']):
output[i, j] = input[i, j + 1]
break
elif rule['type'] == 'corner_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = 3
out_nbh = rule['nbh_check_out']
i_uplecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, -1), (-1, 0), (0, -1)}}
i_upricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, 1), (-1, 0), (0, 1)}}
i_dolecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, -1), (1, 0), (0, -1)}}
i_doricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (1, 0), (0, 1)}}
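                # Each corner neighbourhood is the 3-cell L-shape around one corner of the cell;
                # the rule fires when any of the four is uniformly filled with a trigger color.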
if sum(1 for v in i_nbh.values() if v in color_nbh) < 3:
continue
did_something = False
for corner_idx in [i_uplecorner_nbh, i_upricorner_nbh, i_dolecorner_nbh, i_doricorner_nbh]:
for color in color_nbh:
if sum(1 for v in corner_idx.values() if v == color) == sum_nbh:
output[i, j] = out_nbh
did_something = True
break
if did_something:
break
if did_something:
break
elif rule['type'] == 'nbh_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'direct_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_direct_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'indirect_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_indirect_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'color_distribution':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
not_border_conditions = \
[
not is_top_b,
not is_bottom_b,
not is_left_b,
not is_right_b,
not is_top_b and not is_left_b,
not is_bottom_b and not is_left_b,
not is_top_b and not is_right_b,
not is_bottom_b and not is_right_b
]
index_from = \
[
(i - 1, j),
(i + 1, j),
(i, j - 1),
(i, j + 1),
(i - 1, j - 1),
(i + 1, j - 1),
(i - 1, j + 1),
(i + 1, j + 1)
]
did_something = False
for i_dir, direction in enumerate(directions):
if rule['direction'] == direction:
if not_border_conditions[i_dir]:
if (rule['check_in_empty'] == 1 and input[index_from[i_dir]] > 0) or \
(rule['check_in_empty'] == 0 and input[index_from[i_dir]] == rule['color_in']):
output[i, j] = rule['color_out']
did_something = True
break
if did_something:
break
return output, hidden_o
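# A rough usage sketch (hypothetical rule; assumes `nbh` and `product` are defined above, as in
# the rest of this module), kept as a comment to avoid running on import:
#
#   grid = np.array([[0, 2, 0], [0, 0, 0], [0, 2, 0]])
#   rule = {'type': 'nbh_check', 'ignore_colors': [], 'nbh_check_colors': [2],
#           'nbh_check_sum': 0, 'nbh_check_out': 3}
#   out, _ = compute_parametrized_automata(grid, np.zeros_like(grid), [rule])
#   # cells with at least one neighbour of color 2 become color 3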
def get_connectivity_info(color: np.array, ignore_black = False, von_neumann_only = False, edge_for_difcolors = False):
    # UnionFind (disjoint-set) structure allows us to detect all connected areas in near-linear time.
class UnionFind:
def __init__(self) -> None:
self.area = np.ones(color.size)
self.parent = np.arange(color.size)
def find(self, x: int) -> int:
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, u: int, v: int) -> None:
root_u, root_v = self.find(u), self.find(v)
if root_u != root_v:
area_u, area_v = self.area[root_u], self.area[root_v]
if area_u < area_v:
root_u, root_v = root_v, root_u
self.parent[root_v] = root_u
self.area[root_u] = area_u + area_v
union_find = UnionFind()
neighbours = [[-1, 0], [0, -1], [1, 0], [0, 1]]
if not von_neumann_only:
neighbours.extend([[-1, -1], [1, -1], [1, 1], [-1, 1]])
nrows, ncols = color.shape
for i in range(nrows):
for j in range(ncols):
for s, t in neighbours:
u, v = i + s, j + t
if u >= 0 and u < nrows and v >= 0 and v < ncols and \
(color[u, v] == color[i, j] or (edge_for_difcolors and (color[u, v]>0) == (color[i, j]>0))):
union_find.union(u * ncols + v, i * ncols + j)
# for every cell: write down the area of its corresponding area
communities = defaultdict(list)
for i, j in product(range(nrows), range(ncols)):
if not ignore_black or color[i, j] > 0:
communities[union_find.find(i * ncols + j)].append((i, j))
# the result is always sorted for consistency
communities = sorted(communities.values(), key = lambda area: (len(area), area))
return communities
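# Rough usage sketch (hypothetical grid): connected regions of equal color, smallest first.
#
#   regions = get_connectivity_info(np.array([[1, 1, 0], [0, 0, 0], [0, 0, 2]]), ignore_black=True)
#   # -> [[(2, 2)], [(0, 0), (0, 1)]]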
def get_graph_communities(im, ignore_black=False):
G = nx.Graph()
I, J = im.shape
for i in range(I):
for j in range(J):
if ignore_black and im[i, j] == 0:
continue
G.add_node((i, j))
edges = []
if j >= 1:
if im[i, j] == im[i, j - 1]:
edges.append(((i, j), (i, j - 1)))
if j < J - 1:
if im[i, j] == im[i, j + 1]:
edges.append(((i, j), (i, j + 1)))
if i >= 1:
if im[i, j] == im[i - 1, j]:
edges.append(((i, j), (i - 1, j)))
if j >= 1:
if im[i, j] == im[i - 1, j - 1]:
edges.append(((i, j), (i - 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i - 1, j + 1]:
edges.append(((i, j), (i - 1, j + 1)))
if i < I - 1:
if im[i, j] == im[i + 1, j]:
edges.append(((i, j), (i + 1, j)))
if j >= 1:
if im[i, j] == im[i + 1, j - 1]:
edges.append(((i, j), (i + 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i + 1, j + 1]:
edges.append(((i, j), (i + 1, j + 1)))
G.add_edges_from(edges)
communities = list(nx.community.k_clique_communities(G, 2))
communities = [list(com) for com in communities]
for i in range(I):
for j in range(J):
i_nbh = nbh(im, i, j)
if sum(1 for v in i_nbh.values() if v == im[i, j]) == 0:
communities.append([(i, j)])
return communities
def apply_rule(input, hidden_i, rule):
output = np.zeros_like(input, dtype=int)
# print(type(input))
# print(input.shape)
hidden = np.zeros_like(input)
output[:, :] = input[:, :]
if rule['type'] == 'macro_multiply_k':
output = np.tile(output, rule['k'])
elif rule['type'] == 'flip':
if rule['how'] == 'ver':
output = output[::-1, :]
elif rule['how'] == 'hor':
output = output[:, ::-1]
elif rule['type'] == 'reduce':
skip_row = np.zeros(input.shape[0])
for i in range(1, input.shape[0]):
skip_row[i] = (input[i] == input[i-1]).all() or (input[i] == rule['skip_color']).all()
if (input[0] == rule['skip_color']).all():
skip_row[0] = 1
if np.sum(skip_row==0)>0:
output = input[skip_row == 0]
skip_column = np.zeros(input.shape[1])
for i in range(1, input.shape[1]):
skip_column[i] = (input[:, i] == input[:, i-1]).all() or (input[:, i] == rule['skip_color']).all()
if (input[:, 0] == rule['skip_color']).all():
skip_column[0] = 1
if np.sum(skip_column==0)>0:
output = output[:, skip_column == 0]
elif rule['type'] == 'rotate':
output = np.rot90(output, rule['rotations_count'])
elif rule['type'] == 'micro_multiply_by':
if rule['how_many'] == 'size':
k = output.shape[0]
else:
k = rule['how_many']
output = np.repeat(output, k, axis=0)
output = np.repeat(output, k, axis=1)
elif rule['type'] == 'macro_multiply_by':
if rule['how_many'] == 'both':
k = (2, 2)
elif rule['how_many'] == 'hor':
k = (1, 2)
elif rule['how_many'] == 'ver':
k = (2, 1)
output = np.tile(output, k)
if input.shape[0] == input.shape[1]:
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * input.shape[0]: (i + 1) * input.shape[0],
j * input.shape[1]: (j + 1) * input.shape[1]]
sub_rotated = np.rot90(sub, rule['rotates'][i * 2 + j])
output[i * input.shape[0]: (i + 1) * input.shape[0],
j * input.shape[1]: (j + 1) * input.shape[1]] = sub_rotated
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * input.shape[0]: (i + 1) * input.shape[0], j * input.shape[1]: (j + 1) * input.shape[1]]
if 'ver' in rule['flips'][i * 2 + j]:
sub = sub[::-1, :]
if 'hor' in rule['flips'][i * 2 + j]:
sub = sub[:, ::-1]
output[i * input.shape[0]: (i + 1) * input.shape[0], j * input.shape[1]: (j + 1) * input.shape[1]] = sub
elif rule['type'] == 'distribute_from_border':
hidden = np.zeros_like(input)
for i in range(1, input.shape[0] - 1):
if output[i, 0] in rule['colors']:
if not output[i, input.shape[1] - 1] in rule['colors'] or output[i, input.shape[1] - 1] == output[i, 0]:
output[i] = output[i, 0]
for j in range(1, input.shape[1] - 1):
if output[0, j] in rule['colors']:
if not output[input.shape[0] - 1, j] in rule['colors'] or output[input.shape[0] - 1, j] == output[0, j]:
output[:, j] = output[0, j]
elif rule['type'] == 'color_for_inners':
hidden = np.zeros_like(input)
changed = 1
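        # Iteratively flood-fill the `hidden` mask with 1s from the border through empty cells;
        # empty cells that are never reached are enclosed interiors and receive rule['color_out'] below.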
while changed == 1:
changed = 0
for i, j in product(range(input.shape[0]), range(input.shape[1])):
i_c = input[i, j]
if i_c > 0 or hidden[i, j] == 1:
continue
if i == 0 or i == input.shape[0] - 1 or j == 0 or j == input.shape[1] - 1:
hidden[i, j] = 1
changed = 1
continue
i_nbh = nbh(hidden, i, j)
                # cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
if sum(1 for v in i_direct_nbh.values() if v == 1) > 0:
hidden[i, j] = 1
changed = 1
output[((hidden == 0).astype(np.int) * (input == 0).astype(np.int)) == 1] = rule['color_out']
hidden = np.copy(hidden)
elif rule['type'] == 'draw_lines':
hidden = np.zeros_like(input)
if rule['direction'] == 'everywhere':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
elif rule['direction'] == 'horizontal':
directions = ['left', 'right']
elif rule['direction'] == 'vertical':
directions = ['top', 'bottom']
elif rule['direction'] == 'horver':
directions = ['top', 'bottom', 'left', 'right']
elif rule['direction'] == 'diagonal':
directions = ['top_left', 'bottom_left', 'top_right', 'bottom_right']
else:
directions = [rule['direction']]
possible_directions = ['top', 'bottom', 'left', 'right',
'top_left', 'bottom_left', 'top_right', 'bottom_right']
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
(-1, -1),
(+1, -1),
(-1, +1),
(+1, +1)
]
for i_dir, direction in enumerate(possible_directions):
if direction in directions:
idx_ch = index_change[i_dir]
for i in range(input.shape[0]):
for j in range(input.shape[1]):
if input[i, j] == rule['start_by_color']:
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < input.shape[0] and \
0 <= tmp_j < input.shape[1] and \
input[tmp_i, tmp_j] == rule['not_stop_by_color']:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'draw_line_to':
hidden = np.zeros_like(input)
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
]
for i, j in product(range(input.shape[0]), range(input.shape[1])):
if input[i, j] != rule['start_by_color']:
continue
number_0 = np.sum(output[:i] == rule['direction_color'])
number_1 = np.sum(output[(i + 1):] == rule['direction_color'])
number_2 = np.sum(output[:, :j] == rule['direction_color'])
number_3 = np.sum(output[:, (j + 1):] == rule['direction_color'])
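            # Head towards the side (above / below / left / right) that contains the most cells
            # of rule['direction_color'].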
i_dir = np.argmax([number_0, number_1, number_2, number_3])
# print([number_0, number_1, number_2, number_3])
# 1/0
idx_ch = index_change[i_dir]
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < input.shape[0] and \
0 <= tmp_j < input.shape[1] and \
(input[tmp_i, tmp_j] in [rule['not_stop_by_color'], rule['not_stop_by_color_and_skip']]):
skip_color = rule['not_stop_by_color_and_skip']
if skip_color == 0 or input[tmp_i, tmp_j] != skip_color:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'distribute_colors':
non_zero_rows = []
non_zero_columns = []
color_for_row = np.zeros(input.shape[0])
color_for_column = np.zeros(input.shape[1])
for i in range(input.shape[0]):
row = input[i]
colors, counts = np.unique(row, return_counts=True)
good_colors = np.array([c in rule['colors'] for c in colors])
if not good_colors.any():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[np.argmax(counts)]
color_for_row[i] = best_color
non_zero_rows.append(i)
for j in range(input.shape[1]):
row = input[:, j]
colors, counts = np.unique(row, return_counts=True)
good_colors = np.array([c in rule['colors'] for c in colors])
if not good_colors.any():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[np.argmax(counts)]
color_for_column[j] = best_color
non_zero_columns.append(j)
if rule['horizontally'] == 1:
for i in non_zero_rows:
output[i] = color_for_row[i]
if rule['vertically'] == 1:
for j in non_zero_columns:
output[:, j] = color_for_column[j]
for i in non_zero_rows:
for j in non_zero_columns:
if input[i, j] == 0:
output[i, j] = rule['intersect']
hidden = np.copy(hidden_i)
elif rule['type'] == 'unity':
hidden = np.copy(hidden_i)
if rule['mode'] == 'vertical':
for j in range(input.shape[1]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for i in range(input.shape[0]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[input[i, j]] + 1):i, j] = input[i, j]
else:
output[(last_color_now[input[i, j]] + 1):i, j] = rule['with_color']
last_color_now[input[i, j]] = i
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = i
elif rule['mode'] == 'horizontal':
for i in range(input.shape[0]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for j in range(input.shape[1]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[input[i, j]] + 1):j] = input[i, j]
else:
output[i, (last_color_now[input[i, j]] + 1):j] = rule['with_color']
last_color_now[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = j
elif rule['mode'] == 'horver':
for j in range(input.shape[1]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for i in range(input.shape[0]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[input[i, j]] + 1):i, j] = input[i, j]
else:
output[(last_color_now[input[i, j]] + 1):i, j] = rule['with_color']
last_color_now[input[i, j]] = i
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = i
for i in range(input.shape[0]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for j in range(input.shape[1]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[input[i, j]] + 1):j] = input[i, j]
else:
output[i, (last_color_now[input[i, j]] + 1):j] = rule['with_color']
last_color_now[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = j
elif rule['mode'] == 'diagonal':
for diag_id in range(-input.shape[0] - 1, input.shape[1] + 1):
last_color_now_x = np.zeros(10, dtype=np.int) - 1
last_color_now_y = np.zeros(10, dtype=np.int) - 1
for i, j in zip(np.arange(input.shape[0]), diag_id + np.arange(input.shape[0])):
if 0 <= i < input.shape[0] and 0 <= j < input.shape[1]:
if not input[i, j] in rule['ignore_colors'] and last_color_now_x[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[np.arange(last_color_now_x[input[i, j]] + 1, i), np.arange(
last_color_now_y[input[i, j]] + 1, j)] = input[i, j]
else:
output[np.arange(last_color_now_x[input[i, j]] + 1, i), np.arange(
last_color_now_y[input[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[input[i, j]] = i
last_color_now_y[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now_x[input[i, j]] = i
last_color_now_y[input[i, j]] = j
reflected_input = input[:, ::-1]
output = output[:, ::-1]
for diag_id in range(-reflected_input.shape[0] - 1, reflected_input.shape[1] + 1):
last_color_now_x = np.zeros(10, dtype=np.int) - 1
last_color_now_y = np.zeros(10, dtype=np.int) - 1
for i, j in zip(np.arange(reflected_input.shape[0]), diag_id + np.arange(reflected_input.shape[0])):
if 0 <= i < reflected_input.shape[0] and 0 <= j < reflected_input.shape[1]:
if not reflected_input[i, j] in rule['ignore_colors'] and last_color_now_x[
reflected_input[i, j]] >= 0:
if rule['with_color'] == 0:
output[np.arange(last_color_now_x[reflected_input[i, j]] + 1, i), np.arange(
last_color_now_y[reflected_input[i, j]] + 1, j)] = reflected_input[i, j]
else:
output[np.arange(last_color_now_x[reflected_input[i, j]] + 1, i), np.arange(
last_color_now_y[reflected_input[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[reflected_input[i, j]] = i
last_color_now_y[reflected_input[i, j]] = j
elif not reflected_input[i, j] in rule['ignore_colors']:
last_color_now_x[reflected_input[i, j]] = i
last_color_now_y[reflected_input[i, j]] = j
output = output[:, ::-1]
elif rule['type'] == 'split_by_H':
hidden = np.copy(hidden_i)
if output.shape[0] >= 2:
part1 = output[:int(np.floor(output.shape[0] / 2))]
            part2 = output[int(np.ceil(output.shape[0] / 2)):]
import os
import numpy as np
from PIL import Image
import cv2
import _pickle as cPickle
import numpngw
import open3d as o3d
import matplotlib.pyplot as plt
data_list_file = '/home/pwl/Work/Dataset/NOCS/nocs/Real/test_list_all.txt'
data_path = '/home/pwl/Work/Dataset/NOCS/nocs/Real/'
result_path = '/home/pwl/Work/Dataset/NOCS/nocs/Real/depth/'
cam_fx, cam_fy, cam_cx, cam_cy = [591.0125, 590.16775, 322.525, 244.11084]
xmap = np.array([[i for i in range(640)] for j in range(480)])
ymap = np.array([[j for i in range(640)] for j in range(480)])
def load_depth(img_path):
""" Load depth image from img_path. """
depth_path = img_path + '_depth.png'
depth = cv2.imread(depth_path, -1)
if len(depth.shape) == 3:
depth16 = depth[:, :, 1]*256 + depth[:, :, 2]
depth16 = np.where(depth16==32001, 0, depth16)
depth16 = depth16.astype(np.uint16)
elif len(depth.shape) == 2 and depth.dtype == 'uint16':
depth16 = depth
else:
assert False, '[ Error ]: Unsupported depth type.'
return depth16
def display_inlier_outlier(cloud, ind):
inlier_cloud = cloud.select_by_index(ind)
outlier_cloud = cloud.select_by_index(ind, invert=True)
outlier_cloud.paint_uniform_color([1, 0, 0])
inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud])
def display_inlier_outlier_all(cloud, ind1, ind2, ind3):
outlier_cloud_1 = cloud.select_by_index(ind1, invert=True)
inlier_cloud_1 = cloud.select_by_index(ind1)
outlier_cloud_2 = inlier_cloud_1.select_by_index(ind2, invert=True)
inlier_cloud_2 = inlier_cloud_1.select_by_index(ind2)
outlier_cloud_3 = inlier_cloud_2.select_by_index(ind3, invert=True)
inlier_cloud_3 = inlier_cloud_2.select_by_index(ind3)
outlier_cloud_1.paint_uniform_color([1, 0, 0])
outlier_cloud_2.paint_uniform_color([0, 1, 0])
outlier_cloud_3.paint_uniform_color([0, 0, 1])
inlier_cloud_3.paint_uniform_color([0.8, 0.8, 0.8])
o3d.visualization.draw_geometries([inlier_cloud_3, outlier_cloud_1, outlier_cloud_2, outlier_cloud_3])
def depth2pc(depth, choose):
depth_masked = depth.flatten()[choose][:, np.newaxis]
xmap_masked = xmap.flatten()[choose][:, np.newaxis]
ymap_masked = ymap.flatten()[choose][:, np.newaxis]
pt2 = depth_masked
pt0 = (xmap_masked - cam_cx) * pt2 / cam_fx
pt1 = (ymap_masked - cam_cy) * pt2 / cam_fy
points = np.concatenate((pt0, pt1, pt2), axis=1)
return points
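# depth2pc back-projects the chosen pixels to camera-frame 3D points with the pinhole model:
#   X = (u - cx) * Z / fx,  Y = (v - cy) * Z / fy,  Z = depth
# Depth stays in the sensor's raw units (millimetres here, judging by the thresholds used below).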
def outliner_removal(img_name):
depth = load_depth(os.path.join(data_path, img_name))
mask_raw = cv2.imread(os.path.join(data_path, img_name + '_mask.png'))[:, :, 2]
gts = cPickle.load(open(os.path.join(data_path, img_name + '_label.pkl'), 'rb'))
depth_flattened = depth.flatten()
for idx in gts['instance_ids']:
mask_i = np.equal(mask_raw, idx)
mask_depth = np.logical_and(mask_i, depth > 0)
#mask_depth = np.logical_and(mask_i, depth < 2500)
choose = mask_depth.flatten().nonzero()[0]
points_raw = depth2pc(depth, choose)
#print(points_raw.shape)
pcd_pc_raw = o3d.geometry.PointCloud()
pcd_pc_raw.points = o3d.utility.Vector3dVector(points_raw)
cl, ind_1 = pcd_pc_raw.remove_statistical_outlier(nb_neighbors=80, std_ratio=1.3)
#display_inlier_outlier(pcd_pc_raw, ind_1)
pcd_pc_inler1 = pcd_pc_raw.select_by_index(ind_1)
cl, ind_2 = pcd_pc_inler1.remove_statistical_outlier(nb_neighbors=2000, std_ratio=4.5)
#display_inlier_outlier(pcd_pc_inler1, ind_2)
pcd_pc_inler2 = pcd_pc_inler1.select_by_index(ind_2)
labels = np.array(pcd_pc_inler2.cluster_dbscan(eps=60, min_points=200))
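        # DBSCAN over the remaining points: eps is in the same (millimetre) units as the cloud,
        # min_points is the minimum cluster size; points labelled -1 are treated as noise.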
max_label = labels.max()
min_label = labels.min()
# print(f"point cloud has {max_label + 1} clusters")
colors = plt.get_cmap("tab20")(labels / (max_label if max_label > 0 else 1))
colors[labels < 0] = 0
pcd_pc_inler2.colors = o3d.utility.Vector3dVector(colors[:, :3])
#o3d.visualization.draw_geometries([pcd_pc_inler2])
biggest_cluster_idx = 0
biggest_cluster_elem_count = 0
if max_label >= 1 or min_label == -1:
final_cluster_list = []
for label_idx in range(max_label + 1):
cluster_elem_count = len(np.where(labels == label_idx)[0])
if cluster_elem_count > biggest_cluster_elem_count:
biggest_cluster_elem_count = len(np.where(labels == label_idx)[0])
biggest_cluster_idx = label_idx
final_cluster_list.append(biggest_cluster_idx)
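            # Keep the largest cluster, then also keep any other cluster whose centroid lies within
            # 140 (mm) of the largest cluster's centroid; everything else is dropped.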
ind_biggest_cluster = np.where(labels == biggest_cluster_idx)[0]
pcd_pc_biggest_cluster = pcd_pc_inler2.select_by_index(ind_biggest_cluster)
pcd_pc_biggest_cluster_center = np.mean(np.array(pcd_pc_biggest_cluster.points), axis=0)
for label_idx in range(max_label + 1):
if label_idx == biggest_cluster_idx:
continue
label_idx_ind = np.where(labels == label_idx)[0]
pcd_pc_idx_cluster = pcd_pc_inler2.select_by_index(label_idx_ind)
pcd_pc_idx_cluster_center = np.mean(np.array(pcd_pc_idx_cluster.points), axis=0)
#print(np.linalg.norm(pcd_pc_biggest_cluster_center - pcd_pc_idx_cluster_center))
if np.linalg.norm(pcd_pc_biggest_cluster_center - pcd_pc_idx_cluster_center) < 140:
final_cluster_list.append(label_idx)
ind_3 = []
for idx in final_cluster_list:
idx_ind = list(np.where(labels == idx)[0])
ind_3.extend(idx_ind)
pcd_pc_inler = pcd_pc_inler2.select_by_index(ind_3)
else:
pcd_pc_inler = pcd_pc_inler2
ind_3 = np.array(range(labels.shape[0]))
#display_inlier_outlier_all(pcd_pc_raw, ind_1, ind_2, ind_3)
#o3d.visualization.draw_geometries([pcd_pc_inler])
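        # Map the surviving points back to indices into the original flattened depth image: each
        # filtering stage indexes into the previous stage's survivors, so the kept and deleted pixel
        # indices are recovered by chaining choose -> ind_1 -> ind_2 -> ind_3.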
choose_f1 = choose[ind_1]
choose_del1 = np.delete(choose, ind_1)
choose_f2 = choose_f1[ind_2]
choose_del2 = np.delete(choose_f1, ind_2)
choose_f3 = choose_f2[ind_3]
choose_del3 = np.delete(choose_f2, ind_3)
        choose_deleted = np.concatenate([choose_del1, choose_del2, choose_del3])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import glob
import random
from collections import namedtuple
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from scipy.spatial.distance import pdist, cdist, squareform
from scipy.stats import mode
def gen_knn_data(size=(20, 20)):
"""
Make data for KNN plotter.
"""
a, b = size
X_0 = np.random.multivariate_normal([10,10], [[3, 0],[0, 3]], size=a)
X_1 = np.random.multivariate_normal([13,7], [[3, 0],[0, 3]], size=b)
X = np.vstack([X_0, X_1])
y = np.array(a*[0] + b*[1])
return X, y
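# A small usage sketch (the defaults already give a sensible demo figure):
#
#   X, y = gen_knn_data(size=(30, 30))          # two Gaussian blobs, one per class
#   plot_knn(size=(30, 30), k=5, target=(12, 9))  # classify `target` by majority vote of the 5 nearest points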
def plot_knn(size=(20, 20), k=7, target=(14, 11)):
X, y = gen_knn_data(size)
fig, ax = plt.subplots(figsize=(9, 6))
scatter = ax.scatter(*X.T, c=y, cmap='RdBu', vmin=-0.3, vmax=1.4, s=60)
ax.axis('equal')
ax.grid(c='k', alpha=0.2)
ax.legend(*scatter.legend_elements())
target = np.atleast_2d(target)
dists, = cdist(target, X)
idx = np.argsort(dists)[:k]
r = dists[idx[-1]]
nearest = X[idx]
y_pred = mode(y[idx]).mode.item()
ax.scatter(*target.T, c='k', s=120, marker='x')
ax.scatter(*nearest.T, ec='k', s=130, fc='none')
ax.set_title(f"Prediction: {y_pred}", fontsize=20)
circle = plt.Circle(np.squeeze(target), radius=r, color='lightgray', zorder=0)
ax.add_artist(circle)
plt.show()
return
def decision_regions(clf, X_val, y_val, extent, step=1):
"""
Generate the decision surface of a classifier.
"""
y_pred = clf.predict(X_val)
x_min, x_max, y_min, y_max = extent
try:
x_step, y_step = step
except TypeError:
x_step = y_step = step
xx, yy = np.meshgrid(np.arange(x_min, x_max, x_step),
np.arange(y_min, y_max, y_step))
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
return y_pred, Z.reshape(xx.shape + (-1,))
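# Hedged sketch of how the returned surface might be displayed (assumes a fitted sklearn-style
# classifier `clf` and validation arrays `X_val`, `y_val` with two feature columns):
#
#   extent = (X_val[:, 0].min(), X_val[:, 0].max(), X_val[:, 1].min(), X_val[:, 1].max())
#   y_pred, Z = decision_regions(clf, X_val, y_val, extent, step=0.1)
#   plt.imshow(np.squeeze(Z), origin='lower', extent=extent, alpha=0.4, aspect='auto')
#   plt.scatter(*X_val.T, c=y_val, edgecolor='k')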
def visualize(X_val, y_val, y_prob, cutoff=0.5, ncols=6, nrows=3, figsize=(12, 8), classes=None, shape=None):
"""
Visualize some random samples from the prediction results.
Colours: green for a good prediction, red for a wrong one. If the
probability was less than some cutoff (default 0.5), we'll mute the colour.
Args:
X_val (ndarray): The validation features, n_samples x n_features.
y_val (ndarray): The validation labels, n_samples x 1.
y_prob (ndarray): The predicted probabilities, n_samples x n_classes.
cutoff (float): the cutoff for 'uncertain'.
ncols (int): how many plots across the grid.
nrows (int): how many plots down the grid.
figsize (tuple): tuple of ints.
classes (array-like): the classes, in order. Will be inferred if None.
shape (tuple): Shape of each instance, if it needs reshaping.
"""
idx = random.sample(range(X_val.shape[0]), ncols*nrows)
sample = X_val[idx]
if classes is None:
classes = np.unique(y_val)
else:
y_val = np.asarray(classes)[y_val]
fig, axs = plt.subplots(figsize=figsize, ncols=ncols, nrows=nrows)
axs = axs.ravel()
for ax, img, actual, probs in zip(axs, sample, y_val[idx], y_prob[idx]):
pred = classes[np.argmax(probs)]
prob = np.max(probs)
if shape is not None:
img = img.reshape(shape)
ax.imshow(np.squeeze(img), cmap='gray')
ax.set_title(f"{pred} - {prob:.3f}\n[{actual}]")
ax.set_xticks([])
ax.set_yticks([])
if prob > cutoff:
c = 'limegreen' if (actual == pred) else 'red'
else:
c = 'y' if (actual == pred) else 'lightsalmon'
for spine in ax.spines.values():
spine.set_edgecolor(c)
spine.set_linewidth(4)
return
def get_file_info(fname):
"""
Get various bits of info from the full path of a file.
Example
>>> get_file_info('../data/prod/train/trilobites/0263.jpg')
File_info(base='0263.jpg', cohort='train', cname='trilobites', stem='0263', ext='.jpg')
"""
_path, base = os.path.split(fname) # base: the name of the file
_path, cname = os.path.split(_path) # cname: the class name
_path, cohort = os.path.split(_path) # cohort: train or val
stem, ext = os.path.splitext(base) # stem, ext: file stem and extension
File_info = namedtuple('File_info', ['base', 'cohort', 'cname', 'stem', 'ext'])
return File_info(base, cohort, cname, stem, ext)
def make_train_test(path, include=None, skip=None):
"""
Take a POSIX path, with wildcards, and turn the image files into arrays.
Example
>>> path = '../data/prod/*/*/*.jpg'
>>> X_train, X_val, y_train, y_val = make_train_test(path)
>>> X_train.shape, y_train.shape
((528, 4096), (528,))
"""
X_train, X_val, y_train, y_val = [], [], [], []
for fname in glob.glob(path, recursive=True):
base, cohort, cname, stem, ext = get_file_info(fname)
if skip is None:
skip = []
if cname in skip:
continue
if (include is not None) and (cname not in include):
continue
im = Image.open(fname)
        img_i = np.asarray(im, dtype=np.float)
from llg.functions import external_fields
import numpy
import pytest
@pytest.mark.repeat(10)
def test_thermal_field_null_temperature(num_sites):
assert numpy.allclose(
external_fields.thermal_field(
numpy.array([0] * num_sites),
numpy.array([1] * num_sites),
1.0,
1.0,
1.0,
1.0,
),
numpy.zeros((num_sites, 3)),
)
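# The thermal field amplitude is proportional to sqrt(temperature) in the usual stochastic LLG
# formulation, so zero temperature must give an exactly zero field regardless of the other
# parameters -- which is what the test above checks.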
@pytest.mark.repeat(10)
def test_thermal_field_zero_denominator(num_sites):
with pytest.warns(RuntimeWarning):
assert numpy.all(
numpy.isinf(
external_fields.thermal_field(
numpy.array([1] * num_sites),
numpy.array([0] * num_sites),
1.0,
1.0,
1.0,
1.0,
)
)
)
assert numpy.all(
numpy.isinf(
external_fields.thermal_field(
numpy.array([1] * num_sites),
numpy.array([1] * num_sites),
1.0,
0.0,
1.0,
1.0,
)
)
)
assert numpy.all(
numpy.isinf(
external_fields.thermal_field(
numpy.array([1] * num_sites),
numpy.array([1] * num_sites),
1.0,
1.0,
0.0,
1.0,
)
)
)
@pytest.mark.filterwarnings("ignore:api v1")
@pytest.mark.repeat(10)
def test_thermal_field_invalid_argument_for_square_root(num_sites):
with pytest.warns(RuntimeWarning):
assert numpy.all(
numpy.isnan(
external_fields.thermal_field(
numpy.array([-1] * num_sites),
                    numpy.array([1] * num_sites),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>,Morgane
Functions in this python file are related to plotting different stages of the calcium imaging analysis pipeline.
Most of the save the result in the corresponding folder of the particular step.
"""
# %% Importation
import pylab as pl
import caiman as cm
import matplotlib.pyplot as plt
import math
import numpy as np
from caiman.motion_correction import high_pass_filter_space
from caiman.source_extraction.cnmf.cnmf import load_CNMF
import Analysis_tools.metrics as metrics
import logging
import os
import datetime
import Analysis_tools.analysis_files_manipulation as fm
from caiman.source_extraction.cnmf.initialization import downscale
from Database.database_connection import database
mycursor = database.cursor()
def plot_movie_frame(decoded_file):
"""
This function creates an image for visual inspection of cropping points.
"""
m = cm.load(decoded_file)
pl.imshow(m[0, :, :], cmap='gray')
return
def plot_movie_frame_cropped(cropped_file):
"""
    This function creates an image for visual inspection of the cropped frame
"""
m = cm.load(cropped_file)
pl.imshow(m[0, :, :], cmap='gray')
return
def get_fig_gSig_filt_vals(cropped_file, gSig_filt_vals):
"""
Plot original cropped frame and several versions of spatial filtering for comparison
    :param cropped_file: path to the cropped movie file
:param gSig_filt_vals: array containing size of spatial filters that will be applied
:return: figure
"""
m = cm.load(cropped_file)
temp = cm.motion_correction.bin_median(m)
N = len(gSig_filt_vals)
fig, axes = plt.subplots(int(math.ceil((N + 1) / 2)), 2)
axes[0, 0].imshow(temp, cmap='gray')
axes[0, 0].set_title('unfiltered')
axes[0, 0].axis('off')
for i in range(0, N):
gSig_filt = gSig_filt_vals[i]
m_filt = [high_pass_filter_space(m_, (gSig_filt, gSig_filt)) for m_ in m]
temp_filt = cm.motion_correction.bin_median(m_filt)
axes.flatten()[i + 1].imshow(temp_filt, cmap='gray')
axes.flatten()[i + 1].set_title(f'gSig_filt = {gSig_filt}')
axes.flatten()[i + 1].axis('off')
if N + 1 != axes.size:
for i in range(N + 1, axes.size):
axes.flatten()[i].axis('off')
# Get output file paths
sql = "SELECT mouse,session,trial,is_rest,cropping_v,decoding_v,motion_correction_v FROM Analysis WHERE cropping_main=%s "
val = [cropped_file, ]
mycursor.execute(sql, val)
myresult = mycursor.fetchall()
data = []
for x in myresult:
data += x
file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[5]}.{data[4]}.{data[6]}"
data_dir = 'data/interim/motion_correction/'
output_meta_gSig_filt = data_dir + f'meta/figures/frame_gSig_filt/{file_name}.png'
fig.savefig(output_meta_gSig_filt)
return fig
def plot_crispness_for_parameters(selected_rows=None):
"""
    This function plots the crispness of all the selected motion-correction states, so the crispness of
    different parameter settings can be compared against the original movie.
:param selected_rows: analysis states for which crispness is required to be plotted
:return: figure that is also saved
"""
crispness_mean_original, crispness_corr_original, crispness_mean, crispness_corr = metrics.compare_crispness(
selected_rows)
total_states_number = len(selected_rows)
fig, axes = plt.subplots(1, 2)
axes[0].set_title('Summary image = Mean')
axes[0].plot(np.arange(1, total_states_number, 1), crispness_mean_original)
axes[0].plot(np.arange(1, total_states_number, 1), crispness_mean)
axes[0].legend(('Original', 'Motion_corrected'))
axes[0].set_ylabel('Crispness')
axes[1].set_title('Summary image = Corr')
axes[1].plot(np.arange(1, total_states_number, 1), crispness_corr_original)
axes[1].plot(np.arange(1, total_states_number, 1), crispness_corr)
axes[1].legend(('Original', 'Motion_corrected'))
axes[1].set_ylabel('Crispness')
# Get output file paths
data_dir = 'data/interim/motion_correction/'
sql = "SELECT mouse,session,trial,is_rest,cropping_v,decoding_v,motion_correction_v FROM Analysis WHERE motion_correction_main=%s "
val = [selected_rows, ]
mycursor.execute(sql, val)
myresult = mycursor.fetchall()
data = []
for x in myresult:
data += x
file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[5]}.{data[4]}.{data[6]}"
output_meta_crispness = data_dir + f'meta/figures/crispness/{file_name}.png'
fig.savefig(output_meta_crispness)
return fig
def plot_corr_pnr(mouse_row, parameters_source_extraction):
"""
Plots the summary images correlation and pnr. Also the pointwise product between them (used in Caiman paper Zhou
et al 2018)
:param mouse_row:
:param parameters_source_extraction: parameters that will be used for source
    extraction. The relevant parameters here are min_corr and min_pnr, because the source extraction
    algorithm is initialized (with initial cell templates) at all pixels whose values surpass those thresholds
:return: figure
"""
input_mmap_file_path = eval(mouse_row.loc['motion_correction_output'])['main']
# Load memory mappable input file
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
logging.warning(f'{mouse_row.name} .mmap file does not exist. Cancelling')
# Determine output paths
step_index = db.get_step_index('motion_correction')
data_dir = 'data/interim/source_extraction/trial_wise/'
# Check if the summary images are already there
gSig = parameters_source_extraction['gSig'][0]
corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(mouse_row.name, gSig_abs=(gSig, gSig))
if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path):
# Already computed summary images
logging.info(f'{mouse_row.name} Already computed summary images')
cn_filter = np.load(corr_npy_file_path)
pnr = np.load(pnr_npy_file_path)
else:
# Compute summary images
t0 = datetime.datetime.today()
logging.info(f'{mouse_row.name} Computing summary images')
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=parameters_source_extraction['gSig'][0],
swap_dim=False)
# Saving summary images as npy files
corr_npy_file_path = data_dir + f'meta/corr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
pnr_npy_file_path = data_dir + f'meta/pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
with open(corr_npy_file_path, 'wb') as f:
np.save(f, cn_filter)
with open(pnr_npy_file_path, 'wb') as f:
np.save(f, pnr)
fig = plt.figure(figsize=(15, 15))
min_corr = round(parameters_source_extraction['min_corr'], 2)
min_pnr = round(parameters_source_extraction['min_pnr'], 1)
max_corr = round(cn_filter.max(), 2)
max_pnr = 20
# continuous
cmap = 'viridis'
fig, axes = plt.subplots(1, 3, sharex=True)
corr_fig = axes[0].imshow(np.clip(cn_filter, min_corr, max_corr), cmap=cmap)
axes[0].set_title('Correlation')
fig.colorbar(corr_fig, ax=axes[0])
pnr_fig = axes[1].imshow(np.clip(pnr, min_pnr, max_pnr), cmap=cmap)
axes[1].set_title('PNR')
fig.colorbar(pnr_fig, ax=axes[1])
combined = cn_filter * pnr
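    # The pointwise product corr * pnr gives a compact picture of where both summary images are high,
    # i.e. where source extraction is likely to place its initial seeds.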
max_combined = 10
min_combined = np.min(combined)
corr_pnr_fig = axes[2].imshow(np.clip(cn_filter * pnr, min_combined, max_combined), cmap=cmap)
axes[2].set_title('Corr * PNR')
fig.colorbar(corr_pnr_fig, ax=axes[2])
fig_dir = 'data/interim/source_extraction/trial_wise/meta/'
fig_name = fig_dir + f'figures/corr_pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.png'
fig.savefig(fig_name)
return fig
def plot_corr_pnr_binary(mouse_row, corr_limits, pnr_limits, parameters_source_extraction, session_wise=False):
'''
Plot 2 matrix of binary selected and not selected seeds for different corr_min and pnr_min
:param mouse_row: analysis states data
:param corr_limits: array of multiple values of corr_min to test
:param pnr_limits: arrey of multiple values of pnr_min to test
:param parameters_source_extraction: dictionary with parameters
:return: figure pointer
'''
input_mmap_file_path = eval(mouse_row.loc['motion_correction_output'])['main']
# Load memory mappable input file
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
logging.warning(f'{mouse_row.name} .mmap file does not exist. Cancelling')
# Determine output paths
step_index = db.get_step_index('motion_correction')
data_dir = 'data/interim/source_extraction/trial_wise/'
# Check if the summary images are already there
gSig = parameters_source_extraction['gSig'][0]
corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(mouse_row.name, gSig_abs=(gSig, gSig))
if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path):
# Already computed summary images
logging.info(f'{mouse_row.name} Already computed summary images')
cn_filter = np.load(corr_npy_file_path)
pnr = np.load(pnr_npy_file_path)
else:
# Compute summary images
t0 = datetime.datetime.today()
logging.info(f'{mouse_row.name} Computing summary images')
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=parameters_source_extraction['gSig'][0],
swap_dim=False)
# Saving summary images as npy files
corr_npy_file_path = data_dir + f'meta/corr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
pnr_npy_file_path = data_dir + f'meta/pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
with open(corr_npy_file_path, 'wb') as f:
np.save(f, cn_filter)
with open(pnr_npy_file_path, 'wb') as f:
np.save(f, pnr)
fig1 = plt.figure(figsize=(50, 50))
combined_image = cn_filter * pnr
fig1, axes1 = plt.subplots(len(corr_limits), len(pnr_limits), sharex=True)
fig2, axes2 = plt.subplots(len(corr_limits), len(pnr_limits), sharex=True)
fig3, axes3 = plt.subplots(len(corr_limits), len(pnr_limits), sharex=True)
ii = 0
for min_corr in corr_limits:
min_corr = round(min_corr, 2)
jj = 0
for min_pnr in pnr_limits:
min_pnr = round(min_pnr, 2)
# binary
limit = min_corr * min_pnr
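            # Binary masks: pixels whose combined corr*pnr image (first figure), correlation image
            # (second figure) or PNR image (third figure) exceed the current thresholds, i.e. the
            # candidate seed pixels for these parameter values.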
axes1[ii, jj].imshow(combined_image > limit, cmap='binary')
axes1[ii, jj].set_title(f'{min_corr}')
axes1[ii, jj].set_ylabel(f'{min_pnr}')
axes2[ii, jj].imshow(cn_filter > min_corr, cmap='binary')
axes2[ii, jj].set_title(f'{min_corr}')
axes2[ii, jj].set_ylabel(f'{min_pnr}')
axes3[ii, jj].imshow(pnr > min_pnr, cmap='binary')
axes3[ii, jj].set_title(f'{min_corr}')
axes3[ii, jj].set_ylabel(f'{min_pnr}')
jj = jj + 1
ii = ii + 1
fig_dir = 'data/interim/source_extraction/trial_wise/meta/'
if session_wise:
fig_dir = 'data/interim/source_extraction/session_wise/meta/'
fig_name = fig_dir + f'figures/min_corr_pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}_comb.png'
fig1.savefig(fig_name)
fig_name = fig_dir + f'figures/min_corr_pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}_corr.png'
fig2.savefig(fig_name)
fig_name = fig_dir + f'figures/min_corr_pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}_pnr.png'
fig3.savefig(fig_name)
return fig1, fig2, fig3
def plot_histogram(position, value, title='title', xlabel='x_label', ylabel='y_label'):
'''
    This function plots `value`, normalized to sum to one, against `position` (a histogram-style
    distribution curve).
    :param position: x values (e.g. bin positions)
    :param value: y values (e.g. counts per bin)
:param title:
:param xlabel:
:param ylabel:
:return:
'''
fig, axes = plt.subplots(1, 1, sharex=True)
normalization = sum(value)
axes.plot(position, value / normalization)
axes.set_title(title)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_ylim(0, np.max(value / normalization) + 0.01 * np.max(value / normalization))
return fig
def plot_multiple_contours(row, version=None, corr_array=None, pnr_array=None, session_wise=False):
'''
Plots different versions of contour images that change the initialization parameters for source extraction.
The idea is to see the impact of different seed selection in the final source extraction result.
:param row: one analysis state row
:param version: array containing the version numbers of source extraction that will be plotted
:param corr_array: array of the same length of version and pnr_array containing the min_corr values for those versions
:param pnr_array: array of the same length of version and corr_array containing the min_pnr values for those versions
:return: figure
'''
states_df = db.open_analysis_states_database()
index = row.name
figure, axes = plt.subplots(len(corr_array), len(pnr_array), figsize=(15, 15))
for ii in range(corr_array.shape[0]):
for jj in range(pnr_array.shape[0]):
new_row = db.select(states_df, 'component_evaluation', mouse=index[0], session=index[1],
trial=index[2], is_rest=index[3], cropping_v=index[5], motion_correction_v=index[6],
source_extraction_v=version[ii * len(pnr_array) + jj])
new_row = new_row.iloc[0]
output = eval(new_row.loc['source_extraction_output'])
cnm_file_path = output['main']
cnm = load_CNMF(db.get_file(cnm_file_path))
corr_path = output['meta']['corr']['main']
cn_filter = np.load(db.get_file(corr_path))
axes[ii, jj].imshow(cn_filter)
coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
for c in coordinates:
v = c['coordinates']
c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
axes[ii, jj].plot(*v.T, c='w')
axes[ii, jj].set_title('min_corr = ' + f'{round(corr_array[ii], 2)}')
axes[ii, jj].set_ylabel('min_pnr = ' + f'{round(pnr_array[jj], 2)}')
fig_dir = 'data/interim/source_extraction/trial_wise/meta/figures/contours/'
if session_wise:
fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/contours/'
fig_name = fig_dir + db.create_file_name(3,
new_row.name) + '_corr_min' + f'{round(corr_array[0], 1)}' + '_pnr_min' + f'{round(pnr_array[0], 1)}' + '_.png'
figure.savefig(fig_name)
return figure
def plot_session_contours(selected_rows, version=None, corr_array=None, pnr_array=None):
'''
Plots different versions of contour images that change the initialization parameters for source extraction.
    The idea is to see the impact of different seed selections on the final source extraction result.
:param selected_rows: rows corresponding to different trials
:param version: array containing the version numbers of source extraction that will be plotted
:param corr_array: array of the same length of version and pnr_array containing the min_corr values for those versions
:param pnr_array: array of the same length of version and corr_array containing the min_pnr values for those versions
:return: (saves multiple figures)
'''
states_df = db.open_analysis_states_database()
for ii in range(corr_array.shape[0]):
for jj in range(pnr_array.shape[0]):
figure, axes = plt.subplots(1, len(selected_rows), figsize=(50, 10))
for i in range(len(selected_rows)):
row = selected_rows.iloc[i]
index = row.name
new_row = db.select(states_df, 'component_evaluation', mouse=index[0], session=index[1],
trial=index[2], is_rest=index[3], cropping_v=index[5], motion_correction_v=index[6],
alignment_v=index[7], source_extraction_v=version[ii * len(pnr_array) + jj])
new_row = new_row.iloc[0]
output = eval(new_row.loc['source_extraction_output'])
cnm_file_path = output['main']
cnm = load_CNMF(db.get_file(cnm_file_path))
corr_path = output['meta']['corr']['main']
cn_filter = np.load(db.get_file(corr_path))
# axes[i].imshow(np.clip(cn_filter, min_corr, max_corr), cmap='viridis')
axes[i].imshow(cn_filter)
coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
for c in coordinates:
v = c['coordinates']
c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
axes[i].plot(*v.T, c='w')
axes[i].set_title('Trial = ' + f'{i + 1}', fontsize=30)
axes[i].set_xlabel('#cells = ' + f'{cnm.estimates.A.shape[1]}', fontsize=30)
figure.suptitle('min_corr = ' + f'{round(corr_array[ii], 2)}' + 'min_pnr = ' + f'{round(pnr_array[jj], 2)}',
fontsize=50)
fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/contours/'
fig_name = fig_dir + db.create_file_name(3,
new_row.name) + '_version_' + f'{version[ii * len(pnr_array) + jj]}' + '.png'
figure.savefig(fig_name)
return
def plot_multiple_contours_session_wise(selected_rows, version=None, corr_array=None, pnr_array=None):
'''
Plots different versions of contour images that change the initialization parameters for source extraction.
    The idea is to see the impact of different seed selections on the final source extraction result.
    :param selected_rows: all analysis states selected
:param version: array containing the version numbers of source extraction that will be plotted
:param corr_array: array of the same length of version and pnr_array containing the min_corr values for those versions
:param pnr_array: array of the same length of version and corr_array containing the min_pnr values for those versions
:return: figure
'''
states_df = db.open_analysis_states_database()
figure, axes = plt.subplots(len(corr_array), len(pnr_array), figsize=(15, 15))
color = ['w', 'b', 'r', 'm', 'c']
for row in range(len(selected_rows)):
mouse_row = selected_rows.iloc[row]
index = mouse_row.name
output = eval(mouse_row.loc['source_extraction_output'])
corr_path = output['meta']['corr']['main']
cn_filter = np.load(db.get_file(corr_path))
for ii in range(corr_array.shape[0]):
for jj in range(pnr_array.shape[0]):
axes[ii, jj].imshow(cn_filter)
new_row = db.select(states_df, 'component_evaluation', mouse=index[0], session=index[1],
trial=index[2], is_rest=index[3], cropping_v=index[5], motion_correction_v=index[6],
source_extraction_v=version[ii * len(pnr_array) + jj])
new_row = new_row.iloc[0]
output = eval(new_row.loc['source_extraction_output'])
cnm_file_path = output['main']
cnm = load_CNMF(db.get_file(cnm_file_path))
coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
for c in coordinates:
v = c['coordinates']
c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
axes[ii, jj].plot(*v.T, c=color[row])
axes[ii, jj].set_title('min_corr = ' + f'{round(corr_array[ii], 2)}')
axes[ii, jj].set_ylabel('min_pnr = ' + f'{round(pnr_array[jj], 2)}')
fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/contours/'
fig_name = fig_dir + db.create_file_name(3,
new_row.name) + '_corr_min' + f'{round(corr_array[0], 1)}' + '_pnr_min' + f'{round(pnr_array[0], 1)}' + '_all.png'
figure.savefig(fig_name)
return figure
def plot_multiple_contours_session_wise_evaluated(selected_rows):
## IN DEVELOPMENT!!!!!!!
'''
    Plots, for each selected trial, the contours of all extracted components over the correlation image,
    together with the accepted and rejected components after component evaluation.
    :param selected_rows: all analysis states selected
:return: figure
'''
figure, axes = plt.subplots(3, 5, figsize=(50, 30))
for row in range(len(selected_rows)):
mouse_row = selected_rows.iloc[row]
index = mouse_row.name
output = eval(mouse_row.loc['source_extraction_output'])
corr_path = output['meta']['corr']['main']
cn_filter = np.load(db.get_file(corr_path))
axes[0, row].imshow(cn_filter)
axes[1, row].imshow(cn_filter)
axes[2, row].imshow(cn_filter)
output = eval(mouse_row.loc['source_extraction_output'])
cnm_file_path = output['main']
cnm = load_CNMF(db.get_file(cnm_file_path))
coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
for c in coordinates:
v = c['coordinates']
c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
axes[0, row].plot(*v.T, c='w', linewidth=3)
axes[0, row].set_title('Trial = ' + f'{row}')
axes[0, row].set_ylabel('')
output = eval(mouse_row.loc['component_evaluation_output'])
cnm_file_path = output['main']
cnm = load_CNMF(db.get_file(cnm_file_path))
idx = cnm.estimates.idx_components
coordinates = cm.utils.visualization.get_contours(cnm.estimates.A[:, idx], np.shape(cn_filter), 0.2, 'max')
for c in coordinates:
v = c['coordinates']
c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
axes[1, row].plot(*v.T, c='b', linewidth=3)
idx_b = cnm.estimates.idx_components_bad
        coordinates_b = cm.utils.visualization.get_contours(cnm.estimates.A[:, idx_b], np.shape(cn_filter), 0.2, 'max')
#Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#These are all the builtins that should by in astropysics instead of pymodelfit
"""
==================================================
models -- astronomy-specific models for pymodelfit
==================================================
This module makes use of the :mod:`pymodelfit` package to define a number of
models inteded for data-fitting that are astronomy-specific. Note that
:mod:`pymodelfit` was originally written as part of astropysics, and hence it is
tightly integrated (including the Fit GUI, where relevant) to other parts of
astropysics, often using the models in this module.
.. note::
Everything in the :mod:`pymodelfit` package is imported to this package.
This is rather bad form, but is currently done because other parts of
astropysics is written for when pymodelfit was part of astropysics. This
may change in the future, though, so it is recommended that user code always
use ``from pymodelfit import ...`` instead of
``from astropysics.models import ...``.
Classes and Inheritance Structure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. inheritance-diagram:: astropysics.models
:parts: 1
Module API
^^^^^^^^^^
"""
from __future__ import division,with_statement
import numpy as np
from pymodelfit.core import *
from pymodelfit.builtins import *
from .spec import HasSpecUnits as _HasSpecUnits
class BlackbodyModel(FunctionModel1DAuto,_HasSpecUnits):
"""
A Planck blackbody radiation model.
    Output/y-axis is taken to be specific intensity.
"""
from .constants import h,c,kb
def __init__(self,unit='wl'):
_HasSpecUnits.__init__(self,unit)
self.unit = unit #need to explicitly set the unit to initialize the correct f
self.stephanBoltzmannLaw = self._instanceSBLaw
def f(self,x,A=1,T=5800):
x = x*self._xscaling
if self._phystype == 'wavelength':
val = self._flambda(x,A,T)
elif self._phystype == 'frequency':
val = self._fnu(x,A,T)
elif self._phystype == 'energy':
val = self._fen(x,A,T)
else:
raise ValueError('unrecognized physical unit type!')
return val*self._xscaling
def _flambda(self,x,A=1,T=5800):
h,c,k=self.h,self.c,self.kb
return A*2*h*c*c*x**-5/(np.exp(h*c/(k*T*x))-1)
def _fnu(self,x,A=1,T=5800):
h,c,k=self.h,self.c,self.kb
return A*2*h/c/c*x**3/(np.exp(h*x/(k*T))-1)
def _fen(self,x,A=1,T=5800):
return self._fnu(x,A,T)/self.h
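    # The three helpers above are Planck's law in different spectral variables (cgs units):
    #   B_lambda(T) = 2 h c^2 / lambda^5 * 1 / (exp(h c / (lambda k T)) - 1)
    #   B_nu(T)     = 2 h nu^3 / c^2    * 1 / (exp(h nu / (k T)) - 1)
    # and the per-energy form is B_nu / h, all scaled by the amplitude parameter A.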
def _applyUnits(self,xtrans,xitrans,xftrans,xfinplace):
pass #do nothing because the checking is done in the f-function
# if self._phystype == 'wavelength': #TODO:check
# self.f = self._flambda
# elif self._phystype == 'frequency':
# self.f = self._fnu
# elif self._phystype == 'energy':
# self.f = self._fen
# else:
# raise ValueError('unrecognized physical unit type!')
@property
def xaxisname(self):
if self._phystype == 'wavelength':
return 'lambda'
elif self._phystype == 'frequency':
return 'nu'
elif self._phystype == 'energy':
return 'E'
yaxisname = 'I'
@property
def rangehint(self):
cen = self.wienDisplacementLaw(None)
return(cen/3,cen*3)
def setIntensity(self):
"""
Sets A so that the output is specific intensity/surface brightness.
"""
self.A = 1
def setFlux(self,radius,distance):
"""
Sets A so that the output is the flux at the specified distance from
a spherical blackbody with the specified radius.
:param radius: Radius of the blackbody in cm
:type radius: float
:param distance: distance to the blackbody in cm
:type distance: float
"""
from .phot import intensity_to_flux
self.A = intensity_to_flux(radius,distance)
def getFlux(self,x,radius=None,distance=None):
"""
Returns the flux density at the requested wavelength for a blackbody
of the given radius at a specified distance.
:param x: x-value of the model
:type x: float
:param radius: radius of the blackbody in cm
:type radius: float
:param distance: distance to the blackbody in cm
:type distance: float
:returns: flux/wl at the specified distance from the blackbody
"""
if distance is None:
if radius is None:
pass
else:
distance = self.getFluxDistance(radius)
self.setFlux(radius,distance)
else:
if radius is None:
radius = self.getFluxRadius(distance)
self.setFlux(radius,distance)
else:
self.setFlux(radius,distance)
return self(x)
def getFluxRadius(self,distance):
"""
Determines the radius of a spherical blackbody at the specified distance
assuming the flux is given by the model at the given temperature.
"""
return (self.A*distance*distance/pi)**0.5
def getFluxDistance(self,radius):
"""
Determines the distance to a spherical blackbody of the specified radius
assuming the flux is given by the model at the given temperature.
"""
return (pi*radius*radius/self.A)**0.5
def _getPeak(self):
h,k = self.h,self.kb
if 'wavelength' in self.unit:
b = .28977685 #cm * K
peakval = b/self.T/self._xscaling
elif 'frequency' in self.unit:
a=2.821439 #constant from optimizing BB function
peakval=a/h*k*self.T/self._xscaling
elif 'energy' in self.unit:
raise NotImplementedError
else:
raise RuntimeError('Should never see this - bug in BB code')
return self(peakval)
def _setPeak(self,peakval):
self.A = 1
self.A = peakval/self._getPeak()
peak=property(_getPeak,_setPeak)
def wienDisplacementLaw(self,peakval):
"""
Uses the Wien Displacement Law to calculate the temperature given a peak
*input* location (wavelength, frequency, etc) or compute the peak
location given the current temperature.
:param peakval:
The peak value of the model to use to compute the new temperature.
:type peakval: float or None
:returns:
The temperature for the provided peak location or the peak location
for the current temperature if `peakval` is None.
"""
h,k = self.h,self.kb
if self._phystype == 'wavelength':
b = .28977685 #cm * K
if peakval is None:
out = b/self.T/self._xscaling
else:
out = b/peakval/self._xscaling
elif self._phystype == 'frequency':
a=2.821439 #constant from optimizing BB function
if peakval is None:
out = a*k*self.T/h/self._xscaling
else:
out = peakval*h/a/k/self._xscaling
elif self._phystype == 'energy':
a=2.821439 #constant from optimizing BB function
if peakval is None:
out = a*self.T/h/self._xscaling
else:
out = peakval*h/a/self._xscaling
else:
raise RuntimeError('Should never see this - bug in BB code')
return out
def _instanceSBLaw(self,T=None,area=1):
if T is not None:
self.T = T
return BlackbodyModel.stephanBoltzmannLaw(self.T,area)*self._enscale
@staticmethod
def stephanBoltzmannLaw(T,area=1):
"""
assumes cgs units
"""
h,c,kb=BlackbodyModel.h,BlackbodyModel.c,BlackbodyModel.kb
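        # Stefan-Boltzmann constant from first principles: sigma = 2 pi^5 k^4 / (15 h^3 c^2)
        # (~5.67e-5 erg s^-1 cm^-2 K^-4 in cgs), so the radiated power is area * sigma * T^4.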
sigma = 2*pi**5*kb**4*h**-3*c**-2/15
return area*sigma*T**4
class BurkertModel(FunctionModel1DAuto):
r"""
Burkert (1996) profile defined as:
.. math::
\frac{\rho_0 r_0^3}{(r+r_0)(r^2+r_0^2)}
where :attr:`rho0` is the central density.
"""
xaxisname = 'r'
yaxisname = 'rho'
def f(self,r,rho0=1,r0=1):
return rho0*r0**3/((r+r0)*(r**2+r0**2))
@property
def rangehint(self):
return 0,self.r0*3
class EinastoModel(FunctionModel1DAuto):
"""
Einasto profile given by
.. math::
\\ln(\\rho(r)/A) = (-2/\\alpha)((r/r_s)^{\\alpha} - 1) .
:attr:`A` is the density where the log-slope is -2, and :attr:`rs` is the
corresponding radius. The `alpha` parameter defaults to .17 as suggested by
Navarro et al. 2010
.. note::
The Einasto profile is is mathematically identical to the Sersic profile
(:class:`SersicModel`), although the parameterization is different. By
Convention, Sersic generally refers to a 2D surface brightness/surface
density profile, while Einasto is usually treated as a 3D density
profile.
"""
xaxisname = 'r'
yaxisname = 'rho'
def f(self,r,A=1,rs=1,alpha=.17):
return A*np.exp((-2/alpha)*((r/rs)**alpha - 1))
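    # Spot check: at r = rs the exponent vanishes, so f(rs) = A, matching the docstring's
    # definition of A as the density at the radius where the log-slope equals -2.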
class ExponentialDiskModel(FunctionModel2DScalarAuto):
"""
A disk with an exponential profile along both vertical and horizontal axes.
The first coordinate is the horizontal/in-disk coordinate (scaled by `l`)
    while the second is `z`, i.e.
.. math::
A e^{-|s/l|} e^{-|z/h|}
for `pa` = 0 ; non-0 `pa` rotates the profile counter-clockwise by `pa`
radians.
"""
_fcoordsys='cartesian'
def f(self,inarr,A=1,l=2,h=1,pa=0):
s,z = inarr
        sinpa,cospa = np.sin(pa), np.cos(pa)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This source includes modified version of sample codes in OpenCV
# distribution, licensed under 3-clause BSD License.
#
# By downloading, copying, installing or using the software you agree to this license.
# If you do not agree to this license, do not download, install,
# copy or use the software.
#
#
# License Agreement
# For Open Source Computer Vision Library
# (3-clause BSD License)
#
# Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
# Copyright (C) 2009-2011, <NAME> Inc., all rights reserved.
# Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.
# Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
# Copyright (C) 2015, OpenCV Foundation, all rights reserved.
# Copyright (C) 2015, Itseez Inc., all rights reserved.
# Third party copyrights are property of their respective owners.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the names of the copyright holders nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# This software is provided by the copyright holders and contributors "as is" and
# any express or implied warranties, including, but not limited to, the implied
# warranties of merchantability and fitness for a particular purpose are disclaimed.
# In no event shall copyright holders or contributors be liable for any direct,
# indirect, incidental, special, exemplary, or consequential damages
# (including, but not limited to, procurement of substitute goods or services;
# loss of use, data, or profits; or business interruption) however caused
# and on any theory of liability, whether in contract, strict liability,
# or tort (including negligence or otherwise) arising in any way out of
# the use of this software, even if advised of the possibility of such damage.
#
import os
import pickle
import cv2
import numpy as np
from ikalog.inputs.filters import Filter, WarpFilterModel
from ikalog.utils import *
class WarpCalibrationException(Exception):
pass
class WarpCalibrationNotFound(WarpCalibrationException):
pass
class WarpCalibrationUnacceptableSize(WarpCalibrationException):
def __init__(self, shape):
self.shape = shape
class WarpFilter(Filter):
def filter_matches(self, kp1, kp2, matches, ratio=0.75):
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append(kp1[m.queryIdx])
mkp2.append(kp2[m.trainIdx])
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
return p1, p2, kp_pairs
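    # The ratio=0.75 test above is Lowe's ratio test: a match is kept only when its best
    # distance is clearly smaller than the second-best distance, which discards ambiguous
    # correspondences before the homography fit in calibrateWarp().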
def set_bbox(self, x, y, w, h):
corners = np.float32(
[[x, y], [x + w, y], [w + x, y + h], [x, y + h]]
)
self.pts1 = np.float32(corners)
IkaUtils.dprint('pts1: %s' % [self.pts1])
IkaUtils.dprint('pts2: %s' % [self.pts2])
self.M = cv2.getPerspectiveTransform(self.pts1, self.pts2)
return True
def calibrateWarp(self, capture_image, validation_func=None):
capture_image_gray = cv2.cvtColor(capture_image, cv2.COLOR_BGR2GRAY)
capture_image_keypoints, capture_image_descriptors = self.detector.detectAndCompute(
capture_image_gray, None)
        print('capture_image - %d features' % (len(capture_image_keypoints)))
print('matching...')
raw_matches = self.matcher.knnMatch(
self.calibration_image_descriptors,
trainDescriptors=capture_image_descriptors,
k=2
)
p1, p2, kp_pairs = self.filter_matches(
self.calibration_image_keypoints,
capture_image_keypoints,
raw_matches,
)
if len(p1) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
else:
H, status = None, None
print('%d matches found, not enough for homography estimation' % len(p1))
self.calibration_requested = False
raise WarpCalibrationNotFound()
if H is None:
# Should never reach there...
self.calibration_requested = False
raise WarpCalibrationNotFound()
if len(status) < 1000:
raise WarpCalibrationNotFound()
calibration_image_height, calibration_image_width = self.calibration_image_size
corners = np.float32(
[[0, 0],
[calibration_image_width, 0],
[calibration_image_width, calibration_image_height],
[0, calibration_image_height]]
)
pts1 = np.float32(cv2.perspectiveTransform(
corners.reshape(1, -1, 2), H).reshape(-1, 2) + (0, 0))
IkaUtils.dprint('pts1: %s' % [pts1])
IkaUtils.dprint('pts2: %s' % [self.pts2])
if validation_func is not None:
if not validation_func(pts1):
w = int(pts1[1][0] - pts1[0][0])
h = int(pts1[2][1] - pts1[1][1])
raise WarpCalibrationUnacceptableSize((w, h))
self.M = cv2.getPerspectiveTransform(pts1, self.pts2)
return True
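    # Rough usage sketch (illustrative only; Filter constructor arguments are omitted and
    # the variable names here are made up):
    #   f = WarpFilter(...)
    #   f.initializeCalibration()
    #   f.calibrateWarp(first_frame)                          # builds self.M from matches
    #   warped = cv2.warpPerspective(frame, f.M, (1280, 720)) # apply it to later frames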
def tuples2keyPoints(self, tuples):
new_l = []
for point in tuples:
pt, size, angle, response, octave, class_id = point
new_l.append(cv2.KeyPoint(
pt[0], pt[1], size, angle, response, octave, class_id))
return new_l
def keyPoints2tuples(self, points):
new_l = []
for point in points:
new_l.append((point.pt, point.size, point.angle, point.response, point.octave,
point.class_id))
return new_l
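    # cv2.KeyPoint objects are not picklable, so the two helpers above round-trip them
    # through plain tuples for saveModelToFile() / loadModelFromFile() below.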
def loadModelFromFile(self, file):
f = open(file, 'rb')
l = pickle.load(f)
f.close()
self.calibration_image_size = l[0]
self.calibration_image_keypoints = self.tuples2keyPoints(l[1])
self.calibration_image_descriptors = l[2]
def saveModelToFile(self, file):
f = open(file, 'wb')
pickle.dump([
self.calibration_image_size,
self.keyPoints2tuples(self.calibration_image_keypoints),
self.calibration_image_descriptors,
], f)
f.close()
def initializeCalibration(self):
model_object = WarpFilterModel()
if not model_object.trained:
            raise Exception('Could not initialize WarpFilterModel')
self.detector = model_object.detector
self.norm = model_object.norm
self.matcher = model_object.matcher
self.calibration_image_size = model_object.calibration_image_size
self.calibration_image_keypoints = model_object.calibration_image_keypoints
self.calibration_image_descriptors = model_object.calibration_image_descriptors
self.reset()
def reset(self):
# input source
w = 1280
h = 720
        self.pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 22:12:24 2021
@author: leonl42
print useful statistics about the data
"""
import numpy as np
import matplotlib as plt
import matplotlib.pyplot  # ensure the pyplot submodule is loaded for the plt.pyplot.* calls below
from scripts.util import get_freq_dist, PANDAS_DTYPE,COLUMN_LABEL,COLUMN_HASHTAGS, COLUMN_TIMEDELTAS,SUFFIX_POST,COLUMN_TWEET, COLUMN_LIKES,COLUMN_RETWEETS
import pandas as pd
import csv
from nltk import ngrams
from ast import literal_eval
# load data
df = pd.read_csv("data/preprocessing/preprocessed.csv", quoting = csv.QUOTE_NONNUMERIC, lineterminator = "\n", dtype = PANDAS_DTYPE)
################################################
#####plot a pair of frequency distributions#####
################################################
def plot_freq_dist(freq_dist_pos,freq_dist_neg,num, c1, c2):
fig = plt.pyplot.figure()
ax = fig.add_axes([0,0,1,1])
X = np.arange(num)
    # extract relative frequency of the 'num' most common elements and store them in the data vector
data = [[freq_dist_pos.freq(element[0])for element in freq_dist_pos.most_common(num)],
[freq_dist_neg.freq(element[0]) for element in freq_dist_pos.most_common(num)]]
# extract the most common elements and store their labels
labels = [element[0] for element in freq_dist_pos.most_common(num)]
ax.set_xticklabels(labels, rotation=90)
ax.xaxis.set_ticks([i for i in range(num)])
ax.bar(X + 0.00, data[0], color = c1, width = 0.25)
ax.bar(X + 0.25, data[1], color = c2, width = 0.25)
plt.pyplot.show()
###########################################################
#####Split dataframe into positive and negative labels#####
###########################################################
grouped = df.groupby(COLUMN_LABEL)
df_true = grouped.get_group(True)
df_false= grouped.get_group(False)
#################################################
#####Plot most common hashtags per dataframe#####
#################################################
def get_most_common_hashtags(df):
freq_dist = get_freq_dist(df[COLUMN_HASHTAGS])
return freq_dist
freq_dist_hashtags_pos = get_most_common_hashtags(df_true)
freq_dist_hashtags_neg = get_most_common_hashtags(df_false)
plot_freq_dist(freq_dist_hashtags_pos,freq_dist_hashtags_neg,50,'g','r')
plot_freq_dist(freq_dist_hashtags_neg,freq_dist_hashtags_pos,50,'r','g')
#################################
#####Plot average time delta#####
#################################
def statistics_time_deltas(df):
year = np.array([])
date = np.array([])
time = np.array([])
for entry in (df[COLUMN_TIMEDELTAS]):
entry = literal_eval(str(entry))
        year = np.append(year,entry[0])
#!/usr/bin/env python
"""compare two tractor catalogues that should have same objects
"""
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg') #display backend
import os
import sys
import logging
import argparse
import numpy as np
from scipy import stats as sp_stats
#import seaborn as sns
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import vstack, Table
from astrometry.libkd.spherematch import match_radec
#import thesis_code.targets as targets
from legacyanalysis import targets
from legacyanalysis.pathnames import get_outdir
class Matched_Cats():
def __init__(self):
self.data={}
def initialize(self,data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos):
self.d12= d12 #deg separations between matches objects
self.deg2_decam= deg2_decam
self.deg2_bokmos= deg2_bokmos
self.data['m_decam']= targets.data_extract(data_1,m1)
self.data['m_bokmos']= targets.data_extract(data_2,m2)
self.data['u_decam']= targets.data_extract(data_1,m1_unm)
self.data['u_bokmos']= targets.data_extract(data_2,m2_unm)
def add_d12(self,d12):
'''concatenate new d12 with existing matched deg separation array'''
self.d12= np.concatenate([self.d12, d12])
def add_dict(self,match_type,new_data):
'''match_type -- m_decam,m_bokmos,u_decam, etc
        new_data -- data returned from read_from..() to be concatenated with existing m_decam, etc'''
for key in self.data[match_type].keys():
self.data[match_type][key]= np.concatenate([self.data[match_type][key],new_data[key]])
def deg2_lower_limit(data):
'''deg2 spanned by objects in each data set, lower limit'''
ra= data['ra'].max()-data['ra'].min()
assert(ra > 0.)
dec= abs(data['dec'].max()-data['dec'].min())
return ra*dec
def match_it(cat1,cat2):
'''cat1,2 are tractor catalogue to match objects between'''
#match cats
data_1= targets.read_from_tractor_cat(cat1)
data_2= targets.read_from_tractor_cat(cat2)
#deg2 spanned by objects in each data set
deg2_decam= deg2_lower_limit(data_1)
deg2_bokmos= deg2_lower_limit(data_2)
#all the 'all1' objects that have match in 'all2'
m1, m2, d12 = match_radec(data_1['ra'],data_1['dec'],data_2['ra'],data_2['dec'],\
1.0/3600.0,nearest=True)
m1_unm = np.delete(np.arange(len(data_1['ra'])),m1,axis=0)
m2_unm = np.delete(np.arange(len(data_2['ra'])),m2,axis=0)
return data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos
def read_lines(fn):
fin=open(fn,'r')
lines=fin.readlines()
fin.close()
return list(np.char.strip(lines))
#plotting vars
laba=dict(fontweight='bold',fontsize='medium')
kwargs_axtext=dict(fontweight='bold',fontsize='large',va='top',ha='left')
leg_args=dict(frameon=True,fontsize='x-small')
def plot_nobs(b):
    '''make histograms of nobs so we can compare depths of g,r,z between the two catalogues'''
hi=0
for cam in ['m_decam','m_bokmos']:
for band in 'grz':
hi= np.max((hi, b[cam].data[band+'_nobs'].max()))
bins= hi
for cam in ['m_decam','m_bokmos']:
for band in 'grz':
junk=plt.hist(b[cam].data[band+'_nobs'],bins=bins,normed=True,cumulative=True,align='mid')
xlab=plt.xlabel('nobs %s' % band, **laba)
ylab=plt.ylabel('CDF', **laba)
plt.savefig(os.path.join(get_outdir('bmd'),'hist_nobs_%s_%s.png' % (band,cam[2:])), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
#def plot_nobs_2(b):
# '''improved version of plot_nobs'''
# for cam in ['m_decam','m_bokmos']:
# for band in 'grz':
# junk=plt.hist(b[cam].data[band+'_nobs'],bins=10,normed=True,cumulative=True,align='mid')
# xlab=plt.xlabel('nobs %s' % band, **laba)
# ylab=plt.xlabel('CDF', **laba)
# plt.savefig(os.path.join(get_outdir('bmd'),'hist_nobs_%s_%s.png' % (band,cam[2:])), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
# plt.close()
#
#
# c1= 'b'
# c2= 'r'
# ###
# decam_max= [b['m_decam'].data[b+'_nobs'].max() for b in 'grz']
# bokmos_max= [b['m_bokmos'].data[b+'_nobs'].max() for b in 'grz']
# types= np.arange(1, np.max((decam_max,bokmos_max)) +1)
# ind = types.copy() # the x locations for the groups
# width = 1 # the width of the bars
# ###
# ht_decam, ht_bokmos= np.zeros(5,dtype=int),np.zeros(5,dtype=int)
# for cnt,typ in enumerate(types):
# ht_decam[cnt]= np.where(obj[m_types[0]].data['type'] == typ)[0].shape[0] / float(obj['deg2_decam'])
# ht_bokmos[cnt]= np.where(obj[m_types[1]].data['type'] == typ)[0].shape[0] / float(obj['deg2_bokmos'])
# ###
# fig, ax = plt.subplots()
# rects1 = ax.bar(ind, ht_decam, width, color=c1)
# rects2 = ax.bar(ind + width, ht_bokmos, width, color=c2)
# ylab= ax.set_ylabel("counts/deg2")
# if matched: ti= ax.set_title('Matched')
# else: ti= ax.set_title('Unmatched')
# ax.set_xticks(ind + width)
# ax.set_xticklabels(types)
# ax.legend((rects1[0], rects2[0]), ('decam', 'bokmos'),**leg_args)
# #save
# if matched: name='hist_types_Matched.png'
# else: name='hist_types_Unmatched.png'
# plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[ylab,ti], bbox_inches='tight',dpi=150)
# plt.close()
#
def plot_radec(obj, addname=''):
'''obj[m_types] -- DECaLS() objects with matched OR unmatched indices'''
#set seaborn panel styles
#sns.set_style('ticks',{"axes.facecolor": ".97"})
#sns.set_palette('colorblind')
#setup plot
fig,ax=plt.subplots(1,2,figsize=(9,6),sharey=True,sharex=True)
plt.subplots_adjust(wspace=0.25)
#plt.subplots_adjust(wspace=0.5)
#plot
ax[0].scatter(obj['m_decam'].data['ra'], obj['m_decam'].data['dec'], \
edgecolor='b',c='none',lw=1.)
ax[1].scatter(obj['u_decam'].data['ra'], obj['u_decam'].data['dec'], \
edgecolor='b',c='none',lw=1.,label='DECaLS')
ax[1].scatter(obj['u_bokmos'].data['ra'], obj['u_bokmos'].data['dec'], \
edgecolor='g',c='none',lw=1.,label='BASS/MzLS')
for cnt,ti in zip(range(2),['Matched','Unmatched']):
ti=ax[cnt].set_title(ti,**laba)
xlab=ax[cnt].set_xlabel('RA', **laba)
ylab=ax[0].set_ylabel('DEC', **laba)
ax[0].legend(loc='upper left',**leg_args)
#save
#sns.despine()
plt.savefig(os.path.join(get_outdir('bmd'),'radec%s.png' % addname), bbox_extra_artists=[xlab,ylab,ti], bbox_inches='tight',dpi=150)
plt.close()
def plot_HistTypes(obj,m_types=['m_decam','m_bokmos'], addname=''):
'''decam,bokmos -- DECaLS() objects with matched OR unmatched indices'''
#matched or unmatched objects
if m_types[0].startswith('m_') and m_types[1].startswith('m_'): matched=True
elif m_types[0].startswith('u_') and m_types[1].startswith('u_'): matched=False
else: raise ValueError
#sns.set_style("whitegrid")
#sns.set_palette('colorblind')
#c1=sns.color_palette()[2]
#c2=sns.color_palette()[0] #'b'
c1= 'b'
c2= 'r'
###
types= ['PSF','SIMP','EXP','DEV','COMP']
ind = np.arange(len(types)) # the x locations for the groups
width = 0.35 # the width of the bars
###
ht_decam, ht_bokmos= np.zeros(5,dtype=int),np.zeros(5,dtype=int)
for cnt,typ in enumerate(types):
ht_decam[cnt]= np.where(obj[m_types[0]].data['type'] == typ)[0].shape[0] / float(obj['deg2_decam'])
ht_bokmos[cnt]= np.where(obj[m_types[1]].data['type'] == typ)[0].shape[0] / float(obj['deg2_bokmos'])
###
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ht_decam, width, color=c1)
rects2 = ax.bar(ind + width, ht_bokmos, width, color=c2)
ylab= ax.set_ylabel("counts/deg2")
if matched: ti= ax.set_title('Matched')
else: ti= ax.set_title('Unmatched')
ax.set_xticks(ind + width)
ax.set_xticklabels(types)
ax.legend((rects1[0], rects2[0]), ('DECaLS', 'BASS/MzLS'),**leg_args)
#save
if matched: name='hist_types_Matched%s.png' % addname
else: name='hist_types_Unmatched%s.png' % addname
plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[ylab,ti], bbox_inches='tight',dpi=150)
plt.close()
def bin_up(data_bin_by,data_for_percentile, bin_edges=np.arange(20.,26.,0.25)):
    '''bins data_bin_by using bin_edges, returns bin centers, counts, and q25,q50,q75 percentiles of data_for_percentile in each bin
bin_edges: compute percentiles for each sample between bin_edges
'''
count= np.zeros(len(bin_edges)-1)+np.nan
q25,q50,q75= count.copy(),count.copy(),count.copy()
for i,low,hi in zip(range(len(count)), bin_edges[:-1],bin_edges[1:]):
ind= np.all((low <= data_bin_by,data_bin_by < hi),axis=0)
if np.where(ind)[0].size > 0:
count[i]= np.where(ind)[0].size
q25[i]= np.percentile(data_for_percentile[ind],q=25)
q50[i]= np.percentile(data_for_percentile[ind],q=50)
q75[i]= np.percentile(data_for_percentile[ind],q=75)
else:
pass #given qs nan, which they already have
return (bin_edges[1:]+bin_edges[:-1])/2.,count,q25,q50,q75
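# Example of the helper above (numbers are illustrative): with the default
# bin_edges=np.arange(20.,26.,0.25) there are 23 bins, and for each bin the function
# returns its center, the sample count, and the 25th/50th/75th percentiles of
# data_for_percentile over the rows whose data_bin_by value falls in that bin.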
def indices_for_type(obj,inst='m_decam',type='all'):
'''return mask for selecting type == all,psf,lrg
data -- obj['m_decam'].data
    lrg mask -- obj['m_decam'].lrg'''
if type == 'all':
return np.ones(obj[inst].data['type'].size, dtype=bool) #1 = True
elif type == 'psf':
return obj[inst].data['type'] == 'PSF'
elif type == 'lrg':
return obj[inst].lrg
else: raise ValueError
def plot_SN_vs_mag(obj, found_by='matched',type='all', addname=''):
'''obj['m_decam'] is DECaLS() object
found_by -- 'matched' or 'unmatched'
type -- all,psf,lrg'''
#indices for type == all,psf, or lrg
assert(found_by == 'matched' or found_by == 'unmatched')
prefix= found_by[0]+'_' # m_ or u_
index={}
for key in ['decam','bokmos']:
index[key]= indices_for_type(obj,inst=prefix+key,type=type)
#bin up SN values
min,max= 18.,25.
bin_SN=dict(decam={},bokmos={})
for key in bin_SN.keys():
for band in ['g','r','z']:
bin_SN[key][band]={}
i= index[key]
bin_edges= np.linspace(min,max,num=30)
bin_SN[key][band]['binc'],count,bin_SN[key][band]['q25'],bin_SN[key][band]['q50'],bin_SN[key][band]['q75']=\
bin_up(obj[prefix+key].data[band+'mag'][i], \
obj[prefix+key].data[band+'flux'][i]*np.sqrt(obj[prefix+key].data[band+'flux_ivar'][i]),\
bin_edges=bin_edges)
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
#plot SN
for cnt,band in zip(range(3),['g','r','z']):
#horiz line at SN = 5
ax[cnt].plot([1,40],[5,5],'k--',lw=2)
#data
for inst,color,lab in zip(['decam','bokmos'],['b','g'],['DECaLS','BASS/MzLS']):
ax[cnt].plot(bin_SN[inst][band]['binc'], bin_SN[inst][band]['q50'],c=color,ls='-',lw=2,label=lab)
ax[cnt].fill_between(bin_SN[inst][band]['binc'],bin_SN[inst][band]['q25'],bin_SN[inst][band]['q75'],color=color,alpha=0.25)
#labels
ax[2].legend(loc=1,**leg_args)
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].set_yscale('log')
xlab=ax[cnt].set_xlabel('%s' % band, **laba)
ax[cnt].set_ylim(1,100)
ax[cnt].set_xlim(20.,26.)
ylab=ax[0].set_ylabel('S/N', **laba)
text_args= dict(verticalalignment='bottom',horizontalalignment='right',fontsize=10)
ax[2].text(26,5,'S/N = 5 ',**text_args)
plt.savefig(os.path.join(get_outdir('bmd'),'sn_%s_%s%s.png' % (found_by,type,addname)), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_matched_dmag_vs_psf_fwhm(obj, type='psf'):
'''using matched sample, plot diff in mags vs. DECAM psf_fwhm in bins
obj['m_decam'] is DECaLS() object'''
#indices
    index= np.all((indices_for_type(obj,inst='m_decam',type=type),\
        indices_for_type(obj,inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#bin up by DECAM psf_fwhm
bin_edges= np.linspace(0,3,num=6)
vals={}
for band in ['g','r','z']:
vals[band]={}
vals[band]['binc'],count,vals[band]['q25'],vals[band]['q50'],vals[band]['q75']=\
bin_up(obj['m_decam'].data[band+'_psf_fwhm'][index], \
obj['m_bokmos'].data[band+'mag'][index]- obj['m_decam'].data[band+'mag'][index], \
bin_edges=bin_edges)
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
text_args= dict(verticalalignment='center',horizontalalignment='left',fontsize=10)
#plot
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].plot(vals[band]['binc'], vals[band]['q50'],c='b',ls='-',lw=2)
ax[cnt].fill_between(vals[band]['binc'],vals[band]['q25'],vals[band]['q75'],color='b',alpha=0.25)
ax[cnt].text(0.05,0.95,band,transform=ax[cnt].transAxes,**text_args)
#finish
xlab=ax[1].set_xlabel('decam PSF_FWHM', **laba)
ylab=ax[0].set_ylabel(r'Median $\Delta \, m$ (decam - bokmos)', **laba)
ti= plt.suptitle('%s Objects, Matched' % type.upper())
plt.savefig(os.path.join(get_outdir('bmd'),'dmag_vs_psf_fwhm_%s.png' % type), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_matched_decam_vs_bokmos_psf_fwhm(obj, type='psf'):
'''using matched sample, plot decam psf_fwhm vs. bokmos psf_fwhm
obj['m_decam'] is DECaLS() object'''
#indices
    index= np.all((indices_for_type(obj,inst='m_decam',type=type),\
        indices_for_type(obj,inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
text_args= dict(verticalalignment='center',horizontalalignment='left',fontsize=10)
#plot
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].scatter(obj['m_bokmos'].data[band+'_psf_fwhm'][index], obj['m_decam'].data[band+'_psf_fwhm'][index],\
edgecolor='b',c='none',lw=1.)
ax[cnt].text(0.05,0.95,band,transform=ax[cnt].transAxes,**text_args)
#finish
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].set_xlim(0,3)
ax[cnt].set_ylim(0,3)
xlab=ax[1].set_xlabel('PSF_FWHM (bokmos)', **laba)
ylab=ax[0].set_ylabel('PSF_FWHM (decam)', **laba)
ti= plt.suptitle('%s Objects, Matched' % type.upper())
plt.savefig(os.path.join(get_outdir('bmd'),'decam_vs_bokmos_psf_fwhm_%s.png' % type), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_confusion_matrix(cm,ticknames, addname=''):
'''cm -- NxN array containing the Confusion Matrix values
ticknames -- list of strings of length == N, column and row names for cm plot'''
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues,vmin=0,vmax=1)
cbar=plt.colorbar()
plt.xticks(range(len(ticknames)), ticknames)
plt.yticks(range(len(ticknames)), ticknames)
ylab=plt.ylabel('True (DECaLS)')
xlab=plt.xlabel('Predicted (BASS/MzLS)')
for row in range(len(ticknames)):
for col in range(len(ticknames)):
if np.isnan(cm[row,col]):
plt.text(col,row,'n/a',va='center',ha='center')
elif cm[row,col] > 0.5:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='yellow')
else:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='black')
plt.savefig(os.path.join(get_outdir('bmd'),'confusion_matrix%s.png' % addname), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def create_confusion_matrix(obj):
'''compares MATCHED decam (truth) to bokmos (prediction)
    return 5x5 confusion matrix and column/row names
    obj['m_decam'] is DECaLS object'''
cm=np.zeros((5,5))-1
types=['PSF','SIMP','EXP','DEV','COMP']
for i_dec,dec_type in enumerate(types):
ind= np.where(obj['m_decam'].data['type'] == dec_type)[0]
for i_bass,bass_type in enumerate(types):
n_bass= np.where(obj['m_bokmos'].data['type'][ind] == bass_type)[0].size
if ind.size > 0: cm[i_dec,i_bass]= float(n_bass)/ind.size #ind.size is constant for each loop over bass_types
else: cm[i_dec,i_bass]= np.nan
return cm,types
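# Each row of cm corresponds to a true DECaLS type and holds the fraction of those matched
# objects that BASS/MzLS assigns to each type, so every populated row sums to 1; rows with
# no objects of that DECaLS type are left as NaN.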
def plot_matched_separation_hist(d12):
'''d12 is array of distances in degress between matched objects'''
#pixscale to convert d12 into N pixels
pixscale=dict(decam=0.25,bokmos=0.45)
#sns.set_style('ticks',{"axes.facecolor": ".97"})
#sns.set_palette('colorblind')
#setup plot
fig,ax=plt.subplots()
#plot
ax.hist(d12*3600,bins=50,color='b',align='mid')
ax2 = ax.twiny()
ax2.hist(d12*3600./pixscale['bokmos'],bins=50,color='g',align='mid',visible=False)
xlab= ax.set_xlabel("arcsec")
xlab= ax2.set_xlabel("pixels [BASS]")
ylab= ax.set_ylabel("Matched")
#save
#sns.despine()
plt.savefig(os.path.join(get_outdir('bmd'),"separation_hist.png"), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_psf_hists(decam,bokmos, zoom=False):
'''decam,bokmos are DECaLS() objects matched to decam ra,dec'''
#divide into samples of 0.25 mag bins, store q50 of each
width=0.25 #in mag
low_vals= np.arange(20.,26.,width)
med={}
for b in ['g','r','z']: med[b]=np.zeros(low_vals.size)-100
for i,low in enumerate(low_vals):
for band in ['g','r','z']:
ind= np.all((low <= decam[band+'mag'],decam[band+'mag'] < low+width),axis=0)
if np.where(ind)[0].size > 0:
med[band][i]= np.percentile(bokmos[band+'mag'][ind] - decam[band+'mag'][ind],q=50)
else:
med[band][i]= np.nan
#make plot
#set seaborn panel styles
#sns.set_style('ticks',{"axes.facecolor": ".97"})
#sns.set_palette('colorblind')
#setup plot
fig,ax=plt.subplots(1,3,figsize=(9,3)) #,sharey=True)
plt.subplots_adjust(wspace=0.5)
#plot
for cnt,band in zip(range(3),['r','g','z']):
ax[cnt].scatter(low_vals, med[band],\
edgecolor='b',c='none',lw=2.) #,label=m_type.split('_')[-1])
xlab=ax[cnt].set_xlabel('bins of %s (decam)' % band, **laba)
ylab=ax[cnt].set_ylabel('q50[%s bokmos - decam]' % band, **laba)
if zoom: ax[cnt].set_ylim(-0.25,0.25)
# sup=plt.suptitle('decam with matching bokmos',**laba)
#save
#sns.despine()
if zoom: name="median_color_diff_zoom.png"
else: name="median_color_diff.png"
plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
##########
#funcs for flux diff / sqrt(inv var + inv var)
def n_gt_3_sigma(sample, low=-8.,hi=8.):
    '''for a sample that should be distributed as N(mean=0,stddev=1), returns mask for the N that are greater than 3 sigma
low,hi -- minimum and maximum sample values that will be considered'''
i_left= np.all((sample >= low,sample <= -3.),axis=0)
i_right= np.all((sample <= hi,sample>=3),axis=0)
#assert i_left and i_right are mutually exclusive
false_arr= np.all((i_left,i_right),axis=0) #should be array of Falses
assert( np.all(false_arr == False) ) #should be np.all([True,True,...]) which evaluates to True
return np.any((i_left,i_right),axis=0)
def gauss_stats(n_samples=10000):
'''returns mean,std,q25, frac outliers > 3 sigma for n_samples drawn from unit gaussian N(0,1)'''
G= sp_stats.norm(0,1)
mean=std=q25=perc_out=0.
for i in range(10): #draw 10 times, take avg of the 10 measurements of each statistic
draws= G.rvs(n_samples)
mean+= np.mean(draws)
std+= np.std(draws)
q25+= np.percentile(draws,q=25)
        perc_out+= 2*G.cdf(-3)*100 #HACK: adds the same analytic number each time
mean/= 10.
std/= 10.
q25/= 10.
perc_out/= 10.
tol=1e-1
assert(abs(mean) <= tol)
assert(abs(std-1.) <= tol)
return mean,std,q25,perc_out
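# For reference, the averaged draws above should land near the analytic N(0,1) values:
# mean ~ 0, std ~ 1, q25 ~ -0.674, and perc_out = 2*Phi(-3)*100 ~ 0.27% beyond 3 sigma.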
def sample_gauss_stats(sample, low=-20,hi=20):
'''return dictionary of stats about the data and stats for a sample that is unit gaussian distributed
low,hi -- minimum and maximum sample values that will be considered'''
a=dict(sample={},gauss={})
#vals for unit gaussian distributed data
a['gauss']['mean'],a['gauss']['std'],a['gauss']['q25'],a['gauss']['perc_out']= gauss_stats(n_samples=sample.size)
#vals for actual sample
a['sample']['mean'],a['sample']['std'],a['sample']['q25'],a['sample']['q75']= \
np.mean(sample),np.std(sample),np.percentile(sample,q=25),np.percentile(sample,q=75)
i_outliers= n_gt_3_sigma(sample, low=low,hi=hi)
a['sample']['perc_out']= sample[i_outliers].size/float(sample.size)*100.
return a
text_args= dict(verticalalignment='center',fontsize=8)
def plot_dflux_chisq(b,type='psf', low=-8.,hi=8.,addname=''):
#join indices b/c matched
i_type= np.all((indices_for_type(b, inst='m_decam',type=type),\
indices_for_type(b, inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#get flux diff for each band
hist= dict(g=0,r=0,z=0)
binc= dict(g=0,r=0,z=0)
stats=dict(g=0,r=0,z=0)
#chi
sample,mag={},{}
for band in ['g','r','z']:
sample[band]= (b['m_decam'].data[band+'flux'][i_type]-b['m_bokmos'].data[band+'flux'][i_type])/np.sqrt(\
np.power(b['m_decam'].data[band+'flux_ivar'][i_type],-1)+np.power(b['m_bokmos'].data[band+'flux_ivar'][i_type],-1))
mag[band]= 22.5-2.5*np.log10(b['m_decam'].data[band+'flux'][i_type])
#loop over mag bins, one 3 panel for each mag bin
for b_low,b_hi in zip([18,19,20,21,22,23],[19,20,21,22,23,24]):
#plot each filter
for band in ['g','r','z']:
imag= np.all((b_low <= mag[band],mag[band] < b_hi),axis=0)
#print("len(imag)=",len(imag),"len(sample)=",len(sample),"len(sample[imag])=",len(sample[imag]))
hist[band],bins,junk= plt.hist(sample[band][imag],range=(low,hi),bins=50,normed=True)
db= (bins[1:]-bins[:-1])/2
binc[band]= bins[:-1]+db
plt.close() #b/c plt.hist above
#for drawing unit gaussian N(0,1)
G= sp_stats.norm(0,1)
xvals= np.linspace(low,hi)
#plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
for cnt,band in zip(range(3),['g','r','z']):
ax[cnt].step(binc[band],hist[band], where='mid',c='b',lw=2)
ax[cnt].plot(xvals,G.pdf(xvals))
#labels
for cnt,band in zip(range(3),['g','r','z']):
if band == 'r': xlab=ax[cnt].set_xlabel(r'%s $(F_{d}-F_{bm})/\sqrt{\sigma^2_{d}+\sigma^2_{bm}}$' % band, **laba)
else: xlab=ax[cnt].set_xlabel('%s' % band, **laba)
#xlab=ax[cnt].set_xlabel('%s' % band, **laba)
ax[cnt].set_ylim(0,0.6)
ax[cnt].set_xlim(low,hi)
ylab=ax[0].set_ylabel('PDF', **laba)
ti=ax[1].set_title("%s (%.1f <= %s < %.1f)" % (type,b_low,band,b_hi),**laba)
#put stats in suptitle
plt.savefig(os.path.join(get_outdir('bmd'),'dflux_chisq_%s_%.1f-%s-%.1f%s.png' % (type,b_low,band,b_hi,addname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
################
def plot_magRatio_vs_mag(b,type='psf',addname=''):
#join indices b/c matched
i_type= np.all((indices_for_type(b, inst='m_decam',type=type),\
indices_for_type(b, inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type
#plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
for cnt,band in zip(range(3),['g','r','z']):
magRatio= np.log10(b['m_bokmos'].data[band+'flux'][i_type])/np.log10(b['m_decam'].data[band+'flux'][i_type]) -1.
mag= 22.5-2.5*np.log10(b['m_decam'].data[band+'flux'][i_type])
ax[cnt].scatter(mag,magRatio, c='b',edgecolor='b',s=5) #,c='none',lw=2.)
#labels
for cnt,band in zip(range(3),['g','r','z']):
xlab=ax[cnt].set_xlabel('%s AB' % band, **laba)
ax[cnt].set_ylim(-0.5,0.5)
ax[cnt].set_xlim(18,26)
ylab=ax[0].set_ylabel(r'$m_{bm}/m_d - 1$', **laba)
ti=ax[1].set_title("%s" % type,**laba)
#put stats in suptitle
plt.savefig(os.path.join(get_outdir('bmd'),'magRatio_vs_mag_%s%s.png' % (type,addname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
################
text_args= dict(verticalalignment='center',fontsize=8)
def plot_N_per_deg2(obj,type='all',req_mags=[24.,23.4,22.5],addname=''):
'''image requirements grz<=24,23.4,22.5
compute number density in each bin for each band mag [18,requirement]'''
#indices for type for matched and unmatched samples
index={}
for inst in ['m_decam','u_decam','m_bokmos','u_bokmos']:
index[inst]= indices_for_type(obj, inst=inst,type=type)
bin_nd=dict(decam={},bokmos={})
for inst in ['decam','bokmos']:
bin_nd[inst]={}
for band,req in zip(['g','r','z'],req_mags):
bin_nd[inst][band]={}
bin_edges= np.linspace(18.,req,num=15)
i_m,i_u= index['m_'+inst], index['u_'+inst] #need m+u
#join m_decam,u_decam OR m_bokmos,u_bokmos and only with correct all,psf,lrg index
sample= np.ma.concatenate((obj['m_'+inst].data[band+'mag'][i_m], obj['u_'+inst].data[band+'mag'][i_u]),axis=0)
bin_nd[inst][band]['binc'],bin_nd[inst][band]['cnt'],q25,q50,q75=\
bin_up(sample,sample,bin_edges=bin_edges)
#plot
fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)
plt.subplots_adjust(wspace=0.25)
for cnt,band in zip(range(3),['g','r','z']):
for inst,color,lab in zip(['decam','bokmos'],['b','g'],['DECaLS','BASS/MzLS']):
ax[cnt].step(bin_nd[inst][band]['binc'],bin_nd[inst][band]['cnt']/obj['deg2_'+inst], where='mid',c=color,lw=2,label=lab)
#labels
for cnt,band in zip(range(3),['g','r','z']):
xlab=ax[cnt].set_xlabel('%s' % band) #, **laba)
#ax[cnt].set_ylim(0,0.6)
#ax[cnt].set_xlim(maglow,maghi)
ax[0].legend(loc='upper left', **leg_args)
ylab=ax[0].set_ylabel('counts/deg2') #, **laba)
ti=plt.suptitle("%ss" % type.upper(),**laba)
# Make space for and rotate the x-axis tick labels
fig.autofmt_xdate()
#put stats in suptitle
plt.savefig(os.path.join(get_outdir('bmd'),'n_per_deg2_%s%s.png' % (type,addname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='DECaLS simulations.')
parser.add_argument('-fn1', type=str, help='text file listing the decam tractor cats (required input)',required=True)
parser.add_argument('-fn2', type=str, help='text file listing the bokmos tractor cats to match against (required input)',required=True)
parser.add_argument('--verbose', action='store_true', help='verbose logging')
args = parser.parse_args()
# Set the debugging level
if args.verbose:
lvl = logging.DEBUG
else:
lvl = logging.INFO
logging.basicConfig(format='%(message)s', level=lvl, stream=sys.stdout)
log = logging.getLogger('__name__')
#get lists of tractor cats to compare
fns_1= read_lines(args.fn1)
fns_2= read_lines(args.fn2)
log.info('Combining tractor catalogues: %s %s', fns_1, fns_2)
#if fns_1.size == 1: fns_1,fns_2= [fns_1],[fns_2]
#object to store concatenated matched tractor cats
a=Matched_Cats()
for cnt,cat1,cat2 in zip(range(len(fns_1)),fns_1,fns_2):
data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos= match_it(cat1,cat2)
if cnt == 0:
a.initialize(data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos)
else:
a.add_d12(d12)
a.deg2_decam+= deg2_decam
a.deg2_bokmos+= deg2_bokmos
a.add_dict('m_decam', targets.data_extract(data_1,m1) )
a.add_dict('m_bokmos', targets.data_extract(data_2,m2))
a.add_dict('u_decam', targets.data_extract(data_1,m1_unm))
a.add_dict('u_bokmos', targets.data_extract(data_2,m2_unm))
#each key a.data[key] becomes DECaLS() object with grz mags,i_lrg, etc
b={}
b['d12']= a.d12
b['deg2_decam']= a.deg2_decam
b['deg2_bokmos']= a.deg2_bokmos
for match_type in a.data.keys(): b[match_type]= targets.DECaLS(a.data[match_type], w1=True)
#store N matched objects not masked before join decam,bokmos masks
m_decam_not_masked,m_bokmos_not_masked= b['m_decam'].count_not_masked(),b['m_bokmos'].count_not_masked()
#update masks for matched objects to be the join of decam and bokmos masks
mask= np.any((b['m_decam'].mask, b['m_bokmos'].mask),axis=0)
b['m_decam'].update_masks_for_everything(mask=np.any((b['m_decam'].mask, b['m_bokmos'].mask),axis=0),\
mask_wise=np.any((b['m_decam'].mask_wise, b['m_bokmos'].mask_wise),axis=0) )
b['m_bokmos'].update_masks_for_everything(mask=np.any((b['m_decam'].mask, b['m_bokmos'].mask),axis=0),\
        mask_wise= np.any((b['m_decam'].mask_wise, b['m_bokmos'].mask_wise),axis=0) )
class input_data:
    ## Value Initialization
import numpy as np
from matplotlib import pyplot as plt
import math
# input data for temperature profile calculation
rf = (4.18/2)*1E-3
dr = 1E-4 # mesh spacing
alphat = 1E-6 # alpha only contains 1/rho*Cp
dt_max = (dr ** 2) / (2 * alphat) # stability condition
dt = dt_max / 10 # time step size
sp = int(rf/dr) # no of spatial points
time_step = int(1E4) # time points
kc = 29.0 # clad conductance
kg = 58.22E-3 # conductance in gap between clad and gap
G = 427E-6 # gap
hg = kg / G # gap conductance
linear_heat_rate = 400*1E2
Qe = linear_heat_rate/(3.14*rf**2)
# Parameter for calculation of k
k = [2 for i in range(sp + 1)] # initial array of k
x = 2.0 - 1.97 # 1.97 is O/M ratio
A = 2.85 * x + 0.035
B = -0.715 * x + 0.286
space = [i for i in range(1, sp)]
# Input data for crack surface area calculation
a = 5E-6 # grain radius
R = rf-0.09E-3 # radius of the pellet
H = 14.0E-3 # height of the pellet
thickness_of_annuli = 0.5E-3
no_of_annuli = int(R / thickness_of_annuli) # The pellet is divided in n annuli
q = 35 # E3 # LHR of pin (kW/m)
Nc = q / 2.0 # No of radial cracks
    S = np.zeros(no_of_annuli + 1) # initialization of surface area
r = np.ones(no_of_annuli + 1)
r[0] = R
V = float((3.14 * (R ** 2) * H) / no_of_annuli)
temp_T = np.ones(no_of_annuli)
n = int(5E3)
s_v_gb_cummulative = 0
s_v_gb = np.zeros(n)
fc = np.ones(n)
Dfc = np.ones(n)
Q = np.ones(n)
DQ = np.ones(n)
Rf_dot = np.ones(n)
# time required for the establishment of grain bubble t_est
# n = int(1E2)
fc = np.ones(n)
Dfc = np.ones(n)
DQ = np.ones(n)
Rf_dot = np.zeros(n)
to = 1 * np.zeros(n)
tc = 1E2*np.ones(n)
rt = 1E-10*np.ones(n)
    # rt[1] = 1E-7
rt[1] = 1E-09
rc = np.ones(n)
rc[1] = 1E-7
Re_dot = np.zeros(n)
Re_dot[1] = 1E1
Rc_dot = np.zeros(n)
Rc_dot[1] = 1E1
# rt[1] = 1E-7
# rc[1] = 1E-7
a1 = 0.1
a2 = 2.2
omega = 4.1E-29
Del_S_by_S = np.zeros(n)
N_dot = np.zeros(n)
N = np.zeros(n)
    phi1 = np.zeros(n)
import __init_paths
import os
import cv2
import epdb
import datetime
import numpy as np
from easydict import EasyDict as edict
import torch
from torch.autograd import Variable
from experiments.siamese_fc.Config import Config
from experiments.siamese_fc.network import SiamNet
from lib.Tracking_Utils import *
print(torch.__version__)
def tracker_init(im, target_position, target_size, net):
'''
im: cv2.imread
target_position: [cy, cx]
    target_size: [h, w]
net: loaded net.cuda()
'''
# get the default parameters
p = Config()
# first frame
img_uint8 = im
# img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)
img_uint8 = img_uint8[:, :, ::-1]
img_double = np.double(img_uint8) # uint8 to float
# compute avg for padding
avg_chans = np.mean(img_double, axis=(0, 1))
wc_z = target_size[1] + p.context_amount * sum(target_size)
hc_z = target_size[0] + p.context_amount * sum(target_size)
s_z = np.sqrt(wc_z * hc_z)
scale_z = p.examplar_size / s_z
# crop examplar z in the first frame
z_crop = get_subwindow_tracking(img_double, target_position, p.examplar_size, round(s_z), avg_chans)
# z_crop = np.uint8(z_crop) # you need to convert it to uint8
# convert image to tensor
# z_crop_tensor = 255.0 * _to_tensor(z_crop).unsqueeze(0)
z_crop_tensor = _to_tensor(z_crop).unsqueeze(0)
d_search = (p.instance_size - p.examplar_size) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
# arbitrary scale saturation
min_s_x = p.scale_min * s_x
max_s_x = p.scale_max * s_x
# generate cosine window
if p.windowing == 'cosine':
window = np.outer(np.hanning(p.score_size * p.response_UP), np.hanning(p.score_size * p.response_UP))
elif p.windowing == 'uniform':
window = np.ones((p.score_size * p.response_UP, p.score_size * p.response_UP))
window = window / sum(sum(window))
# pyramid scale search
    scales = p.scale_step**np.linspace(-np.ceil(p.num_scale/2), np.ceil(p.num_scale/2), p.num_scale)
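    # Illustrative values only (the Config defaults are not shown in this file): with
    # num_scale=3 and scale_step=1.04, the exponents are linspace(-2, 2, 3) = [-2, 0, 2],
    # giving scale factors of roughly [0.925, 1.0, 1.082] around the current target size.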
import numpy as np
import tensorflow as tf
from timeit import default_timer as timer
import math
from nnet_data_compact import *
#-----------------------------------------------------------------------------------------------------
# Neural net
#
class nnet(object):
def __init__(self, NN_architecture):
self.dataDim = NN_architecture['dataDim']
self.loss_choice = NN_architecture['loss_choice']
self.labels = np.array(NN_architecture['labels'])
self.transfer_func = NN_architecture['transfer_func']
self.layer_type = NN_architecture['layer_type']
self.layer_dim = NN_architecture['layer_dim']
self.conv_strides = NN_architecture['conv_strides']
self.pool_dim = NN_architecture['pool_dim']
self.pool_strides = NN_architecture['pool_strides']
self.pool_type = NN_architecture['pool_type']
self.padding_type = NN_architecture['padding_type']
self.W_init = NN_architecture['W_init']
self.bias_init = NN_architecture['bias_init']
GPU_memory_fraction = NN_architecture['GPU_memory_fraction']
GPU_which = NN_architecture['GPU_which']
Tensorflow_randomSeed = NN_architecture['Tensorflow_randomSeed']
self.dtype = NN_architecture['dtype']
self.depth = len(self.layer_dim)
# check if valid
self._is_params_valid()
# reset graph
tf.reset_default_graph()
if Tensorflow_randomSeed is not None:
tf.set_random_seed(Tensorflow_randomSeed)
# create graph, either using CPU or 1 GPU
if GPU_which is not None:
with tf.device('/device:GPU:' + str(GPU_which)):
self._create_graph()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=GPU_memory_fraction)
config = tf.ConfigProto(gpu_options = gpu_options,\
allow_soft_placement=True, log_device_placement=True)
else:
self._create_graph()
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True, \
device_count = {'GPU': 0})
# initialize tf variables & launch the session
init = tf.global_variables_initializer()
self.sess = tf.Session(config=config)
self.sess.run(init)
# to save model
self.saver = tf.train.Saver()
#--------------------------------------------------------------------------
# Check validity
#
def _is_params_valid(self):
for layer in range(2, self.depth):
if self._layer_type(layer)=='conv' and self._layer_type(layer-1)=='fc':
raise ValueError('Invalid params: all conv layers first, then all fc layers!')
if self.loss_choice=='cross entropy':
if len(self.labels)!=self._output_size():
raise ValueError('Invalid params: with logit loss, output dimension = number of classes!')
if self.labels.dtype!=np.int32:
raise ValueError('Invalid params: with logit loss, labels must be np.int32!')
if np.setdiff1d(self.labels, np.array(range(len(self.labels)), dtype=np.int32)).size>0:
raise ValueError('Invalid params: with logit loss, labels must be [0,1,...,number of classes-1]!')
if self.loss_choice=='squared':
if self._output_size()>1:
raise ValueError('Invalid params: with squared loss, output dimension = 1!')
if self.labels.dtype!=np.float32:
raise ValueError('Invalid params: with squared, labels must be np.float32!')
#--------------------------------------------------------------------------
# Create the graph
#
def _create_graph(self):
self.x = tf.placeholder(self.dtype, [None, self.dataDim[0], self.dataDim[1], self.dataDim[2]])
self.y = self._label_placeholder()
self.learning_rate = tf.placeholder(tf.float32)
self.is_training = tf.placeholder(tf.bool)
self._create_nnet()
self._create_loss_optimizer()
def _label_placeholder(self):
if self.loss_choice=='squared':
return tf.placeholder(tf.float32, [None, 1])
elif self.loss_choice=='cross entropy':
return tf.placeholder(tf.int32, [None, 1])
#--------------------------------------------------------------------------
# Create the neural net graph
#
def _create_nnet(self):
x = self.x
W = []
b = []
preact = []
for layer in range(1,self.depth+1):
# Reshape if needed
if (layer==1 and self._layer_type(layer)=='fc') or \
(layer>1 and self._layer_type(layer)=='fc' and self._layer_type(layer-1)=='conv'):
_, _, channel_in, _, _ = self._layer_dim(layer)
x = tf.reshape(x, (-1, channel_in))
# Applying weight
W.append(self._make_weight(layer))
b.append(tf.Variable(self._bias_init(layer)))
if layer>1:
_, _, _, _, dim = self._layer_dim(layer)
mul = 1.0/dim
else:
mul = 1.0
if self._layer_type(layer)=='conv':
x = tf.nn.conv2d(x, W[layer-1], strides=self._conv_strides(layer), padding=self.padding_type)*mul + b[layer-1]
else:
x = tf.matmul(x, W[layer-1])*mul + b[layer-1]
preact.append(x)
# Nonlinearity
if layer < self.depth:
x = self._transfer(x, layer)
# Pooling
if self._layer_type(layer)=='conv' and self._is_pool(layer):
x = tf.nn.pool(x, window_shape=self._pool_dim(layer), pooling_type=self._pool_type(layer),
strides=self._pool_strides(layer), padding=self.padding_type, data_format='NHWC')
self.yhat = self._reshape_output(x)
self.W = W
self.b = b
self.preact = preact
def _reshape_output(self, yhat):
if self.loss_choice=='squared':
return tf.reshape(yhat, (-1, 1))
elif self.loss_choice=='cross entropy':
return yhat
#--------------------------------------------------------------------------
# Return the dimension for the 'layer'-th layer
#
def _layer_type(self, layer):
return self.layer_type[layer-1]
def _layer_dim(self, layer):
if self._layer_type(layer)=='conv':
temp = self.layer_dim[layer-1]
width, height, channel_out = temp[0], temp[1], temp[2]
if layer==1:
channel_in = self.dataDim[2]
else:
channel_in = self.layer_dim[layer-2][2]
dim = channel_in*width*height
else:
if layer==1:
                channel_in = np.prod(self.dataDim)
import pandas as pd
import numpy as np
import os
import csv
import pickle
import h5sparse
import scipy.sparse as ss
from itertools import compress
def write_gvm(gvm, output_fname, fmt='h5'):
'''
Writes a gvm to a .csv or .h5 file.
Parameters:
gvm (h5sparse): gvm to write
output_fname (str): file path to save to
fmt: format to save as. either 'csv' or 'h5'
Returns:
None
'''
if fmt == 'csv':
temp = pd.DataFrame(gvm['gvm'].todense())
if ('idx' not in gvm) or np.all(np.array(gvm['idx'] == None)):
gvm['idx'] = np.array(range(gvm['gvm'].shape[0]))
if ('col' not in gvm) or np.all(np.array(gvm['col'] == None)):
gvm['col'] = np.array(range(gvm['gvm'].shape[1]))
temp.index = gvm['idx']
temp.columns = gvm['col']
gvm = temp
gvm = gvm.replace(to_replace=False, value='')
gvm.to_csv(output_fname, sep='\t')
elif fmt == 'h5':
if type(gvm) == pd.DataFrame:
gvm = {'gvm': ss.csc_matrix(gvm.values),
'idx': gvm.index.values,
'col': gvm.columns.values}
if ('idx' not in gvm) or np.all(np.array(gvm['idx'] == None)):
gvm['idx'] = np.array(range(gvm['gvm'].shape[0]))
if ('col' not in gvm) or np.all(np.array(gvm['col'] == None)):
gvm['col'] = np.array(range(gvm['gvm'].shape[1]))
if not ( np.all([isinstance(i, int) for i in gvm['idx']]) or
np.all([isinstance(i, float) for i in gvm['idx']]) ):
try: gvm['idx'] = np.array([i.encode('utf-8','ignore') for i in gvm['idx']], dtype=np.string_)
            except AttributeError: gvm['idx'] = np.array(gvm['idx'], dtype=np.string_)
import os
import sys
import math
import itertools
import warnings
import json
import time
import psutil
import socket
import shelve
import threading
import traceback
import inspect
import logging
from multiprocessing import Pool, Value, Manager, Queue
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from io import StringIO
from types import SimpleNamespace
from typing import Tuple, Dict, List, Any, Union, TypeVar, Type, Sequence
from datetime import datetime, timedelta
from dateutil import tz
from functools import partial
from dataclasses import dataclass, field
from collections import OrderedDict
import numpy as np
import pandas as pd
import h5py
import humanize
from scipy.stats import pearsonr
from sklearn.utils import shuffle, resample
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (
precision_recall_fscore_support,
f1_score,
accuracy_score,
confusion_matrix,
confusion_matrix,
)
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.dummy import DummyClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
import fire
from utils.libutils import swfe # pylint: disable-msg=E0611
np.set_printoptions(precision=4, linewidth=120)
pd.set_option("precision", 4)
pd.set_option("display.width", 300)
@dataclass(frozen=True)
class Results:
experiment: str
timestamp: int
class_: str
seed: int
foldoutter: int
foldinner: int
classifier: str
classifiercfg: int
classifiercfgs: int
f1binp: float
f1binn: float
f1micro: float
f1macro: float
f1weighted: float
f1samples: float
precision: float
recall: float
accuracy: float
accuracy2: float
timeinnertrain: float
timeouttertrain: float
positiveclasses: str
negativeclasses: str
features: str
nfeaturesvar: int
nfeaturestotal: int
ynegfinaltrain: int
yposfinaltrain: int
ynegfinaltest: int
yposfinaltest: int
yposfinalpred: int
ynegfinalpred: int
yfinaltrain: int
yfinaltest: int
yfinalpred: int
postrainsamples: int
negtrainsamples: int
postestsamples: int
negtestsamples: int
tp: int
tn: int
fp: int
fn: int
bestfeatureidx: int
bestvariableidx: int
    featurerank: str  # in descending order, holds the feature IDX
    rankfeature: str  # in feature order, holds the RANK of each feature
def __post_init__(self):
pass
P = TypeVar("T")
def humantime(*args, **kwargs):
"""
Return time (duration) in human readable format.
>>> humantime(seconds=3411)
56 minutes, 51 seconds
>>> humantime(seconds=800000)
9 days, 6 hours, 13 minutes, 20 seconds
"""
secs = float(timedelta(*args, **kwargs).total_seconds())
units = [("day", 86400), ("hour", 3600), ("minute", 60), ("second", 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
# n = secs if secs != int(secs) else int(secs)
n = int(secs) if secs != int(secs) else int(secs)
parts.append("%s %s%s" % (n, unit, "" if n == 1 else "s"))
return ", ".join(parts)
def get_md5(params):
import hashlib
experiment = f'nr{params.nrounds}_nf{params.nfolds}_w{params.windowsize}_s{params.stepsize}'.encode('utf-8')
return hashlib.md5(experiment).hexdigest()
def loggerthread(q):
"""
Main process thread receiver (handler) for log records.
"""
while True:
record = q.get()
if record is None:
break
logger = logging.getLogger(record.name)
logger.handle(record)
def one_hot(array, num_classes):
return np.squeeze(np.eye(num_classes)[array.reshape(-1)])
def one_hot_inverse(array):
return np.argmax(array, axis=1)
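# Example of the two helpers above:
#   one_hot(np.array([2, 0]), 3)  -> [[0., 0., 1.], [1., 0., 0.]]
#   one_hot_inverse(np.array([[0., 0., 1.], [1., 0., 0.]]))  -> [2, 0]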
def readdir(path) -> Dict[str, List[Tuple[np.ndarray, str]]]:
"""
Read the CSV content of a directory into a list of numpy arrays.
The return type is actually a dict with the "class" as key.
"""
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
r = []
class_ = path[-1]
with os.scandir(path) as it:
for entry in it:
if not entry.name.startswith(".") and entry.is_file():
frame = pd.read_csv(entry, sep=",", header=0, names=columns)
# str timestamp to float
frame["timestamp"] = np.array(
[
pd.to_datetime(d).to_pydatetime().timestamp()
for d in frame.loc[:, "timestamp"]
],
dtype=np.float64,
)
# cast int to float
frame["class"] = frame["class"].astype(np.float64)
# remember that scikit has problems with float64
array = frame.loc[:, columns].to_numpy()
r.append((array, entry.name))
rd = {}
rd[class_] = r
return rd
def get_logging_config():
return {
"version": 1,
"formatters": {
"detailed": {
"class": "logging.Formatter",
"format": (
"%(asctime)s %(name)-12s %(levelname)-8s %(processName)-10s "
"%(module)-12s %(funcName)-15s %(message)s"
),
}
},
"handlers": {
"console": {"class": "logging.StreamHandler", "level": "INFO",},
"file": {
"class": "logging.FileHandler",
"filename": "experiment1a.log",
"mode": "w",
"formatter": "detailed",
},
"errors": {
"class": "logging.FileHandler",
"filename": "experiment1a_errors.log",
"mode": "w",
"level": "ERROR",
"formatter": "detailed",
},
},
"root": {"level": "DEBUG", "handlers": ["console", "file", "errors"]},
}
def readdirparallel(path):
"""
Read all CSV content of a directory in parallel.
"""
njobs = psutil.cpu_count()
results = []
# with Pool(processes=njobs) as p:
with ThreadPoolExecutor(max_workers=njobs) as p:
# with ProcessPoolExecutor(max_workers=njobs) as p:
# results = p.starmap(
results = p.map(
readdir, [os.path.join(path, str(c)) for c in [0, 1, 2, 3, 4, 5, 6, 7, 8]],
)
return results
def csv2bin(*args, **kwargs) -> None:
"""
Read 3W dataset CSV files and save in a single numpy binary file.
"""
raise Exception("not implemented")
def csv2hdf(*args, **kwargs) -> None:
"""
Read 3W dataset CSV files and save in a single HDF5 file.
"""
path: str = kwargs.get("path")
useclasses = [0, 1, 2, 3, 4, 5, 6, 7, 8]
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
print("read CSV and save HDF5 ...", end="", flush=True)
t0 = time.time()
with h5py.File("datasets.h5", "w") as f:
for c in useclasses:
grp = f.create_group(f"/{c}")
with os.scandir(os.path.join(path, str(c))) as it:
for entry in it:
if not entry.name.startswith(".") and entry.is_file() and 'WELL' in entry.name:
frame = pd.read_csv(entry, sep=",", header=0, names=columns)
# str timestamp to float
frame["timestamp"] = np.array(
[
pd.to_datetime(d).to_pydatetime().timestamp()
for d in frame.loc[:, "timestamp"]
],
dtype=np.float64,
)
# cast int to float
frame["class"] = frame["class"].astype(np.float64)
# remember that scikit has problems with float64
array = frame.loc[:, columns].to_numpy()
# entire dataset is float, incluinding timestamp & class labels
grp.create_dataset(
f"{entry.name}", data=array, dtype=np.float64
)
print(f"finished in {time.time()-t0:.1}s.")
def csv2hdfpar(*args, **kwargs) -> None:
"""
Read 3W dataset CSV files and save in a single HDF5 file.
"""
path: str = kwargs.get("path")
print("read CSV and save HDF5 ...", end="", flush=True)
t0 = time.time()
with h5py.File("datasets.h5", "w") as f:
datalist = readdirparallel(path)
for dd in datalist:
for key in dd:
grp = f.create_group(f"/{key}")
for (array, name) in dd[key]:
grp.create_dataset(f"{name}", data=array, dtype=np.float64)
print(
f"finished {humanize.naturalsize(os.stat('datasets.h5').st_size)} "
f"in {humantime(seconds=time.time()-t0)}."
)
def cleandataset(*args, **kwargs) -> None:
"""
Read the the single file (with whole dataset), remove NaN and save 1 file per class.
"""
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
print("Reading dataset...")
with h5py.File("datasets.h5", "r") as f:
for c in range(0, 9):
print(f"Processing class {c}")
k = f"/{c}"
soma = 0
for s in f[k]:
n = f[k][s].shape[0]
soma = soma + n
data = np.zeros([soma, 10], dtype=np.float64)
i1 = 0
# manual concatenation
for s in f[k]:
i2 = i1 + f[k][s].shape[0]
data[i1:i2, :] = f[k][s][()]
i1 = i2
frame = pd.DataFrame(data=data, columns=columns)
for col in ["P-PDG", "P-TPT", "T-TPT", "P-MON-CKP", "T-JUS-CKP"]:
frame[col].fillna(method="ffill", axis=0, inplace=True)
fp = np.memmap(
f"datasets_clean_{c}.dat", dtype="float64", mode="w+", shape=frame.shape
)
fp[:, ...] = frame.to_numpy()
del fp
print("finished")
def cleandataseth5(*args, **kwargs) -> None:
"""
Read the the single file (with whole dataset), remove NaN and save 1 file per class.
"""
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
logger = logging.getLogger(f"clean")
formatter = logging.Formatter(
"%(asctime)s %(name)-12s %(levelname)-8s %(lineno)-5d %(funcName)-10s %(module)-10s %(message)s"
)
fh = logging.FileHandler(f"experiments_clean.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
usecols = [1, 2, 3, 4, 5]
good = [columns[i] for i, _ in enumerate(columns) if i in usecols]
with h5py.File("datasets.h5", "r") as f:
logger.debug("reading input file")
with h5py.File("datasets_clean.h5", "w") as fc:
logger.debug("created output file")
for c in range(0, 9):
grp = fc.create_group(f"/{c}")
logger.debug(f"Processing class {c}")
k = f"/{c}"
for s in f[k]:
if s[0] != "W":
continue
logger.debug(f"{c} {s}")
data = f[k][s][()]
frame = pd.DataFrame(data=data, columns=columns)
frame.dropna(inplace=True, how="any", subset=good, axis=0)
array = frame.to_numpy()
n = check_nan(array[:, [1, 2, 3, 4, 5]], logger)
if n > 0:
logger.info(f"{c} {s} dataset contains NaN")
grp.create_dataset(f"{s}", data=array, dtype=np.float64)
return None
def check_nan(array, logger) -> int:
    """
    Check the array for inf, NaN, or null values and return the number of offending entries.
    """
logger.debug("*" * 50)
n = 0
test = array[array > np.finfo(np.float32).max]
logger.debug(f"test for numpy float32overflow {test.shape}")
n = n + test.shape[0]
test = array[~np.isfinite(array)]
logger.debug(f"test for numpy non finite {test.shape}")
n = n + test.shape[0]
test = array[np.isinf(array)]
logger.debug(f"test for numpy inf {test.shape}")
n = n + test.shape[0]
test = array[np.isnan(array)]
logger.debug(f"test for numpy NaN {test.shape}")
n = n + test.shape[0]
test = array[pd.isna(array)]
logger.debug(f"test for pandas NA {test.shape}")
n = n + test.shape[0]
test = array[pd.isnull(array)]
logger.debug(f"test for pandas isnull {test.shape}")
n = n + test.shape[0]
logger.debug("*" * 50)
return n
def get_config_combination_list(settings, default=None) -> List:
"""
Given a list of hyperparameters return all combinations of that.
"""
keys = list(settings)
r = []
for values in itertools.product(*map(settings.get, keys)):
d = dict(zip(keys, values))
if default is not None:
d.update(default)
r.append(d)
return r
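# Illustration (not from the original source) of how the hyperparameter grid is
# expanded into a list of dicts; the optional `default` dict is merged into every
# combination:
#
#   get_config_combination_list({"a": [1, 2], "b": ["x"]}, {"seed": 0})
#   # -> [{'a': 1, 'b': 'x', 'seed': 0}, {'a': 2, 'b': 'x', 'seed': 0}]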
def get_classifiers(clflist, n_jobs=1, default=False) -> Dict:
"""
Classifiers and combinations of hyperparameters.
"""
classifiers = OrderedDict()
classifiers["ADA"] = {
"config": get_config_combination_list(
{
"n_estimators": [5, 25, 50, 75, 100, 250, 500, 1000],
"algorithm": ["SAMME", "SAMME.R"],
},
{
"random_state": None
},
),
"default": {"random_state": None},
"model": AdaBoostClassifier,
}
classifiers["DT"] = {
"config": get_config_combination_list(
{
"criterion": ["gini", "entropy"],
"splitter": ["best", "random"],
"max_depth": [None, 5, 10, 50],
"min_samples_split": [2, 5, 10],
},
{"random_state": None},
),
"default": {"random_state": None},
"model": DecisionTreeClassifier,
}
classifiers["GBOOST"] = {
"config": get_config_combination_list(
{
"n_estimators": [50, 100, 250],
"min_samples_split": [2, 5, 10],
"max_depth": [5, 10, 50],
},
{"random_state": None},
),
"default": {"random_state": None},
"model": GradientBoostingClassifier,
}
classifiers["1NN"] = {
"config": [],
"default": {
"n_neighbors": 1,
"weights": "distance",
"algorithm": "auto",
"leaf_size": 30,
"p": 2,
"n_jobs": 1,
},
"model": KNeighborsClassifier,
}
classifiers["5NN"] = {
"config": [],
"default": {
"n_neighbors": 5,
"weights": "distance",
"algorithm": "auto",
"leaf_size": 30,
"p": 2,
"n_jobs": 1,
},
"model": KNeighborsClassifier,
}
classifiers["3NN"] = {
"config": [],
"default": {
"n_neighbors": 3,
"weights": "distance",
"algorithm": "auto",
"leaf_size": 30,
"p": 2,
"n_jobs": 1,
},
"model": KNeighborsClassifier,
}
classifiers["KNN"] = {
"config": get_config_combination_list(
{
"n_neighbors": [1, 3, 5, 7, 10, 15],
},
{"n_jobs": n_jobs}
),
"default": {"n_jobs": n_jobs},
"model": KNeighborsClassifier,
}
classifiers["RF"] = {
"config": get_config_combination_list(
{
'bootstrap': [True],
"criterion": ["gini"],
'max_features': ['auto'],
"max_depth": [None, 5, 10, 50],
"min_samples_split": [2],
'min_samples_leaf': [1],
"n_estimators": [10, 25, 50, 100, 500],
},
{"n_jobs": n_jobs, "random_state": None},
),
"default": {"random_state": None},
"model": RandomForestClassifier,
}
classifiers["SVM"] = {
"config": get_config_combination_list({
#"kernel": ["linear", "poly", "rbf", "sigmoid", "precomputed"],
"kernel": ["linear", "poly", "rbf", "sigmoid"],
#"gamma": ["scale", "auto"],
"gamma": [0.001, 0.01, 0.1, 1.0],
#"C": [1.0, 2.0],
"C": [1.0],
}),
"default": {},
"model": SVC,
}
classifiers["GNB"] = {
"config": [],
"default": {},
"model": "GaussianNB",
}
classifiers["LDA"] = {
"config": [],
"default": {},
"model": "LinearDiscriminantAnalysis",
}
classifiers["QDA"] = {
"config": [],
"default": {},
"model": "QuadraticDiscriminantAnalysis"
}
classifiers["MLP"] = {
"config": get_config_combination_list({
"hidden_layer_sizes": [128, 256, 512],
"solver": ["lbfgs", "sgd", "adam"],
"activation": ["relu"],
"max_iter": [500],
"shuffle": [True],
"momentum": [0.9],
"power_t": [0.5],
"learning_rate": ["constant"],
"batch_size": ["auto"],
"alpha": [0.0001],
}),
"default": {"random_state": None},
"model": MLPClassifier,
}
classifiers["ZERORULE"] = {
"config": get_config_combination_list(
{
"strategy": [
"constant"
],
},
{"random_state": None},
),
"default": {"strategy": "most_frequent", "random_state": None},
"model": DummyClassifier,
}
if isinstance(clflist, str):
if not clflist or clflist.lower() != "all":
clflist = clflist.split(",")
elif isinstance(clflist, tuple):
clflist = list(clflist)
if default:
for c in classifiers.keys():
"""
if c[:5] != "DUMMY":
classifiers[c]['config'] = {}
else:
del classifiers[c]
"""
classifiers[c]["config"] = {}
return classifiers
else:
ret = {}
for c in clflist:
if c in classifiers.keys():
ret[c] = classifiers[c]
return ret
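def _example_get_classifiers():
    """
    Usage sketch, not part of the original module: request a comma-separated subset of
    the classifier table and inspect how many hyperparameter combinations each entry
    will be evaluated with.
    """
    clfs = get_classifiers("RF,SVM", n_jobs=4)
    for name, spec in clfs.items():
        print(name, spec["model"], len(spec["config"]), "combinations")
    return clfs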
def _split_data(n: int, folds: int) -> Sequence[Tuple[int, int]]:
"""
Return list of tuples with array index for each fold.
"""
raise Exception("depends on which experiment")
def _read_and_split_h5(fold: int, params: P) -> Tuple:
"""
    HDF5 files offer at least a couple of advantages:
    1 - reading is faster than CSV
    2 - you don't have to read the whole dataset to get its size (shape)
H5PY fancy indexing is very slow.
https://github.com/h5py/h5py/issues/413
"""
raise Exception("depends on which experiment")
def _read_and_split_bin(fold: int, params: P) -> Tuple:
"""
    HDF5 files offer at least a couple of advantages:
    1 - reading is faster than CSV
    2 - you don't have to read the whole dataset to get its size (shape)
Numpy needs to know 'shape' beforehand.
https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
"""
raise Exception("depends on which experiment")
def train_test_binary(
classif, xtrain, ytrain, xtest, ytest
) -> Tuple[Tuple, Tuple, Tuple, Tuple]:
"""
Execute training and testing (binary) and return metrics (F1, Accuracy, CM)
"""
p = 0.0
r = 0.0
f1binp, f1binn = 0.0, 0.0
f1mic = 0.0
f1mac = 0.0
f1weigh = 0.0
f1sam = 0.0
acc = 0.0
excp = []
excptb = []
ypred = np.full([xtrain.shape[0]], 0, dtype="int")
ynpred = 0
yppred = 0
tn = 0
tp = 0
fp = 0
fn = 0
trt1 = 0.0
try:
trt0 = time.time()
classif.fit(xtrain, ytrain)
trt1 = time.time() - trt0
try:
ypred = classif.predict(xtest)
yppred = np.sum(ypred)
ynpred = ytest.shape[0] - yppred
try:
p, r, f1binp, _ = precision_recall_fscore_support(
ytest, ypred, average="binary", pos_label=1,
)
_, _, f1binn, _ = precision_recall_fscore_support(
ytest, ypred, average="binary", pos_label=0,
)
except Exception as ef1bin:
excp.append(ef1bin)
try:
f1mic = f1_score(ytest, ypred, average="micro")
except Exception as e2:
excp.append(e2)
try:
f1mac = f1_score(ytest, ypred, average="macro")
except Exception as e3:
excp.append(e3)
try:
f1weigh = f1_score(ytest, ypred, average="weighted")
except Exception as e4:
excp.append(e4)
try:
acc = accuracy_score(ytest, ypred)
except Exception as e_acc:
excp.append(e_acc)
try:
tn, fp, fn, tp = confusion_matrix(
ytest, ypred, labels=[0, 1], sample_weight=None, normalize=None,
).ravel()
except Exception as ecm:
excp.append(ecm)
except Exception as e_pred:
excp.append(e_pred)
raise e_pred
except Exception as efit:
excp.append(efit)
einfo = sys.exc_info()
excptb.append(einfo[2])
raise efit
return (
(ypred, ynpred, yppred, trt1),
(f1binp, f1binn, f1mic, f1mac, f1weigh, f1sam, p, r, acc),
(tn, fp, fn, tp),
tuple(excp),
)
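def _example_train_test_binary():
    """
    Usage sketch, not part of the original module: run train_test_binary() with a plain
    scikit-learn estimator on random toy arrays. Labels are built with
    vertical_split_bin() (defined just below): negative rows -> 0, positive rows -> 1.
    """
    rng = np.random.RandomState(0)
    xtrain, ytrain = vertical_split_bin(rng.rand(50, 4), rng.rand(50, 4) + 1.0)
    xtest, ytest = vertical_split_bin(rng.rand(20, 4), rng.rand(20, 4) + 1.0)
    clf = DecisionTreeClassifier(random_state=0)
    (ypred, _, _, fit_seconds), scores, (tn, fp, fn, tp), errors = train_test_binary(
        clf, xtrain, ytrain, xtest, ytest
    )
    print("accuracy", scores[-1], "confusion", (tn, fp, fn, tp))
    return scores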
def vertical_split_bin(negative, positive):
x = np.concatenate([negative, positive], axis=0).astype(np.float32)
y = np.concatenate(
[
np.zeros(negative.shape[0], dtype=np.int32),
np.ones(positive.shape[0], dtype=np.int32),
],
axis=0,
)
return x, y
def horizontal_split_file(fold, nfolds, files, seed):
count = 0
samples = []
sessions = []
for s in files:
if s[0] != "W":
continue
n = files[s].shape[0]
count += 1
samples.append(n)
sessions.append(s)
samples = np.array(samples)
nf = int(count / nfolds)
    testfidx = np.reshape(np.arange(nf * nfolds), (nfolds, -1)).T  # one column of session indices per fold
testsize = sum(samples[testfidx[:, fold]])
test = np.zeros((testsize, 10), dtype=np.float64)
stest = [sessions[i] for i in testfidx[:, fold]]
i1 = 0
i2 = 0
for s in stest:
i2 = i1 + files[s].shape[0]
test[i1:i2, :] = files[s][()]
i1 = i2
print(f"fold {fold} ate {i2}")
nstp = 0
for s in sessions:
if s in stest:
continue
nstp += files[s].shape[0]
train = np.zeros((nstp, 10), dtype=np.float64)
i1 = 0
i2 = 0
for k, s in enumerate(sessions):
if s in stest:
continue
n = files[s].shape[0]
i2 = i1 + n
train[i1:i2, :] = files[s][()]
i1 = i2
return train, test
def horizontal_split_well(fold, nfolds, file, seed=None):
wellstest = [1, 2, 4, 5, 7]
welltest = wellstest[fold]
count = 0
wells = {}
stest = []
for s in file:
if s[0] != "W":
continue
n = file[s].shape[0]
count += 1
welli = int(str(s[6:10]))
if welli not in wells:
wells[welli] = 0
wells[welli] += n
if welli == welltest:
stest.append(s)
if wellstest[fold] in wells:
ntest = wells[wellstest[fold]]
test = np.zeros((ntest, 10), dtype=np.float64)
i1 = 0
i2 = 0
for s in stest:
if s[0] != "W":
continue
i2 = i1 + file[s].shape[0]
test[i1:i2, :] = file[s][()]
i1 = i2
else:
print("data for this fault and well not available")
test = np.empty((0, 10), dtype=np.float64)
ntrain = sum(wells[k] for k in wells if k != welltest)
train = np.zeros((ntrain, 10), dtype=np.float64)
i1 = 0
i2 = 0
for s in file:
if s[0] != "W":
continue
if s in stest:
continue
i2 = i1 + file[s].shape[0]
train[i1:i2, :] = file[s][()]
i1 = i2
# print('well', s, i1, i2, ntrain)
return train, test
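# Note and sketch (assumption based on the parsing above): session names look like
# "WELL-00001_<timestamp>.csv", so s[6:10] extracts the numeric well id and the split
# is leave-one-well-out, with well wellstest[fold] reserved for testing:
#
#   with h5py.File("datasets_clean.h5", "r") as f:
#       train, test = horizontal_split_well(fold=0, nfolds=5, file=f["/1"])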
def drop_nan(*args):
    """Return the input arrays with rows containing NaN/inf values removed."""
    cleaned = []
    for a in args:
        mask = np.any(
            np.isnan(a)
            # | (a > np.finfo(np.float32).max)
            | np.isinf(a) | ~np.isfinite(a),
            axis=1,
        )
        cleaned.append(a[~mask])
    return cleaned
def get_mask(*args):
m = []
for a in args:
mask = np.any(
np.isnan(a)
# | (trainnegative > np.finfo(np.float32).max)
| np.isinf(a) | ~np.isfinite(a),
axis=1,
)
m.append(mask)
return m
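# Usage sketch (added, not from the original source): get_mask() returns one boolean
# row-mask per input array, so features and labels can be filtered consistently:
#
#   (bad_train, bad_test) = get_mask(xtrain, xtest)
#   xtrain, ytrain = xtrain[~bad_train], ytrain[~bad_train]
#   xtest, ytest = xtest[~bad_test], ytest[~bad_test]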
def split_and_save1(params, case, group, classes):
"""
"""
win = params.windowsize
step = params.stepsize
#filename = get_md5(params)
with h5py.File(f"datasets_clean.h5", "r") as file:
n = 0
skipped = 0
for c in classes:
f = file[f"/{c}"]
for s in f:
if s[0] != "W":
continue
if len(params.skipwell) > 0:
# skip well by ID
if s[:10] in params.skipwell:
skipped += f[s].shape[0]
continue
n += f[s].shape[0]
data = np.zeros([n, 10], dtype=np.float64)
test = np.zeros((skipped, 10), dtype=np.float64)
        # fill the data/test buffers session by session; indices run across all classes
        i1, i2 = 0, 0
        j1, j2 = 0, 0
        for c in classes:
            f = file[f"/{c}"]
            for s in f:
                if s[0] != "W":
                    continue
                if len(params.skipwell) > 0:
                    if s[:10] in params.skipwell:
                        j2 = j1 + f[s].shape[0]
                        test[j1:j2, :] = f[s][()]
                        j1 = j2
                        continue
                i2 = i1 + f[s].shape[0]
                data[i1:i2, :] = f[s][()]
                i1 = i2
xdata = swfe(params.windowsize, n, params.stepsize, data[:, params.usecols],)
tdata = swfe(
params.windowsize, skipped, params.stepsize, test[:, params.usecols],
)
if group == "pos":
ydata = np.ones(xdata.shape[0], dtype=np.float64)
elif group == "neg":
ydata = np.zeros(xdata.shape[0], dtype=np.float64)
with h5py.File(f"datasets_folds_exp{case}.h5", "a") as ffolds:
for round_ in range(1, params.nrounds + 1):
if params.shuffle:
kf = KFold(
n_splits=params.nfolds, random_state=round_, shuffle=True
)
else:
kf = KFold(n_splits=params.nfolds, random_state=None, shuffle=False)
for fold, (train_index, test_index) in enumerate(kf.split(xdata)):
gk = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f{fold}_w{win}_s{step}"
if gk in ffolds:
del ffolds[gk]
grp = ffolds.create_group(gk)
xtrain, ytrain = xdata[train_index], ydata[train_index]
xtest, ytest = xdata[test_index], ydata[test_index]
print(
gk,
"original data shape",
data.shape,
"final",
xdata.shape,
"xtrain",
xtrain.shape,
"xtest",
xtest.shape,
)
grp.create_dataset(f"xtrain", data=xtrain, dtype=np.float64)
grp.create_dataset(f"ytrain", data=ytrain, dtype=np.float64)
grp.create_dataset(f"xvalid", data=xtest, dtype=np.float64)
grp.create_dataset(f"yvalid", data=ytest, dtype=np.float64)
if tdata.shape[0] > 0:
gkt = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f-test_w{win}_s{step}"
if gkt in ffolds:
del ffolds[gkt]
grpt = ffolds.create_group(gkt)
grpt.create_dataset(f"xtest", data=tdata, dtype=np.float64)
def split_and_save2(params, case, group, classes):
win = params.windowsize
step = params.stepsize
nfolds = params.nfolds
filename = get_md5(params)
with h5py.File(f"datasets_clean.h5", "r") as file:
with h5py.File(f"datasets_folds_exp{case}.h5", "a") as ffolds:
for round_ in range(1, params.nrounds + 1):
samples = []
sessions = []
for class_ in classes:
files = file[f"/{class_}"]
for s in files:
if s[0] != "W":
continue
n = files[s].shape[0]
samples.append(n)
sessions.append(f"/{class_}/{s}")
count = len(samples)
samples = np.array(samples)
nf = int(count / nfolds)
# random
if params.shuffle:
testfidx = np.random.RandomState(round_).choice(
range(0, count), size=(nf, params.nfolds), replace=False,
)
else:
# sequence
                    testfidx = np.reshape(np.arange(nf * nfolds), (nfolds, -1)).T
for fold in range(0, params.nfolds):
gk = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f{fold}_w{win}_s{step}"
testsize = sum(samples[testfidx[:, fold]])
test = np.zeros((testsize, 10), dtype=np.float64)
stest = [sessions[i] for i in testfidx[:, fold]]
i1, i2 = 0, 0
#for class_ in classes:
# files = file[f"/{class_}"]
for s in stest:
i2 = i1 + file[s].shape[0]
test[i1:i2, :] = file[s][()]
i1 = i2
# print(s)
# print(f'fold {fold} ate {i2}')
nstp = 0
for s in sessions:
if s in stest:
continue
nstp += file[s].shape[0]
train = np.zeros((nstp, 10), dtype=np.float64)
i1, i2 = 0, 0
for s in sessions:
if s in stest:
continue
i2 = i1 + file[s].shape[0]
train[i1:i2, :] = file[s][()]
i1 = i2
xtrain = swfe(
params.windowsize,
nstp,
params.stepsize,
train[:, params.usecols],
)
if classes == params.negative:
ytrain = np.zeros(xtrain.shape[0], dtype=np.float64)
else:
ytrain = np.ones(xtrain.shape[0], dtype=np.float64)
xtest = swfe(
params.windowsize,
testsize,
params.stepsize,
test[:, params.usecols],
)
if classes == params.negative:
ytest = np.zeros(xtest.shape[0], dtype=np.float64)
else:
ytest = np.ones(xtest.shape[0], dtype=np.float64)
if gk in ffolds:
del ffolds[gk]
grp = ffolds.create_group(gk)
print(
gk,
"original data shape",
np.sum(samples),
"train",
train.shape,
"test",
test.shape,
"xtrain",
xtrain.shape,
"xtest",
xtest.shape,
)
grp.create_dataset(f"xtrain", data=xtrain, dtype=np.float64)
grp.create_dataset(f"ytrain", data=ytrain, dtype=np.float64)
grp.create_dataset(f"xvalid", data=xtest, dtype=np.float64)
grp.create_dataset(f"yvalid", data=ytest, dtype=np.float64)
def split_and_save3(params, case, group, classes):
win = params.windowsize
step = params.stepsize
wellstest = [1, 2, 4, 5, 7]
filename = get_md5(params)
with h5py.File(f"datasets_clean.h5", "r") as clean:
with h5py.File(f"datasets_folds_exp{case}.h5", "a") as ffolds:
for round_ in range(1, params.nrounds + 1):
for fold in range(0, params.nfolds):
gk = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f{fold}_w{win}_s{step}"
welltest = wellstest[fold]
count = 0
wells = {}
strain = []
stest = []
n = 0
for class_ in classes:
files = clean[f"/{class_}"]
for s in files:
if s[0] != "W":
continue
count += 1
welli = int(str(s[6:10]))
if welli not in wells:
wells[welli] = 0
wells[welli] += files[s].shape[0]
if welli == welltest:
stest.append(f"/{class_}/{s}")
else:
strain.append(f"/{class_}/{s}")
ntrain = sum(wells[k] for k in wells if k != welltest)
train = np.zeros((ntrain, 10), dtype=np.float64)
if wellstest[fold] in wells:
ntest = wells[wellstest[fold]]
test = np.zeros((ntest, 10), dtype=np.float64)
i1, i2 = 0, 0
for s in stest:
i2 = i1 + clean[s].shape[0]
test[i1:i2, :] = clean[s][()]
i1 = i2
else:
print("data for this fault and well not available")
test = np.empty((0, 10), dtype=np.float64)
i1, i2 = 0, 0
for s in strain:
i2 = i1 + clean[s].shape[0]
train[i1:i2, :] = clean[s][()]
i1 = i2
xtrain = swfe(
params.windowsize,
ntrain,
params.stepsize,
train[:, params.usecols],
)
if classes == params.negative:
ytrain = np.zeros(xtrain.shape[0], dtype=np.float64)
else:
ytrain = np.ones(xtrain.shape[0], dtype=np.float64)
xtest = swfe(
params.windowsize,
ntest,
params.stepsize,
test[:, params.usecols],
)
if classes == params.negative:
ytest = np.zeros(xtest.shape[0], dtype=np.float64)
else:
ytest = np.ones(xtest.shape[0], dtype=np.float64)
if params.shuffle:
xtrain, ytrain = resample(xtrain, ytrain, random_state=round_, replace=False)
xtest, ytest = resample(xtest, ytest, random_state=round_, replace=False)
if gk in ffolds:
del ffolds[gk]
grp = ffolds.create_group(gk)
print(gk, "xtrain", xtrain.shape, "xtest", xtest.shape)
grp.create_dataset(f"xtrain", data=xtrain, dtype=np.float64)
grp.create_dataset(f"ytrain", data=ytrain, dtype=np.float64)
grp.create_dataset(f"xvalid", data=xtest, dtype=np.float64)
grp.create_dataset(f"yvalid", data=ytest, dtype=np.float64)
def foldfn(round_: int, fold: int, params: P) -> List[Dict]:
"""
Run one fold.
It can be executed in parallel.
"""
logging.captureWarnings(True)
logger = logging.getLogger(f"fold{fold}")
formatter = logging.Formatter(params.logformat)
fh = logging.FileHandler(f"{params.experiment}_fold{fold}.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.setLevel(logging.DEBUG)
logger.debug(f"round {round_}")
logger.debug(f"fold {fold}")
# 0 => timestamp
# 1 "P-PDG",
# 2 "P-TPT",
# 3 "T-TPT",
# 4 "P-MON-CKP",
# 5 "T-JUS-CKP",
# 6 "P-JUS-CKGL",
# 7 "T-JUS-CKGL",
# 8 "QGL",
# 9 => class label
# 6, 7, 8 are gas lift related
# usecols = [1, 2, 3, 4, 5]
# usecols = params.usecols
classifiers = get_classifiers(params.classifierstr)
try:
# ==============================================================================
# Read data from disk and split in folds
# ==============================================================================
logger.debug(f"read and split data in folds")
try:
xtrainneg, xtrainpos, xtestneg, xtestpos, xfn, xfp = params.read_and_split(
fold, round_, params
)
except Exception as e000:
print(e000)
raise e000
x_outter_train, y_outter_train = vertical_split_bin(xtrainneg, xtrainpos)
x_outter_test, y_outter_test = vertical_split_bin(xtestneg, xtestpos)
if len(params.usecols) > 0:
usecols = []
for c in params.usecols:
for ck in range((c - 1) * params.nfeaturesvar, c * params.nfeaturesvar):
usecols.append(ck)
print('use measured variables', str(params.usecols), ' keep features ', str(usecols))
logger.info('use measured variables' + str(params.usecols) + ' keep features ' + str(usecols))
x_outter_train = x_outter_train[:, usecols]
x_outter_test = x_outter_test[:, usecols]
logger.debug(f"train neg={str(xtrainneg.shape)} pos={str(xtrainpos.shape)}")
logger.debug(f"test neg={str(xtestneg.shape)} pos={str(xtestpos.shape)}")
if 0 in xtestpos.shape or 0 in xtestneg.shape:
breakpoint()
raise Exception("dimension zero")
if xtestpos.shape[0] > xtestneg.shape[0]:
#
# print('Binary problem with unbalanced classes: NEG is not > POS')
            logger.warning("Binary problem with unbalanced classes: NEG is not > POS")
# raise Exception()
logger.debug(
f"shapes train={str(x_outter_train.shape)} test={str(x_outter_test.shape)}"
)
# ==============================================================================
# After feature extraction, some NaN appear again in the arrays
# ==============================================================================
logger.debug(f"check NaN #1")
mask = np.any(
np.isnan(x_outter_train)
| (x_outter_train > np.finfo(np.float32).max)
            | np.isinf(x_outter_train)
"""
Nothing here but dictionaries for generating LinearSegmentedColormaps,
and a dictionary of these dictionaries.
Documentation for each is in pyplot.colormaps()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
_binary_data = {
'red': ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue': ((0., 1., 1.), (1., 0., 0.))
}
_autumn_data = {'red': ((0., 1.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),
(0.746032, 0.652778, 0.652778),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.365079, 0.444444, 0.444444),
(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),
(0.809524, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),
(1.0, 0.4975, 0.4975))}
_flag_data = {
'red': lambda x: 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 0.5,
'green': lambda x: np.sin(x * 31.5 * np.pi),
'blue': lambda x: 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5,
}
_prism_data = {
'red': lambda x: 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67,
'green': lambda x: 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33,
'blue': lambda x: -1.1 * np.sin((x * 20.9) * np.pi),
}
def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0):
"""Return custom data dictionary of (r,g,b) conversion functions, which
can be used with :func:`register_cmap`, for the cubehelix color scheme.
Unlike most other color schemes cubehelix was designed by D.A. Green to
be monotonically increasing in terms of perceived brightness.
Also, when printed on a black and white postscript printer, the scheme
results in a greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b values produced
can be visualised as a squashed helix around the diagonal in the
r,g,b color cube.
For a unit color cube (i.e. 3-D coordinates for r,g,b each in the
range 0 to 1) the color scheme starts at (r,g,b) = (0,0,0), i.e. black,
and finishes at (r,g,b) = (1,1,1), i.e. white. For some fraction *x*,
between 0 and 1, the color is the corresponding grey value at that
fraction along the black to white diagonal (x,x,x) plus a color
element. This color element is calculated in a plane of constant
perceived intensity and controlled by the following parameters.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
gamma gamma factor to emphasise either low intensity values
(gamma < 1), or high intensity values (gamma > 1);
defaults to 1.0.
s the start color; defaults to 0.5 (i.e. purple).
r the number of r,g,b rotations in color that are made
from the start to the end of the color scheme; defaults
to -1.5 (i.e. -> B -> G -> R -> B).
h the hue parameter which controls how saturated the
colors are. If this parameter is zero then the color
scheme is purely a greyscale; defaults to 1.0.
========= =======================================================
"""
def get_color_function(p0, p1):
def color(x):
# Apply gamma factor to emphasise low or high intensity values
xg = x ** gamma
# Calculate amplitude and angle of deviation from the black
# to white diagonal in the plane of constant
# perceived intensity.
a = h * xg * (1 - xg) / 2
phi = 2 * np.pi * (s / 3 + r * x)
return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
return color
return {
'red': get_color_function(-0.14861, 1.78277),
'green': get_color_function(-0.29227, -0.90649),
'blue': get_color_function(1.97294, 0.0),
}
_cubehelix_data = cubehelix()
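# Usage sketch (hedged; relies on the public matplotlib API of older releases, nothing
# defined in this module): the callables returned by cubehelix() form a valid
# segmentdata dict for LinearSegmentedColormap, so variants can be registered:
#
#   from matplotlib.colors import LinearSegmentedColormap
#   from matplotlib.cm import register_cmap
#   cmap = LinearSegmentedColormap("cubehelix_soft", cubehelix(gamma=0.8, r=-1.0), N=256)
#   register_cmap(cmap=cmap)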
_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
# Gnuplot palette functions
gfunc = {
0: lambda x: 0,
1: lambda x: 0.5,
2: lambda x: 1,
3: lambda x: x,
4: lambda x: x ** 2,
5: lambda x: x ** 3,
6: lambda x: x ** 4,
7: lambda x: np.sqrt(x),
8: lambda x: np.sqrt(np.sqrt(x)),
9: lambda x: np.sin(x * np.pi / 2),
10: lambda x: np.cos(x * np.pi / 2),
11: lambda x: np.abs(x - 0.5),
12: lambda x: (2 * x - 1) ** 2,
13: lambda x: np.sin(x * np.pi),
14: lambda x: np.abs(np.cos(x * np.pi)),
15: lambda x: np.sin(x * 2 * np.pi),
16: lambda x: np.cos(x * 2 * np.pi),
17: lambda x: np.abs(np.sin(x * 2 * np.pi)),
    18: lambda x: np.abs(np.cos(x * 2 * np.pi)),
import warnings
import numpy as np
import vtk
import vedo
import vedo.addons as addons
import vedo.colors as colors
import vedo.shapes as shapes
import vedo.utils as utils
from vedo.assembly import Assembly
from vedo.mesh import merge
from vedo.mesh import Mesh
__doc__ = """
.. image:: https://vedo.embl.es/images/pyplot/fitPolynomial2.png
Advanced plotting utility functions
"""
__all__ = [
"Figure",
#"Histogram1D", # uncomment to generate docs
#"Histogram2D",
#"PlotXY",
#"PlotBars",
"plot",
"histogram",
"fit",
"donut",
"violin",
"whisker",
"streamplot",
"matrix",
"DirectedGraph",
]
##########################################################################
class LabelData(object):
def __init__(self):
self.text = 'dataset'
self.tcolor = 'black'
self.marker = 's'
self.mcolor = 'black'
##########################################################################
class Figure(Assembly):
"""
Parameters
----------
xlim : list
range of the x-axis as [x0, x1]
ylim : list
range of the y-axis as [y0, y1]
aspect : float, str
the desired aspect ratio of the histogram. Default is 4/3.
Use `aspect="equal"` to force the same units in x and y.
padding : float, list
keep a padding space from the axes (as a fraction of the axis size).
This can be a list of four numbers.
xtitle : str
title for the x-axis, can also be set using `axes=dict(xtitle="my x axis")`
ytitle : str
title for the y-axis, can also be set using `axes=dict(ytitle="my y axis")`
grid : bool
        show the background grid for the axes, can also be set using `axes=dict(xyGrid=True)`
axes : dict
an extra dictionary of options for the axes
"""
def __init__(
self,
xlim,
ylim,
aspect=4/3,
padding=[0.05, 0.05, 0.05, 0.05],
**kwargs,
):
self.xlim = np.array(xlim)
self.ylim = np.array(ylim)
self.aspect = aspect
self.padding = padding
if not utils.isSequence(self.padding):
self.padding = [self.padding, self.padding,self.padding, self.padding]
self.force_scaling_types = (
shapes.Glyph,
shapes.Line,
shapes.Rectangle,
shapes.DashedLine,
shapes.Tube,
shapes.Ribbon,
shapes.GeoCircle,
shapes.Arc,
shapes.Grid,
# shapes.Arrows, # todo
# shapes.Arrows2D, # todo
shapes.Brace, # todo
)
options = dict(kwargs)
self.title = options.pop("title", "")
self.xtitle = options.pop("xtitle", " ")
self.ytitle = options.pop("ytitle", " ")
numberOfDivisions = 6
self.legend = None
self.labels = []
self.label = options.pop("label", None)
if self.label:
self.labels = [self.label]
self.axopts = options.pop("axes", {})
if isinstance(self.axopts, (bool,int,float)):
if self.axopts:
self.axopts = {}
if self.axopts or isinstance(self.axopts, dict):
numberOfDivisions = self.axopts.pop('numberOfDivisions', numberOfDivisions)
self.axopts['xtitle'] = self.xtitle
self.axopts['ytitle'] = self.ytitle
if 'xyGrid' not in self.axopts: ## modify the default
self.axopts['xyGrid'] = options.pop("grid", False)
if 'xyGridTransparent' not in self.axopts: ## modify the default
self.axopts['xyGridTransparent'] = True
if 'xTitlePosition' not in self.axopts: ## modify the default
self.axopts['xTitlePosition'] = 0.5
self.axopts['xTitleJustify'] = "top-center"
if 'yTitlePosition' not in self.axopts: ## modify the default
self.axopts['yTitlePosition'] = 0.5
self.axopts['yTitleJustify'] = "bottom-center"
if self.label:
if 'c' in self.axopts:
self.label.tcolor = self.axopts['c']
x0, x1 = self.xlim
y0, y1 = self.ylim
dx = x1 - x0
dy = y1 - y0
x0lim, x1lim = (x0-self.padding[0]*dx, x1+self.padding[1]*dx)
y0lim, y1lim = (y0-self.padding[2]*dy, y1+self.padding[3]*dy)
dy = y1lim - y0lim
self.axes = None
if xlim[0] >= xlim[1] or ylim[0] >= ylim[1]:
vedo.logger.warning(f"Null range for Figure {self.title}... returning an empty Assembly.")
Assembly.__init__(self)
self.yscale = 0
return
if aspect == "equal":
self.aspect = dx / dy # so that yscale becomes 1
self.yscale = dx / dy / self.aspect
y0lim *= self.yscale
y1lim *= self.yscale
self.x0lim = x0lim
self.x1lim = x1lim
self.y0lim = y0lim
self.y1lim = y1lim
self.ztolerance = options.pop("ztolerance", None)
if self.ztolerance is None:
self.ztolerance = dx / 5000
############## create axes
if self.axopts:
axesopts = self.axopts
if self.axopts is True or self.axopts == 1:
axesopts = {}
tp, ts = utils.makeTicks(
y0lim / self.yscale,
y1lim / self.yscale,
numberOfDivisions,
)
labs = []
for i in range(1, len(tp) - 1):
ynew = utils.linInterpolate(tp[i], [0, 1], [y0lim, y1lim])
labs.append([ynew, ts[i]])
if self.title:
axesopts["htitle"] = self.title
axesopts["yValuesAndLabels"] = labs
axesopts["xrange"] = (x0lim, x1lim)
axesopts["yrange"] = (y0lim, y1lim)
axesopts["zrange"] = (0, 0)
axesopts["yUseBounds"] = True
if 'c' not in axesopts and 'ac' in options:
axesopts["c"] = options['ac']
self.axes = addons.Axes(**axesopts)
Assembly.__init__(self, [self.axes])
self.name = "Figure"
return
def __add__(self, *obj):
        # just to avoid confusion, supersede Assembly.__add__
return self.__iadd__(*obj)
def __iadd__(self, *obj):
if len(obj) == 1 and isinstance(obj[0], Figure):
return self._check_unpack_and_insert(obj[0])
else:
obj = utils.flatten(obj)
return self.insert(*obj)
def _check_unpack_and_insert(self, fig):
if fig.label:
self.labels.append(fig.label)
if abs(self.yscale - fig.yscale) > 0.0001:
colors.printc("ERROR: adding incompatible Figure. Y-scales are different:",
c='r', invert=True)
colors.printc(" first figure:", self.yscale, c='r')
colors.printc(" second figure:", fig.yscale, c='r')
colors.printc("One or more of these parameters can be the cause:", c='r')
if list(self.xlim) != list(fig.xlim):
colors.printc("xlim --------------------------------------------\n",
" first figure:", self.xlim, "\n",
" second figure:", fig.xlim, c='r')
if list(self.ylim) != list(fig.ylim):
colors.printc("ylim --------------------------------------------\n",
" first figure:", self.ylim, "\n",
" second figure:", fig.ylim, c='r')
if list(self.padding) != list(fig.padding):
colors.printc("padding -----------------------------------------\n",
" first figure:", self.padding,
" second figure:", fig.padding, c='r')
if self.aspect != fig.aspect:
colors.printc("aspect ------------------------------------------\n",
" first figure:", self.aspect,
" second figure:", fig.aspect, c='r')
colors.printc("\n*Consider using fig2 = histogram(..., like=fig1)", c='r')
colors.printc(" Or fig += histogram(..., like=fig)\n", c='r')
return self
offset = self.zbounds()[1] + self.ztolerance
for ele in fig.unpack():
if "Axes" in ele.name:
continue
ele.z(offset)
self.insert(ele, rescale=False)
return self
def insert(self, *objs, rescale=True, as3d=True, adjusted=True, cut=True):
"""
Insert objects into a Figure.
        The recommended syntax is to use "+=", which calls `insert()` under the hood.
If a whole Figure is added with "+=", it is unpacked and its objects are added
one by one.
Parameters
----------
rescale : bool
rescale the y axis position while inserting the object.
as3d : bool
            if True keep the aspect ratio of the 3d object, otherwise stretch it in y.
adjusted : bool
adjust the scaling according to the shortest axis
cut : bool
cut off the parts of the object which go beyond the axes frame.
"""
for a in objs:
if a in self.actors:
# should not add twice the same object in plot
continue
if isinstance(a, vedo.Points): # hacky way to identify Points
if a.NCells()==a.NPoints():
poly = a.polydata(False)
if poly.GetNumberOfPolys()==0 and poly.GetNumberOfLines()==0:
as3d = False
rescale = True
if isinstance(a, (shapes.Arrow, shapes.Arrow2D)):
# discard input Arrow and substitute it with a brand new one
# (because scaling would fatally distort the shape)
prop = a.GetProperty()
prop.LightingOff()
py = a.base[1]
a.top[1] = (a.top[1]-py) * self.yscale + py
b = shapes.Arrow2D(a.base, a.top, s=a.s, fill=a.fill).z(a.z())
b.SetProperty(prop)
b.y(py * self.yscale)
a = b
# elif isinstance(a, shapes.Rectangle) and a.radius is not None:
# # discard input Rectangle and substitute it with a brand new one
# # (because scaling would fatally distort the shape of the corners)
# py = a.corner1[1]
# rx1,ry1,rz1 = a.corner1
# rx2,ry2,rz2 = a.corner2
# ry2 = (ry2-py) * self.yscale + py
# b = shapes.Rectangle([rx1,0,rz1], [rx2,ry2,rz2], radius=a.radius).z(a.z())
# b.SetProperty(a.GetProperty())
# b.y(py / self.yscale)
# a = b
else:
if rescale:
if not isinstance(a, Figure):
if as3d and not isinstance(a, self.force_scaling_types):
if adjusted:
scl = np.min([1, self.yscale])
else:
scl = self.yscale
a.scale(scl)
else:
a.scale([1, self.yscale, 1])
# shift it in y
a.y(a.y() * self.yscale)
if cut:
try:
bx0, bx1, by0, by1, _, _ = a.GetBounds()
if self.y0lim > by0:
a.cutWithPlane([0, self.y0lim, 0], [ 0, 1, 0])
if self.y1lim < by1:
a.cutWithPlane([0, self.y1lim, 0], [ 0,-1, 0])
if self.x0lim > bx0:
a.cutWithPlane([self.x0lim, 0, 0], [ 1, 0, 0])
if self.x1lim < bx1:
a.cutWithPlane([self.x1lim, 0, 0], [-1, 0, 0])
except:
# print("insert(): cannot cut", [a])
pass
self.AddPart(a)
self.actors.append(a)
return self
def addLabel(self, text, c=None, marker="", mc='black'):
"""
        Manually add an entry label to the legend.
Parameters
----------
text : str
text string for the label.
c : str
color of the text
marker : str, Mesh
a marker char or a Mesh object to be used as marker
mc : str, optional
color for the marker
"""
newlabel = LabelData()
newlabel.text = text.replace("\n"," ")
newlabel.tcolor = c
newlabel.marker = marker
newlabel.mcolor = mc
self.labels.append(newlabel)
return self
def addLegend(self,
pos="top-right",
relative=True,
font=None,
s=1,
c=None,
vspace=1.75,
padding=0.1,
radius=0,
alpha=1,
bc='k7',
lw=1,
lc='k4',
z=0,
):
"""
Add existing labels to form a legend box.
Labels have been previously filled with eg: `plot(..., label="text")`
Parameters
----------
pos : str, list
A string or 2D coordinates. The default is "top-right".
relative : bool
            control whether `pos` is absolute or relative, i.e. normalized
            to the x and y ranges so that x and y in `pos=[x,y]` should be
            both in the range [0,1].
            This flag is ignored if a string descriptor is passed.
Default is True.
font : str, int
font name or number
s : float
global size of the legend
c : str
color of the text
vspace : float
vertical spacing of lines
padding : float
padding of the box as a fraction of the text size
radius : float
border radius of the box
alpha : float
opacity of the box. Values below 1 may cause poor rendering
            because of antialiasing.
Use alpha = 0 to remove the box.
bc : str
box color
lw : int
border line width of the box in pixel units
lc : int
border line color of the box
z : float
set the zorder as z position (useful to avoid overlap)
"""
sx = self.x1lim - self.x0lim
s = s * sx / 55 # so that input can be about 1
ds = 0
texts = []
mks = []
for i, t in enumerate(self.labels):
label = self.labels[i]
t = label.text
if label.tcolor is not None:
c = label.tcolor
tx = vedo.shapes.Text3D(t, s=s, c=c, justify="center-left", font=font)
y0, y1 = tx.ybounds()
ds = max( y1 - y0, ds)
texts.append(tx)
mk = label.marker
if isinstance(mk, vedo.Points):
mk = mk.clone(deep=False).lighting('off')
cm = mk.centerOfMass()
ty0, ty1 = tx.ybounds()
oby0, oby1 = mk.ybounds()
mk.shift(-cm)
mk.origin(cm)
mk.scale((ty1-ty0)/(oby1-oby0))
mk.scale([1.1,1.1,0.01])
elif mk == '-':
mk = vedo.shapes.Marker(mk, s=s*2)
mk.color(label.mcolor)
else:
mk = vedo.shapes.Marker(mk, s=s)
mk.color(label.mcolor)
mks.append(mk)
for i, tx in enumerate(texts):
tx.shift(0,-(i+0)*ds* vspace)
for i, mk in enumerate(mks):
mk.shift(-ds*1.75,-(i+0)*ds* vspace,0)
acts = texts + mks
aleg = Assembly(acts)#.show(axes=1).close()
x0,x1, y0,y1, _,_ = aleg.GetBounds()
if alpha:
dx = x1-x0
dy = y1-y0
if not utils.isSequence(padding):
padding = [padding] * 4
padding = min(padding)
padding = min(padding*dx, padding*dy)
if len(self.labels) == 1:
padding *= 4
x0 -= padding
x1 += padding
y0 -= padding
y1 += padding
box = shapes.Rectangle(
[x0,y0], [x1,y1], radius=radius, c=bc, alpha=alpha,
)
box.shift(0, 0, -dy/100).pickable(False)
if lc:
box.lc(lc).lw(lw)
aleg.AddPart(box)
xlim = self.xlim
ylim = self.ylim
if isinstance(pos, str):
px, py = 0, 0
rx, ry = (xlim[1]+xlim[0])/2, (ylim[1]+ylim[0])/2
shx, shy = 0, 0
if "top" in pos:
if "cent" in pos:
px, py = rx, ylim[1]
shx, shy = (x0+x1)/2, y1
elif "left" in pos:
px, py = xlim[0], ylim[1]
shx, shy = x0, y1
else: # "right"
px, py = xlim[1], ylim[1]
shx, shy = x1, y1
elif "bot" in pos:
if "left" in pos:
px, py = xlim[0], ylim[0]
shx, shy = x0, y0
elif "right" in pos:
px, py = xlim[1], ylim[0]
shx, shy = x1, y0
else: # "cent"
px, py = rx, ylim[0]
shx, shy = (x0+x1)/2, y0
elif "cent" in pos:
if "left" in pos:
px, py = xlim[0], ry
shx, shy = x0, (y0+y1)/2
elif "right" in pos:
px, py = xlim[1], ry
shx, shy = x1, (y0+y1)/2
else:
vedo.logger.error(f"in addLegend(), cannot understand {pos}")
raise RuntimeError
else:
if relative:
rx, ry = pos[0], pos[1]
px = (xlim[1] - xlim[0]) * rx + xlim[0]
py = (ylim[1] - ylim[0]) * ry + ylim[0]
z *= xlim[1] - xlim[0]
else:
px, py = pos[0], pos[1]
shx, shy = x0, y1
aleg.pos(px - shx, py * self.yscale - shy, self.z() + sx/50 + z)
self.insert(aleg, rescale=False, cut=False)
self.legend = aleg
aleg.name = "Legend"
return self
#########################################################################################
class Histogram1D(Figure):
"""
Creates a `Histogram1D(Figure)` object.
Parameters
----------
weights : list
An array of weights, of the same shape as `data`. Each value in `data`
only contributes its associated weight towards the bin count (instead of 1).
bins : int
number of bins
density : bool
normalize the area to 1 by dividing by the nr of entries and bin size
logscale : bool
use logscale on y-axis
fill : bool
        fill bars with solid color `c`
    gap : float
        leave a small gap between bars
radius : float
border radius of the top of the histogram bar. Default value is 0.1.
texture : str
url or path to an image to be used as texture for the bin
outline : bool
show outline of the bins
errors : bool
show error bars
xtitle : str
title for the x-axis, can also be set using `axes=dict(xtitle="my x axis")`
ytitle : str
title for the y-axis, can also be set using `axes=dict(ytitle="my y axis")`
padding : float, list
keep a padding space from the axes (as a fraction of the axis size).
This can be a list of four numbers.
aspect : float
the desired aspect ratio of the histogram. Default is 4/3.
grid : bool
        show the background grid for the axes, can also be set using `axes=dict(xyGrid=True)`
ztolerance : float
a tolerance factor to superimpose objects (along the z-axis).
.. hint:: examples/pyplot/histo_1d_a.py histo_1d_b.py histo_1d_c.py histo_1d_d.py
.. image:: https://vedo.embl.es/images/pyplot/histo_1D.png
"""
def __init__(
self,
data,
weights=None,
bins=None,
errors=False,
density=False,
logscale=False,
fill=True,
radius=0.1,
c="olivedrab",
gap=0.02,
alpha=1,
outline=False,
lw=2,
lc="k",
texture="",
marker="",
ms=None,
mc=None,
ma=None,
# Figure and axes options:
like=None,
xlim=None,
ylim=(0, None),
aspect=4/3,
padding=[0., 0., 0., 0.05],
title="",
xtitle=" ",
ytitle=" ",
ac="k",
grid=False,
ztolerance=None,
label="",
**fig_kwargs,
):
if like is not None:
xlim = like.xlim
ylim = like.ylim
aspect = like.aspect
padding = like.padding
if bins is None:
bins = like.bins
if bins is None:
bins = 20
if utils.isSequence(xlim):
# deal with user passing eg [x0, None]
_x0, _x1 = xlim
if _x0 is None:
_x0 = data.min()
if _x1 is None:
_x1 = data.max()
xlim = [_x0, _x1]
# purge NaN from data
validIds = np.all(np.logical_not(np.isnan(data)))
data = np.array(data[validIds]).ravel()
fs, edges = np.histogram(data, bins=bins, weights=weights, range=xlim)
binsize = edges[1] - edges[0]
ntot = data.shape[0]
fig_kwargs['title'] = title
fig_kwargs['xtitle'] = xtitle
fig_kwargs['ytitle'] = ytitle
fig_kwargs['ac'] = ac
fig_kwargs['ztolerance'] = ztolerance
fig_kwargs['grid'] = grid
unscaled_errors = np.sqrt(fs)
if density:
scaled_errors = unscaled_errors / (ntot*binsize)
fs = fs / (ntot*binsize)
if ytitle == ' ':
ytitle = f"counts / ( {ntot}~x~{utils.precision(binsize,3)} )"
fig_kwargs['ytitle'] = ytitle
elif logscale:
se_up = np.log10(fs + unscaled_errors/2 + 1)
se_dw = np.log10(fs - unscaled_errors/2 + 1)
scaled_errors = np.c_[se_up, se_dw]
fs = np.log10(fs + 1)
if ytitle == ' ':
ytitle = 'log_10 (counts+1)'
fig_kwargs['ytitle'] = ytitle
x0, x1 = np.min(edges), np.max(edges)
y0, y1 = ylim[0], np.max(fs)
_errors = []
if errors:
if density:
y1 += max(scaled_errors) / 2
_errors = scaled_errors
elif logscale:
y1 = max(scaled_errors[:, 0])
_errors = scaled_errors
else:
y1 += max(unscaled_errors) / 2
_errors = unscaled_errors
if like is None:
ylim = list(ylim)
if xlim is None:
xlim = [x0, x1]
if ylim[1] is None:
ylim[1] = y1
if ylim[0] != 0:
ylim[0] = y0
self.entries = ntot
self.frequencies = fs
self.errors = _errors
self.edges = edges
self.centers = (edges[0:-1] + edges[1:]) / 2
self.mean = data.mean()
self.std = data.std()
self.bins = edges # internally used by "like"
############################### stats legend as htitle
addstats = False
if not title:
if 'axes' not in fig_kwargs:
addstats = True
axesopts = dict()
fig_kwargs['axes'] = axesopts
elif fig_kwargs['axes'] is False:
pass
else:
axesopts = fig_kwargs['axes']
if "htitle" not in axesopts:
addstats = True
if addstats:
htitle = f"Entries:~~{int(self.entries)} "
htitle += f"Mean:~~{utils.precision(self.mean, 4)} "
htitle += f"STD:~~{utils.precision(self.std, 4)} "
axesopts["htitle"] = htitle
axesopts["hTitleJustify"] = "bottom-left"
axesopts["hTitleSize"] = 0.016
axesopts["hTitleOffset"] = [-0.49, 0.01, 0]
if mc is None:
mc = lc
if ma is None:
ma = alpha
if label:
nlab = LabelData()
nlab.text = label
nlab.tcolor = ac
nlab.marker = marker
nlab.mcolor = mc
if not marker:
nlab.marker = 's'
nlab.mcolor = c
fig_kwargs['label'] = nlab
############################################### Figure init
Figure.__init__(self, xlim, ylim, aspect, padding, **fig_kwargs)
if not self.yscale:
return None
if utils.isSequence(bins):
myedges = np.array(bins)
bins = len(bins) - 1
else:
myedges = edges
bin_centers = []
for i in range(bins):
x = (myedges[i] + myedges[i + 1]) / 2
bin_centers.append([x, fs[i], 0])
rs = []
maxheigth = 0
if not fill and not outline and not errors and not marker:
outline = True # otherwise it's empty..
if fill: #####################
if outline:
gap = 0
for i in range(bins):
F = fs[i]
if not F:
continue
p0 = (myedges[i] + gap * binsize, 0, 0)
p1 = (myedges[i + 1] - gap * binsize, F, 0)
if radius:
if gap:
rds = np.array([0, 0, radius, radius])
else:
rd1 = 0 if i < bins-1 and fs[i+1] >= F else radius/2
rd2 = 0 if i > 0 and fs[i-1] >= F else radius/2
rds = np.array([0, 0, rd1, rd2])
p1_yscaled = [p1[0], p1[1]*self.yscale, 0]
r = shapes.Rectangle(p0, p1_yscaled, radius=rds*binsize, res=6)
r.scale([1, 1/self.yscale, 1])
                    r.radius = None # so it doesn't get recreated and rescaled by insert()
else:
r = shapes.Rectangle(p0, p1)
if texture:
r.texture(texture)
c = 'w'
# if texture: # causes Segmentation fault vtk9.0.3
# if i>0 and rs[i-1].GetTexture(): # reuse the same texture obj
# r.texture(rs[i-1].GetTexture())
# else:
# r.texture(texture)
# c = 'w'
r.PickableOff()
maxheigth = max(maxheigth, p1[1])
if c in colors.cmaps_names:
col = colors.colorMap((p0[0]+p1[0])/2, c, myedges[0], myedges[-1])
else:
col = c
r.color(col).alpha(alpha).lighting('off')
r.z(self.ztolerance)
rs.append(r)
if outline: #####################
lns = [[myedges[0], 0, 0]]
for i in range(bins):
lns.append([myedges[i], fs[i], 0])
lns.append([myedges[i + 1], fs[i], 0])
maxheigth = max(maxheigth, fs[i])
lns.append([myedges[-1], 0, 0])
outl = shapes.Line(lns, c=lc, alpha=alpha, lw=lw)
outl.z(self.ztolerance*2)
rs.append(outl)
if errors: #####################
for i in range(bins):
x = self.centers[i]
f = fs[i]
if not f:
continue
err = _errors[i]
if utils.isSequence(err):
el = shapes.Line([x, err[0], 0], [x, err[1], 0], c=lc, alpha=alpha, lw=lw)
else:
el = shapes.Line([x, f-err/2, 0], [x, f+err/2, 0], c=lc, alpha=alpha, lw=lw)
el.z(self.ztolerance*3)
rs.append(el)
if marker: #####################
# remove empty bins (we dont want a marker there)
bin_centers = np.array(bin_centers)
bin_centers = bin_centers[bin_centers[:, 1] > 0]
if utils.isSequence(ms): ### variable point size
mk = shapes.Marker(marker, s=1)
mk.scale([1, 1/self.yscale, 1])
msv = np.zeros_like(bin_centers)
msv[:, 0] = ms
marked = shapes.Glyph(
bin_centers, glyphObj=mk, c=mc,
orientationArray=msv, scaleByVectorSize=True
)
else: ### fixed point size
if ms is None:
ms = (xlim[1]-xlim[0]) / 100.0
else:
ms = (xlim[1]-xlim[0]) / 100.0 * ms
if utils.isSequence(mc):
mk = shapes.Marker(marker, s=ms)
mk.scale([1, 1/self.yscale, 1])
msv = np.zeros_like(bin_centers)
msv[:, 0] = 1
marked = shapes.Glyph(
bin_centers, glyphObj=mk, c=mc,
orientationArray=msv, scaleByVectorSize=True
)
else:
mk = shapes.Marker(marker, s=ms)
mk.scale([1, 1/self.yscale, 1])
marked = shapes.Glyph(bin_centers, glyphObj=mk, c=mc)
marked.alpha(ma)
marked.z(self.ztolerance*4)
rs.append(marked)
self.insert(*rs, as3d=False)
self.name = "Histogram1D"
#########################################################################################
class Histogram2D(Figure):
"""
Input data formats `[(x1,x2,..), (y1,y2,..)] or [(x1,y1), (x2,y2),..]`
are both valid.
Use keyword `like=...` if you want to use the same format of a previously
created Figure (useful when superimposing Figures) to make sure
they are compatible and comparable. If they are not compatible
you will receive an error message.
Parameters
----------
bins : list
binning as (nx, ny)
weights : list
array of weights to assign to each entry
cmap : str, lookuptable
color map name or look up table
alpha : float
opacity of the histogram
gap : float
        separation between adjacent bins as a fraction of their size
scalarbar : bool
add a scalarbar to right of the histogram
like : Figure
grab and use the same format of the given Figure (for superimposing)
xlim : list
[x0, x1] range of interest. If left to None will automatically
choose the minimum or the maximum of the data range.
Data outside the range are completely ignored.
ylim : list
[y0, y1] range of interest. If left to None will automatically
choose the minimum or the maximum of the data range.
Data outside the range are completely ignored.
aspect : float
the desired aspect ratio of the figure.
title : str
title of the plot to appear on top.
If left blank some statistics will be shown.
xtitle : str
x axis title
ytitle : str
y axis title
ztitle : str
title for the scalar bar
ac : str
axes color, additional keyword for Axes can also be added
using e.g. `axes=dict(xyGrid=True)`
.. hint:: examples/pyplot/histo_2d.py
.. image:: https://vedo.embl.es/images/pyplot/histo_2D.png
"""
def __init__(
self,
xvalues,
yvalues=None,
bins=25,
weights=None,
cmap="cividis",
alpha=1,
gap=0,
scalarbar=True,
# Figure and axes options:
like=None,
xlim=None,
ylim=(None, None),
zlim=(None, None),
aspect=1,
title="",
xtitle=" ",
ytitle=" ",
ztitle="",
ac="k",
**fig_kwargs,
):
if yvalues is None:
# assume [(x1,y1), (x2,y2) ...] format
yvalues = xvalues[:, 1]
xvalues = xvalues[:, 0]
padding=[0,0,0,0]
if like is not None:
xlim = like.xlim
ylim = like.ylim
aspect = like.aspect
padding = like.padding
if bins is None:
bins = like.bins
if bins is None:
bins = 20
if isinstance(bins, int):
bins = (bins, bins)
if utils.isSequence(xlim):
# deal with user passing eg [x0, None]
_x0, _x1 = xlim
if _x0 is None:
_x0 = xvalues.min()
if _x1 is None:
_x1 = xvalues.max()
xlim = [_x0, _x1]
if utils.isSequence(ylim):
# deal with user passing eg [x0, None]
_y0, _y1 = ylim
if _y0 is None:
_y0 = yvalues.min()
if _y1 is None:
_y1 = yvalues.max()
ylim = [_y0, _y1]
H, xedges, yedges = np.histogram2d(
xvalues, yvalues, weights=weights,
bins=bins, range=(xlim, ylim),
)
xlim = np.min(xedges), np.max(xedges)
ylim = np.min(yedges), np.max(yedges)
dx, dy = xlim[1] - xlim[0], ylim[1] - ylim[0]
fig_kwargs['title'] = title
fig_kwargs['xtitle'] = xtitle
fig_kwargs['ytitle'] = ytitle
fig_kwargs['ac'] = ac
self.entries = len(xvalues)
self.frequencies = H
self.edges = (xedges, yedges)
self.mean = (xvalues.mean(), yvalues.mean())
self.std = (xvalues.std(), yvalues.std())
self.bins = bins # internally used by "like"
############################### stats legend as htitle
addstats = False
if not title:
if 'axes' not in fig_kwargs:
addstats = True
axesopts = dict()
fig_kwargs['axes'] = axesopts
elif fig_kwargs['axes'] is False:
pass
else:
axesopts = fig_kwargs['axes']
if "htitle" not in fig_kwargs['axes']:
addstats = True
if addstats:
htitle = f"Entries:~~{int(self.entries)} "
htitle += f"Mean:~~{utils.precision(self.mean, 3)} "
htitle += f"STD:~~{utils.precision(self.std, 3)} "
axesopts["htitle"] = htitle
axesopts["hTitleJustify"] = "bottom-left"
axesopts["hTitleSize"] = 0.0175
axesopts["hTitleOffset"] = [-0.49, 0.01, 0]
############################################### Figure init
Figure.__init__(self, xlim, ylim, aspect, padding, **fig_kwargs)
if not self.yscale:
return None
##################### the grid
acts = []
g = shapes.Grid(
pos=[(xlim[0] + xlim[1]) / 2, (ylim[0] + ylim[1]) / 2, 0],
s=(dx, dy),
res=bins[:2],
)
g.alpha(alpha).lw(0).wireframe(False).flat().lighting('off')
g.cmap(cmap, np.ravel(H.T), on='cells', vmin=zlim[0], vmax=zlim[1])
if gap:
g.shrink(abs(1-gap))
if scalarbar:
sc = g.addScalarBar3D(ztitle, c=ac).scalarbar
sc.scale([self.yscale,1,1]) ## prescale trick
sbnds = sc.xbounds()
sc.x(self.x1lim + (sbnds[1]-sbnds[0])*0.75)
acts.append(sc)
acts.append(g)
self.insert(*acts, as3d=False)
self.name = "Histogram2D"
#########################################################################################
class PlotBars(Figure):
"""
Creates a `PlotBars(Figure)` object.
Input must be in format `[counts, labels, colors, edges]`.
Either or both `edges` and `colors` are optional and can be omitted.
Use keyword `like=...` if you want to use the same format of a previously
created Figure (useful when superimposing Figures) to make sure
they are compatible and comparable. If they are not compatible
you will receive an error message.
Parameters
----------
errors : bool
show error bars
logscale : bool
use logscale on y-axis
fill : bool
        fill bars with solid color `c`
    gap : float
        leave a small gap between bars
radius : float
border radius of the top of the histogram bar. Default value is 0.1.
texture : str
url or path to an image to be used as texture for the bin
outline : bool
show outline of the bins
xtitle : str
title for the x-axis, can also be set using `axes=dict(xtitle="my x axis")`
ytitle : str
title for the y-axis, can also be set using `axes=dict(ytitle="my y axis")`
ac : str
axes color
padding : float, list
keep a padding space from the axes (as a fraction of the axis size).
This can be a list of four numbers.
aspect : float
the desired aspect ratio of the figure. Default is 4/3.
grid : bool
        show the background grid for the axes, can also be set using `axes=dict(xyGrid=True)`
.. hint:: examples/pyplot/histo_1d_a.py histo_1d_b.py histo_1d_c.py histo_1d_d.py
.. image:: https://vedo.embl.es/images/pyplot/histo_1D.png
"""
def __init__(
self,
data,
errors=False,
logscale=False,
fill=True,
gap=0.02,
radius=0.05,
c="olivedrab",
alpha=1,
texture="",
outline=False,
lw=2,
lc="k",
# Figure and axes options:
like=None,
xlim=(None, None),
ylim=(0, None),
aspect=4/3,
padding=[0.025, 0.025, 0, 0.05],
#
title="",
xtitle=" ",
ytitle=" ",
ac="k",
grid=False,
ztolerance=None,
**fig_kwargs,
):
ndata = len(data)
if ndata == 4:
counts, xlabs, cols, edges = data
elif ndata == 3:
counts, xlabs, cols = data
edges = np.array(range(len(counts)+1))+0.5
elif ndata == 2:
counts, xlabs = data
edges = np.array(range(len(counts)+1))+0.5
cols = [c] * len(counts)
else:
m = "barplot error: data must be given as [counts, labels, colors, edges] not\n"
vedo.logger.error(m + f" {data}\n bin edges and colors are optional.")
raise RuntimeError()
# sanity checks
assert len(counts) == len(xlabs)
assert len(counts) == len(cols)
assert len(counts) == len(edges)-1
counts = np.asarray(counts)
edges = np.asarray(edges)
if logscale:
counts = np.log10(counts + 1)
if ytitle == ' ':
ytitle = 'log_10 (counts+1)'
if like is not None:
xlim = like.xlim
ylim = like.ylim
aspect = like.aspect
padding = like.padding
if utils.isSequence(xlim):
# deal with user passing eg [x0, None]
_x0, _x1 = xlim
if _x0 is None:
_x0 = np.min(edges)
if _x1 is None:
_x1 = np.max(edges)
xlim = [_x0, _x1]
x0, x1 = np.min(edges), np.max(edges)
y0, y1 = ylim[0], np.max(counts)
if like is None:
ylim = list(ylim)
if xlim is None:
xlim = [x0, x1]
if ylim[1] is None:
ylim[1] = y1
if ylim[0] != 0:
ylim[0] = y0
fig_kwargs['title'] = title
fig_kwargs['xtitle'] = xtitle
fig_kwargs['ytitle'] = ytitle
fig_kwargs['ac'] = ac
fig_kwargs['ztolerance'] = ztolerance
fig_kwargs['grid'] = grid
centers = (edges[0:-1] + edges[1:]) / 2
binsizes = (centers - edges[0:-1]) * 2
if 'axes' not in fig_kwargs:
fig_kwargs['axes'] = dict()
_xlabs = []
for i in range(len(centers)):
_xlabs.append([centers[i], str(xlabs[i])])
fig_kwargs['axes']["xValuesAndLabels"] = _xlabs
############################################### Figure
self.statslegend = ""
self.edges = edges
self.centers = centers
        self.bins = edges # internally used by "like"
Figure.__init__(self, xlim, ylim, aspect, padding, **fig_kwargs)
if not self.yscale:
return None
rs = []
maxheigth = 0
if fill: #####################
if outline:
gap = 0
for i in range(len(centers)):
binsize = binsizes[i]
p0 = (edges[i] + gap * binsize, 0, 0)
p1 = (edges[i+1] - gap * binsize, counts[i], 0)
if radius:
rds = np.array([0, 0, radius, radius])
p1_yscaled = [p1[0], p1[1]*self.yscale, 0]
r = shapes.Rectangle(p0, p1_yscaled, radius=rds*binsize, res=6)
r.scale([1, 1/self.yscale, 1])
                    r.radius = None # so it doesn't get recreated and rescaled by insert()
else:
r = shapes.Rectangle(p0, p1)
if texture:
r.texture(texture)
c = 'w'
r.PickableOff()
maxheigth = max(maxheigth, p1[1])
if c in colors.cmaps_names:
col = colors.colorMap((p0[0]+p1[0])/2, c, edges[0], edges[-1])
else:
col = cols[i]
r.color(col).alpha(alpha).lighting('off')
r.name = f'bar_{i}'
r.z(self.ztolerance)
rs.append(r)
elif outline: #####################
lns = [[edges[0], 0, 0]]
for i in range(len(centers)):
lns.append([edges[i], counts[i], 0])
lns.append([edges[i + 1], counts[i], 0])
maxheigth = max(maxheigth, counts[i])
lns.append([edges[-1], 0, 0])
outl = shapes.Line(lns, c=lc, alpha=alpha, lw=lw).z(self.ztolerance)
outl.name = f'bar_outline_{i}'
rs.append(outl)
if errors: #####################
            for x, f in zip(centers, counts):
err = np.sqrt(f)
el = shapes.Line([x, f-err/2, 0], [x, f+err/2, 0], c=lc, alpha=alpha, lw=lw)
el.z(self.ztolerance * 2)
rs.append(el)
self.insert(*rs, as3d=False)
self.name = "PlotBars"
#########################################################################################
class PlotXY(Figure):
"""
Creates a `PlotXY(Figure)` object.
Parameters
----------
xerrors : bool
show error bars associated to each point in x
yerrors : bool
show error bars associated to each point in y
lw : int
width of the line connecting points in pixel units.
Set it to 0 to remove the line.
lc : str
line color
la : float
line "alpha", opacity of the line
dashed : bool
        draw a dashed line instead of a continuous line
    splined : bool
        spline the line joining the points as a continuous curve
elw : int
width of error bar lines in units of pixels
ec : color
color of error bar, by default the same as marker color
errorBand : bool
represent errors on y as a filled error band.
Use ``ec`` keyword to modify its color.
marker : str, int
use a marker for the data points
ms : float
marker size
mc : color
color of the marker
ma : float
opacity of the marker
xlim : list
set limits to the range for the x variable
ylim : list
set limits to the range for the y variable
aspect : float, str
Desired aspect ratio.
Use `aspect="equal"` to force the same units in x and y.
Scaling factor is saved in Figure.yscale.
padding : float, list
keep a padding space from the axes (as a fraction of the axis size).
This can be a list of four numbers.
title : str
title to appear on the top of the frame, like a header.
xtitle : str
title for the x-axis, can also be set using `axes=dict(xtitle="my x axis")`
ytitle : str
title for the y-axis, can also be set using `axes=dict(ytitle="my y axis")`
ac : str
axes color
grid : bool
show the background grid for the axes, can also be set using `axes=dict(xyGrid=True)`
ztolerance : float
a tolerance factor to superimpose objects (along the z-axis).
Example:
.. code-block:: python
import numpy as np
from vedo.pyplot import plot
x = np.arange(0, np.pi, 0.1)
fig = plot(x, np.sin(2*x), 'r0-', aspect='equal')
fig+= plot(x, np.cos(2*x), 'blue4 o-', like=fig)
fig.show()
.. image:: https://user-images.githubusercontent.com/32848391/74363882-c3638300-4dcb-11ea-8a78-eb492ad9711f.png
.. hint::
examples/pyplot/plot_errbars.py, plot_errband.py, plot_pip.py, scatter1.py, scatter2.py
.. image:: https://vedo.embl.es/images/pyplot/plot_pip.png
"""
def __init__(
self,
#
data,
xerrors=None,
yerrors=None,
#
lw=2,
lc="k",
la=1,
dashed=False,
splined=False,
#
elw=2, # error line width
ec=None, # error line or band color
errorBand=False, # errors in x are ignored
#
marker="",
ms=None,
mc=None,
ma=None,
# Figure and axes options:
like=None,
xlim=None,
ylim=(None, None),
aspect=4/3,
padding=0.05,
#
title="",
xtitle=" ",
ytitle=" ",
ac="k",
grid=True,
ztolerance=None,
label="",
**fig_kwargs,
):
line = False
if lw>0:
line = True
if marker == "" and not line and not splined:
marker = 'o'
if like is not None:
xlim = like.xlim
ylim = like.ylim
aspect = like.aspect
padding = like.padding
if utils.isSequence(xlim):
# deal with user passing eg [x0, None]
_x0, _x1 = xlim
if _x0 is None:
_x0 = data.min()
if _x1 is None:
_x1 = data.max()
xlim = [_x0, _x1]
# purge rows containing NaN from data
validIds = np.all(np.logical_not(np.isnan(data)), axis=1)
data = np.asarray(data[validIds])
fig_kwargs['title'] = title
fig_kwargs['xtitle'] = xtitle
fig_kwargs['ytitle'] = ytitle
fig_kwargs['ac'] = ac
fig_kwargs['ztolerance'] = ztolerance
fig_kwargs['grid'] = grid
x0, y0 = np.min(data, axis=0)
x1, y1 = np.max(data, axis=0)
if xerrors is not None and not errorBand:
x0 = min(data[:,0] - xerrors)
x1 = max(data[:,0] + xerrors)
if yerrors is not None:
y0 = min(data[:,1] - yerrors)
y1 = max(data[:,1] + yerrors)
if like is None:
if xlim is None:
xlim = (None, None)
xlim = list(xlim)
if xlim[0] is None:
xlim[0] = x0
if xlim[1] is None:
xlim[1] = x1
ylim = list(ylim)
if ylim[0] is None:
ylim[0] = y0
if ylim[1] is None:
ylim[1] = y1
self.entries = len(data)
self.mean = data.mean()
self.std = data.std()
######### the PlotXY marker
if mc is None:
mc = lc
if ma is None:
ma = la
if label:
nlab = LabelData()
nlab.text = label
nlab.tcolor = ac
nlab.marker = marker
if line and marker == "":
nlab.marker = '-'
nlab.mcolor = mc
fig_kwargs['label'] = nlab
############################################### Figure init
Figure.__init__(self, xlim, ylim, aspect, padding, **fig_kwargs)
if not self.yscale:
return None
acts = []
######### the PlotXY Line or Spline
if dashed:
l = shapes.DashedLine(data, c=lc, alpha=la, lw=lw)
acts.append(l)
elif splined:
l = shapes.KSpline(data).lw(lw).c(lc).alpha(la)
acts.append(l)
elif line:
l = shapes.Line(data, c=lc, alpha=la).lw(lw)
acts.append(l)
if marker:
pts = np.c_[data, np.zeros(len(data))]
if utils.isSequence(ms):
### variable point size
mk = shapes.Marker(marker, s=1)
mk.scale([1, 1/self.yscale, 1])
msv = np.zeros_like(pts)
msv[:, 0] = ms
marked = shapes.Glyph(
pts, glyphObj=mk, c=mc,
orientationArray=msv, scaleByVectorSize=True
)
else:
### fixed point size
if ms is None:
ms = (xlim[1] - xlim[0]) / 100.0
if utils.isSequence(mc):
fig_kwargs['marker_color'] = None # for labels
mk = shapes.Marker(marker, s=ms)
mk.scale([1, 1/self.yscale, 1])
msv = np.zeros_like(pts)
msv[:, 0] = 1
marked = shapes.Glyph(
pts, glyphObj=mk, c=mc,
orientationArray=msv, scaleByVectorSize=True
)
else:
mk = shapes.Marker(marker, s=ms)
mk.scale([1, 1/self.yscale, 1])
marked = shapes.Glyph(pts, glyphObj=mk, c=mc)
marked.name = "Marker"
marked.alpha(ma)
marked.z(3*self.ztolerance)
acts.append(marked)
######### the PlotXY marker errors
if ec is None:
if mc is None:
ec = lc
else:
ec = mc
ztol = self.ztolerance
if errorBand:
yerrors = np.abs(yerrors)
du = np.array(data)
dd = np.array(data)
du[:,1] += yerrors
dd[:,1] -= yerrors
if splined:
res = len(data) * 20
band1 = shapes.KSpline(du, res=res)
band2 = shapes.KSpline(dd, res=res)
band = shapes.Ribbon(band1, band2, res=(res,2))
else:
dd = list(reversed(dd.tolist()))
band = shapes.Line(du.tolist() + dd, closed=True)
band.triangulate().lw(0)
if ec is None:
band.c(lc)
else:
band.c(ec)
band.lighting('off').alpha(la).z(ztol)
acts.append(band)
else:
## xerrors
if xerrors is not None:
if len(xerrors) == len(data):
errs = []
for i, val in enumerate(data):
xval, yval = val
xerr = xerrors[i] / 2
el = shapes.Line((xval-xerr, yval, ztol), (xval+xerr, yval, ztol))
el.lw(elw)
errs.append(el)
mxerrs = merge(errs).c(ec).lw(lw).alpha(ma).z(2 * ztol)
acts.append(mxerrs)
else:
vedo.logger.error("in PlotXY(xerrors=...): mismatch in array length")
## yerrors
if yerrors is not None:
if len(yerrors) == len(data):
errs = []
for i, val in enumerate(data):
xval, yval = val
yerr = yerrors[i]
el = shapes.Line((xval, yval-yerr, ztol), (xval, yval+yerr, ztol))
el.lw(elw)
errs.append(el)
myerrs = merge(errs).c(ec).lw(lw).alpha(ma).z(2 * ztol)
acts.append(myerrs)
else:
vedo.logger.error("in PlotXY(yerrors=...): mismatch in array length")
self.insert(*acts, as3d=False)
self.name = "PlotXY"
def plot(*args, **kwargs):
"""
Draw a 2D line plot, or scatter plot, of variable x vs variable y.
Input format can be either `[allx], [allx, ally] or [(x1,y1), (x2,y2), ...]`
Use `like=...` if you want to use the same format of a previously
created Figure (useful when superimposing Figures) to make sure
they are compatible and comparable. If they are not compatible
you will receive an error message.
Parameters
----------
xerrors : bool
show error bars associated to each point in x
yerrors : bool
show error bars associated to each point in y
lw : int
width of the line connecting points in pixel units.
Set it to 0 to remove the line.
lc : str
line color
la : float
line "alpha", opacity of the line
dashed : bool
draw a dashed line instead of a continuous line
splined : bool
spline the line joining the points as a continuous curve
elw : int
width of error bar lines in units of pixels
ec : color
color of error bar, by default the same as marker color
errorBand : bool
represent errors on y as a filled error band.
Use ``ec`` keyword to modify its color.
marker : str, int
use a marker for the data points
ms : float
marker size
mc : color
color of the marker
ma : float
opacity of the marker
xlim : list
set limits to the range for the x variable
ylim : list
set limits to the range for the y variable
aspect : float
Desired aspect ratio.
If None, it is automatically calculated to get a reasonable aspect ratio.
Scaling factor is saved in Figure.yscale
padding : float, list
keep a padding space from the axes (as a fraction of the axis size).
This can be a list of four numbers.
title : str
title to appear on the top of the frame, like a header.
xtitle : str
title for the x-axis, can also be set using `axes=dict(xtitle="my x axis")`
ytitle : str
title for the y-axis, can also be set using `axes=dict(ytitle="my y axis")`
ac : str
axes color
grid : bool
show the background grid for the axes, can also be set using `axes=dict(xyGrid=True)`
ztolerance : float
a tolerance factor to superimpose objects (along the z-axis).
Example:
.. code-block:: python
from vedo.pyplot import plot
import numpy as np
x = np.linspace(0, 6.28, num=50)
plot(np.sin(x), 'r').plot(np.cos(x), 'bo-').show()
.. image:: https://user-images.githubusercontent.com/32848391/74363882-c3638300-4dcb-11ea-8a78-eb492ad9711f.png
.. hint::
examples/pyplot/plot_errbars.py, plot_errband.py, plot_pip.py, scatter1.py, scatter2.py
.. image:: https://vedo.embl.es/images/pyplot/plot_pip.png
-------------------------------------------------------------------------
.. note:: mode="bar"
Creates a `PlotBars(Figure)` object.
Input must be in format `[counts, labels, colors, edges]`.
Either or both `edges` and `colors` are optional and can be omitted.
Parameters
----------
errors : bool
show error bars
logscale : bool
use logscale on y-axis
fill : bool
fill bars with solid color `c`
gap : float
leave a small space between bars
radius : float
border radius of the top of the histogram bar. Default value is 0.1.
texture : str
url or path to an image to be used as texture for the bin
outline : bool
show outline of the bins
xtitle : str
title for the x-axis, can also be set using `axes=dict(xtitle="my x axis")`
ytitle : str
title for the y-axis, can also be set using `axes=dict(ytitle="my y axis")`
ac : str
axes color
padding : float, list
keep a padding space from the axes (as a fraction of the axis size).
This can be a list of four numbers.
aspect : float
the desired aspect ratio of the figure. Default is 4/3.
grid : bool
show the background grid for the axes, can also be set using `axes=dict(xyGrid=True)`
.. hint:: examples/pyplot/histo_1d_a.py histo_1d_b.py histo_1d_c.py histo_1d_d.py
.. image:: https://vedo.embl.es/images/pyplot/histo_1D.png
----------------------------------------------------------------------
.. note:: 2D functions
If input is an external function or a formula, draw the surface
representing the function `f(x,y)`.
Parameters
----------
x : float
x range of values
y : float
y range of values
zlimits : float
limit the z range of the independent variable
zlevels : int
will draw the specified number of z-levels contour lines
showNan : bool
show where the function does not exist as red points
bins : list
number of bins in x and y
.. hint:: plot_fxy.py
.. image:: https://vedo.embl.es/images/pyplot/plot_fxy.png
--------------------------------------------------------------------
.. note:: mode="complex"
If ``mode='complex'`` draw the real value of the function and color map the imaginary part.
Parameters
----------
cmap : str
diverging color map (white means imag(z)=0)
lw : float
line width of the binning
bins : list
binning in x and y
.. hint:: examples/pyplot/plot_fxy.py
.. image:: https://user-images.githubusercontent.com/32848391/73392962-1709a300-42db-11ea-9278-30c9d6e5eeaa.png
--------------------------------------------------------------------
.. note:: mode="polar"
If ``mode='polar'`` input arrays are interpreted as a list of polar angles and radii.
Build a polar (radar) plot by joining the set of points in polar coordinates.
Parameters
----------
title : str
plot title
tsize : float
title size
bins : int
number of bins in phi
r1 : float
inner radius
r2 : float
outer radius
lsize : float
label size
c : color
color of the line
ac : color
color of the frame and labels
alpha : float
opacity of the frame
ps : int
point size in pixels, if ps=0 no point is drawn
lw : int
line width in pixels, if lw=0 no line is drawn
deg : bool
input array is in degrees
vmax : float
normalize radius to this maximum value
fill : bool
fill convex area with solid color
splined : bool
interpolate the set of input points
showDisc : bool
draw the outer ring axis
nrays : int
draw this number of axis rays (continuous and dashed)
showLines : bool
draw lines to the origin
showAngles : bool
draw angle values
.. hint:: examples/pyplot/histo_polar.py
.. image:: https://user-images.githubusercontent.com/32848391/64992590-7fc82400-d8d4-11e9-9c10-795f4756a73f.png
--------------------------------------------------------------------
.. note:: mode="spheric"
If ``mode='spheric'`` input must be an external function rho(theta, phi).
A surface is created in spherical coordinates.
Return an ``Figure(Assembly)`` of 2 objects: the unit
sphere (in wireframe representation) and the surface `rho(theta, phi)`.
Parameters
----------
rfunc : function
handle to a user defined function `rho(theta, phi)`.
normalize : bool
scale surface to fit inside the unit sphere
res : int
grid resolution of the unit sphere
scalarbar : bool
add a 3D scalarbar to the plot for radius
c : color
color of the unit sphere
alpha : float
opacity of the unit sphere
cmap : str
color map for the surface
.. hint:: examples/pyplot/plot_spheric.py
.. image:: https://vedo.embl.es/images/pyplot/plot_spheric.png
"""
mode = kwargs.pop("mode", "")
if "spher" in mode:
return _plotSpheric(args[0], **kwargs)
if "bar" in mode:
return PlotBars(args[0], **kwargs)
if isinstance(args[0], str) or "function" in str(type(args[0])):
if "complex" in mode:
return _plotFz(args[0], **kwargs)
return _plotFxy(args[0], **kwargs)
# grab the matplotlib-like options
optidx = None
for i, a in enumerate(args):
if i > 0 and isinstance(a, str):
optidx = i
break
if optidx:
opts = args[optidx].replace(" ", "")
if "--" in opts:
opts = opts.replace("--", "")
kwargs["dashed"] = True
elif "-" in opts:
opts = opts.replace("-", "")
else:
kwargs["lw"] = 0
symbs = ['.','o','O', '0', 'p','*','h','D','d','v','^','>','<','s', 'x', 'a']
allcols = list(colors.colors.keys()) + list(colors.color_nicks.keys())
for cc in allcols:
if cc == "o":
continue
if cc in opts:
opts = opts.replace(cc, "")
kwargs["lc"] = cc
kwargs["mc"] = cc
break
for ss in symbs:
if ss in opts:
opts = opts.replace(ss, "", 1)
kwargs["marker"] = ss
break
opts = opts.replace(' ', '')
if opts:
vedo.logger.error(f"in plot(), could not understand option(s): {opts}")
if optidx == 1 or optidx is None:
if utils.isSequence(args[0][0]):
# print('case 1', 'plot([(x,y),..])')
data = np.array(args[0])
x = np.array(data[:, 0])
y = np.array(data[:, 1])
elif len(args) == 1 or optidx == 1:
# print('case 2', 'plot(x)')
x = np.linspace(0, len(args[0]), num=len(args[0]))
y = np.array(args[0])
elif utils.isSequence(args[1]):
# print('case 3', 'plot(allx,ally)')
x = np.array(args[0])
y = np.array(args[1])
elif utils.isSequence(args[0]) and utils.isSequence(args[0][0]):
# print('case 4', 'plot([allx,ally])')
x = np.array(args[0][0])
y = np.array(args[0][1])
elif optidx == 2:
# print('case 5', 'plot(x,y)')
x = np.array(args[0])
y = np.array(args[1])
else:
vedo.logger.error(f"plot(): Could not understand input arguments {args}")
return None
if "polar" in mode:
return _plotPolar(np.c_[x, y], **kwargs)
return PlotXY(np.c_[x, y], **kwargs)
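
# Illustrative usage sketch (not part of the original module): driving the
# bar mode documented above. The [counts, labels] input format follows the
# docstring; PlotBars.__init__ is assumed to supply default colors and edges
# for the omitted items.
def _demo_plot_bars():
    counts = [5, 8, 3, 7]
    labels = ["a", "b", "c", "d"]
    return plot([counts, labels], mode="bar")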
def histogram(*args, **kwargs):
"""
Histogramming for 1D and 2D data arrays.
This is meant as a convenience function that creates the appropriate object
based on the shape of the provided input data.
Use keyword `like=...` if you want to use the same format of a previously
created Figure (useful when superimposing Figures) to make sure
they are compatible and comparable. If they are not compatible
you will receive an error message.
-------------------------------------------------------------------------
.. note:: default mode, for 1D arrays
Creates a `Histogram1D(Figure)` object.
Parameters
----------
weights : list
An array of weights, of the same shape as `data`. Each value in `data`
only contributes its associated weight towards the bin count (instead of 1).
bins : int
number of bins
vrange : list
restrict the range of the histogram
density : bool
normalize the area to 1 by dividing by the nr of entries and bin size
logscale : bool
use logscale on y-axis
fill : bool
fill bars with solid color `c`
gap : float
leave a small space between bars
radius : float
border radius of the top of the histogram bar. Default value is 0.1.
texture : str
url or path to an image to be used as texture for the bin
outline : bool
show outline of the bins
errors : bool
show error bars
xtitle : str
title for the x-axis, can also be set using `axes=dict(xtitle="my x axis")`
ytitle : str
title for the y-axis, can also be set using `axes=dict(ytitle="my y axis")`
padding : float, list
keep a padding space from the axes (as a fraction of the axis size).
This can be a list of four numbers.
aspect : float
the desired aspect ratio of the histogram. Default is 4/3.
grid : bool
show the background grid for the axes, can also be set using `axes=dict(xyGrid=True)`
ztolerance : float
a tolerance factor to superimpose objects (along the z-axis).
.. hint:: examples/pyplot/histo_1d_a.py histo_1d_b.py histo_1d_c.py histo_1d_d.py
.. image:: https://vedo.embl.es/images/pyplot/histo_1D.png
-------------------------------------------------------------------------
.. note:: default mode, for 2D arrays
Input data formats `[(x1,x2,..), (y1,y2,..)] or [(x1,y1), (x2,y2),..]`
are both valid.
Parameters
----------
bins : list
binning as (nx, ny)
weights : list
array of weights to assign to each entry
cmap : str, lookuptable
color map name or look up table
alpha : float
opacity of the histogram
gap : float
separation between adjacent bins as a fraction of their size
scalarbar : bool
add a scalarbar to right of the histogram
like : Figure
grab and use the same format of the given Figure (for superimposing)
xlim : list
[x0, x1] range of interest. If left to None will automatically
choose the minimum or the maximum of the data range.
Data outside the range are completely ignored.
ylim : list
[y0, y1] range of interest. If left to None will automatically
choose the minimum or the maximum of the data range.
Data outside the range are completely ignored.
aspect : float
the desired aspect ratio of the figure.
title : str
title of the plot to appear on top.
If left blank some statistics will be shown.
xtitle : str
x axis title
ytitle : str
y axis title
ztitle : str
title for the scalar bar
ac : str
axes color, additional keyword for Axes can also be added
using e.g. `axes=dict(xyGrid=True)`
.. hint:: examples/pyplot/histo_2d.py
.. image:: https://vedo.embl.es/images/pyplot/histo_2D.png
-------------------------------------------------------------------------
.. note:: mode="hexbin"
If ``mode='hexbin'``, build a hexagonal histogram from a list of x and y values.
Parameters
----------
xtitle : str
x axis title
bins : int
nr of bins for the smaller range in x or y
vrange : list
range in x and y in format `[(xmin,xmax), (ymin,ymax)]`
norm : float
sets a scaling factor for the z axis (frequency axis)
fill : bool
draw solid hexagons
cmap : str
color map name for elevation
.. hint:: examples/pyplot/histo_hexagonal.py
.. image:: https://vedo.embl.es/images/pyplot/histo_hexagonal.png
-------------------------------------------------------------------------
.. note:: mode="polar"
If ``mode='polar'`` assume input is polar coordinate system (rho, theta):
Parameters
----------
weights : list
Array of weights, of the same shape as the input.
Each value only contributes its associated weight towards the bin count (instead of 1).
title : str
histogram title
tsize : float
title size
bins : int
number of bins in phi
r1 : float
inner radius
r2 : float
outer radius
phigap : float
gap angle between two radial bars, in degrees
rgap : float
gap factor along radius of numeric angle labels
lpos : float
label gap factor along radius
lsize : float
label size
c : color
color of the histogram bars, can be a list of length `bins`
bc : color
color of the frame and labels
alpha : float
opacity of the frame
cmap : str
color map name
deg : bool
input array is in degrees
vmin : float
minimum value of the radial axis
vmax : float
maximum value of the radial axis
labels : list
list of labels, must be of length `bins`
showDisc : bool
show the outer ring axis
nrays : int
draw this number of axis rays (continuous and dashed)
showLines : bool
show lines to the origin
showAngles : bool
show angular values
showErrors : bool
show error bars
.. hint:: examples/pyplot/histo_polar.py
.. image:: https://vedo.embl.es/images/pyplot/histo_polar.png
-------------------------------------------------------------------------
.. note:: mode="spheric"
If ``mode='spheric'``, build a histogram from list of theta and phi values.
Parameters
----------
rmax : float
maximum radial elevation of bin
res : int
sphere resolution
cmap : str
color map name
lw : int
line width of the bin edges
scalarbar : bool
add a scalarbar to plot
.. hint:: examples/pyplot/histo_spheric.py
.. image:: https://vedo.embl.es/images/pyplot/histo_spheric.png
"""
mode = kwargs.pop("mode", "")
if len(args) == 2: # x, y
if "spher" in mode:
return _histogramSpheric(args[0], args[1], **kwargs)
if "hex" in mode:
return _histogramHexBin(args[0], args[1], **kwargs)
return Histogram2D(args[0], args[1], **kwargs)
elif len(args) == 1:
if isinstance(args[0], vedo.Volume):
data = args[0].pointdata[0]
elif isinstance(args[0], vedo.Points):
pd0 = args[0].pointdata[0]
if pd0:
data = pd0.ravel()
else:
data = args[0].celldata[0].ravel()
else:
data = np.array(args[0])
if "spher" in mode:
return _histogramSpheric(args[0][:, 0], args[0][:, 1], **kwargs)
if len(data.shape) == 1:
if "polar" in mode:
return _histogramPolar(data, **kwargs)
return Histogram1D(data, **kwargs)
else:
if "hex" in mode:
return _histogramHexBin(args[0][:, 0], args[0][:, 1], **kwargs)
return Histogram2D(args[0], **kwargs)
print("histogram(): Could not understand input", args[0])
return None
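
# Illustrative usage sketch (not part of the original module): a 1D histogram
# of gaussian samples. The `bins`, `c` and `gap` keywords are taken from the
# docstring above and are assumed to be accepted by Histogram1D.
def _demo_histogram_1d():
    data = np.random.randn(1000)
    histo = histogram(data, bins=25, c='red4', gap=0.02)
    # histo.show()   # uncomment to render interactively
    return histo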
def fit(
points,
deg=1,
niter=0,
nstd=3,
xerrors=None,
yerrors=None,
vrange=None,
res=250,
lw=3,
c='red4',
):
"""
Polynomial fitting with parameter error and error bands calculation.
Returns a ``vedo.shapes.Line`` object.
Errors bars in both x and y are supported.
Additional information about the fitting output can be accessed with
``fit = vedo.pyplot.fit(pts)``:
- *fit.coefficients* will contain the coefficients of the polynomial fit
- *fit.coefficientErrors*, errors on the fitting coefficients
- *fit.MonteCarloCoefficients*, fitting coefficient set from MC generation
- *fit.covarianceMatrix*, covariance matrix as a numpy array
- *fit.reducedChi2*, reduced chi-square of the fitting
- *fit.ndof*, number of degrees of freedom
- *fit.dataSigma*, mean data dispersion from the central fit assuming Chi2=1
- *fit.errorLines*, a ``vedo.shapes.Line`` object for the upper and lower error band
- *fit.errorBand*, the ``vedo.mesh.Mesh`` object representing the error band
Errors on x and y can be specified. If left to `None` an estimate is made from
the statistical spread of the dataset itself. Errors are always assumed gaussian.
Parameters
----------
deg : int
degree of the polynomial to be fitted
niter : int
number of Monte Carlo iterations used to compute the error bands.
If set to 0, return the simple least-squares fit with naive error estimation
on the coefficients only. A reasonable non-zero value is about 500; in that
case *errorLines*, *errorBand* and the other class attributes are filled.
nstd : float
nr. of standard deviation to use for error calculation
xerrors : list
array of the same length of points with the errors on x
yerrors : list
array of the same length of points with the errors on y
vrange : list
specify the domain range of the fitting line
(only affects visualization, but can be used to extrapolate the fit
outside the data range)
res : int
resolution of the output fitted line and error lines
.. hint:: examples/pyplot/fitPolynomial1.py
.. image:: https://vedo.embl.es/images/pyplot/fitPolynomial1.png
"""
if isinstance(points, vedo.pointcloud.Points):
points = points.points()
points = np.asarray(points)
if len(points) == 2: # assume user is passing [x,y]
points = np.c_[points[0],points[1]]
x = points[:,0]
y = points[:,1] # ignore z
n = len(x)
ndof = n - deg - 1
if vrange is not None:
x0, x1 = vrange
else:
x0, x1 = np.min(x), np.max(x)
if xerrors is not None:
x0 -= xerrors[0]/2
x1 += xerrors[-1]/2
tol = (x1-x0)/10000
xr = np.linspace(x0,x1, res)
# project x errs on y
if xerrors is not None:
xerrors = np.asarray(xerrors)
if yerrors is not None:
yerrors = np.asarray(yerrors)
w = 1.0/yerrors
coeffs = np.polyfit(x, y, deg, w=w, rcond=None)
else:
coeffs = np.polyfit(x, y, deg, rcond=None)
# update yerrors, 1 bootstrap iteration is enough
p1d = np.poly1d(coeffs)
der = (p1d(x+tol)-p1d(x))/tol
yerrors = np.sqrt(yerrors*yerrors + np.power(der*xerrors,2))
if yerrors is not None:
yerrors = np.asarray(yerrors)
w = 1.0/yerrors
coeffs, V = np.polyfit(x, y, deg, w=w, rcond=None, cov=True)
else:
w = 1
coeffs, V = np.polyfit(x, y, deg, rcond=None, cov=True)
p1d = np.poly1d(coeffs)
theor = p1d(xr)
fitl = shapes.Line(xr, theor, lw=lw, c=c).z(tol*2)
fitl.coefficients = coeffs
fitl.covarianceMatrix = V
residuals2_sum = np.sum(np.power(p1d(x)-y, 2))/ndof
sigma = np.sqrt(residuals2_sum)
fitl.reducedChi2 = np.sum(np.power((p1d(x)-y)*w, 2))/ndof
fitl.ndof = ndof
fitl.dataSigma = sigma # worked out from data using chi2=1 hypo
fitl.name = "LinePolynomialFit"
if not niter:
fitl.coefficientErrors = np.sqrt(np.diag(V))
return fitl ################################
if yerrors is not None:
sigma = yerrors
else:
w = None
fitl.reducedChi2 = 1
Theors, all_coeffs = [], []
for i in range(niter):
noise = np.random.randn(n)*sigma
Coeffs = np.polyfit(x, y + noise, deg, w=w, rcond=None)
all_coeffs.append(Coeffs)
P1d = np.poly1d(Coeffs)
Theor = P1d(xr)
Theors.append(Theor)
all_coeffs = np.array(all_coeffs)
fitl.MonteCarloCoefficients = all_coeffs
stds = np.std(Theors, axis=0)
fitl.coefficientErrors = np.std(all_coeffs, axis=0)
# check distributions on the fly
# for i in range(deg+1):
# histogram(all_coeffs[:,i],title='par'+str(i)).show(new=1)
# histogram(all_coeffs[:,0], all_coeffs[:,1],
# xtitle='param0', ytitle='param1',scalarbar=1).show(new=1)
# histogram(all_coeffs[:,1], all_coeffs[:,2],
# xtitle='param1', ytitle='param2').show(new=1)
# histogram(all_coeffs[:,0], all_coeffs[:,2],
# xtitle='param0', ytitle='param2').show(new=1)
error_lines = []
for i in [nstd, -nstd]:
el = shapes.Line(xr, theor+stds*i, lw=1, alpha=0.2, c='k').z(tol)
error_lines.append(el)
el.name = "ErrorLine for sigma="+str(i)
fitl.errorLines = error_lines
l1 = error_lines[0].points().tolist()
cband = l1 + list(reversed(error_lines[1].points().tolist())) + [l1[0]]
fitl.errorBand = shapes.Line(cband).triangulate().lw(0).c('k', 0.15)
fitl.errorBand.name = "PolynomialFitErrorBand"
return fitl
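
# Illustrative usage sketch (not part of the original module): a quadratic fit
# with Monte Carlo error bands (niter>0 fills errorLines/errorBand as
# described in the docstring).
def _demo_fit():
    x = np.linspace(0, 2, 25)
    y = 3*x*x - x + 1 + np.random.randn(25)*0.2
    fitline = fit(np.c_[x, y], deg=2, niter=500)
    print("coefficients:", fitline.coefficients)
    print("errors      :", fitline.coefficientErrors)
    return fitline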
def _plotFxy(
z,
xlim=(0, 3),
ylim=(0, 3),
zlim=(None, None),
showNan=True,
zlevels=10,
c=None,
bc="aqua",
alpha=1,
texture="",
bins=(100, 100),
axes=True,
):
if c is not None:
texture = None # disable
ps = vtk.vtkPlaneSource()
ps.SetResolution(bins[0], bins[1])
ps.SetNormal([0, 0, 1])
ps.Update()
poly = ps.GetOutput()
dx = xlim[1] - xlim[0]
dy = ylim[1] - ylim[0]
todel, nans = [], []
for i in range(poly.GetNumberOfPoints()):
px, py, _ = poly.GetPoint(i)
xv = (px + 0.5) * dx + xlim[0]
yv = (py + 0.5) * dy + ylim[0]
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
zv = z(xv, yv)
if np.isnan(zv) or np.isinf(zv) or np.iscomplex(zv):
zv = 0
todel.append(i)
nans.append([xv, yv, 0])
except:
zv = 0
todel.append(i)
nans.append([xv, yv, 0])
poly.GetPoints().SetPoint(i, [xv, yv, zv])
if len(todel):
cellIds = vtk.vtkIdList()
poly.BuildLinks()
for i in todel:
poly.GetPointCells(i, cellIds)
for j in range(cellIds.GetNumberOfIds()):
poly.DeleteCell(cellIds.GetId(j)) # flag cell
poly.RemoveDeletedCells()
cl = vtk.vtkCleanPolyData()
cl.SetInputData(poly)
cl.Update()
poly = cl.GetOutput()
if not poly.GetNumberOfPoints():
vedo.logger.error("function is not real in the domain")
return None
if zlim[0]:
tmpact1 = Mesh(poly)
a = tmpact1.cutWithPlane((0, 0, zlim[0]), (0, 0, 1))
poly = a.polydata()
if zlim[1]:
tmpact2 = Mesh(poly)
a = tmpact2.cutWithPlane((0, 0, zlim[1]), (0, 0, -1))
poly = a.polydata()
cmap=''
if c in colors.cmaps_names:
cmap = c
c = None
bc= None
mesh = Mesh(poly, c, alpha).computeNormals().lighting("plastic")
if cmap:
mesh.addElevationScalars().cmap(cmap)
if bc:
mesh.bc(bc)
if texture:
mesh.texture(texture)
acts = [mesh]
if zlevels:
elevation = vtk.vtkElevationFilter()
elevation.SetInputData(poly)
bounds = poly.GetBounds()
elevation.SetLowPoint(0, 0, bounds[4])
elevation.SetHighPoint(0, 0, bounds[5])
elevation.Update()
bcf = vtk.vtkBandedPolyDataContourFilter()
bcf.SetInputData(elevation.GetOutput())
bcf.SetScalarModeToValue()
bcf.GenerateContourEdgesOn()
bcf.GenerateValues(zlevels, elevation.GetScalarRange())
bcf.Update()
zpoly = bcf.GetContourEdgesOutput()
zbandsact = Mesh(zpoly, "k", alpha).lw(1).lighting('off')
zbandsact._mapper.SetResolveCoincidentTopologyToPolygonOffset()
acts.append(zbandsact)
if showNan and len(todel):
bb = mesh.GetBounds()
if bb[4] <= 0 and bb[5] >= 0:
zm = 0.0
else:
zm = (bb[4] + bb[5]) / 2
nans = np.array(nans) + [0, 0, zm]
nansact = shapes.Points(nans, r=2, c="red5", alpha=alpha)
nansact.GetProperty().RenderPointsAsSpheresOff()
acts.append(nansact)
if isinstance(axes, dict):
axs = addons.Axes(mesh, **axes)
acts.append(axs)
elif axes:
axs = addons.Axes(mesh)
acts.append(axs)
assem = Assembly(acts)
assem.name = "plotFxy"
return assem
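
# Illustrative usage sketch (not part of the original module): plotting a
# scalar function of two variables through the public plot() entry point,
# which dispatches callables to _plotFxy() above.
def _demo_plot_fxy():
    def f(x, y):
        return np.sin(x*y) * np.exp(-x*x/4)
    return plot(f, xlim=(0, 3), ylim=(0, 3), zlevels=6, showNan=False)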
def _plotFz(
z,
x=(-1, 1),
y=(-1, 1),
zlimits=(None, None),
cmap="PiYG",
alpha=1,
lw=0.1,
bins=(75, 75),
axes=True,
):
ps = vtk.vtkPlaneSource()
ps.SetResolution(bins[0], bins[1])
ps.SetNormal([0, 0, 1])
ps.Update()
poly = ps.GetOutput()
dx = x[1] - x[0]
dy = y[1] - y[0]
arrImg = []
for i in range(poly.GetNumberOfPoints()):
px, py, _ = poly.GetPoint(i)
xv = (px + 0.5) * dx + x[0]
yv = (py + 0.5) * dy + y[0]
try:
zv = z(complex(xv), complex(yv))  # builtin complex; np.complex was removed in NumPy 1.24
except:
zv = 0
poly.GetPoints().SetPoint(i, [xv, yv, np.real(zv)])
arrImg.append(np.imag(zv))
mesh = Mesh(poly, alpha).lighting("plastic")
v = max(abs(np.min(arrImg)), abs(np.max(arrImg)))
mesh.cmap(cmap, arrImg, vmin=-v, vmax=v)
mesh.computeNormals().lw(lw)
if zlimits[0]:
mesh.cutWithPlane((0, 0, zlimits[0]), (0, 0, 1))
if zlimits[1]:
mesh.cutWithPlane((0, 0, zlimits[1]), (0, 0, -1))
acts = [mesh]
if axes:
axs = addons.Axes(mesh, ztitle="Real part")
acts.append(axs)
asse = Assembly(acts)
asse.name = "plotFz"
if isinstance(z, str):
asse.name += " " + z
return asse
def _plotPolar(
rphi,
title="",
tsize=0.1,
lsize=0.05,
r1=0,
r2=1,
c="blue",
bc="k",
alpha=1,
ps=5,
lw=3,
deg=False,
vmax=None,
fill=False,
splined=False,
smooth=0,
showDisc=True,
nrays=8,
showLines=True,
showAngles=True,
):
if len(rphi) == 2:
rphi = np.stack((rphi[0], rphi[1]), axis=1)
rphi = np.array(rphi, dtype=float)
thetas = rphi[:, 0]
radii = rphi[:, 1]
k = 180 / np.pi
if deg:
thetas = np.array(thetas, dtype=float) / k
vals = []
for v in thetas: # normalize range
t = np.arctan2(np.sin(v), np.cos(v))
if t < 0:
t += 2 * np.pi
vals.append(t)
thetas = np.array(vals, dtype=float)
if vmax is None:
vmax = np.max(radii)
angles = []
points = []
for i in range(len(thetas)):
t = thetas[i]
r = (radii[i]) / vmax * r2 + r1
ct, st = np.cos(t), np.sin(t)
points.append([r * ct, r * st, 0])
p0 = points[0]
points.append(p0)
r2e = r1 + r2
lines = None
if splined:
lines = shapes.KSpline(points, closed=True)
lines.c(c).lw(lw).alpha(alpha)
elif lw:
lines = shapes.Line(points)
lines.c(c).lw(lw).alpha(alpha)
points.pop()
ptsact = None
if ps:
ptsact = shapes.Points(points, r=ps, c=c, alpha=alpha)
filling = None
if fill and lw:
faces = []
coords = [[0, 0, 0]] + lines.points().tolist()
for i in range(1, lines.N()):
faces.append([0, i, i + 1])
filling = Mesh([coords, faces]).c(c).alpha(alpha)
back = None
back2 = None
if showDisc:
back = shapes.Disc(r1=r2e, r2=r2e * 1.01, c=bc, res=(1,360))
back.z(-0.01).lighting('off').alpha(alpha)
back2 = shapes.Disc(r1=r2e/2, r2=r2e/2 * 1.005, c=bc, res=(1,360))
back2.z(-0.01).lighting('off').alpha(alpha)
ti = None
if title:
ti = shapes.Text3D(title, (0, 0, 0), s=tsize, depth=0, justify="top-center")
ti.pos(0, -r2e * 1.15, 0.01)
rays = []
if showDisc:
rgap = 0.05
for t in np.linspace(0, 2 * np.pi, num=nrays, endpoint=False):
ct, st = np.cos(t), np.sin(t)
if showLines:
l = shapes.Line((0, 0, -0.01), (r2e * ct * 1.03, r2e * st * 1.03, -0.01))
rays.append(l)
ct2, st2 = np.cos(t+np.pi/nrays), np.sin(t+np.pi/nrays)
lm = shapes.DashedLine((0, 0, -0.01),
(r2e * ct2, r2e * st2, -0.01),
spacing=0.25)
rays.append(lm)
elif showAngles: # just the ticks
l = shapes.Line(
(r2e * ct * 0.98, r2e * st * 0.98, -0.01),
(r2e * ct * 1.03, r2e * st * 1.03, -0.01),
)
if showAngles:
if 0 <= t < np.pi / 2:
ju = "bottom-left"
elif t == np.pi / 2:
ju = "bottom-center"
elif np.pi / 2 < t <= np.pi:
ju = "bottom-right"
elif np.pi < t < np.pi * 3 / 2:
ju = "top-right"
elif t == np.pi * 3 / 2:
ju = "top-center"
else:
ju = "top-left"
a = shapes.Text3D(int(t * k), pos=(0, 0, 0), s=lsize, depth=0, justify=ju)
a.pos(r2e * ct * (1 + rgap), r2e * st * (1 + rgap), -0.01)
angles.append(a)
mrg = merge(back, back2, angles, rays, ti)
if mrg:
mrg.color(bc).alpha(alpha).lighting('off')
rh = Assembly([lines, ptsact, filling] + [mrg])
rh.base = np.array([0, 0, 0], dtype=float)
rh.top = np.array([0, 0, 1], dtype=float)
rh.name = "plotPolar"
return rh
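
# Illustrative usage sketch (not part of the original module): a polar plot
# through plot(mode="polar"), which forwards (angle, radius) pairs to
# _plotPolar() above.
def _demo_plot_polar():
    angles = np.linspace(0, 2*np.pi, 40)
    radii = 1 + 0.3*np.sin(3*angles)
    return plot(angles, radii, mode="polar", fill=True)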
def _plotSpheric(rfunc, normalize=True, res=33, scalarbar=True, c="grey", alpha=0.05, cmap="jet"):
sg = shapes.Sphere(res=res, quads=True)
sg.alpha(alpha).c(c).wireframe()
cgpts = sg.points()
r, theta, phi = utils.cart2spher(*cgpts.T)
newr, inans = [], []
for i in range(len(r)):
try:
ri = rfunc(theta[i], phi[i])
if np.isnan(ri):
inans.append(i)
newr.append(1)
else:
newr.append(ri)
except:
inans.append(i)
newr.append(1)
newr = np.array(newr, dtype=float)
if normalize:
newr = newr / np.max(newr)
newr[inans] = 1
nanpts = []
if len(inans):
redpts = utils.spher2cart(newr[inans], theta[inans], phi[inans])
nanpts.append(shapes.Points(redpts, r=4, c="r"))
pts = utils.spher2cart(newr, theta, phi)
ssurf = sg.clone().points(pts)
if len(inans):
ssurf.deleteCellsByPointIndex(inans)
ssurf.alpha(1).wireframe(0).lw(0.1)
ssurf.cmap(cmap, newr)
ssurf.computeNormals()
if scalarbar:
xm = np.max([np.max(pts[0]), 1])
ym = np.max([np.abs(np.max(pts[1])), 1])
ssurf.mapper().SetScalarRange(np.min(newr), np.max(newr))
sb3d = ssurf.addScalarBar3D(s=(xm * 0.07, ym), c='k').scalarbar
sb3d.rotateX(90).pos(xm * 1.1, 0, -0.5)
else:
sb3d = None
sg.pickable(False)
asse = Assembly([ssurf, sg] + nanpts + [sb3d])
asse.name = "plotSpheric"
return asse
def _histogramHexBin(
xvalues,
yvalues,
xtitle=" ",
ytitle=" ",
ztitle="",
bins=12,
vrange=None,
norm=1,
fill=True,
c=None,
cmap="terrain_r",
alpha=1,
):
xmin, xmax = np.min(xvalues), np.max(xvalues)
ymin, ymax = np.min(yvalues), np.max(yvalues)
dx, dy = xmax - xmin, ymax - ymin
if utils.isSequence(bins):
n,m = bins
else:
if xmax - xmin < ymax - ymin:
n = bins
m = np.rint(dy / dx * n / 1.2 + 0.5).astype(int)
else:
m = bins
n = np.rint(dx / dy * m * 1.2 + 0.5).astype(int)
src = vtk.vtkPointSource()
src.SetNumberOfPoints(len(xvalues))
src.Update()
pointsPolydata = src.GetOutput()
values = np.stack((xvalues, yvalues), axis=1)
zs = [[0.0]] * len(values)
values = np.append(values, zs, axis=1)
pointsPolydata.GetPoints().SetData(utils.numpy2vtk(values, dtype=float))
cloud = Mesh(pointsPolydata)
col = None
if c is not None:
col = colors.getColor(c)
hexs, binmax = [], 0
ki, kj = 1.33, 1.12
r = 0.47 / n * 1.2 * dx
for i in range(n + 3):
for j in range(m + 2):
cyl = vtk.vtkCylinderSource()
cyl.SetResolution(6)
cyl.CappingOn()
cyl.SetRadius(0.5)
cyl.SetHeight(0.1)
cyl.Update()
t = vtk.vtkTransform()
if not i % 2:
p = (i / ki, j / kj, 0)
else:
p = (i / ki, j / kj + 0.45, 0)
q = (p[0] / n * 1.2 * dx + xmin, p[1] / m * dy + ymin, 0)
ids = cloud.closestPoint(q, radius=r, returnCellId=True)
ne = len(ids)
if fill:
t.Translate(p[0], p[1], ne / 2)
t.Scale(1, 1, ne * 10)
else:
t.Translate(p[0], p[1], ne)
t.RotateX(90) # put it along Z
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(cyl.GetOutput())
tf.SetTransform(t)
tf.Update()
if c is None:
col = i
h = Mesh(tf.GetOutput(), c=col, alpha=alpha).flat()
h.lighting('plastic')
h.PickableOff()
hexs.append(h)
if ne > binmax:
binmax = ne
if cmap is not None:
for h in hexs:
z = h.GetBounds()[5]
col = colors.colorMap(z, cmap, 0, binmax)
h.color(col)
asse = Assembly(hexs)
asse.SetScale(1.2 / n * dx, 1 / m * dy, norm / binmax * (dx + dy) / 4)
asse.SetPosition(xmin, ymin, 0)
asse.base = np.array([0, 0, 0], dtype=float)
asse.top = np.array([0, 0, 1], dtype=float)
asse.name = "histogramHexBin"
return asse
def _histogramPolar(
values,
weights=None,
title="",
tsize=0.1,
bins=16,
r1=0.25,
r2=1,
phigap=0.5,
rgap=0.05,
lpos=1,
lsize=0.04,
c='grey',
bc="k",
alpha=1,
cmap=None,
deg=False,
vmin=None,
vmax=None,
labels=(),
showDisc=True,
nrays=8,
showLines=True,
showAngles=True,
showErrors=False,
):
k = 180 / np.pi
if deg:
values = np.array(values, dtype=float) / k
else:
values = np.array(values, dtype=float)
vals = []
for v in values: # normalize range
t = np.arctan2(np.sin(v), np.cos(v))
if t < 0:
t += 2 * np.pi
vals.append(t+0.00001)
histodata, edges = np.histogram(vals, weights=weights,
bins=bins, range=(0, 2*np.pi))
thetas = []
for i in range(bins):
thetas.append((edges[i] + edges[i + 1]) / 2)
if vmin is None:
vmin = np.min(histodata)
if vmax is None:
vmax = np.max(histodata)
errors = np.sqrt(histodata)
r2e = r1 + r2
if showErrors:
r2e += np.max(errors) / vmax * 1.5
back = None
if showDisc:
back = shapes.Disc(r1=r2e, r2=r2e * 1.01, c=bc, res=(1,360))
back.z(-0.01)
slices = []
lines = []
angles = []
errbars = []
for i, t in enumerate(thetas):
r = histodata[i] / vmax * r2
d = shapes.Disc((0, 0, 0), r1, r1+r, res=(1,360))
delta = np.pi/bins - np.pi/2 - phigap/k
d.cutWithPlane(normal=(np.cos(t + delta), np.sin(t + delta), 0))
d.cutWithPlane(normal=(np.cos(t - delta), np.sin(t - delta), 0))
if cmap is not None:
cslice = colors.colorMap(histodata[i], cmap, vmin, vmax)
d.color(cslice)
else:
if c is None:
d.color(i)
elif utils.isSequence(c) and len(c) == bins:
d.color(c[i])
else:
d.color(c)
d.alpha(alpha).lighting('off')
slices.append(d)
ct, st = np.cos(t), np.sin(t)
if showErrors:
showLines = False
err = np.sqrt(histodata[i]) / vmax * r2
errl = shapes.Line(
((r1 + r - err) * ct, (r1 + r - err) * st, 0.01),
((r1 + r + err) * ct, (r1 + r + err) * st, 0.01),
)
errl.alpha(alpha).lw(3).color(bc)
errbars.append(errl)
labs=[]
rays = []
if showDisc:
outerdisc = shapes.Disc(r1=r2e, r2=r2e * 1.01, c=bc, res=(1,360))
outerdisc.z(-0.01)
innerdisc = shapes.Disc(r1=r2e/2, r2=r2e/2 * 1.005, c=bc, res=(1, 360))
innerdisc.z(-0.01)
rays.append(outerdisc)
rays.append(innerdisc)
rgap = 0.05
for t in np.linspace(0, 2 * np.pi, num=nrays, endpoint=False):
ct, st = np.cos(t), np.sin(t)
if showLines:
l = shapes.Line((0, 0, -0.01), (r2e * ct * 1.03, r2e * st * 1.03, -0.01))
rays.append(l)
ct2, st2 = np.cos(t+np.pi/nrays), np.sin(t+np.pi/nrays)
lm = shapes.DashedLine((0, 0, -0.01),
(r2e * ct2, r2e * st2, -0.01),
spacing=0.25)
rays.append(lm)
elif showAngles: # just the ticks
l = shapes.Line(
(r2e * ct * 0.98, r2e * st * 0.98, -0.01),
(r2e * ct * 1.03, r2e * st * 1.03, -0.01),
)
if showAngles:
if 0 <= t < np.pi / 2:
ju = "bottom-left"
elif t == np.pi / 2:
ju = "bottom-center"
elif np.pi / 2 < t <= np.pi:
ju = "bottom-right"
elif np.pi < t < np.pi * 3 / 2:
ju = "top-right"
elif t == np.pi * 3 / 2:
ju = "top-center"
else:
ju = "top-left"
a = shapes.Text3D(int(t * k), pos=(0, 0, 0), s=lsize, depth=0, justify=ju)
a.pos(r2e * ct * (1 + rgap), r2e * st * (1 + rgap), -0.01)
angles.append(a)
ti = None
if title:
ti = shapes.Text3D(title, (0, 0, 0), s=tsize, depth=0, justify="top-center")
ti.pos(0, -r2e * 1.15, 0.01)
for i,t in enumerate(thetas):
if i < len(labels):
lab = shapes.Text3D(labels[i], (0, 0, 0), #font="VTK",
s=lsize, depth=0, justify="center")
lab.pos(r2e *np.cos(t) * (1 + rgap) * lpos / 2,
r2e *np.sin(t) * (1 + rgap) * lpos / 2, 0.01)
labs.append(lab)
mrg = merge(lines, angles, rays, ti, labs)
if mrg:
mrg.color(bc).lighting('off')
acts = slices + errbars + [mrg]
asse = Assembly(acts)
asse.frequencies = histodata
asse.bins = edges
asse.name = "histogramPolar"
return asse
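
# Illustrative usage sketch (not part of the original module): an angular
# histogram through histogram(mode="polar"), which forwards to
# _histogramPolar() above.
def _demo_histogram_polar():
    angles = np.random.uniform(0, 2*np.pi, 300)
    return histogram(angles, mode="polar", bins=12, cmap="viridis")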
def _histogramSpheric(
thetavalues, phivalues, rmax=1.2, res=8, cmap="rainbow", lw=0.1, scalarbar=True,
):
x, y, z = utils.spher2cart(np.ones_like(thetavalues) * 1.1, thetavalues, phivalues)
ptsvals = np.c_[x, y, z]
sg = shapes.Sphere(res=res, quads=True).shrink(0.999).computeNormals().lw(0.1)
sgfaces = sg.faces()
sgpts = sg.points()
# sgpts = np.vstack((sgpts, [0,0,0]))
# idx = sgpts.shape[0]-1
# newfaces = []
# for fc in sgfaces:
# f1,f2,f3,f4 = fc
# newfaces.append([idx,f1,f2, idx])
# newfaces.append([idx,f2,f3, idx])
# newfaces.append([idx,f3,f4, idx])
# newfaces.append([idx,f4,f1, idx])
newsg = sg # Mesh((sgpts, sgfaces)).computeNormals().phong()
newsgpts = newsg.points()
cntrs = sg.cellCenters()
counts = np.zeros(len(cntrs))
for p in ptsvals:
cell = sg.closestPoint(p, returnCellId=True)
counts[cell] += 1
acounts = np.array(counts, dtype=float)
counts *= (rmax - 1) / np.max(counts)
for cell, cn in enumerate(counts):
if not cn:
continue
fs = sgfaces[cell]
pts = sgpts[fs]
_, t1, p1 = utils.cart2spher(pts[:, 0], pts[:, 1], pts[:, 2])
x, y, z = utils.spher2cart(1 + cn, t1, p1)
newsgpts[fs] = np.c_[x, y, z]
newsg.points(newsgpts)
newsg.cmap(cmap, acounts, on='cells')
if scalarbar:
newsg.addScalarBar()
newsg.name = "histogramSpheric"
return newsg
def donut(
fractions,
title="",
tsize=0.3,
r1=1.7,
r2=1,
phigap=0,
lpos=0.8,
lsize=0.15,
c=None,
bc="k",
alpha=1,
labels=(),
showDisc=False,
):
"""
Donut plot or pie chart.
Parameters
----------
title : str
plot title
tsize : float
title size
r1 : float
inner radius
r2 : float
outer radius, starting from r1
phigap : float
gap angle between two radial bars, in degrees
lpos : float
label gap factor along radius
lsize : float
label size
c : color
color of the plot slices
bc : color
color of the disc frame
alpha : float
opacity of the disc frame
labels : list
list of labels
showDisc : bool
show the outer ring axis
.. hint:: examples/pyplot/donut.py
.. image:: https://vedo.embl.es/images/pyplot/donut.png
"""
fractions = np.array(fractions, dtype=float)
angles = np.add.accumulate(2 * np.pi * fractions)
angles[-1] = 2 * np.pi
if angles[-2] > 2 * np.pi:
print("Error in donut(): fractions must sum to 1.")
raise RuntimeError
cols = []
for i, th in enumerate(np.linspace(0, 2 * np.pi, 360, endpoint=False)):
for ia, a in enumerate(angles):
if th < a:
cols.append(c[ia])
break
labs = ()
if len(labels):
angles = np.concatenate([[0], angles])
labs = [""] * 360
for i in range(len(labels)):
a = (angles[i + 1] + angles[i]) / 2
j = int(a / np.pi * 180)
labs[j] = labels[i]
data = np.linspace(0, 2 * np.pi, 360, endpoint=False) + 0.005
dn = _histogramPolar(
data,
title=title,
bins=360,
r1=r1,
r2=r2,
phigap=phigap,
lpos=lpos,
lsize=lsize,
tsize=tsize,
c=cols,
bc=bc,
alpha=alpha,
vmin=0,
vmax=1,
labels=labs,
showDisc=showDisc,
showLines=0,
showAngles=0,
showErrors=0,
)
dn.name = "donut"
return dn
def violin(
values,
bins=10,
vlim=None,
x=0,
width=3,
splined=True,
fill=True,
c="violet",
alpha=1,
outline=True,
centerline=True,
lc="darkorchid",
lw=3,
):
"""
Violin style histogram.
Parameters
----------
bins : int
number of bins
vlim : list
input value limits. Crop values outside range
x : float
x-position of the violin axis
width : float
width factor of the normalized distribution
splined : bool
spline the outline
fill : bool
fill violin with solid color
outline : bool
add the distribution outline
centerline : bool
add the vertical centerline at x
lc : color
line color
.. hint:: examples/pyplot/histo_violin.py
.. image:: https://vedo.embl.es/images/pyplot/histo_violin.png
"""
fs, edges = np.histogram(values, bins=bins, range=vlim)
mine, maxe = np.min(edges), np.max(edges)
fs = fs.astype(float) / len(values) * width
rs = []
if splined:
lnl, lnr = [(0, edges[0], 0)], [(0, edges[0], 0)]
for i in range(bins):
xc = (edges[i] + edges[i + 1]) / 2
yc = fs[i]
lnl.append([-yc, xc, 0])
lnr.append([yc, xc, 0])
lnl.append((0, edges[-1], 0))
lnr.append((0, edges[-1], 0))
spl = shapes.KSpline(lnl).x(x)
spr = shapes.KSpline(lnr).x(x)
spl.color(lc).alpha(alpha).lw(lw)
spr.color(lc).alpha(alpha).lw(lw)
if outline:
rs.append(spl)
rs.append(spr)
if fill:
rb = shapes.Ribbon(spl, spr, c=c, alpha=alpha).lighting('off')
rs.append(rb)
else:
lns1 = [[0, mine, 0]]
for i in range(bins):
lns1.append([fs[i], edges[i], 0])
lns1.append([fs[i], edges[i + 1], 0])
lns1.append([0, maxe, 0])
lns2 = [[0, mine, 0]]
for i in range(bins):
lns2.append([-fs[i], edges[i], 0])
lns2.append([-fs[i], edges[i + 1], 0])
lns2.append([0, maxe, 0])
if outline:
rs.append(shapes.Line(lns1, c=lc, alpha=alpha, lw=lw).x(x))
rs.append(shapes.Line(lns2, c=lc, alpha=alpha, lw=lw).x(x))
if fill:
for i in range(bins):
p0 = (-fs[i], edges[i], 0)
p1 = (fs[i], edges[i + 1], 0)
r = shapes.Rectangle(p0, p1).x(p0[0] + x)
r.color(c).alpha(alpha).lighting('off')
rs.append(r)
if centerline:
cl = shapes.Line([0, mine, 0.01], [0, maxe, 0.01], c=lc, alpha=alpha, lw=2).x(x)
rs.append(cl)
asse = Assembly(rs)
asse.name = "violin"
return asse
def whisker(
data,
s=0.25,
c='k',
lw=2,
bc='blue',
alpha=0.25,
r=5,
jitter=True,
horizontal=False,
):
"""
Generate a "whisker" bar from a 1-dimensional dataset.
Parameters
----------
s : float
size of the box
c : color
color of the lines
lw : float
line width
bc : color
color of the box
alpha : float
transparency of the box
r : float
point radius in pixels (use value 0 to disable)
jitter : bool
add some randomness to points to avoid overlap
horizontal : bool
set horizontal layout
.. hint:: examples/pyplot/whiskers.py
.. image:: https://vedo.embl.es/images/pyplot/whiskers.png
"""
xvals = np.zeros_like(np.array(data))
if jitter:
xjit = np.random.randn(len(xvals))*s/9
xjit = np.clip(xjit, -s/2.1, s/2.1)
xvals += xjit
dmean = np.mean(data)
dq05 = np.quantile(data, 0.05)
dq25 = np.quantile(data, 0.25)
dq75 = np.quantile(data, 0.75)
dq95 = np.quantile(data, 0.95)
pts = None
if r: pts = shapes.Points([xvals, data], c=c, r=r)
rec = shapes.Rectangle([-s/2, dq25],[s/2, dq75], c=bc, alpha=alpha)
rec.GetProperty().LightingOff()
rl = shapes.Line([[-s/2, dq25],[s/2, dq25],[s/2, dq75],[-s/2, dq75]], closed=True)
l1 = shapes.Line([0,dq05,0], [0,dq25,0], c=c, lw=lw)
l2 = shapes.Line([0,dq75,0], [0,dq95,0], c=c, lw=lw)
lm = shapes.Line([-s/2, dmean], [s/2, dmean])
lns = merge(l1, l2, lm, rl)
asse = Assembly([lns, rec, pts])
if horizontal:
asse.rotateZ(-90)
asse.name = "Whisker"
asse.info['mean'] = dmean
asse.info['quantile_05'] = dq05
asse.info['quantile_25'] = dq25
asse.info['quantile_75'] = dq75
asse.info['quantile_95'] = dq95
return asse
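
# Illustrative usage sketch (not part of the original module): a single
# whisker bar; the computed quantiles are stored in the Assembly's info dict.
def _demo_whisker():
    data = np.random.randn(200)
    w = whisker(data, bc="blue5", jitter=True)
    print(w.info['quantile_25'], w.info['quantile_75'])
    return w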
def streamplot(X, Y, U, V, direction="both",
maxPropagation=None, mode=1, lw=0.001, c=None, probes=()):
"""
Generate a streamline plot of a vectorial field (U,V) defined at positions (X,Y).
Returns a ``Mesh`` object.
Parameters
----------
direction : str
either "forward", "backward" or "both"
maxPropagation : float
maximum physical length of the streamline
lw : float
line width in absolute units
mode : int
mode of varying the line width
- 0 - do not vary line width
- 1 - vary line width by first vector component
- 2 - vary line width vector magnitude
- 3 - vary line width by absolute value of first vector component
.. hint:: examples/pyplot/plot_stream.py
.. image:: https://vedo.embl.es/images/pyplot/plot_stream.png
"""
n = len(X)
m = len(Y[0])
if n != m:
print("Limitation in streamplot(): only square grids are allowed.", n, m)
raise RuntimeError()
xmin, xmax = X[0][0], X[-1][-1]
ymin, ymax = Y[0][0], Y[-1][-1]
field = np.sqrt(U * U + V * V)
vol = vedo.Volume(field, dims=(n, n, 1))
uf = np.ravel(U, order="F")
vf = np.ravel(V, order="F")
vects = np.c_[uf, vf, np.zeros_like(uf)]
vol.addPointArray(vects, "vects")
if len(probes) == 0:
probe = shapes.Grid(pos=((n-1)/2,(n-1)/2,0), s=(n-1, n-1), res=(n-1,n-1))
else:
if isinstance(probes, vedo.Points):
probes = probes.points()
else:
probes = | np.array(probes, dtype=float) | numpy.array |
from numba import njit
import numpy as np
@njit(nogil = True)
def returns(arrIn):
arrOut = np.array([np.nan for _ in range(len(arrIn))])
for i in range(1, len(arrIn)):
arrOut[i] = (arrIn[i] - arrIn[i-1])/arrIn[i-1]
return arrOut
@njit(nogil = True)
def logReturns(arrIn):
arrOut = np.array([np.nan for _ in range(len(arrIn))])
for i in range(1, len(arrIn)):
arrOut[i] = np.log(arrIn[i]/arrIn[i-1])
return arrOut
@njit(nogil = True)
def MA(arrIn, window = 60):
arrOut = np.array([np.nan for _ in range(len(arrIn))])
for i in range(window, len(arrIn)):
arrOut[i] = np.mean(arrIn[i-window:i])
return arrOut
@njit(nogil = True)
def EMA(arrIn, window = 60):
arrOut = np.array([np.nan for _ in range(len(arrIn))])
arrOut[0] = arrIn[0]
k = 2./(window + 1.)
for i in range(1, len(arrIn)):
arrOut[i] = arrIn[i]*k + arrOut[i-1]*(1-k)
return arrOut
@njit(nogil = True)
def MACD(arrIn, window1 = 12, window2 = 26):
ema1 = EMA(arrIn, window = window1)
ema2 = EMA(arrIn, window = window2)
return ema1 - ema2
@njit(nogil = True)
def DEA(arrIn, window1 = 12, window2 = 26, window3 = 9):
macd = MACD(arrIn, window1 = window1, window2 = window2)
return EMA(macd, window = window3)
@njit(nogil = True)
def OSC(arrIn, window1 = 12, window2 = 26, window3 = 9):
macd = MACD(arrIn, window1 = window1, window2 = window2)
dea = DEA(arrIn, window1 = window1, window2 = window2, window3 = window3)
return macd - dea
@njit(nogil = True)
def RSI(arrIn, window = 14):
up = np.array([np.nan for _ in range(len(arrIn))])
down = np.array([np.nan for _ in range(len(arrIn))])
up[0] = 0
down[0] = 0
for i in range(1, len(arrIn)):
delta = arrIn[i] - arrIn[i-1]
if delta > 0:
up[i] = delta
down[i] = 0
else:
up[i] = 0
down[i] = -delta
ema_up = EMA(up, window = window)
ema_down = EMA(down, window = window)
rs = ema_up/ema_down
return 100. - 100./(1.+rs)
@njit(nogil = True)
def OBV(arrInClose, arrInVol):
arrOut = np.array([np.nan for _ in range(len(arrInClose))])
arrOut[0] = arrInVol[0]
for i in range(1, len(arrInClose)):
arrOut[i] = arrOut[i-1] + np.sign(arrInClose[i] - arrInClose[i-1])*arrInVol[i]
return arrOut
@njit(nogil = True)
def nPeriodLow(arrIn, window = 7):
arrOut = np.array([np.nan for _ in range(len(arrIn))])
for i in range(len(arrIn)):
if i == 0:
arrOut[i] = arrIn[0]
elif i - window <= 0:
arrOut[i] = np.min(arrIn[:i])
else:
arrOut[i] = | np.min(arrIn[i-window:i]) | numpy.min |
import time
import numpy as np
import vtk
from vtk.util import numpy_support
from svtk.lib.toolbox.integer import minmax
from svtk.lib.toolbox.idarray import IdArray
from svtk.lib.toolbox.numpy_helpers import normalize
import math as m
class VTKAnimationTimerCallback(object):
"""This class is called every few milliseconds by VTK based on the set frame rate. This allows for animation.
I've added several modification functions, such as adding and deleting lines/points, changing colors, etc."""
__slots__ = ["points", "point_colors", "timer_count", "points_poly",
"lines", "lines_poly", "line_colors", "line_id_array",
"point_id_array", "point_vertices", "unused_locations",
"last_velocity_update", "last_color_velocity_update",
"last_bg_color_velocity_update", "_loop_time",
"remaining_lerp_fade_time", "lerp_multiplier",
"interactor_style", "renderer", "interactive_renderer",
"_started"]
def __init__(self):
self.timer_count = 0
# time.clock() was removed in Python 3.8; perf_counter() is the usual replacement
self.last_velocity_update = time.perf_counter()
self.last_color_velocity_update = time.perf_counter()
self.last_bg_color_velocity_update = time.perf_counter()
self._loop_time = time.perf_counter()
self.unused_locations = []
self.remaining_lerp_fade_time = 0
self.lerp_multiplier = 1
self.line_id_array = IdArray()
self.point_id_array = IdArray()
self._started=False
def add_lines(self, lines, line_colors):
"""
Adds multiple lines between any sets of points.
Args:
lines (list, tuple, np.ndarray, np.generic):
An array in the format of [2, point_a, point_b, 2, point_c, point_d, ...]. The two is needed for VTK's
lines.
line_colors (list, tuple, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
lines.
Returns:
list: An array containing the memory locations of each of the newly inserted lines.
"""
assert (isinstance(lines, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(line_colors, (list, tuple, np.ndarray, np.generic)))
np_line_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_line_color_data = numpy_support.vtk_to_numpy(self.line_colors)
#todo: add lines in unused locations if possible
mem_locations = range(int(len(np_line_data) / 3), int((len(np_line_data) + len(lines)) / 3))
np_line_data = np.append(np_line_data, lines)
if len(np_line_color_data) > 0:
np_line_color_data = np.append(np_line_color_data, line_colors, axis=0)
else:
np_line_color_data = line_colors
vtk_line_data = numpy_support.numpy_to_vtkIdTypeArray(np_line_data, deep=True)
self.lines.SetCells(int(len(np_line_data) / 3), vtk_line_data)
vtk_line_color_data = numpy_support.numpy_to_vtk(num_array=np_line_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_line_color_data)
self.lines_poly.Modified()
self.line_id_array.add_ids(mem_locations)
return mem_locations
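# Usage note (illustrative): assuming two points were first created with
# add_points(), a red line between them can be added and later removed with
#   line_ids = callback.add_lines([2, 0, 1], [[255, 0, 0]])
#   callback.del_lines(line_ids)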
def del_all_lines(self):
"""
Deletes all lines.
"""
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np.array([], dtype=np.int64), deep=True)
self.lines.SetCells(0, vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np.array([]), deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_lines(self, line_indices):
#todo: change idarray to use tuples of (start,end) locations and set this to delete those partitions
"""
Delete specific lines.
Args:
line_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing line memory locations(s) to delete.
"""
np_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_color_data = numpy_support.vtk_to_numpy(self.line_colors)
if isinstance(line_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
np_new_data = []
np_new_color_data = []
for i in range(len(line_indices)):
loc = self.line_id_array.pop_id(line_indices[i])
if loc is None:
#todo: put warning here
continue
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
else:
np_new_data = np_data[(last_loc + 1) * 3:loc * 3]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
np_new_color_data = np_color_data[(last_loc + 1):loc]
last_loc = loc
last_loc = loc
loc = len(np_data) / 3
np_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
np_data = np_data.astype(np.int64)
np_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_data, deep=True)
self.lines.SetCells(int(len(np_data) / 3), vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_points(self, point_indices):
"""
Delete specific points.
Args:
point_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing point memory locations(s) to delete.
"""
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())#1,1,1,2,1,3,1,4,1,5,1,6...
print(len(np_vert_data), len(np_point_data), len(np_point_color_data))
if isinstance(point_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
subtractor = 0
np_new_data = []
np_new_color_data = []
np_new_verts = []
for i in range(len(point_indices)):
loc = self.point_id_array.pop_id(point_indices[i])
if loc is None:
# todo: put warning here
continue
subtractor+=1
#I could just remove the end of the array, but this keeps the lines attached to the same points
if len(np_new_verts) >0:
np_new_verts = np.append(np_new_verts, np_vert_data[(last_loc+1)*2:loc*2], axis = 0)
else:
np_new_verts = np_vert_data[(last_loc+1)*2: loc*2]
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
else:
np_new_data = np_point_data[(last_loc + 1):loc]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1)*3:loc*3], axis=0)
else:
np_new_color_data = np_point_color_data[(last_loc + 1):loc]
last_loc = loc
if loc is None:
return
last_loc = loc
loc = len(np_point_data)
np_point_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
np_point_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1):loc], axis=0)
np_vert_data = np.append(np_new_verts, np_vert_data[(last_loc + 1)*2:loc*2], axis = 0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtk(np_point_data, deep=True)
self.points.SetData(vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data, deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
self.lines_poly.Modified()
def add_points(self, points, point_colors):
"""
Adds points in 3d space.
Args:
points (tuple, list, np.ndarray, np.generic):
An array in the format of [[x1,y1,z1], [x2,y2,x2], ..., [xn,yn,zn]]
point_colors (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added.
Returns:
"""
assert (isinstance(points, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(point_colors, (list, tuple, np.ndarray, np.generic)))
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())
print(np_vert_data)
for i in range(len(points)):
#todo: modify pointer_id_array to set free pointers to deleted data, not deleted data locations
if len(self.point_id_array.free_pointers)>0:
np_vert_data = np.append(np_vert_data, [1,self.point_id_array.free_pointers.pop()])
else:
                np_vert_data = np.append(np_vert_data, [1, len(np_vert_data)//2])
mem_locations = range(int(len(np_point_data)), int((len(np_point_data) + len(points))))
if len(np_point_data) > 0:
np_point_data = np.append(np_point_data, points, axis=0)
else:
np_point_data = points
if len(point_colors) ==1:
points = np.array(points)
point_colors = np.tile(point_colors, (points.shape[0], 1))
if len(np_point_color_data) > 0:
np_point_color_data = np.append(np_point_color_data, point_colors, axis=0)
else:
np_point_color_data = point_colors
vtk_point_data = numpy_support.numpy_to_vtk(num_array=np_point_data, deep=True, array_type=vtk.VTK_FLOAT)
self.points.SetData(vtk_point_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data.astype(np.int64), deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
vtk_point_color_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_point_color_data)
self.points_poly.Modified()
self.point_id_array.add_ids(mem_locations)
#print(self.point_id_array)
return mem_locations
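    # Hedged usage sketch (added; not part of the original source). Assuming `viz`
    # is an instance of this class, adding two red points and deleting them again
    # might look like:
    #   ids = viz.add_points([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], [[255, 0, 0]])
    #   viz.del_points(ids)
    # add_points returns the memory locations that del_points and set_point_colors
    # expect; a single color is tiled across all added points.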
def add_point_field(self, widths, normal, center, color):
"""
Adds a rectangular field of points.
Args:
widths (tuple, list, np.ndarray, np.generic): an array defining the widths of each dimension of the field.
normal (tuple, list, np.ndarray, np.generic): an array defining the normal to the field. Specifies angle.
center (tuple, list, np.ndarray, np.generic): an array defining the central position of the field.
color (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added, or a single color in the form of [[r1, g1, b1]].
Returns:
A list of integers representing the memory locations where the points were added.
"""
true_normal = normalize(normal)
if not np.allclose(true_normal, [1, 0, 0]):
zn = np.cross(true_normal, [1, 0, 0])
xn = np.cross(true_normal, zn)
else:
xn = [1, 0, 0]
zn = [0, 0, 1]
point_field = np.array([])
#todo: replace for loops with numpy or gpu ops
for z in range(-int(m.floor(widths[2] / 2.0)), int(m.ceil(widths[2] / 2.0))):
for y in range(-int(m.floor(widths[1] / 2.0)), int(m.ceil(widths[1] / 2.0))):
for x in range(-int(m.floor(widths[0] / 2.0)), int(m.ceil(widths[0] / 2.0))):
vector_space_matrix = np.column_stack(
(np.transpose(xn), np.transpose(true_normal), np.transpose(zn)))
translation = np.matmul([x, y, z], vector_space_matrix)
point_location = [center[0], center[1], center[2]] + translation
point_location = [point_location]
if len(point_field)>0:
point_field = np.append(point_field, point_location, axis = 0)
else:
point_field = point_location
return self.add_points(point_field, color) #returns ids
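    # Hedged usage sketch (added; not part of the original source). Assuming `viz`
    # is an instance of this class, a flat 10x10 field of white points centred at
    # the origin (widths[1] = 1 gives a single layer along the normal) might look like:
    #   ids = viz.add_point_field(widths=[10, 1, 10], normal=[0, 1, 0],
    #                             center=[0, 0, 0], color=[[255, 255, 255]])
    # The returned ids can then be passed to del_points or set_point_colors.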
def set_bg_color(self, color):
"""
Sets the background color of the viewport.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
r, g, b = color[0]
r,g,b = (r/255.,g/255.,b/255.)
self.renderer.SetBackground((minmax(r, 0, 1), minmax(g, 0, 1), minmax(b, 0, 1)))
self.renderer.Modified()
def set_all_point_colors(self, color):
"""
Sets the color of every point.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data = np.tile(color, (np_color_data.shape[0], 1))
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
def set_point_colors(self, colors, point_indices=None):
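        """
        Sets the full point color array (when point_indices is None) or the colors
        of the points at the given indices.
        Args:
            colors (tuple, list, np.ndarray, np.generic): rgb color(s) to apply.
            point_indices (tuple, list, np.ndarray, np.generic, optional): indices of the points to recolor.
        """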
if point_indices is None:
if isinstance(colors, (list, tuple, np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=colors, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data[point_indices] = colors
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
def setup_lerp_all_point_colors(self, color, fade_time):
"""
Sets all points to the same color, but uses lerping to slowly change the colors.
Args:
            color (tuple, list, np.ndarray, np.generic): target rgb color applied to every point.
            fade_time (float): duration over which the point colors are interpolated.
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
self.next_colors = np.tile(color, (np_color_data.shape[0], 1))
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def lerp_point_colors(self, colors, fade_time, point_indices=None):
"""
Sets colors for specific points, but uses lerping to slowly change those colors.
Args:
            colors (tuple, list, np.ndarray, np.generic): target rgb color(s).
            fade_time (float): duration over which the point colors are interpolated.
            point_indices (tuple, list, np.ndarray, np.generic, optional): indices of the points to update.
"""
if isinstance(self.next_colors, (np.ndarray, np.generic)):
if isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
self.next_colors[point_indices] = colors
else:
self.next_colors = colors
self.next_color_indices = None
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)) or isinstance(colors, (list, tuple)):
if self.lerp_fade_time > 0:
self.next_colors = np.append(self.next_colors, colors)
if point_indices is not None:
                    self.next_color_indices = np.append(self.next_color_indices, point_indices)
from pydpm._sampler import Basic_Sampler
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
from collections import Counter
def debug_sampler_and_plot():
sampler = Basic_Sampler('gpu')
# gamma
output = sampler.gamma(np.ones(1000)*4.5, 5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 100, 100), stats.gamma.pdf(np.linspace(0, 100, 100), 4.5, scale=5))
plt.title('gamma(4.5, 5)')
plt.show()
# standard_gamma
output = sampler.standard_gamma(np.ones(1000)*4.5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 20, 100), stats.gamma.pdf(np.linspace(0, 20, 100), 4.5))
plt.title('standard_gamma(4.5)')
plt.show()
# dirichlet
output = sampler.dirichlet(np.ones(1000)*4.5)
plt.figure()
plt.hist(output, bins=20, density=True)
# x = np.linspace(np.min(output), np.max(output), 100)
# plt.plot(x, stats.dirichlet.pdf(x, alpha=np.ones(100)*4.5))
plt.title('dirichlet(4.5)')
plt.show()
# beta
output = sampler.beta(np.ones(1000)*0.5, 0.5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 1, 100), stats.beta.pdf(np.linspace(0, 1, 100), 0.5, 0.5))
plt.title('beta(0.5, 0.5)')
plt.show()
# beta(2, 5)
output = sampler.beta(np.ones(1000)*2, 5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 1, 100), stats.beta.pdf(np.linspace(0, 1, 100), 2, 5))
plt.title('beta(2, 5)')
plt.show()
# normal
output = sampler.normal(np.ones(1000)*5, np.ones(1000)*2)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-2, 13, 100), stats.norm.pdf(np.linspace(-2, 13, 100), 5, scale=2))
plt.title('normal(5, 2)')
plt.show()
# standard_normal
output = sampler.standard_normal(1000)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-3, 3, 100), stats.norm.pdf(np.linspace(-3, 3, 100)))
plt.title('standard_normal()')
plt.show()
# uniform
output = sampler.uniform(np.ones(1000)*(-2), np.ones(1000)*5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-3, 6, 100), stats.uniform.pdf(np.linspace(-3, 6, 100), -2, 7))
plt.title('uniform(-2, 5)')
plt.show()
# standard_uniform
output = sampler.standard_uniform(1000)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-0.3, 1.3, 100), stats.uniform.pdf(np.linspace(-0.3, 1.3, 100)))
plt.title('standard_uniform()')
plt.show()
# binomial
output = sampler.binomial(np.ones(1000)*10, np.ones(1000)*0.5)
plt.figure()
    plt.hist(output, bins=np.max(output))
import numpy as np
import yt
from galaxy_analysis import Galaxy
import matplotlib.pyplot as plt
from galaxy_analysis.plot.plot_styles import *
import deepdish as dd
import glob
import os
from galaxy_analysis.utilities import utilities
wdir = "./"
#
# Define region
#
def compute_mass_outflow(ds, data):
dL = 200.0 * yt.units.pc
above_region = data.cut_region(["(obj['cylindrical_z'].in_units('kpc') > 0.9) & (obj['cylindrical_z'].in_units('kpc') < 1.1) & (obj['cylindrical_radius'].in_units('kpc') < 2)"])
above_hot_region = above_region.cut_region(["obj['temperature'].in_units('K') > 3.0E5"])
above_cold_region = above_region.cut_region(["obj['temperature'].in_units('K') < 3.0E5"])
above_colder_region = above_region.cut_region(["obj['temperature'].in_units('K') < 1.0E4"])
    below_region = data.cut_region(["(obj['cylindrical_z'].in_units('kpc') < -0.9) & (obj['cylindrical_z'].in_units('kpc') > -1.1) & (obj['cylindrical_radius'].in_units('kpc') < 2)"])
below_hot_region = below_region.cut_region(["obj['temperature'].in_units('K') > 3.0E5"])
below_cold_region = below_region.cut_region(["obj['temperature'].in_units('K') < 3.0E5"])
below_colder_region = below_region.cut_region(["obj['temperature'].in_units('K') < 1.0E4"])
i = 0
total_vals = np.zeros(3)
metal_vals = np.zeros(3)
for above_reg, below_reg in [(above_region, below_region),
(above_hot_region,below_hot_region),
(above_cold_region,below_cold_region)]:
# (above_colder_region,below_colder_region)]:
M = above_reg['cell_mass'].to('Msun')
O = above_reg['O_Density'].value / above_reg['Density'].value * M
v = above_reg['z-velocity'].to('km/s')
select = v > 0
total_outflow_rate = np.sum(M[select]*v[select]/dL).to('Msun/yr')
metal_outflow_rate = np.sum(O[select]*v[select]/dL).to('Msun/yr')
M = below_reg['cell_mass'].to('Msun')
O = below_reg['O_Density'].value / below_reg['Density'].value * M
v = below_reg['z-velocity'].to('km/s')
select = v < 0
total_outflow_rate += np.sum(-M[select]*v[select]/dL).to('Msun/yr')
metal_outflow_rate += np.sum(-O[select]*v[select]/dL).to('Msun/yr')
total_vals[i] = total_outflow_rate
metal_vals[i] = metal_outflow_rate
i = i + 1
# now I need to compute the SFR and metal production rate
return total_vals[0], total_vals[1], total_vals[2], metal_vals[0], metal_vals[1], metal_vals[2]
def compute_energy_outflow(ds, data):
birth_mass = data['birth_mass']
birth_time = data['creation_time'].to('Myr')
pt = data['particle_type']
t = ds.current_time.to('Myr')
select = ((birth_mass > 8.0) * (birth_mass < 25.0)) * (pt > 11) * ((t - birth_time ) < 40*yt.units.Myr)
N_SN = np.size(birth_mass[select])
dL = 200 * yt.units.pc
region = None
above_region = data.cut_region(["(obj['cylindrical_z'].in_units('kpc') > 0.9) & (obj['cylindrical_z'].in_units('kpc') < 1.1) & (obj['cylindrical_radius'].in_units('kpc') < 2)"])
above_hot_region = above_region.cut_region(["obj['temperature'].in_units('K') > 3.0E5"])
above_cold_region = above_region.cut_region(["obj['temperature'].in_units('K') < 3.0E5"])
above_colder_region = above_region.cut_region(["obj['temperature'].in_units('K') < 1.0E4"])
    below_region = data.cut_region(["(obj['cylindrical_z'].in_units('kpc') < -0.9) & (obj['cylindrical_z'].in_units('kpc') > -1.1) & (obj['cylindrical_radius'].in_units('kpc') < 2)"])
below_hot_region = below_region.cut_region(["obj['temperature'].in_units('K') > 3.0E5"])
below_cold_region = below_region.cut_region(["obj['temperature'].in_units('K') < 3.0E5"])
below_colder_region = below_region.cut_region(["obj['temperature'].in_units('K') < 1.0E4"])
vals = np.zeros(4)
if N_SN == 0:
print("No Supernova this time step")
return vals
i = 0
for above_reg, below_reg in [(above_region, below_region),
(above_hot_region,below_hot_region),
(above_cold_region,below_cold_region),
(above_colder_region,below_colder_region)]:
M = above_reg['cell_mass'].to('Msun')
v = above_reg['z-velocity'].to('km/s')
cV = above_reg['cell_volume'].to('cm**(3)')
KE = 0.5*M*above_reg['velocity_magnitude']**2
select = v > 0
above_E_out_therm = (v / dL * above_reg['thermal_energy'].to('erg/g') * M).to('erg/s')[select]
above_E_out_kin = (v / dL * KE).to('erg/s')[select]
M = below_reg['cell_mass'].to('Msun')
v = below_reg['z-velocity'].to('km/s')
cV = below_reg['cell_volume'].to('cm**(3)')
KE = 0.5*M*below_reg['velocity_magnitude']**2
select = v < 0
below_E_out_therm = (-v / dL * below_reg['thermal_energy'].to('erg/g') * M).to('erg/s')[select]
below_E_out_kin = (-v / dL * KE).to('erg/s')[select]
        E_out_therm = np.sum(above_E_out_therm)
# -*- coding: utf-8 -*-
"""
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
@author: <NAME> (sghosh) (started Feb 2018)
@author: <NAME> (alepros), Technical University of Denmark
"""
from time import time
import numpy as np
from numpy import flatnonzero as find, pi, exp
from pandapower import LoadflowNotConverged
from pandapower.pypower.pfsoln import pfsoln
from pandapower.pypower.idx_gen import PG, QG
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.makeYbus import makeYbus
from pandapower.pypower.idx_bus import GS, BS, PD , QD
from pandapower.auxiliary import _sum_by_group, _check_if_numba_is_installed,\
_check_bus_index_and_print_warning_if_high,\
_check_gen_index_and_print_warning_if_high, \
_add_pf_options, _add_ppc_options, _clean_up, sequence_to_phase, \
phase_to_sequence, X012_to_X0, X012_to_X2, \
I1_from_V012, S_from_VI_elementwise, V1_from_ppc, V_from_I,\
combine_X012, I0_from_V012, I2_from_V012, ppException
from pandapower.pf.run_newton_raphson_pf import _run_newton_raphson_pf
from pandapower.build_bus import _add_ext_grid_sc_impedance
from pandapower.pypower.bustypes import bustypes
from pandapower.run import _passed_runpp_parameters
from pandapower.pypower.idx_bus import VM, VA
from pandapower.pypower.idx_gen import GEN_BUS, GEN_STATUS, VG
from pandapower.results import _copy_results_ppci_to_ppc, _extract_results_3ph,\
init_results
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
class Not_implemented(ppException):
"""
Exception being raised in case loadflow did not converge.
"""
pass
def _get_pf_variables_from_ppci(ppci):
"""
    Used for getting values for pfsoln function in one convenient function
"""
# default arguments
if ppci is None:
        raise ValueError('ppci is empty')
# get data for calc
base_mva, bus, gen, branch = \
ppci["baseMVA"], ppci["bus"], ppci["gen"], ppci["branch"]
# get bus index lists of each type of bus
ref, pv, pq = bustypes(bus, gen)
# generator info
on = find(gen[:, GEN_STATUS] > 0) # which generators are on?
gbus = gen[on, GEN_BUS].astype(int) # what buses are they at?
# initial state
v0 = bus[:, VM] * exp(1j * pi / 180 * bus[:, VA])
v0[gbus] = gen[on, VG] / abs(v0[gbus]) * v0[gbus]
ref_gens = ppci["internal"]["ref_gens"]
return base_mva, bus, gen, branch, ref, pv, pq, on, gbus, v0, ref_gens
def _store_results_from_pf_in_ppci(ppci, bus, gen, branch):
ppci["bus"], ppci["gen"], ppci["branch"] = bus, gen, branch
return ppci
def _get_elements(params,net,element,phase,typ):
sign = -1 if element.endswith("sgen") else 1
elm = net[element].values
# # Trying to find the column no for using numpy filters for active loads
scaling = net[element].columns.get_loc("scaling")
typ_col = net[element].columns.get_loc("type") # Type = Delta or Wye load
# active wye or active delta row selection
active = (net["_is_elements"][element]) & (elm[:,typ_col] == typ)
bus = [net[element].columns.get_loc("bus")]
if len(elm):
if element == 'load' or element == 'sgen':
vl = elm[active,scaling].ravel()
p_mw = [net[element].columns.get_loc("p_mw")]
q_mvar = [net[element].columns.get_loc("q_mvar")]
params['p'+phase+typ] = np.hstack([params['p'+phase+typ],
elm[active,p_mw]/3 * vl * sign])
params['q'+phase+typ] = np.hstack([params['q'+phase+typ],
(elm[active,q_mvar]/3) * vl * sign])
params['b'+typ] = np.hstack([params['b'+typ],
elm[active,bus].astype(int)])
elif element.startswith('asymmetric'):
vl = elm[active,scaling].ravel()
p = {'a': net[element].columns.get_loc("p_a_mw")
,'b': net[element].columns.get_loc("p_b_mw")
,'c': net[element].columns.get_loc("p_c_mw")
}
q = {'a' : net[element].columns.get_loc("q_a_mvar")
,'b' : net[element].columns.get_loc("q_b_mvar")
,'c' : net[element].columns.get_loc("q_c_mvar")
}
params['p'+phase+typ] = np.hstack([params['p'+phase+typ],
elm[active,p[phase]] * vl * sign])
params['q'+phase+typ] = np.hstack([params['q'+phase+typ],
elm[active,q[phase]] * vl * sign])
params['b'+typ] = np.hstack([params['b'+typ],
elm[active,bus].astype(int)])
return params
def _load_mapping(net, ppci1):
"""
Takes three phase P, Q values from PQ elements
sums them up for each bus
maps them in ppc bus order and forms s_abc matrix
"""
bus_lookup = net["_pd2ppc_lookups"]["bus"]
params = dict()
phases = ['a', 'b', 'c']
load_types = ['wye', 'delta']
load_elements = ['load', 'asymmetric_load', 'sgen', 'asymmetric_sgen']
# =============================================================================
# Loop to initialize and feed s_abc wye and delta values
# =============================================================================
for phase in phases:
for typ in load_types:
params['S'+phase+typ] = (ppci1["bus"][:, PD] +
ppci1["bus"][:, QD]*1j)*0
params['p'+phase+typ] = np.array([]) # p values from loads/sgens
params['q'+phase+typ] = np.array([]) # q values from loads/sgens
params['P'+phase+typ] = np.array([]) # Aggregated Active Power
params['Q'+phase+typ] = np.array([]) # Aggregated reactive Power
            params['b'+phase+typ] = np.array([], dtype=int)
import sys
import numpy as np
from trimmedEM import TrimmedEM
from utils.grader import RMCGrader
from utils.data_gen import regression_with_missing_cov, add_outliers
# Experiment for the Regression with Missing Covariates (RMC) Model
epsilons = [0, 0.05, 0.1, 0.15, 0.2]
n_samples = 2000
rmc_sigma = 0.1
dim = 100
# sparsities = np.array([0.4, 0.2, 0.1, 0.05]) * dim
# sparsities = np.array([32, 24, 16, 12, 8, 4, 2])
sparsities = np.array([15, 13, 11, 9, 7, 5, 3])
sparsities = sparsities.astype(int)
rmc_g = RMCGrader(sigma=rmc_sigma)
results_RMC = {
'eps-1': np.array(epsilons),
'err-1': [],
'sparsity-1': np.array(sparsities),
'n_samples-1': n_samples,
'dim-1': dim,
'true-beta-1': [],
}
n_iters = 101
n_repeats = 20
## type 1: error v.s. n_samples / (sparsity * log(dim)), for different epsilon
print("==================\ntype 1: error v.s. n_samples / (sparsity * log(dim)), for different epsilon")
for r in range(n_repeats):
print("===\nRepeat {}".format(r))
err_rates = [[] for _ in range(len(epsilons))]
for i, eps in enumerate(results_RMC['eps-1']):
        n_outliers = int(results_RMC['n_samples-1'] * eps)
results_RMC['true-beta-1'].append([])
for s in results_RMC['sparsity-1']:
# re-generate data with different sparsity
effective_idxs = np.random.choice(dim, size=s, replace=False)
true_beta = np.zeros(dim)
true_beta[effective_idxs] = 10
X, Y = regression_with_missing_cov(n_samples=n_samples, n_features=dim,
missing_prob=0.1,
mean=true_beta, sigma=rmc_sigma,
subspace=effective_idxs)
results_RMC['true-beta-1'][i].append(true_beta)
# corrupt the data
XY = add_outliers(np.hstack((X, Y[:, np.newaxis])),
n_outliers=n_outliers,
dist_factor=50)
X_corrupted, Y_corrupted = XY[:, :-1], XY[:, -1].ravel()
# set initial point for gradient descent
init_distortion = np.linalg.norm(true_beta) * \
np.random.randn(results_RMC['dim-1']) / \
(4 * np.sqrt(dim))
beta0 = true_beta + init_distortion
model = TrimmedEM(n_iters=n_iters,
eta=0.05, sparsity=s,
alpha=0.0, grader=rmc_g,
init_val=beta0)
            model.fit(X_corrupted, Y_corrupted)
err_rates[i].append(model.loss(groundtruth=true_beta))
# print("eps={}, sparsity={}, loss={}, loss / true_beta={}"
# .format(eps, s, err_rates[i][-1], err_rates[i][-1] / np.linalg.norm(true_beta)))
    results_RMC['err-1'].append(np.array(err_rates))
# -*- coding:Utf-8 -*-
#####################################################################
# This file is part of RGPA.
# Foobar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Foobar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
# authors :
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
#####################################################################
"""
.. currentmodule:: boxn
.. autosummary::
"""
from pylayers.util.project import *
import numpy as np
import os
import pdb
import copy
import time
try:
from tvtk.api import tvtk
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
# GeomNetType = np.dtype([('Id',np.uint64),
# ('time',np.uint64),
# ('p',float,(3)),
# ('v',float,(3)),
# ('a',float,(3))])
class LBoxN(PyLayers):
"""
class LBoxN
List of BoxN
    Attributes
---------
box : np.array
array containing BoxN object. default void
vol : list
volume of each boxes from self.box. default void
bd : np.arrays 2*len(box) x ndim
contains all the boundaries of boxes from self.box. default void
ndim : int
dimension of the nbox
parmsh : dictionnary
keys ['display'] =True
['interactive'] =False
LB : LBoxN
        Create an LBoxN object from another one. default : None, the LBoxN object created is void.
Methods
-------
mesure(self):
measure intervals of box
append(self,b):
append a box 'b' to a Lbox
append_l(self,lb):
append a lbox 'lb' to a lbox
info(self):
display LBoxN information
bd2coord(self,Mapping = False):
        convert boundaries of Lbox to their vertex coordinates
octant(self):
quadtree on Lboxes
volume(self):
estimate volume of LBoxes
intersect(self,lb):
EXPERIMENTAL find the intersection of LBoxN
show3(self,col='b',Id=0):
required file generation for geomview display
"""
# __slots__=('box','vol','bd','ndim','ctr','grav','parmsh')
def __init__(self, Lb=None, ndim=3):
self.ctr = []
if Lb is None:
self.box = np.array([])
self.vol = []
self.bd = []
self.ndim = ndim # !!! TO BE DONE
# self.bnum = []
else:
self.box = np.array([])
self.vol = []
self.bd = []
for b in Lb:
self.append(b)
self.ndim = b.ndim
self.mesure()
self.parmsh = {}
self.parmsh['display'] = True
self.parmsh['interactive'] = False
def mesure(self):
""" LMeasure BoxN
Obtain measure of :
- size of each interval from each dimension for each boxes
- center of each interval from each dimension for each boxes
- Volume of the BoxN for each boxes (NOT WORKING)
"""
if len(self.bd) != 0:
lbd = int(len(self.bd) / 2)
self.ctr = np.zeros((lbd, self.ndim))
for i in range(lbd): self.ctr[i, :] = (self.bd[2 * i] + self.bd[(2 * i) + 1]) / 2.
            ######################### Works, but too heavy/slow when there are many boxes
# C =np.array(([[1/2.,1/2.]]))
# #M =np.array(([[-1.,1.]]))
# I = np.identity(const)
# CTR = np.kron(I,C)
# #MES = np.kron(I,M)
# #self.mes = np.dot(MES,self.bd)#self.bd[1,:]-self.bd[0,:]
# self.ctr = np.dot(CTR,self.bd)#(self.bd[1,:]+self.bd[0,:])/2.0
# self.vol = np.prod(self.mes,axis=1)
else:
self.ctr = []
def append(self, b):
"""append : Append a box to LboxN
Parameters
----------
b : BoxN
box to added
Returns
-------
Nothing but update self.box status
"""
self.box = np.append(self.box, b)
try:
self.bd = np.vstack((self.bd, b.bd[0]))
self.bd = np.vstack((self.bd, b.bd[1]))
except:
self.bd = b.bd[0]
self.bd = np.vstack((self.bd, b.bd[1]))
V1 = sum(self.vol)
V2 = b.vol
# uptate center of gravity
try:
self.grav = (V1 * self.grav + V2 * b.ctr) / (V1 + V2)
except:
self.grav = b.ctr
self.vol.append(b.vol)
self.ctr.append(b.ctr)
def append_l(self, lb):
"""Append LBoxN to LBoxN
Parameters
----------
lb : LBoxN
lbox to be added
Returns
-------
Nothing but update self.box status
"""
# for i in xrange(len(lb.box)):self.append(lb.box[i])
self.box = np.append(self.box, lb.box)
try:
self.bd = np.vstack((self.bd, lb.bd))
self.ctr = np.vstack((self.ctr, lb.ctr))
self.vol = self.vol + lb.vol
except:
self.bd = lb.bd
self.ctr = lb.ctr
self.vol = lb.vol
# try:
# self.bd=np.vstack((self.bd,lb.bd[i][0]))
# self.bd=np.vstack((self.bd,lb.bd[i][1]))
# except:
# self.bd=lb.bd[i][0]
# self.bd=lb.bd[i][1]
# self.box = self.box + lb.box
# V1 = sum(self.vol)
# V2 = lb.box[0].vol*len(lb.box)
# # uptate center of gravity
# try:
# self.grav = (V1*self.grav+V2*lb.grav)/(V1+V2)
# except:
# self.grav = lb.grav
# self.vol = self.vol + lb.vol
def info(self):
""" display LBoxN information
"""
Vtot = 0
for k in range(len(self.box)):
# print "Box : ",k," Volume :",self.vol[k]
# print "ndim :",self.ndim
print("------------")
self.box[k].info()
Vtot = Vtot + self.box[k].vol
print(("Volume : ", Vtot))
def bd2coord(self, Mapping=False):
"""Boundary to coordinates
        Convert boundaries of Lbox to their vertex coordinates
in :
[xmin ymin zmin]
[xmax ymax zmax]
out :
[xmin ymin zmin]
[xmin ymax zmin]
[xmax ymin zmin]
[xmax ymax zmin]
[xmin ymin zmax]
[xmin ymax zmax]
[xmax ymin zmax]
[xmax ymax zmax]
Parameters
----------
Mapping : Boolean
return a mapping of the vertex in regards of Lbox. Default = False
Returns
-------
P : array 2^ndim x ndim
coordinates of box vertex
"""
lbd = len(self.bd)
dimm1 = pow(2, self.ndim - 1)
dim = pow(2, self.ndim)
P = np.zeros((lbd * dimm1, self.ndim)) # P=(len self.bd/2)*4
# organisation de P
if self.ndim == 3:
R = np.repeat(list(range(0, dimm1 * lbd, dimm1)), 2)
R[list(range(1, len(R), 2))] = R[list(range(1, len(R), 2))] + 1
if self.ndim == 2:
R = np.repeat(list(range(0, dimm1 * lbd, dim)), 2)
R[list(range(1, len(R), 2))] = R[list(range(1, len(R), 2))] + 1
if self.ndim == 3:
RZ = np.repeat(list(range(0, dimm1 * lbd, dim)), dimm1) + (int(lbd / 2)) * list(range(0, dimm1, 1))
# aller chercher dans self.bd
R2a = np.repeat(self.bd[list(range(0, lbd, 2)), :], dimm1, axis=0)
R2b = np.repeat(self.bd[list(range(1, lbd, 2)), :], dimm1, axis=0)
# # X
# P[R,0]=R2a[:,0]#np.repeat(L.bd[range(0,lbd,2),0],4)
# P[R+2,0]=R2b[:,0]#np.repeat(L.bd[range(1,lbd,2),0],4)
# # Y
# P[np.sort(np.mod(R+3,4*lbd)),1]=R2a[:,1]#np.repeat(L.bd[range(0,lbd,2),1],4)
# P[R+1,1]=R2b[:,1]#np.repeat(L.bd[range(1,lbd,2),1],4)
# X
P[np.sort(np.mod(R + 3, dimm1 * lbd)), 0] = R2a[:, 0] # np.repeat(L.bd[range(0,lbd,2),1],4)
P[R + 1, 0] = R2b[:, 0] # np.repeat(L.bd[range(1,lbd,2),1],4)
# Y
P[R, 1] = R2a[:, 1] # np.repeat(L.bd[range(0,lbd,2),0],4)
P[R + 2, 1] = R2b[:, 1] # np.repeat(L.bd[range(1,lbd,2),0],4)
if self.ndim == 3:
# Z
P[RZ, 2] = R2a[:, 2] # np.repeat(L.bd[range(0,lbd,2),2],4)
P[RZ + 4, 2] = R2b[:, 2] # np.repeat(L.bd[range(1,lbd,2),2],4)
# mapping coresponding box
if Mapping == True:
            Map = np.repeat(list(range(0, lbd // 2, 1)), dim)
# Map=10*np.repeat(self.bnum,8)+range(0,8,1)*(len(self.bnum))
return (P, Map)
# P=np.array((
# [self.bd[0,0],self.bd[0,1],self.bd[0,2]],
# [self.bd[0,0],self.bd[1,1],self.bd[0,2]],
# [self.bd[1,0],self.bd[1,1],self.bd[0,2]],
# [self.bd[1,0],self.bd[0,1],self.bd[0,2]],
# [self.bd[0,0],self.bd[0,1],self.bd[1,2]],
# [self.bd[0,0],self.bd[1,1],self.bd[1,2]],
# [self.bd[1,0],self.bd[1,1],self.bd[1,2]],
# [self.bd[1,0],self.bd[0,1],self.bd[1,2]]
# ))
else:
return (P)
def octant(self):
""" quadtree on boxes
Divide each Lboxes into 2^ndim equal parts
aka Split each interval from each dimension into 2 equal part
Returns
-------
lb : LBoxN 2^ndim*self.box x ndim
return theLBoxn from quadtree process
"""
if self.ndim == 3:
const = int(len(self.bd) / 2)
tlb = []
lbox = LBoxN([], ndim=self.ndim)
C = np.array(([[1, 1 / 2., 0], [0, 1 / 2., 1]])).T
I = np.identity(const)
CC = np.kron(I, C)
BD = np.dot(CC, self.bd)
M = np.repeat(3 * np.arange(0, const), 16) # groupe de 16 boundaries equivaut a 8 sous boites
X = (list(range(0, 2)) + list(range(1, 3))) * 4 * (const)
Y = (list(range(0, 2)) * 2 + list(range(1, 3)) * 2) * 2 * (const)
Z = (list(range(0, 2)) * 4 + list(range(1, 3)) * 4) * (const)
Rx = BD[X + M, 0]
Ry = BD[Y + M, 1]
Rz = BD[Z + M, 2]
lbd = np.array((Rx, Ry, Rz)).T
llbd = len(lbd)
xr = range(0, llbd, 2)
lb = LBoxN([BoxN(lbd[i:i + 2], ndim=self.ndim) for i in xr])
lb.mesure()
if self.ndim == 2:
tlb = []
            lbox = LBoxN(ndim=self.ndim)
            const = int(len(self.bd) / 2)
C = np.array(([[1, 1 / 2., 0], [0, 1 / 2., 1]])).T
I = np.identity(const)
CC = np.kron(I, C)
BD = np.dot(CC, self.bd)
M = np.repeat(3 * np.arange(0, const), 8) # groupe de 16 boundaries equivaut a 8 sous boites
X = (list(range(0, 2)) + list(range(1, 3))) * 2 * (const)
Y = (list(range(0, 2)) * 2 + list(range(1, 3)) * 2) * (const)
Rx = BD[X + M, 0]
Ry = BD[Y + M, 1]
lbd = np.array((Rx, Ry)).T
llbd = len(lbd)
lb = LBoxN(np.array([BoxN(lbd[i:i + 2], ndim=self.ndim) for i in range(0, llbd, 2)]))
return (lb)
def volume(self):
""" Evaluate Boxes volume
Compute the volume on each LBoxes
"""
self.vol = []
for b in self.box:
vol = b.vol
if vol >= 0:
self.vol.append(vol)
else:
pdb.set_trace()
def intersect(self, lb):
""" EXPERIMENTAL
Intersection of 2 LBOXN
"""
new_lb = LBoxN(ndim=self.ndim)
for k in range(len(self.box)):
for l in range(len(lb.box)):
b = self.box[k].intersect(lb.box[l])
if b.vol > 0: # si intersection non vide
new_lb.append(b)
return (new_lb)
def _show3(self, col='r', Id=[0], H_Id=0, alpha=0.2):
# gett list of all boundaries
lbd = np.array([b.bd for b in self.box])
shb = lbd.shape
lbdr = lbd.reshape(shb[0] * shb[1], shb[2])
# mapping of single boundaries to get the 8 vertices of the boxN
mapp = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]],
dtype='int')
# repeat the coordinates mapping for all the boxes + shift index
mappe = np.tile(mapp, (shb[0], 1)) + np.repeat(np.arange(shb[0]), 8)[:, None]
# get coordintaes of all verticesof all boxN
allpt = np.array([lbdr[mappe[:, 0], 0], lbdr[mappe[:, 1], 1], lbdr[mappe[:, 2], 2]])
edges = [[0, 1, 2],
[0, 2, 3],
[0, 1, 5],
[0, 4, 5],
[1, 5, 2],
[5, 6, 2],
[2, 6, 7],
[2, 7, 3],
[0, 3, 4],
[3, 7, 4],
[4, 5, 6],
[4, 6, 7]]
# as many edges as boxN
edgese = np.tile(edges, (shb[0], 1)) + np.repeat(np.arange(shb[0]) * 8, 12)[:, None]
mesh = tvtk.PolyData(points=allpt.T, polys=edgese)
if col == 'r':
color = (1, 0, 0)
elif col == 'b':
color = (0, 0, 1)
mlab.pipeline.surface(mesh, opacity=alpha, color=color)
# for b in self.box:
# b._show3(col='r',Id=[0],H_Id=0,alpha=0.2)
def show3(self, col='b', Id=0):
"""Show box into geomview
generate a geomview file which allow to represent a box.
Parameters
----------
col : string
choose box color. compliant with matplotlib colorConverter. default 'r'
Id : list
Identity of boxes to show.Default : [0]
Returns
-------
filename : string
name of the generated file
"""
filename = "lbox" + str(Id) + ".list"
filename2 = basename + "/geom/" + filename
fd = open(filename2, "w")
fd.write("LIST\n")
for k in range(len(self.box)):
b = self.box[k]
b.parmsh['display'] = False
filebox = b.show3(col=col, Id=[Id, k])
# filebox = b.show3(col=col[k],Id=[Id,k])
chaine = "{<" + filebox + "}\n"
fd.write(chaine)
# chaine = "{<cloud.list}\n"
# fd.write(chaine)
fd.close()
if self.parmsh['display']:
chaine = "geomview -nopanel -b 1 1 1 " + filename2 + " 2>/dev/null &"
os.system(chaine)
return (filename)
class BoxN(PyLayers):
"""BoxN Class
A box is determined by its boundary interval along each dimension
Attributes
----------
bd : numpy array 2 x ndim
box boundary
ndim : int
dimension of the box (2D, 3D,...)
self.parmsh : dictionnary
display dictionnary for show 3 method TO BE CHANGE !!!
keys :['display']=True
['interactive']=False
OBTAIN FROM mesure()
self.mes : array 1 x ndim
size of intervals of each dimension
self.ctr : array 1 x ndim
center of intervals of each dimension
self.vol : float
Volume of box
Methods
-------
info() : info about class
def mesure(self):
measure intervals of box
volume() : evaluate volume
inbox(p) : is p in box ?
intersect(box) : intersection of two boxes
show3() : geomview vizualization
cut() : cut a box along given direction
TODO
----
Remove parmsh and replace it by a ini file
"""
# __slots__=('bd','ndim','mes','ctr','vol','parmsh')
def __init__(self, bd=None, ndim=3):
# if bd==None:
# self.bd = np.array([]).astype('float')
# else:
# for i in range(np.shape(bd)[1]):
# assert bd[1,i]>=bd[0,i] , pdb.set_trace()
# self.bd = bd.astype('float')
self.bd = bd
self.ndim = ndim # np.shape(bd)[1]
self.mesure()
self.parmsh = {}
self.parmsh['display'] = True
self.parmsh['interactive'] = False
# print "%s from %s" % (inspect.stack()[1][3],inspect.stack()[1][1])
def mesure(self):
""" Measure BoxN
Obtain measure of :
- size of each interval from each dimension
- center of each interval from each dimension
- Volume of the BoxN
"""
self.mes = self.bd[1, :] - self.bd[0, :]
self.ctr = (self.bd[1, :] + self.bd[0, :]) / 2.0
self.vol = np.prod(self.mes)
def setbd(self, vmin, vmax, axis=0):
"""
setbd : set boundary value on axis
"""
assert vmin <= vmax, "Incorrect bound"
self.bd[0, axis] = vmin.astype('float')
self.bd[1, axis] = vmax.astype('float')
self.mesure()
def void(self):
""" return True if box is void
"""
b = False
        if len(self.mes) == 0:
            b = True
        else:
            pmes = np.prod(self.mes)
if pmes == 0:
b = True
return (b)
def info(self):
""" Information on BoxN
"""
print(("Volume (.vol) :", self.vol))
print(("Center (.ctr) :", self.ctr))
def bd2coord(self):
"""Boundary to coordinates
        Return an array containing the vertex coordinates of the box
3D case :
in :
[xmin ymin zmin]
[xmax ymax zmax]
out :
[xmin ymin zmin]
[xmin ymax zmin]
[xmax ymin zmin]
[xmax ymax zmin]
[xmin ymin zmax]
[xmin ymax zmax]
[xmax ymin zmax]
[xmax ymax zmax]
Returns
-------
P : array 2^ndim x ndim
coordinates of box vertex
"""
if self.ndim == 3:
P = np.array(([self.bd[0, 0], self.bd[0, 1], self.bd[0, 2]],
[self.bd[0, 0], self.bd[1, 1], self.bd[0, 2]],
[self.bd[1, 0], self.bd[1, 1], self.bd[0, 2]],
[self.bd[1, 0], self.bd[0, 1], self.bd[0, 2]],
[self.bd[0, 0], self.bd[0, 1], self.bd[1, 2]],
[self.bd[0, 0], self.bd[1, 1], self.bd[1, 2]],
[self.bd[1, 0], self.bd[1, 1], self.bd[1, 2]],
[self.bd[1, 0], self.bd[0, 1], self.bd[1, 2]]))
return (P)
if self.ndim == 2:
P = np.array(([self.bd[0, 0], self.bd[0, 1]],
[self.bd[0, 0], self.bd[1, 1]],
[self.bd[1, 0], self.bd[1, 1]],
[self.bd[1, 0], self.bd[0, 1]]))
return (P)
def coord2bd(self, coord):
"""
Coordinates to boundary
update boundary array from numpy array of coordinates
Parameters
----------
coord : array 2^ndim x ndim
vertexes coordinates of a boxN
Returns
-------
Nothing but fills self.bd
"""
self.bd[0, :] = np.min(coord, axis=0)
self.bd[1, :] = np.max(coord, axis=0)
# def octant(self):
# tlb = []
# lbox = LBoxN([])
# BD = np.array((self.bd[0],(self.bd[0]+self.bd[1])/2,self.bd[1] ))
## Rx = BD[(range(0,2)+range(1,3))*4,0]
## Ry = BD[(range(0,2)*2+range(1,3)*2)*2,1]
## Rz = BD[range(0,2)*4+range(1,3)*4,2]
## O = np.array((Rx,Ry,Rz )).T
## LB = LBoxN([BoxN(O[0:2]),BoxN(O[2:4]),BoxN(O[4:6]),BoxN(O[6:8]),BoxN(O[8:10]),BoxN(O[10:12]),BoxN(O[12:14]),BoxN(O[14:16])])
## # LB.bnum = range(00,010,1)
## return(LB)
##
def octant(self):
""" quadtree on boxes OBSOLETE
Divide boxes into 2^ndim equal parts
aka Split each interval from each dimension into 2 equal part
"""
tlb = []
lbox = LBoxN([])
for k in range(self.ndim):
tlb.append(self.cut(self.ctr[k], axis=k))
lbm = tlb[0]
for l in range(len(tlb) - 1):
lbp = lbm.intersect(tlb[l + 1])
lbm = lbp
return (lbm)
def intersect(self, box):
""" Find intersection between current box and a given one
Parameters
----------
box : BoxN
a BoxN object
Returns
-------
new_box : BoxN
a BoxN object
"""
new_box = BoxN(np.zeros((2, self.ndim)), ndim=self.ndim)
for k in range(self.ndim):
newmin = max(self.bd[0, k], box.bd[0, k])
newmax = min(self.bd[1, k], box.bd[1, k])
if (newmax > newmin):
new_box.bd[0, k] = newmin
new_box.bd[1, k] = newmax
new_box.mesure()
return (new_box)
def bdiff(self, box):
""" OBSOLETE
USE self.intersect instead !!!
"""
new_box = BoxN(np.zeros((2, self.ndim)), ndim=self.ndim)
for k in range(self.ndim):
newmin = max(self.bd[0, k], box.bd[0, k])
newmax = min(self.bd[1, k], box.bd[1, k])
if (newmax > newmin):
new_box.bd[0, k] = newmin
new_box.bd[1, k] = newmax
new_box.mesure()
return (new_box)
def _show3(self, col='r', Id=[0], H_Id=0, alpha=0.2):
mapp = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]],
dtype='int')
b = np.array([self.bd[mapp[:, 0], 0], self.bd[mapp[:, 1], 1], self.bd[mapp[:, 2], 2]])
edges = [[0, 1, 2],
[0, 2, 3],
[0, 1, 5],
[0, 4, 5],
[1, 5, 2],
[5, 6, 2],
[2, 6, 7],
[2, 7, 3],
[0, 3, 4],
[3, 7, 4],
[4, 5, 6],
[4, 6, 7]]
# trick for correcting color assignement
mesh = tvtk.PolyData(points=b.T, polys=edges)
if col == 'r':
color = (1, 0, 0)
elif col == 'b':
color = (0, 0, 1)
mlab.pipeline.surface(mesh, opacity=alpha, color=color)
def show3(self, dim=(0, 1, 2), col='r', Id=[0], H_Id=0, alpha=0.2):
"""Show box into geomview
generate a geomview file which allow to represent a box.
Parameters
----------
dim : tuple
chose dimension to display. default : (0,1,2)
col : string
choose box color. compliant with matplotlib colorConverter. default 'r'
Id : list
Identity of boxes to show.Default : [0]
alpha : float
set transparency. Default 0.2
Returns
-------
fname : string
name of the generated file
"""
# for k in range(len(self.bb)):
b = np.zeros((6, 4, 3)).astype('int')
b[0, :, :] = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])
b[1, :, :] = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 1], [1, 1, 0]])
b[2, :, :] = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]])
b[3, :, :] = np.array([[0, 1, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1]])
b[4, :, :] = np.array([[0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]])
        b[5, :, :] = np.array([[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]])
import numpy as np
import cv2
from linalg import get_line_equation, get_plane_equation, rotate3d, signed_plane_point_distance
from utils import nonzero, get_cube, load_points_and_faces, colorize_faces
class Renderer:
'''
simple class that renders points to screen
'''
def __init__(
self,
cam=np.array([250, 250, -300]),
zplane=0,
image_size=(500, 500, 3),
light_vector = np.array([1, 1, 1]),
light_point = np.array([0, 0, 0]),
):
self.cam = cam
self.zplane = zplane
self.image_size = image_size
self.light_vector = light_vector
self.light_point = light_point
def get_projections(self, pointsijz):
'''
        projects points in the ijz coordinate system onto the z-plane using self.cam
'''
zs = pointsijz[:, 2]
_, _, zcam = self.cam
alphas = (self.zplane-zs)/(zcam-zs)
alphas = np.expand_dims(alphas, axis=1)
camxy = np.expand_dims(self.cam[:2], axis=0)
projection = camxy*alphas + pointsijz[:, :2]*(1-alphas)
return projection
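    # Note (added): the projection above follows from similar triangles along the
    # camera ray. alpha = (zplane - z) / (zcam - z) is the fraction of the way from
    # the point to the camera at which the ray crosses the z-plane, so the projected
    # position is cam_xy * alpha + point_xy * (1 - alpha). For example, with
    # zcam = -300, zplane = 0 and a point at z = 300, alpha = 0.5 and the projection
    # lies halfway between the camera's (i, j) and the point's (i, j).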
def render_points(
self,
pointsxyz,
faces=[],
colors=[],
background_color = 255,
img=None,
zbuff=None,
render_points=False,
point_radius = 10,
):
if img is None:
img = np.zeros(self.image_size) + background_color
if zbuff is None:
zbuff = np.zeros((img.shape[0], img.shape[1])) + float('inf')
pointsijz = self.xyz2ijz(pointsxyz)
proj = self.get_projections(pointsijz)
if render_points:
for point in proj:
point = point.astype('int')
img = cv2.circle(img, (point[1], point[0]), point_radius, (100, 100, 100), 2*point_radius)
for face_point_idxs, color in zip(faces, colors):
face_proj = proj[face_point_idxs].astype('int')
face3d = pointsijz[face_point_idxs]
normal, d = get_plane_equation(face3d)
brightness = np.dot(normal, self.light_vector)/np.linalg.norm(normal)/np.linalg.norm(self.light_vector)
brightness = abs(brightness)
camera_dist_sign = np.sign(signed_plane_point_distance(normal, d, self.cam))
light_dist_sign = np.sign(signed_plane_point_distance(normal, d, self.light_point))
if camera_dist_sign!=light_dist_sign:
brightness = 0
color = np.clip((color*brightness), 0, 255).astype('uint8')
self.zdraw_triangle(img, zbuff, face_proj, color, face3d)
return img, zbuff
def zdraw_triangle(self, img, zbuff, vertices, color, pointsijz):
zplane, cam = self.zplane, self.cam
vertices = vertices[vertices[:,0].argsort()]
normal, d = get_plane_equation(pointsijz)
xpos = lambda k, l, y:int((y-l)/nonzero(k))
xposin=lambda k, l, y: max(0, min(img.shape[1]-1, xpos(k, l, y) ))
k1, l1 = get_line_equation([vertices[0], vertices[2]])
k2, l2 = get_line_equation([vertices[0], vertices[1]])
if (xpos(k1, l1, vertices[0][0]+1000) > xpos(k2, l2, vertices[0][0]+1000)):
k1, l1, k2, l2 = k2, l2, k1, l1
for i in range(max(0, vertices[0][0]), min(vertices[1][0], img.shape[0])):
for j in range(xposin(k1, l1, i), xposin(k2, l2, i)+1):
alpha = (d - np.dot(cam, normal)) / nonzero((np.array([i, j, zplane]) - cam) @ normal)
z = alpha * (zplane - cam[-1])
if zbuff[i][j] > z:
zbuff[i][j] = z
img[i][j] = color
k1, l1 = get_line_equation([vertices[0], vertices[2]])
k2, l2 = get_line_equation([vertices[1], vertices[2]])
if (xpos(k1, l1, vertices[2][0]-1000) > xpos(k2, l2, vertices[2][0]-1000)):
k1, l1, k2, l2 = k2, l2, k1, l1
for i in range(max(0, vertices[1][0]), min(vertices[2][0], img.shape[0])):
for j in range(xposin(k1, l1, i), xposin(k2, l2, i)+1):
alpha = (d - np.dot(cam, normal)) / nonzero((np.array([i, j, zplane]) - cam) @ normal)
z = alpha * (zplane - cam[-1])
if zbuff[i][j] > z:
zbuff[i][j] = z
img[i][j] = color
def change_zbuff_for_viewing(self, zbuff):
minv = np.min(zbuff)
        zbuff = np.nan_to_num(zbuff, posinf=minv)
from scipy.spatial import ConvexHull
import numpy as np
from scipy.integrate import simps
from scipy import signal
import antropy as ant
import scipy.stats
import nolds
from package import diffusion_stabilogram
from package import recurrence_quantification_analysis
from package import fractal_dimension
## NOTE: Recordings from Bertec Acquire have the following specifications:
#Row 1 = Time
#Row 2 = Fz
#Row 3 = Mx
#Row 4 = My
#Row 5 = CoPx = CoP_ML
#Row 6 = CoPy = CoP_AP
#Note. CoPx = -My/Fz
#Note. CoPy = Mx/Fz
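# Hedged illustration (added; not part of the original source): given a recording
# `data` laid out as described above (rows: time, Fz, Mx, My, CoPx, CoPy), the CoP
# rows could be recomputed from the force/moment rows as a consistency check:
#   time, fz, mx, my = data[0], data[1], data[2], data[3]
#   cop_ml = -my / fz   # CoPx = CoP_ML
#   cop_ap = mx / fz    # CoPy = CoP_AP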
def _recenter(data):
"""De-means the data"""
data = np.array(data)
return data - data.mean()
def _delta(data):
"""Gets the difference in data, i.e., delta[i] = x[i+1] - x[i]"""
d1 = np.array(data[:-1])
d2 = np.array(data[1:])
return d2 - d1
def _eig(data):
"""Returns eigenvectors and eigenvalues from the x y"""
def _confidence_ellipse(x,y):
N = len(x)
corr = np.zeros([2,2])
corr[0,0] = sum(x ** 2)
corr[1,1] = sum(y ** 2)
corr[0,1] = corr[1,0] = sum(x * y)
        w,v = np.linalg.eig(corr)
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import fsolve, root
from .multiflash import multiflash
from .equilibriumresult import EquilibriumResult
from warnings import warn
def haz_objb(inc, T_P, type, model, index, equilibrium):
X0 = inc[:-1].reshape(3, 2)
P_T = inc[-1]
if type == 'T':
P = P_T
temp_aux = T_P
elif type == 'P':
T = P_T
temp_aux = model.temperature_aux(T)
P = T_P
nc = model.nc
X = np.zeros((3, nc))
X[:, index] = X0
lnphi = np.zeros_like(X)
global vg, Xassg
for i, state in enumerate(equilibrium):
out = model.logfugef_aux(X[i], temp_aux, P, state, vg[i], Xassg[i])
lnphi[i], vg[i], Xassg[i] = out
lnK = lnphi[0] - lnphi[1:]
K = np.exp(lnK)
return np.hstack([(K[:, index]*X0[0]-X0[1:]).flatten(), X0.sum(axis=1)-1.])
def haz_objt(inc, temp_aux, P, model):
X, W, Y = np.split(inc, 3)
global vg, Xassg
fugX, vg[0], Xassg[0] = model.logfugef_aux(X, temp_aux, P, 'L', vg[0],
Xassg[0])
fugW, vg[1], Xassg[1] = model.logfugef_aux(W, temp_aux, P, 'L', vg[1],
Xassg[1])
fugY, vg[2], Xassg[2] = model.logfugef_aux(Y, temp_aux, P, 'V', vg[2],
Xassg[2])
K1 = np.exp(fugX-fugY)
    K2 = np.exp(fugX-fugW)
# PYTHON 3
#
# Author: <NAME>
# Created: 1 February 2013 IDL, Converted to Python 3 12th Jan 2021
# Last update: 12 January 2021
# Location: /home/h04/hadkw/HadISDH_Code/HADISDH_BUILD/
# GitHub: https://github.com/Kate-Willett/HadISDH_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# For selected variable, grid the data and uncertainties, including the gridbox sampling uncertainty
# Read in list of goods
# - IF RAW: Read in raw netCDF of abs, anoms, clims.
# - IF PHA/IDPHA/PHADPD: Read in homogenised netCDF of abs, anoms, err, adjE, obsE, clmE, clims and climsds.
# move from gridbox to gridbox starting with -177.5W, 87.5S
# if there is a station then begin
# find all stations in GB - store lat, lon, elev
# calc gridbox mean (abs, anoms, clims), standard deviation (sds of abs), uncertainties (combined assuming no correlation and unique values) - already 2 sigma when read in!!!
# For Tw extremes calculate the Quality Scores for each gridbox and output:
# HQ1: based on number of stations within gridbox
# - 0 = > 1 station (should this be higher?)
# - 1 = 1 station
# HQ2: based on the number of inhomogeneity/adjustment per station detected
# - 0 = 0 inhomogeneity/adjustment detected
# - 1 = 0-1 inhomogeneity/adjustment per station detected
# - 2 = 1 inhomogeneity/adjustment per station detected
# HQ3: based on number of very large (>= 2 degrees) adjustments per station detected
# - 0 = 0 very large adjustments per station
# - 1-9 = > 0 and < 1 very large adjustments per station, scaled
# - 10 = 1 very large adjustment per station
# HQ4: based on number of large (>= 1 and <2 degrees) adjustments per station detected
# - 0 = 0 large adjustments per station
# - 1-4 = > 0 and < 1 large adjustments per station, scaled
# - 5 = 1 large adjustment per station
# HQ5: based on number of moderate (>= 0.5 and <1 degrees) adjustments per station detected
# - 0 = 0 moderate adjustments per station
# - 1-2 = > 0 and < 1 moderate adjustments per station, scaled
# - 3 = 1 moderate adjustment per station
# HQ6: based on number of small (> 0 and <0.5 degrees) adjustments per station detected
# - 0 = 0 small adjustments per station
# - 0 = > 0 and < 1 small adjustments per station (an HQ will have been allocated by HQ 2)
# - 1 = 1 small adjustment per station
# HQ7: based on average actual adjustment over gridbox month (are adjustments in opposite directions averaging out?)
# - 0 = 0 adjustment over gridbox month
# - 1 = > 0 and < 0.5 degree abs(mean adjustment) over gridbox month
# - 2-3 = >= 0.5 and < 1 degree abs(mean adjustment) over gridbox month, scaled
# - 4-9 = >= 1 and < 2 degree abs(mean adjustment) over gridbox month, scaled
# - 10 = >= 2 degree abs(mean adjustment) over gridbox month
# HQ8: based on average absolute adjustment over gridbox month
# - Mean(absolute adjustments) over gridbox month
# Homogenisation quality score and flag: combines homogenisation quality statistics 1 to 7 using the following method:
# - >=10 = terrible
# - 5-9 = bad
# - 2-4 = iffy
# - 1 = ok
# - 0 = Good
# Call gridbox_sampling_uncertainty.py to compute gridbox sampling error due to missing data and incomplete spatial sampling.
# Not sure how this will work for the days of exceedance
# Write out to netCDF, ascii (abs, anoms, uncertainty) - all errors are 2 sigma errors!!!
# Write out gridding results min/max of each var
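# Hedged illustration (added; not part of the original build code): mapping the
# combined homogenisation quality score (sum of scores 1 to 7) onto the flag
# categories listed above might look like:
#   def hq_flag(total_score):
#       if total_score >= 10: return 'terrible'
#       if total_score >= 5: return 'bad'
#       if total_score >= 2: return 'iffy'
#       if total_score == 1: return 'ok'
#       return 'good'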
#
# -----------------------
# LIST OF MODULES
# -----------------------
#import numpy as np # used
#import numpy.ma as npm # used
#from datetime import datetime # used
#import matplotlib.pyplot as plt
#import sys, os, getopt # used
#import struct
#import glob # used
#import pdb # used
#import netCDF4 as nc4
#
## Kate's Functions
#import CalcHums
#from RandomsRanges import LetterRange
#import gridbox_sampling_uncertainty as gsu
#
# -----------------------
# DATA
# -----------------------
# Input station list of 'good stations':
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/'
# Posthomog<typee><var>_anoms'+CLMlab+'_<goods>HadISDH.'+versiondots+'.txt'
# Input homogenised netCDF files of data with station uncertainties to grid - IDPHA version and PHADPD:
# /scratch/hadkw/UPDATE<YYYY>/MONTHLIES/HOMOG/<typee>NETCDF/<VAR>DIR/' # this will then be PHANETCDF or IDPHANETCDF
# <station>'_anoms<climLAB>_homog.nc'
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# > module load scitools/default-current
# > python F13_GridHadISDHFLAT --var <var> --typee <type>
#
## Which variable?
# var = 'dpd' #'dpd','td','t','tw','e','q','rh'
#
## Which homog type?
# typee = 'PHA' #'PHA' (for DPD only),'IDPHA' (for t, e, q, rh and tw),'PHADPD' (for Td)
#
#
# Or ./F13_submit_spice.sh
#
#
# -----------------------
# OUTPUT
# -----------------------
# The gridded netCDF file:
# /scratch/hadkw/UPDATE<YYYY>/STATISTICS/GRIDS/
# HadISDH.land<var>.'+version+'_FLATgrid<homogtype>PHA5by5_anoms8110.nc
# The summary min and max values for each variable within the netCDF file:
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/
# GriddingResults_<versiondots>_anoms8110.txt max/mins of all fields in nc file
#
# THESE ARE OUTPUT AS 2 SIGMA ERRORS!!!
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 7 (27 August 2021)
# ---------
#
# Enhancements
#
# Changes
# Now grids all Tw extremes variables
# Additionally - outputs homogenisation quality scores for the Tw extremes only (because theses are from unhomogenised data)
#
# Bug fixes
#
#
# Version 6 (12 January 2021)
# ---------
#
# Enhancements
# Double checked uncertainty calculations and they are quantitatively the same as for the marine code
# but expressed differently so I have changed the code to match that for the marine.
#
# Changes
# Now Python 3
# Using pythong gridbox_sampling_uncertainty.py rather than IDL code (as used for the marine data)
# gridbox_sampling_uncertainty.py uses HadCRUT.4.3.0.0.land_fraction.py to select land boxes
# gridbox_sampling_uncertainty.py sets rbar to 0.8 if there are missing values rather than the 0.1 previously which
# was far too low. 0.8 is about mid-range for rbar
# Sampling uncertainty is very slightly different order 0.001 in a few places
# We now use the mean number of stations contributing to the gridbox rather than the maximum - this is smaller so
# will result in slightly larger sampling uncertainty, especially in gridboxes with very few stations LARGER UNCERTAINTIES
# Combining uncertainty over gridbox now uses actual numer of stations for that month rather than total over time
# period for that gridbox so new gridbox uncs are LARGER than IDL ones where there are fewer
# stations contributing to the gridbox compared to the total. LARGER UNCERTAINTIES
#
# Bug fixes
# In 2019 I reduced the combined uncertainties because I had thought that *2 made them 4 sigma. I hadn;t noticed the /2 in the equation. So, while the original
# equation of sqrt((staterr/2)^2 + (samperr/2)^2)*2 was pointless it was right and 2019 would have had combined uncertainty that was too small - now corrected!!!
# LARGER UNCERTAINTIES - BY *2
#
#
# Version 5 (29 March 2018)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
# Wrong FILE_SEARCH string was finding multiple files and therefore sometimes reading in the wrong one (with sats/subzeros or duplicate!)
#
# Version 4 (13 February 2018)
# ---------
#
# Enhancements
#Now has param and homogtype called at run time
## Which variable? T first, RH, DPD, q, e, td, tw
#param = 'tw'
## Which homog type?
#homogtype = 'ID' #'ID','DPD' for Td, 'PHA' - req for DPD or PHA versions of all variables
#
# Now looks at Posthomog...lists to get station counts automatically rather than being hard coded
#
# Changes
#
# Bug fixes
# NetCDF err outputs had wrong long_names
#
# Version 3 (1 February 2017)
# ---------
#
# Enhancements
# General tidy up and improved headers
#
# Changes
#
# Bug fixes
#
#
# Version 2 (7 September 2017)
# ---------
#
# Enhancements
# General tidy up and reframe of tweakable variables to make the file/data batching easier for each variable/climatology choice etc.
# Can now work with different anomaly periods 7605 or 8110 which have to be created by create_homogNCDFall_stunc_JAN2015.pro
#
# Changes
#
# Bug fixes
# Fixed bug in sampling error which was only using first 29 years of the 30 year climatology period (missing +1)
# This fix is actually in the calc_samplingerrorJUL2012_nofill.pro.
#
# Version 1 (15 January 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
# climerr is difference for some gridboxes - larger for new compared to old - when old is small new is large???
# sampling error needs to be saved only where there are data - not for all land.
#**** THIS IS WHERE TO ADD UNCERTAINTY ALA BROHAN et al. 2006
# Station error:
# Tob - Tclim + errorCLIM + measurementerror + homogadj + adjuncertainty + reporting error
# Samping error:
# SE^2 = GBstdev*avg.intersite correlation*(1-avg.intersite corr)
# --------------------------------------------------------
# 1 + ((num stations - 1) * avg.intersite correlation)
# Bias error:
# urbanisation? exposure change? irrigation?
# combine these by adding in quadrature.
# sampling error - after Jones et al. 1997
#Shat^2 = variance of gridbox(extended?) means over climatology period
#n = number of stations contributing to gridbox(extended?) over climatology period
#Xo = correlation decay distance (km) for that gridbox (where correlation = 1/e)
#X = diagonal from bottom left to top right of gridbox(extended?) (km) - use lats, longs and dist_calc
#rbar = (Xo/X)*(1-e(-X/Xo))
#sbar^2 = mean station variance within the gridbox
#sbar^2 = (Shat^2*n)/(1+((n-1)*rbar))
#INFILL empty gridboxes by interpolated Xo and then calculating rbar
#SE^2 = gridbox sampling error
#SE^2 = (sbar^2*rbar*(1-rbar))/(1+((n-1)*rbar))
#SE^2 (where n=0) = sbar^2*rbar (INFILL GB with Shat^2???)
#SEglob^2 = global average sampling error
#SEglob^2 = SEbar^2/Neff
#SEbar^2 = (SUM(SE^2*cos(lat)))/(SUM(cos(lat)))
#Neff = number of effectively independent points
#Neff = (2*R)/F
#R = radius of the earth (6371 km)
#F=(((e((-piR)/Xobar))/R)+(1/R))/((1/(Xobar^2))+(1/R^2))
#Xobar=(SUM(Xo*cos(lat)))/(SUM(cos(lat)))
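# Hedged illustration (added; not part of the original build code): the rbar and
# gridbox sampling error equations above translate roughly to
#   rbar = (Xo / X) * (1. - np.exp(-X / Xo))
#   sbar_sq = (Shat_sq * n) / (1. + (n - 1.) * rbar)
#   SE_sq = (sbar_sq * rbar * (1. - rbar)) / (1. + (n - 1.) * rbar)
# with Xo the correlation decay distance, X the gridbox diagonal, n the number of
# contributing stations and Shat_sq the variance of gridbox means over the
# climatology period (see gridbox_sampling_uncertainty.py for the full treatment).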
#******************************************************
# Global variables and imports
# Inbuilt: (may not all be required actually)
import numpy as np # used
import numpy.ma as npm # used
from datetime import datetime # used
import matplotlib.pyplot as plt
import sys, os, getopt # used
import struct
import glob # used
import pdb # used
import netCDF4 as nc4
#from subprocess import call, check_output, run, PIPE # used
# Kate's Functions
import CalcHums
from RandomsRanges import LetterRange
import gridbox_sampling_uncertainty as gsu
# Start and end years if HardWire = 1
styear = 1973
edyear = 2019
# Which climatology?
MYclst = 1981 # 1976, 1981
MYcled = 2010 # 2005, 2010
CLMlab = str(MYclst)[2:4]+str(MYcled)[2:4]
# Dataset version if HardWire = 1
versiondots = '4.2.0.2019f'
version = 'v420_2019f'
hadisdversiondots = '3.1.0.2019f'
hadisdversion = 'v310_2019f'
# HARDWIRED SET UP!!!
# If HardWire = 1 then program reads from the above run choices
# If HardWire = 0 then program reads in from F1_HadISDHBuildConfig.txt
HardWire = 0
if (HardWire == 0):
#' Read in the config file to get all of the info
with open('F1_HadISDHBuildConfig.txt') as f:
ConfigDict = dict(x.rstrip().split('=', 1) for x in f)
versiondots = ConfigDict['VersionDots']
hadisdversiondots = ConfigDict['HadISDVersionDots']
styear = ConfigDict['StartYear']
edyear = ConfigDict['EndYear']
# AttribDict held in memory to probide global attribute text later
#' Read in the attribute file to get all of the info
with open('F1_HadISDHBuildAttributes.txt') as f:
AttribDict = dict(x.rstrip().split('=', 1) for x in f)
# NOT CODED THIS FUNCTIONALITY YET
## Are we working with homogenised actuals (True) or anomalies (False)?
#Actuals = True
# Set up directories locations
updateyy = str(edyear)[2:4]
updateyyyy = str(edyear)
workingdir = '/scratch/hadkw/UPDATE'+updateyyyy
#workingdir = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+updateyyyy
# Set up filenames
INDIRLIST = workingdir+'/LISTS_DOCS/'
INDIRHOM = workingdir+'/MONTHLIES/HOMOG/' # this will then be PHAASCII or IDPHAASCII
#workingdir = '/scratch/hadkw/UPDATE'+updateyyyy
OUTDIRLIST = workingdir+'/LISTS_DOCS/GriddingResults_'+versiondots+'_anoms'+CLMlab+'.txt'
OUTDIRDAT = workingdir+'/STATISTICS/GRIDS/'
# File for output stats but also for reading in missed adjustment uncertainties
OUTPUTLOG = workingdir+'/LISTS_DOCS/OutputLogFile'+versiondots+'.txt'
# Set up variables
MDI = -1e+30
INTMDI = -999.
LatBox = 5. # latitude gridbox size
LonBox = 5. # longitude gridbox size
# Dictionaries for param, units, homogdirprefix, STATION FILE PREFIX, standard name, long name, raw data suffix(only for test run)
ParamDict = dict([('q',['q','g/kg','IDPHA','Q','specific_humidity','monthly mean 2m specific humidity','qhum']),
('rh',['RH','%rh','IDPHA','RH','relative_humidity','monthly mean 2m relative humidity','rhum']),
('t',['T','deg C','IDPHA','T','drybulb_temperature','monthly mean 2m dry bulb temperature','temp']), # Note this needs to be changed to IDPHAMG later
('td',['Td','deg C','IDPHA','TD','dewpoint_temperature','monthly mean 2m dew point temperature','dewp']),
('tw',['Tw','deg C','IDPHA','TW','wetbulb_temperature','monthly mean 2m wetbulb temperature','twet']),
('e',['e','hPa','IDPHA','E','vapour_pressure','monthly mean 2m vapour pressure','evap']),
('dpd',['DPD','deg C','PHA','DPD','dewpoint_depression','monthly mean 2m dew point depression','ddep']),
('tw_max',['TwX','deg C','IDPHA','TWMAX','wetbulb_temperature_maximum','monthly maximum 2m wetbulb temperature','twmx']),
('tw_max_95p',['TwX95p','1','IDPHA','TWMAX95','wetbulb_temperature_max95p','days per month maximum >= 95 percentile maximum 2m wetbulb temperature','twx95']),
('tw_mean_95p',['TwM95p','1','IDPHA','TWMEAN95','wetbulb_temperature_mean95p','days per month mean >= 95 percentile mean 2m wetbulb temperature','twm95']),
('tw_max_ex25',['Tw25','1','IDPHA','TW25','wetbulb_temperature_ex25','days per month >= 25 deg 2m wetbulb temperature','tw25']),
('tw_max_ex27',['Tw27','1','IDPHA','TW27','wetbulb_temperature_ex27','days per month >= 27 deg 2m wetbulb temperature','tw27']),
('tw_max_ex29',['Tw29','1','IDPHA','TW29','wetbulb_temperature_ex29','days per month >= 29 deg 2m wetbulb temperature','tw29']),
('tw_max_ex31',['Tw31','1','IDPHA','TW31','wetbulb_temperature_ex31','days per month >= 31 deg 2m wetbulb temperature','tw31']),
('tw_max_ex33',['Tw33','1','IDPHA','TW33','wetbulb_temperature_ex33','days per month >= 33 deg 2m wetbulb temperature','tw33']),
('tw_max_ex35',['Tw35','1','IDPHA','TW35','wetbulb_temperature_ex35','days per month >= 35 deg 2m wetbulb temperature','tw35'])])
# This is needed by WriteNetCDF and writing to ascii
MonthName = ['January ',
'February ',
'March ',
'April ',
'May ',
'June ',
'July ',
'August ',
'September ',
'October ',
'November ',
'December ']
#******************************************************
# SUBROUTINES #
#******************************************************
# READDATA
def ReadData(FileName,typee,delimee):
''' Use numpy genfromtxt to read in all rows from a complex array.
The format must be specified explicitly because the file is complex.
Outputs an array of tuples that in turn need to be subscripted by their names, defaulting to f0...f8. '''
return np.genfromtxt(FileName, dtype=typee, delimiter=delimee, encoding='latin-1')
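# Hedged usage sketch for ReadData: the dtype string and the fixed-width
# delimiter list below are illustrative assumptions only, not the real
# station-list layout used by the build.
#   typee = "|U11,float,float,float,|U30"   # id, lat, lon, elevation, name
#   delimee = [11, 9, 10, 7, 31]            # fixed-width field widths
#   RawData = ReadData(MyStationListFile, typee, delimee)
#   StationIDs = np.array([row[0] for row in RawData])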
'''
episodestats.py
implements statistics used in producing employment statistics for the
lifecycle model
'''
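# Hedged usage sketch (orientation only; the environment object and the
# constructor arguments below are placeholders, not a tested configuration):
#
#   stats = EpisodeStats(timestep=0.25, n_time=264, n_emps=15, n_pop=1000, env=env,
#                        minimal=False, min_age=18, max_age=70, min_retirementage=63.75,
#                        year=2018, version=3)
#   # during simulation, record every transition:
#   stats.add(n, act, r, state, newstate, q=benefit_dict)
#   # afterwards, compute summary measures, e.g.:
#   stats.comp_irr()
#   gini = stats.comp_gini()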
import h5py
import numpy as np
import numpy_financial as npf
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from scipy.stats import norm
#import locale
from tabulate import tabulate
import pandas as pd
import scipy.optimize
from tqdm import tqdm_notebook as tqdm
from . empstats import Empstats
from scipy.stats import gaussian_kde
#locale.setlocale(locale.LC_ALL, 'fi_FI')
def modify_offsettext(ax,text):
'''
For y axis
'''
x_pos = 0.0
y_pos = 1.0
horizontalalignment='left'
verticalalignment='bottom'
offset = ax.yaxis.get_offset_text()
#value=offset.get_text()
# value=float(value)
# if value>=1e12:
# text='biljoonaa'
# elif value>1e9:
# text=str(value/1e9)+' miljardia'
# elif value==1e9:
# text=' miljardia'
# elif value>1e6:
# text=str(value/1e6)+' miljoonaa'
# elif value==1e6:
# text='miljoonaa'
# elif value>1e3:
# text=str(value/1e3)+' tuhatta'
# elif value==1e3:
# text='tuhatta'
offset.set_visible(False)
ax.text(x_pos, y_pos, text, transform=ax.transAxes,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment)
class Labels():
def get_labels(self,language='English'):
labels={}
if language=='English':
labels['osuus tilassa x']='Proportion in state {} [%]'
labels['age']='Age [y]'
labels['ratio']='Proportion [%]'
labels['unemp duration']='Length of unemployment [y]'
labels['scaled freq']='Scaled frequency'
labels['probability']='probability'
labels['telp']='Employee pension premium'
labels['sairausvakuutus']='Health insurance'
labels['työttömyysvakuutusmaksu']='Unemployment insurance'
labels['puolison verot']='Partners taxes'
labels['taxes']='Taxes'
labels['asumistuki']='Housing benefit'
labels['toimeentulotuki']='Supplementary benefit'
labels['tyottomyysturva']='Unemployment benefit'
labels['paivahoito']='Daycare'
labels['elake']='Pension'
labels['tyollisyysaste']='Employment rate'
labels['tyottomien osuus']='Proportion of unemployed'
labels['havainto']='Observation'
labels['tyottomyysaste']='Unemployment rate [%]'
labels['tyottomien osuus']='Proportion of unemployed [%]'
labels['tyollisyysaste %']='Employment rate [%]'
labels['ero osuuksissa']='Difference in proportions [%]'
labels['osuus']='proportion'
labels['havainto, naiset']='data, women'
labels['havainto, miehet']='data, men'
labels['palkkasumma']='Wage sum [euros]'
labels['Verokiila %']='Tax wedge [%]'
labels['Työnteko [hlö/htv]']='Employment [persons/FTE]'
labels['Työnteko [htv]']='Employment [FTE]'
labels['Työnteko [hlö]']='Employment [persons]'
labels['Työnteko [miljoonaa hlö/htv]']='Employment [million persons/FTE]'
labels['Työnteko [miljoonaa htv]']='Employment [million FTE]'
labels['Työnteko [miljoonaa hlö]']='Employment [million persons]'
labels['Osatyönteko [%-yks]']='Part-time work [%-points]'
labels['Muut tulot [euroa]']='Other income [euros]'
labels['Henkilöitä']='Persons'
labels['Verot [euroa]']='Taxes [euros]'
labels['Verot [[miljardia euroa]']='Taxes [billion euros]'
labels['Verokertymä [euroa]']='Tax revenue [euros]'
labels['Verokertymä [miljardia euroa]']='Tax revenue [billion euros]'
labels['Muut tarvittavat tulot [euroa]']='Other required income [euros]'
labels['Muut tarvittavat tulot [miljardia euroa]']='Other required income [billion euros]'
labels['malli']='Life cycle model'
else:
labels['osuus tilassa x']='Osuus tilassa {} [%]'
labels['age']='Ikä [v]'
labels['ratio']='Osuus tilassa [%]'
labels['unemp duration']='työttömyysjakson pituus [v]'
labels['scaled freq']='skaalattu taajuus'
labels['probability']='todennäköisyys'
labels['telp']='TEL-P'
labels['sairausvakuutus']='Sairausvakuutus'
labels['työttömyysvakuutusmaksu']='Työttömyysvakuutusmaksu'
labels['puolison verot']='puolison verot'
labels['taxes']='Verot'
labels['asumistuki']='Asumistuki'
labels['toimeentulotuki']='Toimeentulotuki'
labels['tyottomyysturva']='Työttömyysturva'
labels['paivahoito']='Päivähoito'
labels['elake']='Eläke'
labels['tyollisyysaste']='työllisyysaste'
labels['tyottomien osuus']='työttömien osuus'
labels['havainto']='havainto'
labels['tyottomyysaste']='Työttömyysaste [%]'
labels['tyottomien osuus']='Työttömien osuus väestöstö [%]'
labels['tyollisyysaste %']='Työllisyysaste [%]'
labels['ero osuuksissa']='Ero osuuksissa [%]'
labels['osuus']='Osuus'
labels['havainto, naiset']='havainto, naiset'
labels['havainto, miehet']='havainto, miehet'
labels['palkkasumma']='Palkkasumma [euroa]'
labels['Verokiila %']='Verokiila [%]'
labels['Työnteko [hlö/htv]']='Työnteko [hlö/htv]'
labels['Työnteko [htv]']='Työnteko [htv]'
labels['Työnteko [hlö]']='Työnteko [hlö]'
labels['Työnteko [miljoonaa hlö/htv]']='Työnteko [miljoonaa hlö/htv]'
labels['Työnteko [miljoonaa htv]']='Työnteko [miljoonaa htv]'
labels['Työnteko [miljoonaa hlö]']='Työnteko [miljoonaa hlö]'
labels['Osatyönteko [%-yks]']='Osa-aikatyössä [%-yks]'
labels['Muut tulot [euroa]']='Muut tulot [euroa]'
labels['Henkilöitä']='Henkilöitä'
labels['Verot [euroa]']='Verot [euroa]'
labels['Verot [[miljardia euroa]']='Verot [[miljardia euroa]'
labels['Verokertymä [euroa]']='Verokertymä [euroa]'
labels['Verokertymä [miljardia euroa]']='Verokertymä [miljardia euroa]'
labels['Muut tarvittavat tulot [euroa]']='Muut tarvittavat tulot [euroa]'
labels['Muut tarvittavat tulot [miljardia euroa]']='Muut tarvittavat tulot [miljardia euroa]'
labels['malli']='elinkaarimalli'
return labels
class EpisodeStats():
def __init__(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year=2018,version=3,params=None,gamma=0.92,lang='English'):
self.version=version
self.gamma=gamma
self.params=params
self.lab=Labels()
self.reset(timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,params=params,lang=lang)
print('version',version)
def reset(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,version=None,params=None,lang=None,dynprog=False):
self.min_age=min_age
self.max_age=max_age
self.min_retirementage=min_retirementage
self.minimal=minimal
if params is not None:
self.params=params
if lang is None:
self.language='English'
else:
self.language=lang
if version is not None:
self.version=version
self.setup_labels()
self.n_employment=n_emps
self.n_time=n_time
self.timestep=timestep # 0.25 = 3-month step
self.inv_timestep=int(np.round(1/self.timestep)) # must be an integer
self.n_pop=n_pop
self.year=year
self.env=env
self.reaalinen_palkkojenkasvu=0.016
self.palkkakerroin=(0.8*1+0.2*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.elakeindeksi=(0.2*1+0.8*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.dynprog=dynprog
if self.minimal:
self.version=0
if self.version in set([0,101]):
self.n_groups=1
else:
self.n_groups=6
self.empstats=Empstats(year=self.year,max_age=self.max_age,n_groups=self.n_groups,timestep=self.timestep,n_time=self.n_time,
min_age=self.min_age)
self.init_variables()
def init_variables(self):
n_emps=self.n_employment
self.empstate=np.zeros((self.n_time,n_emps))
self.gempstate=np.zeros((self.n_time,n_emps,self.n_groups))
self.deceiced=np.zeros((self.n_time,1))
self.alive=np.zeros((self.n_time,1))
self.galive=np.zeros((self.n_time,self.n_groups))
self.rewstate=np.zeros((self.n_time,n_emps))
self.poprewstate=np.zeros((self.n_time,self.n_pop))
self.salaries_emp=np.zeros((self.n_time,n_emps))
#self.salaries=np.zeros((self.n_time,self.n_pop))
self.actions=np.zeros((self.n_time,self.n_pop))
self.popempstate=np.zeros((self.n_time,self.n_pop))
self.popunemprightleft=np.zeros((self.n_time,self.n_pop))
self.popunemprightused=np.zeros((self.n_time,self.n_pop))
self.tyoll_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.unemp_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.siirtyneet=np.zeros((self.n_time,n_emps))
self.siirtyneet_det=np.zeros((self.n_time,n_emps,n_emps))
self.pysyneet=np.zeros((self.n_time,n_emps))
self.aveV=np.zeros((self.n_time,self.n_pop))
self.time_in_state=np.zeros((self.n_time,n_emps))
self.stat_tyoura=np.zeros((self.n_time,n_emps))
self.stat_toe=np.zeros((self.n_time,n_emps))
self.stat_pension=np.zeros((self.n_time,n_emps))
self.stat_paidpension=np.zeros((self.n_time,n_emps))
self.out_of_work=np.zeros((self.n_time,n_emps))
self.stat_unemp_len=np.zeros((self.n_time,self.n_pop))
self.stat_wage_reduction=np.zeros((self.n_time,n_emps))
self.stat_wage_reduction_g=np.zeros((self.n_time,n_emps,self.n_groups))
self.infostats_group=np.zeros((self.n_pop,1))
self.infostats_taxes=np.zeros((self.n_time,1))
self.infostats_wagetaxes=np.zeros((self.n_time,1))
self.infostats_taxes_distrib=np.zeros((self.n_time,n_emps))
self.infostats_etuustulo=np.zeros((self.n_time,1))
self.infostats_etuustulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_perustulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo_eielakkeella=np.zeros((self.n_time,1))
self.infostats_palkkatulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_palkkatulo_eielakkeella_group=np.zeros((self.n_time,1))
self.infostats_ansiopvraha=np.zeros((self.n_time,1))
self.infostats_ansiopvraha_group=np.zeros((self.n_time,self.n_groups))
self.infostats_asumistuki=np.zeros((self.n_time,1))
self.infostats_asumistuki_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero=np.zeros((self.n_time,1))
self.infostats_valtionvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_kunnallisvero=np.zeros((self.n_time,1))
self.infostats_kunnallisvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_kunnallisvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_ptel=np.zeros((self.n_time,1))
self.infostats_tyotvakmaksu=np.zeros((self.n_time,1))
self.infostats_tyoelake=np.zeros((self.n_time,1))
self.infostats_kokoelake=np.zeros((self.n_time,1))
self.infostats_opintotuki=np.zeros((self.n_time,1))
self.infostats_isyyspaivaraha=np.zeros((self.n_time,1))
self.infostats_aitiyspaivaraha=np.zeros((self.n_time,1))
self.infostats_kotihoidontuki=np.zeros((self.n_time,1))
self.infostats_sairauspaivaraha=np.zeros((self.n_time,1))
self.infostats_toimeentulotuki=np.zeros((self.n_time,1))
self.infostats_tulot_netto=np.zeros((self.n_time,1))
self.infostats_pinkslip=np.zeros((self.n_time,n_emps))
self.infostats_pop_pinkslip=np.zeros((self.n_time,self.n_pop))
self.infostats_chilren18_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren7_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren18=np.zeros((self.n_time,1))
self.infostats_chilren7=np.zeros((self.n_time,1))
self.infostats_tyelpremium=np.zeros((self.n_time,self.n_pop))
self.infostats_paid_tyel_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_sairausvakuutus=np.zeros((self.n_time))
self.infostats_pvhoitomaksu=np.zeros((self.n_time,self.n_pop))
self.infostats_ylevero=np.zeros((self.n_time,1))
self.infostats_ylevero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_irr=np.zeros((self.n_pop,1))
self.infostats_npv0=np.zeros((self.n_pop,1))
self.infostats_mother_in_workforce=np.zeros((self.n_time,1))
self.infostats_children_under3=np.zeros((self.n_time,self.n_pop))
self.infostats_children_under7=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis_acc=np.zeros((self.n_time,self.n_pop))
self.infostats_toe=np.zeros((self.n_time,self.n_pop))
self.infostats_ove=np.zeros((self.n_time,n_emps))
self.infostats_kassanjasen=np.zeros((self.n_time))
self.infostats_poptulot_netto=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_wage=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_equivalent_income=np.zeros(self.n_time)
self.infostats_alv=np.zeros(self.n_time)
self.infostats_puoliso=np.zeros(self.n_time)
self.pop_predrew=np.zeros((self.n_time,self.n_pop))
if self.version==101:
self.infostats_savings=np.zeros((self.n_time,self.n_pop))
self.sav_actions=np.zeros((self.n_time,self.n_pop))
def add(self,n,act,r,state,newstate,q=None,debug=False,plot=False,aveV=None,pred_r=None):
if self.version==0:
emp,_,_,a,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
puoliso=0
elif self.version==1:
# v1
emp,_,_,_,a,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,oof,bu,wr,p=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==2:
# v2
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==3:
# v3
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58,ove,jasen=self.env.state_decode(newstate)
puoliso=0
elif self.version==4:
# v3
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,\
c3,c7,c18,unemp_left,aa,toe58,ove,jasen,puoliso,puoliso_tyossa,puoliso_palkka=self.env.state_decode(newstate)
elif self.version==101:
emp,_,_,a,_,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage,savings=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
t=int(np.round((a2-self.min_age)*self.inv_timestep))#-1
if a2>a and newemp>=0: # new state is not reset (age2>age)
if a2>self.min_retirementage and newemp==3 and self.version in set([1,2,3,4]):
newemp=2
if self.version in set([1,2,3,4]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.poprewstate[t,n]=r
self.actions[t,n]=act
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
if tis<=0.25 and newemp==5:
self.infostats_mother_in_workforce[t]+=1
self.infostats_pinkslip[t,newemp]+=pink
self.infostats_pop_pinkslip[t,n]=pink
self.gempstate[t,newemp,g]+=1
self.stat_wage_reduction[t,newemp]+=wr
self.stat_wage_reduction_g[t,newemp,g]+=wr
self.galive[t,g]+=1
self.stat_tyoura[t,newemp]+=ura
self.stat_toe[t,newemp]+=toe
self.stat_pension[t,newemp]+=newpen
self.stat_paidpension[t,newemp]+=paidpens
self.stat_unemp_len[t,n]=tis
self.popunemprightleft[t,n]=-self.env.unempright_left(newemp,tis,bu,a2,ura)
self.popunemprightused[t,n]=bu
self.infostats_group[n]=int(g)
self.infostats_unempwagebasis[t,n]=uw
self.infostats_unempwagebasis_acc[t,n]=uwr
self.infostats_toe[t,n]=toe
self.infostats_ove[t,newemp]+=ove
self.infostats_kassanjasen[t]+=jasen
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
self.infostats_puoliso[t]+=puoliso
if q is not None:
#print(newsal,q['palkkatulot'])
self.infostats_taxes[t]+=q['verot']*self.timestep*12
self.infostats_wagetaxes[t]+=q['verot_ilman_etuuksia']*self.timestep*12
self.infostats_taxes_distrib[t,newemp]+=q['verot']*self.timestep*12
self.infostats_etuustulo[t]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_etuustulo_group[t,g]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_perustulo[t]+=q['perustulo']*self.timestep*12
self.infostats_palkkatulo[t]+=q['palkkatulot']*self.timestep*12
self.infostats_palkkatulo_eielakkeella[t]+=q['palkkatulot_eielakkeella']*self.timestep*12
self.infostats_ansiopvraha[t]+=q['ansiopvraha']*self.timestep*12
self.infostats_asumistuki[t]+=q['asumistuki']*self.timestep*12
self.infostats_valtionvero[t]+=q['valtionvero']*self.timestep*12
self.infostats_valtionvero_distrib[t,newemp]+=q['valtionvero']*self.timestep*12
self.infostats_kunnallisvero[t]+=q['kunnallisvero']*self.timestep*12
self.infostats_kunnallisvero_distrib[t,newemp]+=q['kunnallisvero']*self.timestep*12
self.infostats_ptel[t]+=q['ptel']*self.timestep*12
self.infostats_tyotvakmaksu[t]+=q['tyotvakmaksu']*self.timestep*12
self.infostats_tyoelake[t]+=q['elake_maksussa']*self.timestep*12
self.infostats_kokoelake[t]+=q['kokoelake']*self.timestep*12
self.infostats_opintotuki[t]+=q['opintotuki']*self.timestep*12
self.infostats_isyyspaivaraha[t]+=q['isyyspaivaraha']*self.timestep*12
self.infostats_aitiyspaivaraha[t]+=q['aitiyspaivaraha']*self.timestep*12
self.infostats_kotihoidontuki[t]+=q['kotihoidontuki']*self.timestep*12
self.infostats_sairauspaivaraha[t]+=q['sairauspaivaraha']*self.timestep*12
self.infostats_toimeentulotuki[t]+=q['toimtuki']*self.timestep*12
self.infostats_tulot_netto[t]+=q['kateen']*self.timestep*12
self.infostats_tyelpremium[t,n]=q['tyel_kokomaksu']*self.timestep*12
self.infostats_paid_tyel_pension[t,n]=q['puhdas_tyoelake']*self.timestep*12
self.infostats_sairausvakuutus[t]+=q['sairausvakuutus']*self.timestep*12
self.infostats_pvhoitomaksu[t,n]=q['pvhoito']*self.timestep*12
self.infostats_ylevero[t]+=q['ylevero']*self.timestep*12
self.infostats_ylevero_distrib[t,newemp]=q['ylevero']*self.timestep*12
self.infostats_poptulot_netto[t,n]=q['kateen']*self.timestep*12
self.infostats_children_under3[t,n]=c3
self.infostats_children_under7[t,n]=c7
self.infostats_npv0[n]=q['multiplier']
self.infostats_equivalent_income[t]+=q['eq']
if 'alv' in q:
self.infostats_alv[t]+=q['alv']
#self.infostats_kassanjasen[t]+=1
elif self.version in set([0,101]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.infostats_tulot_netto[t]+=q['netto'] # already at annual level
self.infostats_poptulot_netto[t,n]=q['netto']
self.poprewstate[t,n]=r
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
self.infostats_equivalent_income[t]+=q['eq']
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
if self.dynprog and pred_r is not None:
self.pop_predrew[t,n]=pred_r
if self.version==101:
self.infostats_savings[t,n]=savings
self.actions[t,n]=act[0]
self.sav_actions[t,n]=act[1]
else:
self.actions[t,n]=act
# if self.version in set([1,2,3]):
# self.gempstate[t,newemp,g]+=1
# self.stat_wage_reduction[t,newemp]+=wr
# self.galive[t,g]+=1
# self.stat_tyoura[t,newemp]+=ura
# self.stat_toe[t,newemp]+=toe
# self.stat_pension[t,newemp]+=newpen
# self.stat_paidpension[t,newemp]+=paidpens
# self.stat_unemp_len[t,n]=tis
# self.popunemprightleft[t,n]=0
# self.popunemprightused[t,n]=0
if aveV is not None:
self.aveV[t,n]=aveV
if not emp==newemp:
self.siirtyneet[t,emp]+=1
self.siirtyneet_det[t,emp,newemp]+=1
else:
self.pysyneet[t,emp]+=1
elif newemp<0:
self.deceiced[t]+=1
def scale_error(self,x,target=None,averaged=False):
return (target-self.comp_scaled_consumption(x,averaged=averaged))
def comp_employed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=np.squeeze(self.gempstate[:,:,g])
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are left out, even though the leave lasts under 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
return tyoll_osuus,htv_osuus
def comp_employed_aggregate(self,emp=None,start=20,end=63.5,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are left out, even though the leave lasts under 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])/nn
htv_osuus=self.comp_state_stats(htv_osuus,start=start,end=end,ratio=True)
tyoll_osuus=self.comp_state_stats(tyoll_osuus,start=start,end=end,ratio=True)
return tyoll_osuus,htv_osuus
def comp_group_ps(self):
return self.comp_palkkasumma(grouped=True)
def comp_palkkasumma(self,start=19,end=68,grouped=False,scale_time=True):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
if grouped:
scalex=demog2/self.n_pop*self.timestep
ps=np.zeros((self.n_time,6))
ps_norw=np.zeros((self.n_time,6))
a_ps=np.zeros(6)
a_ps_norw=np.zeros(6)
for k in range(self.n_pop):
g=int(self.infostats_group[k,0])
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,g]+=self.infostats_pop_wage[t,k]
ps_norw[t,g]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
ps[t,g]+=self.infostats_pop_wage[t,k]*self.timestep
for g in range(6):
a_ps[g]=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage,g])
a_ps_norw[g]=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage,g])
else:
scalex=demog2/self.n_pop*self.timestep
ps=np.zeros((self.n_time,1))
ps_norw=np.zeros((self.n_time,1))
for k in range(self.n_pop):
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,0]+=self.infostats_pop_wage[t,k]
ps_norw[t,0]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
ps[t,0]+=self.infostats_pop_wage[t,k]
a_ps=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage])
a_ps_norw=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage])
return a_ps,a_ps_norw
def comp_stats_agegroup(self,border=[19,35,50]):
n_groups=len(border)
low=border.copy()
high=border.copy()
high[0:n_groups-1]=border[1:n_groups]
high[-1]=65
employed=np.zeros(n_groups)
unemployed=np.zeros(n_groups)
ahtv=np.zeros(n_groups)
parttimeratio=np.zeros(n_groups)
unempratio=np.zeros(n_groups)
empratio=np.zeros(n_groups)
i_ps=np.zeros(n_groups)
i_ps_norw=np.zeros(n_groups)
for n in range(n_groups):
l=low[n]
h=high[n]
htv,tyollvaikutus,tyollaste,tyotosuus,tyottomat,osatyollaste=\
self.comp_tyollisyys_stats(self.empstate,scale_time=True,start=l,end=h,agegroups=True)
ps,ps_norw=self.comp_palkkasumma(start=l,end=h)
print(f'l {l} h {h}\nhtv {htv}\ntyollaste {tyollaste}\ntyotosuus {tyotosuus}\ntyottomat {tyottomat}\nosatyollaste {osatyollaste}\nps {ps}')
employed[n]=tyollvaikutus
ahtv[n]=htv
unemployed[n]=tyottomat
unempratio[n]=tyotosuus
empratio[n]=tyollaste
parttimeratio[n]=osatyollaste
i_ps[n]=ps
i_ps_norw[n]=ps_norw
return employed,ahtv,unemployed,parttimeratio,i_ps,i_ps_norw,unempratio,empratio
def comp_unemployed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are left out, even though the leave lasts under 3 months
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])[:,None]
#tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
return tyot_osuus
def comp_unemployed_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
else:
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])/nn
#print(f'tyot_osuus {tyot_osuus}')
unemp=self.comp_state_stats(tyot_osuus,start=start,end=end,ratio=True)
return unemp
def comp_parttime_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
'''
Headcounts (NOT full-time equivalents!)
'''
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if not self.minimal:
tyossa=(emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])/nn
osatyossa=(emp[:,10]+emp[:,8])/nn
else:
tyossa=emp[:,1]/nn
osatyossa=0*tyossa
osatyo_osuus=osatyossa/tyossa
osatyo_osuus=self.comp_state_stats(osatyo_osuus,start=start,end=end,ratio=True)
kokotyo_osuus=1-osatyo_osuus
return kokotyo_osuus,osatyo_osuus
def comp_parttime_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
kokotyo_osuus=(emp[:,1])/nn
osatyo_osuus=(emp[:,3])/nn
else:
if grouped:
for g in range(6):
kokotyo_osuus=(emp[:,1,g]+emp[:,9,g])/nn
osatyo_osuus=(emp[:,8,g]+emp[:,10,g])/nn
else:
kokotyo_osuus=(emp[:,1]+emp[:,9])/nn
osatyo_osuus=(emp[:,8]+emp[:,10])/nn
osatyo_osuus=np.reshape(osatyo_osuus,(osatyo_osuus.shape[0],1))
kokotyo_osuus=np.reshape(kokotyo_osuus,(osatyo_osuus.shape[0],1))
return kokotyo_osuus,osatyo_osuus
def comp_employed_ratio(self,emp):
tyoll_osuus,htv_osuus=self.comp_employed_ratio_by_age(emp)
tyot_osuus=self.comp_unemployed_ratio_by_age(emp)
kokotyo_osuus,osatyo_osuus=self.comp_parttime_ratio_by_age(emp)
return tyoll_osuus,htv_osuus,tyot_osuus,kokotyo_osuus,osatyo_osuus
def comp_unemployed_detailed(self,emp):
if self.minimal:
ansiosid_osuus=emp[:,0]/np.sum(emp,1)
tm_osuus=ansiosid_osuus*0
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are left out, even though the leave lasts under 3 months
ansiosid_osuus=(emp[:,0]+emp[:,4])/np.sum(emp,1)
tm_osuus=(emp[:,13])/np.sum(emp,1)
return ansiosid_osuus,tm_osuus
def comp_tyollisyys_stats(self,emp,scale_time=True,start=19,end=68,full=False,tyot_stats=False,agg=False,shapes=False,only_groups=False,g=0,agegroups=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
scalex=demog2[min_cage:max_cage]/self.n_pop*scale
# note: only_groups currently has no effect; both branches computed the same ratios
tyollosuus,htvosuus,tyot_osuus,kokotyo_osuus,osatyo_osuus=self.comp_employed_ratio(emp)
htv=np.sum(scalex*htvosuus[min_cage:max_cage])
tyollvaikutus=np.sum(scalex*tyollosuus[min_cage:max_cage])
tyottomat=np.sum(scalex*tyot_osuus[min_cage:max_cage])
osatyollvaikutus=np.sum(scalex*osatyo_osuus[min_cage:max_cage])
kokotyollvaikutus=np.sum(scalex*kokotyo_osuus[min_cage:max_cage])
haj=np.mean(np.std(tyollosuus[min_cage:max_cage]))
tyollaste=tyollvaikutus/(np.sum(scalex)*self.n_pop)
osatyollaste=osatyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
kokotyollaste=kokotyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
if tyot_stats:
if agg:
# assumption: the aggregated branch uses the same demographic scaling as the
# non-aggregated branch below, so that all returned arrays are defined
d2=np.squeeze(demog2)
tyolliset_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyottomat_ika=np.squeeze(scale*d2*np.squeeze(tyot_osuus))
htv_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
return tyolliset_ika,tyottomat_ika,htv_ika,tyolliset_osuus,tyottomat_osuus
else:
d2=np.squeeze(demog2)
tyolliset_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyottomat_ika=np.squeeze(scale*d2*np.squeeze(tyot_osuus))
htv_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
return tyolliset_ika,tyottomat_ika,htv_ika,tyolliset_osuus,tyottomat_osuus
elif full:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus,osatyollvaikutus,kokotyollvaikutus,osatyollaste,kokotyollaste
elif agegroups:
tyot_osuus=self.comp_unemployed_aggregate(start=start,end=end)
return htv,tyollvaikutus,tyollaste,tyot_osuus,tyottomat,osatyollaste
else:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus
def comp_employment_stats(self,scale_time=True,returns=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(self.min_age)
max_cage=self.map_age(self.max_age)+1
scalex=np.squeeze(demog2/self.n_pop*self.timestep)
d=np.squeeze(demog2[min_cage:max_cage])
self.ratiostates=self.empstate/self.alive
self.demogstates=(self.empstate.T*scalex).T
if self.minimal>0:
# minimal model: state 1 = employed, 3 = part-time, 0 = unemployed (assumed, consistent with comp_employed_ratio_by_age)
self.stats_employed=self.demogstates[:,1]+self.demogstates[:,3]
self.stats_parttime=self.demogstates[:,3]
self.stats_unemployed=self.demogstates[:,0]
self.stats_all=np.sum(self.demogstates,1)
else:
self.stats_employed=self.demogstates[:,0]+self.demogstates[:,10]+self.demogstates[:,8]+self.demogstates[:,9]
self.stats_parttime=self.demogstates[:,10]+self.demogstates[:,8]
self.stats_unemployed=self.demogstates[:,0]+self.demogstates[:,4]+self.demogstates[:,13]
self.stats_all=np.sum(self.demogstates,1)
if returns:
return self.stats_employed,self.stats_parttime,self.stats_unemployed
# def test_emp(self):
# g_emp=0
# g_htv=0
# g_10=0
# g_1=0
# g_8=0
# g_9=0
# g_x=0
# scalex=1
#
# demog2=self.empstats.get_demog()
# scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#
#
# for g in range(6):
# q=self.comp_participants(grouped=True,g=g)
# #g_1+=np.sum(self.gempstate[:,1,g])
# #g_10+=np.sum(self.gempstate[:,10,g])
# #g_8+=np.sum(self.gempstate[:,8,g])
# #g_9+=np.sum(self.gempstate[:,9,g])
# g_emp+=q['palkansaajia']
# g_htv+=q['htv']
# g_x+=np.sum((self.gempstate[:,1,g]+self.gempstate[:,10,g])*scalex)
#
# q=self.comp_participants()
# s_1=np.sum(self.empstate[:,1])
# s_10=np.sum(self.empstate[:,10])
# s_8=np.sum(self.empstate[:,8])
# s_9=np.sum(self.empstate[:,9])
# s_x=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
# emp=q['palkansaajia']
# htv=q['htv']
#
# print(f'htv {htv} vs g_htv {g_htv}')
# print(f'emp {emp} vs g_emp {g_emp}')
# print(f's_x {s_x} vs g_x {g_x}')
# #print(f's_1 {s_1} vs g_1 {g_1}')
# #print(f's_10 {s_10} vs g_10 {g_10}')
# #print(f's_8 {s_8} vs g_8 {g_8}')
# #print(f's_9 {s_9} vs g_9 {g_9}')
def comp_participants(self,scale=True,include_retwork=True,grouped=False,g=0):
'''
<NAME> count
scalex assumes equal numbers of women and men. This could be refined.
'''
demog2=self.empstats.get_demog()
scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#print('version',self.version)
q={}
if self.version in set([1,2,3,4]):
if grouped:
#print('group=',g)
emp=np.squeeze(self.gempstate[:,:,g])
q['yhteensä']=np.sum(np.sum(emp,axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10]+0.5*emp[:,8]+emp[:,9])*scalex)
else:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10])*scalex)
q['ansiosidonnaisella']=np.sum((emp[:,0]+emp[:,4])*scalex)
q['tmtuella']=np.sum(emp[:,13]*scalex)
q['isyysvapaalla']=np.sum(emp[:,6]*scalex)
q['kotihoidontuella']=np.sum(emp[:,7]*scalex)
q['vanhempainvapaalla']=np.sum(emp[:,5]*scalex)
else:
q['yhteensä']=np.sum(np.sum(self.empstate[:,:],axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10]+self.empstate[:,8]+self.empstate[:,9])*scalex)
q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10]+0.5*self.empstate[:,8]+self.empstate[:,9])*scalex)
else:
q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10])*scalex)
q['ansiosidonnaisella']=np.sum((self.empstate[:,0]+self.empstate[:,4])*scalex)
q['tmtuella']=np.sum(self.empstate[:,13]*scalex)
q['isyysvapaalla']=np.sum(self.empstate[:,6]*scalex)
q['kotihoidontuella']=np.sum(self.empstate[:,7]*scalex)
q['vanhempainvapaalla']=np.sum(self.empstate[:,5]*scalex)
else:
q['yhteensä']=np.sum(np.sum(self.empstate[:,:],1)*scalex)
q['palkansaajia']=np.sum((self.empstate[:,1])*scalex)
q['htv']=np.sum((self.empstate[:,1])*scalex)
q['ansiosidonnaisella']=np.sum((self.empstate[:,0])*scalex)
q['tmtuella']=np.sum(self.empstate[:,1]*0)
q['isyysvapaalla']=np.sum(self.empstate[:,1]*0)
q['kotihoidontuella']=np.sum(self.empstate[:,1]*0)
q['vanhempainvapaalla']=np.sum(self.empstate[:,1]*0)
return q
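# Hedged usage sketch: comp_participants returns a dict keyed by the (Finnish)
# labels set above, e.g. q['palkansaajia'] (wage earners), q['htv'] (full-time
# equivalents) and q['ansiosidonnaisella'] (on earnings-related benefit).
# The instance name below is a placeholder:
#   q = stats.comp_participants(include_retwork=True)
#   print(q['palkansaajia'], q['htv'], q['ansiosidonnaisella'])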
def comp_employment_groupstats(self,scale_time=True,g=0,include_retwork=True,grouped=True):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
#min_cage=self.map_age(self.min_age)
#max_cage=self.map_age(self.max_age)+1
scalex=np.squeeze(demog2/self.n_pop*scale)
#d=np.squeeze(demog2[min_cage:max_cage])
if grouped:
ratiostates=np.squeeze(self.gempstate[:,:,g])/self.alive
demogstates=np.squeeze(self.gempstate[:,:,g])
else:
ratiostates=self.empstate[:,:]/self.alive
demogstates=self.empstate[:,:]
if self.version in set([1,2,3,4]):
if include_retwork:
stats_employed=np.sum((demogstates[:,1]+demogstates[:,9])*scalex)
stats_parttime=np.sum((demogstates[:,10]+demogstates[:,8])*scalex)
else:
stats_employed=np.sum((demogstates[:,1])*scalex)
stats_parttime=np.sum((demogstates[:,10])*scalex)
stats_unemployed=np.sum((demogstates[:,0]+demogstates[:,4]+demogstates[:,13])*scalex)
else:
# minimal model: state 1 = employed, 3 = part-time, 0 = unemployed (assumed, consistent with comp_employed_ratio_by_age)
stats_employed=np.sum((demogstates[:,1]+demogstates[:,3])*scalex)
stats_parttime=np.sum((demogstates[:,3])*scalex)
stats_unemployed=np.sum((demogstates[:,0])*scalex)
#stats_all=np.sum(demogstates,1)
return stats_employed,stats_parttime,stats_unemployed
def comp_state_stats(self,state,scale_time=True,start=20,end=63.5,ratio=False):
demog2=np.squeeze(self.empstats.get_demog())
#if scale_time:
# scale=self.timestep
#else:
# scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
#vaikutus=np.round(scale*np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage]))/np.sum(demog2[min_cage:max_cage])
vaikutus=np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage])/np.sum(demog2[min_cage:max_cage])
x=np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage])
y=np.sum(demog2[min_cage:max_cage])
#print(f'vaikutus {vaikutus} x {x} y {y}\n s {state[min_cage:max_cage]} mean {np.mean(state[min_cage:max_cage])}\n d {demog2[min_cage:max_cage]}')
return vaikutus
def get_vanhempainvapaat(self):
'''
Computes the number of people on parental leave for the outsider model (Excel), state 6
'''
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
ulkopuolella_m=np.sum(self.gempstate[:,7,0:3],axis=1)[:,None]/alive
alive[:,0]=np.sum(self.galive[:,3:6],1)
nn=np.sum(self.gempstate[:,5,3:6]+self.gempstate[:,7,3:6],axis=1)[:,None]-self.infostats_mother_in_workforce
ulkopuolella_n=nn/alive
return ulkopuolella_m[::4],ulkopuolella_n[::4]
def get_vanhempainvapaat_md(self):
'''
Computes the number of people on parental leave for the outsider model (Excel), state 7
'''
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
ulkopuolella_m=np.sum(self.gempstate[:,6,0:3],axis=1)[:,None]/alive
alive[:,0]=np.sum(self.galive[:,3:6],1)
nn=self.infostats_mother_in_workforce
ulkopuolella_n=nn/alive
return ulkopuolella_m[::4],ulkopuolella_n[::4]
def comp_L2error(self):
tyollisyysaste_m,osatyoaste_m,tyottomyysaste_m,ka_tyottomyysaste=self.comp_gempratios(gender='men',unempratio=False)
tyollisyysaste_w,osatyoaste_w,tyottomyysaste_w,ka_tyottomyysaste=self.comp_gempratios(gender='women',unempratio=False)
emp_statsratio_m=self.empstats.emp_stats(g=1)[:-1]*100
emp_statsratio_w=self.empstats.emp_stats(g=2)[:-1]*100
unemp_statsratio_m=self.empstats.unemp_stats(g=1)[:-1]*100
unemp_statsratio_w=self.empstats.unemp_stats(g=2)[:-1]*100
w1=1.0
w2=3.0
L2= w1*np.sum(np.abs(emp_statsratio_m-tyollisyysaste_m[:-1])**2)+\
w1*np.sum(np.abs(emp_statsratio_w-tyollisyysaste_w[:-1])**2)+\
w2*np.sum(np.abs(unemp_statsratio_m-tyottomyysaste_m[:-1])**2)+\
w2*np.sum(np.abs(unemp_statsratio_w-tyottomyysaste_w[:-1])**2)
L2=L2/self.n_pop
#print(L1,emp_statsratio_m,tyollisyysaste_m,tyollisyysaste_w,unemp_statsratio_m,tyottomyysaste_m,tyottomyysaste_w)
print('L2 error {}'.format(L2))
return L2
def comp_budgetL2error(self,ref_muut,scale=1):
q=self.comp_budget()
muut=q['muut tulot']
L2=-((ref_muut-muut)/scale)**2
print(f'L2 error {L2} (muut {muut} muut_ref {ref_muut})')
return L2
def optimize_scale(self,target,averaged=False): # averaged defaults to False, matching scale_error
opt=scipy.optimize.least_squares(self.scale_error,0.20,bounds=(-1,1),kwargs={'target':target,'averaged':averaged})
#print(opt)
return opt['x']
def optimize_logutil(self,target,source):
'''
analytical compensated consumption
does not implement final reward, hence duration 110 y
'''
n_time=110
gy=np.empty(n_time)
g=1
gx=np.empty(n_time)
for t in range(0,n_time):
gx[t]=g
g*=self.gamma
for t in range(1,n_time):
gy[t]=np.sum(gx[0:t])
gf=np.mean(gy[1:])/10
lx=(target-source)
opt=np.exp(lx/gf)-1.0
print(opt)
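# Sketch of the reasoning behind optimize_logutil (my reading of the code above,
# assuming log utility): with the discounted weight gf ~ mean_t sum_{s<t} gamma^s / 10,
# a constant compensation c closing the utility gap lx = target - source satisfies
# gf*log(1+c) = lx, hence c = exp(lx/gf) - 1, which is the 'opt' value printed above.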
def min_max(self):
min_wage=np.min(self.infostats_pop_wage)
max_wage=np.max(self.infostats_pop_wage)
max_pension=np.max(self.infostats_pop_pension)
min_pension=np.min(self.infostats_pop_pension)
print(f'min wage {min_wage} max wage {max_wage}')
print(f'min pension {min_pension} max pension {max_pension}')
def setup_labels(self):
self.labels=self.lab.get_labels(self.language)
def map_age(self,age,start_zero=False):
if start_zero:
return int((age)*self.inv_timestep)
else:
return int((age-self.min_age)*self.inv_timestep)
def map_t_to_age(self,t):
return self.min_age+t/self.inv_timestep
def episodestats_exit(self):
plt.close(self.episode_fig)
def comp_gini(self):
'''
<NAME> coefficient for the population
'''
income=np.sort(self.infostats_tulot_netto,axis=None)
n=len(income)
L=np.arange(n,0,-1)
A=np.sum(L*income)/np.sum(income)
# for ascending-sorted income and descending ranks L=n..1: G=(n+1-2*A)/n
G=(n+1-2*A)/n
return G
def comp_annual_irr(self,npv,premium,pension,empstate,doprint=False):
k=0
max_npv=int(np.ceil(npv))
cashflow=-premium+pension
x=np.zeros(cashflow.shape[0]+max_npv)
eind=np.zeros(max_npv+1)
el=1
for k in range(max_npv+1):
eind[k]=el
el=el*self.elakeindeksi
x[:cashflow.shape[0]]=cashflow
if npv>0:
x[cashflow.shape[0]-1:]=cashflow[-2]*eind[:max_npv+1]
y=np.zeros(int(np.ceil(x.shape[0]/4)))
for k in range(y.shape[0]):
y[k]=np.sum(x[4*k:4*k+4])
irri=npf.irr(y)*100
#if np.isnan(irri):
# if np.sum(pension)<0.1 and np.sum(empstate[0:self.map_age(63)]==15)>0: # vain maksuja, joista ei saa tuottoja, joten tappio 100%
# irri=-100
if irri<0.01 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
if irri>100 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
if np.isnan(irri) and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,np.sum(pension),empstate))
#print('---------\nirri {}\nnpv {}\n\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,np.sum(pension),np.sum(empstate==15)))
if irri<-50 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
return irri
def comp_irr(self):
'''
Computes the internal rate of return (IRR)
Indexation is missing for the npv part
Inflation + real wage growth (= nominal wage growth) must be added to the results
'''
for k in range(self.n_pop):
self.infostats_irr[k]=self.reaalinen_palkkojenkasvu*100+self.comp_annual_irr(self.infostats_npv0[k,0],self.infostats_tyelpremium[:,k],self.infostats_paid_tyel_pension[:,k],self.popempstate[:,k])
def comp_aggirr(self):
'''
Computes the aggregated internal rate of return (IRR)
Indexation is missing for the npv part
Inflation + real wage growth (= nominal wage growth) must be added to the results
'''
maxnpv=np.max(self.infostats_npv0)
agg_premium=np.sum(self.infostats_tyelpremium,axis=1)
agg_pensions=np.sum(self.infostats_paid_tyel_pension,axis=1)
agg_irr=self.reaalinen_palkkojenkasvu*100+self.comp_annual_irr(maxnpv,agg_premium,agg_pensions,self.popempstate[:,0])
x=np.zeros(self.infostats_paid_tyel_pension.shape[0]+int(np.ceil(maxnpv)))
max_npv=int(max(np.ceil(self.infostats_npv0[:,0])))
eind=np.zeros(max_npv)
el=1
for k in range(max_npv):
eind[k]=el
el=el*self.elakeindeksi
cfn=self.infostats_tyelpremium.shape[0]
for k in range(self.n_pop):
if np.sum(self.popempstate[0:self.map_age(63),k]==15)<1: # excluding the deceased
n=int(np.ceil(self.infostats_npv0[k,0]))
cashflow=-self.infostats_tyelpremium[:,k]+self.infostats_paid_tyel_pension[:,k]
# indexation is missing
x[:cfn]+=cashflow
if n>0:
x[cfn-1:cfn+n-1]+=cashflow[-2]*eind[:n] # not indexed; the guarantee pension should also be taken into account
y=np.zeros(int(np.ceil(x.shape[0]/4)))
for k in range(y.shape[0]):
y[k]=np.sum(x[4*k:4*k+4]) # annualise the quarterly cashflow, as in comp_annual_irr
irri=npf.irr(y)*100
print('aggregate irr {}'.format(agg_irr))
def comp_unemp_durations(self,popempstate=None,popunemprightused=None,putki=True,\
tmtuki=False,laaja=False,outsider=False,ansiosid=True,tyott=False,kaikki=False,\
return_q=True,max_age=100):
'''
Unemployment durations at the cross-section time
'''
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
if popunemprightused is None:
popunemprightused=self.popunemprightused
keskikesto=np.zeros((5,5)) # 20-29, 30-39, 40-49, 50-59, 60-69; corresponds to the TYJ statistics
n=np.zeros(5)
for k in range(self.n_pop):
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k] in unempset:
if age<29:
l=0
elif age<39:
l=1
elif age<49:
l=2
elif age<59:
l=3
else:
l=4
n[l]+=1
# bucket the spell by how much of the benefit right has already been used
if popunemprightused[t,k]<=0.51:
keskikesto[l,0]+=1
elif popunemprightused[t,k]<=1.01:
keskikesto[l,1]+=1
elif popunemprightused[t,k]<=1.51:
keskikesto[l,2]+=1
elif popunemprightused[t,k]<=2.01:
keskikesto[l,3]+=1
else:
keskikesto[l,4]+=1
for k in range(5):
keskikesto[k,:] /= n[k]
if return_q:
return self.empdur_to_dict(keskikesto)
else:
return keskikesto
def empdur_to_dict(self,empdur):
q={}
q['20-29']=empdur[0,:]
q['30-39']=empdur[1,:]
q['40-49']=empdur[2,:]
q['50-59']=empdur[3,:]
q['60-65']=empdur[4,:]
return q
def comp_unemp_durations_v2(self,popempstate=None,putki=True,tmtuki=False,laaja=False,\
outsider=False,ansiosid=True,tyott=False,kaikki=False,\
return_q=True,max_age=100):
'''
Unemployment durations at the cross-section time
Here the result is computed from the state data, so it is the duration of the most recent spell
'''
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
keskikesto=np.zeros((5,5)) # 20-29, 30-39, 40-49, 50-59, 60-69; corresponds to the TYJ statistics
n=np.zeros(5)
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] not in unempset:
prev_state=popempstate[t,k]
duration=(t-prev_trans)*self.timestep
prev_trans=t
if age<29:
l=0
elif age<39:
l=1
elif age<49:
l=2
elif age<59:
l=3
else:
l=4
n[l]+=1
if duration<=0.51:
keskikesto[l,0]+=1
elif duration<=1.01:
keskikesto[l,1]+=1
elif duration<=1.51:
keskikesto[l,2]+=1
elif duration<=2.01:
keskikesto[l,3]+=1
else:
keskikesto[l,4]+=1
elif prev_state not in unempset and popempstate[t,k] in unempset:
prev_trans=t
prev_state=popempstate[t,k]
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
for k in range(5):
keskikesto[k,:] /= n[k]
if return_q:
return self.empdur_to_dict(keskikesto)
else:
return keskikesto
def comp_virrat(self,popempstate=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,kaikki=False,max_age=100):
tyoll_virta=np.zeros((self.n_time,1))
tyot_virta=np.zeros((self.n_time,1))
unempset=[]
empset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
empset=set([1,10])
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] in empset:
tyoll_virta[t]+=1
prev_state=popempstate[t,k]
elif prev_state in empset and popempstate[t,k] in unempset:
tyot_virta[t]+=1
prev_state=popempstate[t,k]
else: # some other state
prev_state=popempstate[t,k]
return tyoll_virta,tyot_virta
def comp_tyollistymisdistribs(self,popempstate=None,popunemprightleft=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,max_age=100):
tyoll_distrib=[]
tyoll_distrib_bu=[]
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
empset=set([1,10])
unempset=set(unempset)
if popempstate is None or popunemprightleft is None:
popempstate=self.popempstate
popunemprightleft=self.popunemprightleft
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] in empset:
tyoll_distrib.append((t-prev_trans)*self.timestep)
tyoll_distrib_bu.append(popunemprightleft[t,k])
prev_state=popempstate[t,k]
prev_trans=t
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
return tyoll_distrib,tyoll_distrib_bu
def comp_empdistribs(self,popempstate=None,popunemprightleft=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,max_age=100):
unemp_distrib=[]
unemp_distrib_bu=[]
emp_distrib=[]
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if popempstate is None or popunemprightleft is None:
popempstate=self.popempstate
popunemprightleft=self.popunemprightleft
empset=set([1,10])
unempset=set(unempset)
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] not in unempset:
unemp_distrib.append((t-prev_trans)*self.timestep)
unemp_distrib_bu.append(popunemprightleft[t,k])
prev_state=popempstate[t,k]
prev_trans=t
elif prev_state in empset and popempstate[t,k] not in unempset:
emp_distrib.append((t-prev_trans)*self.timestep)
prev_state=popempstate[t,k]
prev_trans=t
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
return unemp_distrib,emp_distrib,unemp_distrib_bu
def empdist_stat(self):
ratio=np.array([1,0.287024901703801,0.115508955875928,0.0681083442551332,0.0339886413280909,0.0339886413280909,0.0114460463084316,0.0114460463084316,0.0114460463084316,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206])
return ratio
def comp_gempratios(self,unempratio=True,gender='men'):
if gender=='men': # men
gempstate=np.sum(self.gempstate[:,:,0:3],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
mother_in_workforce=0
else: # women
gempstate=np.sum(self.gempstate[:,:,3:6],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
mother_in_workforce=self.infostats_mother_in_workforce
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(gempstate,alive,unempratio=unempratio,mother_in_workforce=mother_in_workforce)
return tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste
def comp_empratios(self,emp,alive,unempratio=True,mother_in_workforce=0):
employed=emp[:,1]
retired=emp[:,2]
unemployed=emp[:,0]
if self.version in set([1,2,3,4]):
disabled=emp[:,3]
piped=emp[:,4]
mother=emp[:,5]
dad=emp[:,6]
kotihoidontuki=emp[:,7]
vetyo=emp[:,9]
veosatyo=emp[:,8]
osatyo=emp[:,10]
outsider=emp[:,11]
student=emp[:,12]
tyomarkkinatuki=emp[:,13]
tyollisyysaste=100*(employed+osatyo+veosatyo+vetyo+dad+mother_in_workforce)/alive[:,0]
osatyoaste=100*(osatyo+veosatyo)/(employed+osatyo+veosatyo+vetyo)
if unempratio:
tyottomyysaste=100*(unemployed+piped+tyomarkkinatuki)/(tyomarkkinatuki+unemployed+employed+piped+osatyo+veosatyo+vetyo)
ka_tyottomyysaste=100*np.sum(unemployed+tyomarkkinatuki+piped)/np.sum(tyomarkkinatuki+unemployed+employed+piped+osatyo+veosatyo+vetyo)
else:
tyottomyysaste=100*(unemployed+piped+tyomarkkinatuki)/alive[:,0]
ka_tyottomyysaste=100*np.sum(unemployed+tyomarkkinatuki+piped)/np.sum(alive[:,0])
elif self.version in set([0,101]):
if False:
osatyo=emp[:,3]
else:
osatyo=0
tyollisyysaste=100*(employed+osatyo)/alive[:,0]
#osatyoaste=np.zeros(employed.shape)
osatyoaste=100*(osatyo)/(employed+osatyo)
if unempratio:
tyottomyysaste=100*(unemployed)/(unemployed+employed+osatyo)
ka_tyottomyysaste=100*np.sum(unemployed)/np.sum(unemployed+employed+osatyo)
else:
tyottomyysaste=100*(unemployed)/alive[:,0]
ka_tyottomyysaste=100*np.sum(unemployed)/np.sum(alive[:,0])
return tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste
def plot_ratiostats(self,t):
'''
Make plots of the results
'''
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.set_xlabel('palkat')
ax.set_ylabel('freq')
ax.hist(self.infostats_pop_wage[t,:])
plt.show()
fig,ax=plt.subplots()
ax.set_xlabel('aika')
ax.set_ylabel('palkat')
meansal=np.mean(self.infostats_pop_wage,axis=1)
stdsal=np.std(self.infostats_pop_wage,axis=1)
ax.plot(x,meansal)
ax.plot(x,meansal+stdsal)
ax.plot(x,meansal-stdsal)
plt.show()
def plot_empdistribs(self,emp_distrib):
fig,ax=plt.subplots()
ax.set_xlabel('työsuhteen pituus [v]')
ax.set_ylabel('freq')
ax.set_yscale('log')
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(emp_distrib,x)
scaled=scaled/np.sum(emp_distrib)
#ax.hist(emp_distrib)
ax.bar(x2[1:-1],scaled[1:],align='center')
plt.show()
def plot_compare_empdistribs(self,emp_distrib,emp_distrib2,label2='vaihtoehto',label1=''):
fig,ax=plt.subplots()
ax.set_xlabel('työsuhteen pituus [v]')
ax.set_ylabel(self.labels['probability'])
ax.set_yscale('log')
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(emp_distrib,x)
scaled=scaled/np.sum(emp_distrib)
x=np.linspace(0,max_time,nn_time)
scaled3,x3=np.histogram(emp_distrib2,x)
scaled3=scaled3/np.sum(emp_distrib2)
ax.plot(x3[:-1],scaled3,label=label1)
ax.plot(x2[:-1],scaled,label=label2)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plot_vlines_unemp(self,point=0):
axvcolor='gray'
lstyle='--'
plt.axvline(x=300/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(310/(12*21.5),point,'300',rotation=90)
plt.axvline(x=400/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(410/(12*21.5),point,'400',rotation=90)
plt.axvline(x=500/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(510/(12*21.5),point,'500',rotation=90)
def plot_tyolldistribs(self,emp_distrib,tyoll_distrib,tyollistyneet=True,max=10,figname=None):
max_time=55
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled0,x0=np.histogram(emp_distrib,x)
if not tyollistyneet:
scaled=scaled0
x2=x0
else:
scaled,x2=np.histogram(tyoll_distrib,x)
jaljella=np.cumsum(scaled0[::-1])[::-1] # cumulative sum of those remaining
scaled=scaled/jaljella
fig,ax=plt.subplots()
ax.set_xlabel('työttömyysjakson pituus [v]')
if tyollistyneet:
ax.set_ylabel('työllistyneiden osuus')
point=0.5
else:
ax.set_ylabel('pois siirtyneiden osuus')
point=0.9
self.plot_vlines_unemp(point)
ax.plot(x2[1:-1],scaled[1:])
#ax.bar(x2[1:-1],scaled[1:],align='center',width=self.timestep)
plt.xlim(0,max)
if figname is not None:
plt.savefig(figname+'tyollistyneetdistrib.eps', format='eps')
plt.show()
def plot_tyolldistribs_both(self,emp_distrib,tyoll_distrib,max=10,figname=None):
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled0,x0=np.histogram(emp_distrib,x)
scaled=scaled0
scaled_tyoll,x2=np.histogram(tyoll_distrib,x)
jaljella=np.cumsum(scaled0[::-1])[::-1] # sum of those remaining
scaled=scaled/jaljella
jaljella_tyoll=np.cumsum(scaled0[::-1])[::-1] # sum of those remaining
scaled_tyoll=scaled_tyoll/jaljella_tyoll
fig,ax=plt.subplots()
ax.set_xlabel('työttömyysjakson pituus [v]')
point=0.6
self.plot_vlines_unemp(point)
ax.plot(x2[1:-1],scaled[1:],label='pois siirtyneiden osuus')
ax.plot(x2[1:-1],scaled_tyoll[1:],label='työllistyneiden osuus')
#ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.legend()
ax.set_ylabel('pois siirtyneiden osuus')
plt.xlim(0,max)
plt.ylim(0,0.8)
if figname is not None:
plt.savefig(figname+'tyolldistribs.eps', format='eps')
plt.show()
def plot_tyolldistribs_both_bu(self,emp_distrib,tyoll_distrib,max=2):
max_time=4
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(-max_time,0,nn_time)
scaled0,x0=np.histogram(emp_distrib,x)
scaled=scaled0
scaled_tyoll,x2=np.histogram(tyoll_distrib,x)
jaljella=np.cumsum(scaled0[::-1])[::-1] # sum of those remaining
#jaljella=np.cumsum(scaled0)
scaled=scaled/jaljella
jaljella_tyoll=np.cumsum(scaled0[::-1])[::-1] # sum of those remaining
#jaljella_tyoll=np.cumsum(scaled0)
scaled_tyoll=scaled_tyoll/jaljella_tyoll
fig,ax=plt.subplots()
ax.set_xlabel('aika ennen ansiopäivärahaoikeuden loppua [v]')
point=0.6
#self.plot_vlines_unemp(point)
ax.plot(x2[1:-1],scaled[1:],label='pois siirtyneiden osuus')
ax.plot(x2[1:-1],scaled_tyoll[1:],label='työllistyneiden osuus')
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_ylabel('pois siirtyneiden osuus')
plt.xlim(-max,0)
#plt.ylim(0,0.8)
plt.show()
def plot_compare_tyolldistribs(self,emp_distrib1,tyoll_distrib1,emp_distrib2,
tyoll_distrib2,tyollistyneet=True,max=4,label1='perus',label2='vaihtoehto',
figname=None):
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
# data1
scaled01,x0=np.histogram(emp_distrib1,x)
if not tyollistyneet:
scaled1=scaled01
x1=x0
else:
scaled1,x1=np.histogram(tyoll_distrib1,x)
jaljella1=np.cumsum(scaled01[::-1])[::-1] # count of spells still ongoing
scaled1=scaled1/jaljella1
# data2
scaled02,x0=np.histogram(emp_distrib2,x)
if not tyollistyneet:
scaled2=scaled02
x2=x0
else:
scaled2,x2=np.histogram(tyoll_distrib2,x)
jaljella2=np.cumsum(scaled02[::-1])[::-1] # count of spells still ongoing
scaled2=scaled2/jaljella2
fig,ax=plt.subplots()
ax.set_xlabel('työttömyysjakson pituus [v]')
if tyollistyneet:
ax.set_ylabel('työllistyneiden osuus')
else:
ax.set_ylabel('pois siirtyneiden osuus')
self.plot_vlines_unemp()
ax.plot(x2[1:-1],scaled2[1:],label=label2)
ax.plot(x1[1:-1],scaled1[1:],label=label1)
#ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.legend()
plt.xlim(0,max)
if figname is not None:
plt.savefig(figname+'comp_tyollistyneetdistrib.eps', format='eps')
plt.show()
def plot_unempdistribs(self,unemp_distrib,max=4,figname=None,miny=None,maxy=None):
#fig,ax=plt.subplots()
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(unemp_distrib,x)
scaled=scaled/np.sum(unemp_distrib)
fig,ax=plt.subplots()
self.plot_vlines_unemp(0.6)
ax.set_xlabel(self.labels['unemp duration'])
ax.set_ylabel(self.labels['probability'])
ax.plot(x[:-1],scaled)
ax.set_yscale('log')
plt.xlim(0,max)
if miny is not None:
plt.ylim(miny,maxy)
if figname is not None:
plt.savefig(figname+'unempdistribs.eps', format='eps')
plt.show()
def plot_saldist(self,t=0,sum=False,all=False,n=10,bins=30):
if all:
fig,ax=plt.subplots()
for t in range(1,self.n_time-1,5):
scaled,x=np.histogram(self.infostats_pop_wage[t,:],bins=bins)
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t)
plt.legend()
plt.show()
else:
if sum:
scaled,x=np.histogram(np.sum(self.infostats_pop_wage,axis=0),bins=bins)
x2=0.5*(x[1:]+x[0:-1])
plt.plot(x2,scaled)
else:
fig,ax=plt.subplots()
for t1 in range(t,t+n,1):
scaled,x=np.histogram(self.infostats_pop_wage[t1,:],bins=bins)
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t1)
plt.legend()
plt.show()
def test_salaries(self):
n=self.n_pop
palkat_ika_miehet=12.5*np.array([2339.01,2489.09,2571.40,2632.58,2718.03,2774.21,2884.89,2987.55,3072.40,3198.48,3283.81,3336.51,3437.30,3483.45,3576.67,3623.00,3731.27,3809.58,3853.66,3995.90,4006.16,4028.60,4104.72,4181.51,4134.13,4157.54,4217.15,4165.21,4141.23,4172.14,4121.26,4127.43,4134.00,4093.10,4065.53,4063.17,4085.31,4071.25,4026.50,4031.17,4047.32,4026.96,4028.39,4163.14,4266.42,4488.40,4201.40,4252.15,4443.96,3316.92,3536.03,3536.03])
palkat_ika_naiset=12.5*np.array([2223.96,2257.10,2284.57,2365.57,2443.64,2548.35,2648.06,2712.89,2768.83,2831.99,2896.76,2946.37,2963.84,2993.79,3040.83,3090.43,3142.91,3159.91,3226.95,3272.29,3270.97,3297.32,3333.42,3362.99,3381.84,3342.78,3345.25,3360.21,3324.67,3322.28,3326.72,3326.06,3314.82,3303.73,3302.65,3246.03,3244.65,3248.04,3223.94,3211.96,3167.00,3156.29,3175.23,3228.67,3388.39,3457.17,3400.23,3293.52,2967.68,2702.05,2528.84,2528.84])
g_r=[0.77,1.0,1.23]
data_range=np.arange(20,72)
sal20=np.zeros((n,1))
sal25=np.zeros((n,1))
sal30=np.zeros((n,1))
sal40=np.zeros((n,1))
sal50=np.zeros((n,1))
sal60=np.zeros((n,1))
sal=np.zeros((n,72))
p=np.arange(700,17500,100)*12.5
palkka20=np.array([10.3,5.6,4.5,14.2,7.1,9.1,22.8,22.1,68.9,160.3,421.6,445.9,501.5,592.2,564.5,531.9,534.4,431.2,373.8,320.3,214.3,151.4,82.3,138.0,55.6,61.5,45.2,19.4,32.9,13.1,9.6,7.4,12.3,12.5,11.5,5.3,2.4,1.6,1.2,1.2,14.1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
palkka25=np.array([12.4,11.3,30.2,4.3,28.5,20.3,22.5,23.7,83.3,193.0,407.9,535.0,926.5,1177.1,1540.9,1526.4,1670.2,1898.3,1538.8,1431.5,1267.9,1194.8,1096.3,872.6,701.3,619.0,557.2,465.8,284.3,291.4,197.1,194.4,145.0,116.7,88.7,114.0,56.9,57.3,55.0,25.2,24.4,20.1,25.2,37.3,41.4,22.6,14.1,9.4,6.3,7.5,8.1,9.0,4.0,3.4,5.4,4.1,5.2,1.0,2.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
palkka30=np.array([1.0,2.0,3.0,8.5,12.1,22.9,15.8,21.8,52.3,98.2,295.3,392.8,646.7,951.4,1240.5,1364.5,1486.1,1965.2,1908.9,1729.5,1584.8,1460.6,1391.6,1551.9,1287.6,1379.0,1205.6,1003.6,1051.6,769.9,680.5,601.2,552.0,548.3,404.5,371.0,332.7,250.0,278.2,202.2,204.4,149.8,176.7,149.0,119.6,76.8,71.4,56.3,75.9,76.8,58.2,50.2,46.8,48.9,30.1,32.2,28.8,31.1,45.5,41.2,36.5,18.1,11.6,8.5,10.2,4.3,13.5,12.3,4.9,13.9,5.4,5.9,7.4,14.1,9.6,8.4,11.5,0.0,3.3,9.0,5.2,5.0,3.1,7.4,2.0,4.0,4.1,14.0,2.0,3.0,1.0,0.0,6.2,2.0,1.2,2.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
palkka50=np.array([2.0,3.1,2.4,3.9,1.0,1.0,11.4,30.1,29.3,34.3,231.9,341.9,514.4,724.0,1076.8,1345.2,1703.0,1545.8,1704.0,1856.1,1805.4,1608.1,1450.0,1391.4,1338.5,1173.2,1186.3,1024.8,1105.6,963.0,953.0,893.7,899.8,879.5,857.0,681.5,650.5,579.2,676.8,498.0,477.5,444.3,409.1,429.0,340.5,297.2,243.1,322.5,297.5,254.1,213.1,249.3,212.1,212.8,164.4,149.3,158.6,157.4,154.1,112.7,93.4,108.4,87.3,86.7,82.0,115.9,66.9,84.2,61.4,43.7,58.1,40.9,73.9,50.0,51.6,25.7,43.2,48.2,43.0,32.6,21.6,22.4,36.3,28.3,19.4,21.1,21.9,21.5,19.2,15.8,22.6,9.3,14.0,22.4,14.0,13.0,11.9,18.7,7.3,21.6,9.5,11.2,12.0,18.2,12.9,2.2,10.7,6.1,11.7,7.6,1.0,4.7,8.5,6.4,3.3,4.6,1.2,3.7,5.8,1.0,1.0,1.0,1.0,3.2,1.2,3.1,2.2,2.3,2.1,1.1,2.0,2.1,2.2,4.6,2.2,1.0,1.0,1.0,0.0,3.0,1.2,0.0,8.2,3.0,1.0,1.0,2.1,1.2,3.2,1.0,5.2,1.1,5.2,1.0,1.2,2.3,1.0,3.1,1.0,1.0,1.1,1.6,1.1,1.1,1.0,1.0,1.0,1.0])
m20=0
m25=0
m30=0
m40=0
m50=0
m60=0
salx=np.zeros((self.n_time+2,1))
saln=np.zeros((self.n_time+2,1))
salx_m=np.zeros((self.n_time+2,1))
saln_m=np.zeros((self.n_time+2,1))
salx_f=np.zeros((self.n_time+2,1))
saln_f=np.zeros((self.n_time+2,1))
for k in range(self.n_pop):
for t in range(self.n_time-2):
if self.popempstate[t,k] in set([1,10,8,9]):
salx[t]=salx[t]+self.infostats_pop_wage[t,k]
saln[t]=saln[t]+1
if self.infostats_group[k]>2:
salx_f[t]=salx_f[t]+self.infostats_pop_wage[t,k]
saln_f[t]=saln_f[t]+1
else:
salx_m[t]=salx_m[t]+self.infostats_pop_wage[t,k]
saln_m[t]=saln_m[t]+1
if self.popempstate[self.map_age(20),k] in set([1,10]):
sal20[m20]=self.infostats_pop_wage[self.map_age(20),k]
m20=m20+1
if self.popempstate[self.map_age(25),k] in set([1,10]):
sal25[m25]=self.infostats_pop_wage[self.map_age(25),k]
m25=m25+1
if self.popempstate[self.map_age(30),k] in set([1,10]):
sal30[m30]=self.infostats_pop_wage[self.map_age(30),k]
m30=m30+1
if self.popempstate[self.map_age(40),k] in set([1,10]):
sal40[m40]=self.infostats_pop_wage[self.map_age(40),k]
m40=m40+1
if self.popempstate[self.map_age(50),k] in set([1,10]):
sal50[m50]=self.infostats_pop_wage[self.map_age(50),k]
m50=m50+1
if self.popempstate[self.map_age(60),k] in set([1,10]):
sal60[m60]=self.infostats_pop_wage[self.map_age(60),k]
m60=m60+1
salx=salx/np.maximum(1,saln)
salx_f=salx_f/np.maximum(1,saln_f)
salx_m=salx_m/np.maximum(1,saln_m)
#print(sal25,self.infostats_pop_wage)
def kuva(sal,ika,m,p,palkka):
plt.hist(sal[:m],bins=50,density=True)
ave=np.mean(sal[:m])/12
palave=np.sum(palkka*p)/12/np.sum(palkka)
plt.title('{}: ave {} vs {}'.format(ika,ave,palave))
plt.plot(p,palkka/sum(palkka)/2000)
plt.show()
def kuva2(sal,ika,m):
plt.hist(sal[:m],bins=50,density=True)
ave=np.mean(sal[:m])/12
plt.title('{}: ave {}'.format(ika,ave))
plt.show()
kuva(sal20,20,m20,p,palkka20)
kuva(sal25,25,m25,p,palkka25)
kuva(sal30,30,m30,p,palkka30)
kuva2(sal40,40,m40)
kuva(sal50,50,m50,p,palkka50)
kuva2(sal60,60,m60)
data_range=np.arange(21,72)
plt.plot(data_range,np.mean(self.infostats_pop_wage[::4],axis=1),label='malli kaikki')
plt.plot(data_range,salx[::4],label='malli töissä')
data_range=np.arange(20,72)
plt.plot(data_range,0.5*palkat_ika_miehet+0.5*palkat_ika_naiset,label='data')
plt.legend()
plt.show()
data_range=np.arange(21,72)
plt.plot(data_range,salx_m[::4],label='malli töissä miehet')
plt.plot(data_range,salx_f[::4],label='malli töissä naiset')
data_range=np.arange(20,72)
plt.plot(data_range,palkat_ika_miehet,label='data miehet')
plt.plot(data_range,palkat_ika_naiset,label='data naiset')
plt.legend()
plt.show()
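# Illustrative sketch (hypothetical frequencies): the comparison above computes the data-based
# average monthly wage as a frequency-weighted mean over wage bins, divided by 12 because the
# bin midpoints p are annualised (monthly tabulated values times 12.5), as in test_salaries.
def _demo_weighted_mean_monthly_wage():
    import numpy as np
    p = np.arange(700, 17500, 100) * 12.5            # annualised wage bin midpoints, as above
    freq = np.exp(-((p / 12.5 - 3000) / 1500) ** 2)  # hypothetical bell-shaped bin frequencies
    return np.sum(freq * p) / 12 / np.sum(freq)      # average monthly wage implied by the histogram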
def plot_rewdist(self,t=0,sum=False,all=False):
if all:
fig,ax=plt.subplots()
for t in range(1,self.n_time-1,5):
scaled,x=np.histogram(self.poprewstate[t,:])
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t)
plt.legend()
plt.show()
else:
if sum:
scaled,x=np.histogram(np.sum(self.poprewstate,axis=0))
x2=0.5*(x[1:]+x[0:-1])
plt.plot(x2,scaled)
else:
fig,ax=plt.subplots()
for t in range(t,t+10,1):
scaled,x=np.histogram(self.poprewstate[t,:])
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t)
plt.legend()
plt.show()
def plot_unempdistribs_bu(self,unemp_distrib,max=2):
#fig,ax=plt.subplots()
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(-max_time,0,nn_time)
scaled,x2=np.histogram(unemp_distrib,x)
scaled=scaled/np.abs(np.sum(unemp_distrib))
fig,ax=plt.subplots()
#self.plot_vlines_unemp(0.6)
ax.set_xlabel(self.labels['unemp duration'])
ax.set_ylabel(self.labels['probability'])
#x3=np.flip(x[:-1])
#ax.plot(x3,scaled)
ax.plot(x[:-1],scaled)
#ax.set_yscale('log')
plt.xlim(-max,0)
plt.show()
def plot_compare_unempdistribs(self,unemp_distrib1,unemp_distrib2,max=4,
label2='none',label1='none',logy=True,diff=False,figname=None):
#fig,ax=plt.subplots()
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(self.timestep,max_time,nn_time)
scaled1,x1=np.histogram(unemp_distrib1,x)
print('{} keskikesto {} v {} keskikesto {} v'.format(label1,np.mean(unemp_distrib1),label2,np.mean(unemp_distrib2)))
print('Skaalaamaton {} lkm {} v {} lkm {} v'.format(label1,len(unemp_distrib1),label2,len(unemp_distrib2)))
print('Skaalaamaton {} työtpäiviä yht {} v {} työtpäiviä yht {} v'.format(label1,np.sum(unemp_distrib1),label2,np.sum(unemp_distrib2)))
#scaled=scaled/np.sum(unemp_distrib)
scaled1=scaled1/np.sum(scaled1)
scaled2,x1=np.histogram(unemp_distrib2,x)
scaled2=scaled2/np.sum(scaled2)
fig,ax=plt.subplots()
if not diff:
self.plot_vlines_unemp(0.5)
ax.set_xlabel(self.labels['unemp duration'])
ax.set_ylabel(self.labels['osuus'])
if diff:
ax.plot(x[:-1],scaled1-scaled2,label=label1+'-'+label2)
else:
ax.plot(x[:-1],scaled2,label=label2)
ax.plot(x[:-1],scaled1,label=label1)
if logy and not diff:
ax.set_yscale('log')
if not diff:
plt.ylim(1e-4,1.0)
#ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.legend()
plt.xlim(0,max)
if figname is not None:
plt.savefig(figname+'comp_unempdistrib.eps', format='eps')
plt.show()
def plot_compare_virrat(self,virta1,virta2,min_time=25,max_time=65,label1='perus',label2='vaihtoehto',virta_label='työllisyys',ymin=None,ymax=None):
x=np.linspace(self.min_age,self.max_age,self.n_time)
demog2=self.empstats.get_demog()
scaled1=virta1*demog2/self.n_pop #/self.alive
scaled2=virta2*demog2/self.n_pop #/self.alive
fig,ax=plt.subplots()
plt.xlim(min_time,max_time)
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(virta_label+'virta')
ax.plot(x,scaled1,label=label1)
ax.plot(x,scaled2,label=label2)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
if ymin is not None and ymax is not None:
plt.ylim(ymin,ymax)
plt.show()
def plot_outsider(self,printtaa=True):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,100*(self.empstate[:,11]+self.empstate[:,5]+self.empstate[:,7])/self.alive[:,0],label='työvoiman ulkopuolella, ei opiskelija, sis. vanh.vapaat')
emp_statsratio=100*self.empstats.outsider_stats()
ax.plot(x,emp_statsratio,label='havainto')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,100*np.sum(self.gempstate[:,11,3:5]+self.gempstate[:,5,3:5]+self.gempstate[:,7,3:5],1,keepdims=True)/np.sum(self.galive[:,3:5],1,keepdims=True),label='työvoiman ulkopuolella, naiset')
ax.plot(x,100*np.sum(self.gempstate[:,11,0:2]+self.gempstate[:,5,0:2]+self.gempstate[:,7,0:2],1,keepdims=True)/np.sum(self.galive[:,0:2],1,keepdims=True),label='työvoiman ulkopuolella, miehet')
emp_statsratio=100*self.empstats.outsider_stats(g=1)
ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
emp_statsratio=100*self.empstats.outsider_stats(g=2)
ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
if printtaa:
#print('yht',100*(self.empstate[:,11]+self.empstate[:,5]+self.empstate[:,6]+self.empstate[:,7])/self.alive[:,0])
nn=np.sum(self.galive[:,3:5],1,keepdims=True)
n=np.sum(100*(self.gempstate[:,5,3:5]+self.gempstate[:,6,3:5]+self.gempstate[:,7,3:5]),1,keepdims=True)/nn
mn=np.sum(self.galive[:,0:2],1,keepdims=True)
m=np.sum(100*(self.gempstate[:,5,0:2]+self.gempstate[:,6,0:2]+self.gempstate[:,7,0:2]),1,keepdims=True)/mn
#print('naiset vv',n[1::4,0])
#print('miehet vv',m[1::4,0])
def plot_pinkslip(self):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,100*self.infostats_pinkslip[:,0]/self.empstate[:,0],label='ansiosidonnaisella')
ax.plot(x,100*self.infostats_pinkslip[:,4]/self.empstate[:,4],label='putkessa')
ax.plot(x,100*self.infostats_pinkslip[:,13]/self.empstate[:,13],label='työmarkkinatuella')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel('Irtisanottujen osuus tilassa [%]')
ax.legend()
plt.show()
def plot_student(self):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x+self.timestep,100*self.empstate[:,12]/self.alive[:,0],label='opiskelija tai armeijassa')
emp_statsratio=100*self.empstats.student_stats()
ax.plot(x,emp_statsratio,label='havainto')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
def plot_kassanjasen(self):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x+self.timestep,100*self.infostats_kassanjasen[:]/self.alive[:,0],label='työttömyyskassan jäsenien osuus kaikista')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
mini=np.nanmin(100*self.infostats_kassanjasen[:]/self.alive[:,0])
maxi=np.nanmax(100*self.infostats_kassanjasen[:]/self.alive[:,0])
print('Kassanjäseniä min {} % max {} %'.format(mini,maxi))
def plot_group_student(self):
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='Opiskelijat+Armeija Miehet'
opiskelijat=np.sum(self.gempstate[:,12,0:3],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
else:
leg='Opiskelijat+Armeija Naiset'
opiskelijat=np.sum(self.gempstate[:,12,3:6],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
opiskelijat=np.reshape(opiskelijat,(self.galive.shape[0],1))
osuus=100*opiskelijat/alive
x=np.linspace(self.min_age,self.max_age,self.n_time)
ax.plot(x,osuus,label=leg)
emp_statsratio=100*self.empstats.student_stats(g=1)
ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
emp_statsratio=100*self.empstats.student_stats(g=2)
ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plot_group_disab(self):
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='TK Miehet'
opiskelijat=np.sum(self.gempstate[:,3,0:3],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
else:
leg='TK Naiset'
opiskelijat=np.sum(self.gempstate[:,3,3:6],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
opiskelijat=np.reshape(opiskelijat,(self.galive.shape[0],1))
osuus=100*opiskelijat/alive
x=np.linspace(self.min_age,self.max_age,self.n_time)
ax.plot(x,osuus,label=leg)
emp_statsratio=100*self.empstats.disab_stat(g=1)
ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
emp_statsratio=100*self.empstats.disab_stat(g=2)
ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plot_taxes(self,figname=None):
valtionvero_ratio=100*self.infostats_valtionvero_distrib/np.reshape(np.sum(self.infostats_valtionvero_distrib,1),(-1,1))
kunnallisvero_ratio=100*self.infostats_kunnallisvero_distrib/np.reshape(np.sum(self.infostats_kunnallisvero_distrib,1),(-1,1))
vero_ratio=100*(self.infostats_kunnallisvero_distrib+self.infostats_valtionvero_distrib)/(np.reshape(np.sum(self.infostats_valtionvero_distrib,1),(-1,1))+np.reshape(np.sum(self.infostats_kunnallisvero_distrib,1),(-1,1)))
if figname is not None:
self.plot_states(vero_ratio,ylabel='Veronmaksajien osuus tilassa [%]',stack=True,figname=figname+'_stack')
else:
self.plot_states(vero_ratio,ylabel='Veronmaksajien osuus tilassa [%]',stack=True)
if figname is not None:
self.plot_states(valtionvero_ratio,ylabel='Valtionveronmaksajien osuus tilassa [%]',stack=True,figname=figname+'_stack')
else:
self.plot_states(valtionvero_ratio,ylabel='Valtionveronmaksajien osuus tilassa [%]',stack=True)
if figname is not None:
self.plot_states(kunnallisvero_ratio,ylabel='Kunnallisveron maksajien osuus tilassa [%]',stack=True,figname=figname+'_stack')
else:
self.plot_states(kunnallisvero_ratio,ylabel='Kunnallisveron maksajien osuus tilassa [%]',stack=True)
valtionvero_osuus,kunnallisvero_osuus,vero_osuus=self.comp_taxratios()
print('Valtionveron maksajien osuus\n{}'.format(self.v2_groupstates(valtionvero_osuus)))
print('Kunnallisveron maksajien osuus\n{}'.format(self.v2_groupstates(kunnallisvero_osuus)))
print('Veronmaksajien osuus\n{}'.format(self.v2_groupstates(vero_osuus)))
def group_taxes(self,ratios):
if len(ratios.shape)>1:
vv_osuus=np.zeros((ratios.shape[0],5))
vv_osuus[:,0]=ratios[:,0]+ratios[:,4]+ratios[:,5]+ratios[:,6]+\
ratios[:,7]+ratios[:,8]+ratios[:,9]+ratios[:,11]+\
ratios[:,12]+ratios[:,13]
vv_osuus[:,1]=ratios[:,1]+ratios[:,10]
vv_osuus[:,2]=ratios[:,2]+ratios[:,3]+ratios[:,8]+ratios[:,9]
vv_osuus[:,3]=ratios[:,1]+ratios[:,10]+ratios[:,8]+ratios[:,9]
else:
vv_osuus=np.zeros((4))
vv_osuus[0]=ratios[0]+ratios[4]+ratios[5]+ratios[6]+\
ratios[7]+ratios[8]+ratios[9]+ratios[11]+\
ratios[12]+ratios[13]
vv_osuus[1]=ratios[1]+ratios[10]
vv_osuus[2]=ratios[2]+ratios[3]+ratios[8]+ratios[9]
vv_osuus[3]=ratios[1]+ratios[10]+ratios[8]+ratios[9]
return vv_osuus
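# Illustrative sketch (hypothetical shares, hypothetical stats_object instance): group_taxes
# collapses the 14 employment states into broader groups -- index 0: on benefits, 1: employed,
# 2: retired, 3: employed incl. working retirees (states 8 and 9 are counted in both 2 and 3).
def _demo_group_taxes_usage(stats_object):
    import numpy as np
    shares = np.ones(14) / 14.0                 # hypothetical per-state tax shares summing to 1
    grouped = stats_object.group_taxes(shares)  # -> 4-element array of grouped shares
    return grouped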
def comp_taxratios(self,grouped=False):
valtionvero_osuus=100*np.sum(self.infostats_valtionvero_distrib,0)/np.sum(self.infostats_valtionvero_distrib)
kunnallisvero_osuus=100*np.sum(self.infostats_kunnallisvero_distrib,0)/np.sum(self.infostats_kunnallisvero_distrib)
vero_osuus=100*(np.sum(self.infostats_kunnallisvero_distrib,0)+np.sum(self.infostats_valtionvero_distrib,0))/(np.sum(self.infostats_kunnallisvero_distrib)+np.sum(self.infostats_valtionvero_distrib))
if grouped:
vv_osuus=self.group_taxes(valtionvero_osuus)
kv_osuus=self.group_taxes(kunnallisvero_osuus)
v_osuus=self.group_taxes(vero_osuus)
else:
vv_osuus=valtionvero_osuus
kv_osuus=kunnallisvero_osuus
v_osuus=vero_osuus
return vv_osuus,kv_osuus,v_osuus
def comp_verokiila(self,include_retwork=True,debug=False):
'''
Computes the tax wedge (verokiila) as in Lundberg (2017).
Note that the formulas are applied to aggregate averages.
'''
if debug:
print('comp_verokiila')
demog2=self.empstats.get_demog()
scalex=demog2/self.n_pop
valtionvero_osuus=np.sum(self.infostats_valtionvero_distrib*scalex,0)
kunnallisvero_osuus=np.sum(self.infostats_kunnallisvero_distrib*scalex,0)
taxes_distrib=np.sum(self.infostats_taxes_distrib*scalex,0)
taxes=self.group_taxes(taxes_distrib)
q=self.comp_budget()
q2=self.comp_participants(scale=True,include_retwork=include_retwork)
#htv=q2['palkansaajia']
#muut_tulot=q['muut tulot']
# consumption taxation
tC=0.24*max(0,q['tyotulosumma']-taxes[3])
# (taxes of the employed + employer contributions + consumption tax)/(wage sum + employer contributions)
kiila=(taxes[3]+q['ta_maksut']+tC)/(q['tyotulosumma']+q['ta_maksut'])
qq={}
qq['tI']=taxes[3]/q['tyotulosumma']
qq['tC']=tC/q['tyotulosumma']
qq['tP']=q['ta_maksut']/q['tyotulosumma']
if debug:
print('qq',qq,'kiila',kiila)
return kiila,qq
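# Illustrative sketch (hypothetical aggregates): the tax wedge above is
#   (income taxes of the employed + employer contributions + consumption tax)
#   / (gross wage sum + employer contributions),
# where the consumption tax is approximated as 24 % of net wage income.
def _demo_tax_wedge(wage_sum=90e9, income_taxes=27e9, employer_contrib=17e9, vat_rate=0.24):
    tC = vat_rate * max(0.0, wage_sum - income_taxes)   # approximate consumption tax
    return (income_taxes + employer_contrib + tC) / (wage_sum + employer_contrib)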
def comp_verokiila_kaikki_ansiot(self):
demog2=self.empstats.get_demog()
scalex=demog2/self.n_pop
valtionvero_osuus=np.sum(self.infostats_valtionvero_distrib*scalex,0)
kunnallisvero_osuus=np.sum(self.infostats_kunnallisvero_distrib*scalex,0)
taxes_distrib=np.sum(self.infostats_taxes_distrib*scalex,0)
taxes=self.group_taxes(taxes_distrib)
q=self.comp_budget()
q2=self.comp_participants(scale=True)
htv=q2['palkansaajia']
muut_tulot=q['muut tulot']
# consumption taxation
tC=0.2*max(0,q['tyotulosumma']-taxes[3])
# (income taxes + employer contributions + consumption tax)/(wage sum + taxable benefit expenditure + employer contributions)
kiila=(taxes[0]+q['ta_maksut']+tC)/(q['tyotulosumma']+q['verotettava etuusmeno']+q['ta_maksut'])
qq={}
qq['tI']=taxes[0]/q['tyotulosumma']
qq['tC']=tC/q['tyotulosumma']
qq['tP']=q['ta_maksut']/q['tyotulosumma']
#print(qq)
return kiila,qq
def v2_states(self,x):
return 'Ansiosidonnaisella {:.2f}\nKokoaikatyössä {:.2f}\nVanhuuseläkeläiset {:.2f}\nTyökyvyttömyyseläkeläiset {:.2f}\n'.format(x[0],x[1],x[2],x[3])+\
'Putkessa {:.2f}\nÄitiysvapaalla {:.2f}\nIsyysvapaalla {:.2f}\nKotihoidontuella {:.2f}\n'.format(x[4],x[5],x[6],x[7])+\
'VE+OA {:.2f}\nVE+kokoaika {:.2f}\nOsa-aikatyö {:.2f}\nTyövoiman ulkopuolella {:.2f}\n'.format(x[8],x[9],x[10],x[11])+\
'Opiskelija/Armeija {:.2f}\nTM-tuki {:.2f}\n'.format(x[12],x[13])
def v2_groupstates(self,xx):
x=self.group_taxes(xx)
return 'Etuudella olevat {:.2f}\nTyössä {:.2f}\nEläkkeellä {:.2f}\n'.format(x[0],x[1],x[2])
def plot_emp(self,figname=None):
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(self.empstate,self.alive,unempratio=False)
age_label=self.labels['age']
ratio_label=self.labels['osuus']
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,tyollisyysaste,label=self.labels['malli'])
#ax.plot(x,tyottomyysaste,label=self.labels['tyottomien osuus'])
emp_statsratio=100*self.empstats.emp_stats()
ax.plot(x,emp_statsratio,ls='--',label=self.labels['havainto'])
ax.set_xlabel(age_label)
ax.set_ylabel(self.labels['tyollisyysaste %'])
ax.legend()
if figname is not None:
plt.savefig(figname+'tyollisyysaste.eps', format='eps')
plt.show()
#if self.version in set([1,2,3]):
fig,ax=plt.subplots()
ax.stackplot(x,osatyoaste,100-osatyoaste,
labels=('osatyössä','kokoaikaisessa työssä')) #, colors=pal) pal=sns.color_palette("hls", self.n_employment) # hls, husl, cubehelix
ax.legend()
plt.show()
empstate_ratio=100*self.empstate/self.alive
if figname is not None:
self.plot_states(empstate_ratio,ylabel=ratio_label,stack=True,figname=figname+'_stack')
else:
self.plot_states(empstate_ratio,ylabel=ratio_label,stack=True)
if self.version in set([1,2,3,4]):
self.plot_states(empstate_ratio,ylabel=ratio_label,ylimit=20,stack=False)
self.plot_states(empstate_ratio,ylabel=ratio_label,parent=True,stack=False)
self.plot_parents_in_work()
self.plot_states(empstate_ratio,ylabel=ratio_label,unemp=True,stack=False)
if figname is not None:
self.plot_states(empstate_ratio,ylabel=ratio_label,start_from=60,stack=True,figname=figname+'_stack60')
else:
self.plot_states(empstate_ratio,ylabel=ratio_label,start_from=60,stack=True)
def plot_savings(self):
savings_0=np.zeros(self.n_time)
savings_1=np.zeros(self.n_time)
savings_2=np.zeros(self.n_time)
act_savings_0=np.zeros(self.n_time)
act_savings_1=np.zeros(self.n_time)
act_savings_2=np.zeros(self.n_time)
for t in range(self.n_time):
state_0=np.argwhere(self.popempstate[t,:]==0)
savings_0[t]=np.mean(self.infostats_savings[t,state_0[:]])
act_savings_0[t]=np.mean(self.sav_actions[t,state_0[:]])
state_1=np.argwhere(self.popempstate[t,:]==1)
savings_1[t]=np.mean(self.infostats_savings[t,state_1[:]])
act_savings_1[t]=np.mean(self.sav_actions[t,state_1[:]])
state_2=np.argwhere(self.popempstate[t,:]==2)
savings_2[t]=np.mean(self.infostats_savings[t,state_2[:]])
act_savings_2[t]=np.mean(self.sav_actions[t,state_2[:]])
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.infostats_savings,axis=1)
ax.plot(x,savings,label='savings all')
ax.legend()
plt.title('Savings all')
plt.show()
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.infostats_savings,axis=1)
ax.plot(x,savings_0,label='unemp')
ax.plot(x,savings_1,label='emp')
ax.plot(x,savings_2,label='retired')
plt.title('Savings by emp state')
ax.legend()
plt.show()
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.sav_actions-20,axis=1)
ax.plot(x[1:],savings[1:],label='savings action')
ax.legend()
plt.title('Saving action')
plt.show()
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.infostats_savings,axis=1)
ax.plot(x[1:],act_savings_0[1:]-20,label='unemp')
ax.plot(x[1:],act_savings_1[1:]-20,label='emp')
ax.plot(x[1:],act_savings_2[1:]-20,label='retired')
plt.title('Saving action by emp state')
ax.legend()
plt.show()
def plot_emp_by_gender(self,figname=None):
x=np.linspace(self.min_age,self.max_age,self.n_time)
for gender in range(2):
if gender<1:
empstate_ratio=100*np.sum(self.gempstate[:,:,0:3],axis=2)/(np.sum(self.galive[:,0:3],axis=1)[:,None])
genderlabel='miehet'
else:
empstate_ratio=100*np.sum(self.gempstate[:,:,3:6],axis=2)/(np.sum(self.galive[:,3:6],axis=1)[:,None])
genderlabel='naiset'
if figname is not None:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),stack=True,figname=figname+'_stack')
else:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),stack=True)
if self.version in set([1,2,3,4]):
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),ylimit=20,stack=False)
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),parent=True,stack=False)
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),unemp=True,stack=False)
if figname is not None:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),start_from=60,stack=True,figname=figname+'_stack60')
else:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),start_from=60,stack=True)
def plot_parents_in_work(self):
empstate_ratio=100*self.empstate/self.alive
ml=100*self.infostats_mother_in_workforce/self.alive
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,ml,label='äitiysvapaa')
ax.plot(x,empstate_ratio[:,6],label='isyysvapaa')
ax.legend()
plt.show()
def plot_spouse(self,figname=None):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.set_xlabel(self.labels['age'])
spouseratio=self.infostats_puoliso/self.alive[:,0]
ax.set_ylabel('spouses')
ax.plot(x,spouseratio)
if figname is not None:
plt.savefig(figname+'spouses.eps', format='eps')
plt.show()
def plot_unemp(self,unempratio=True,figname=None,grayscale=False):
'''
Plot the unemployment rate (unempratio=True) or the share of unemployed in the population (unempratio=False)
'''
x=np.linspace(self.min_age,self.max_age,self.n_time)
if unempratio:
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(self.empstate,self.alive,unempratio=True)
unempratio_stat=100*self.empstats.unempratio_stats()
if self.language=='Finnish':
labeli='keskimääräinen työttömyysaste '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomyysaste']
labeli2='työttömyysaste'
else:
labeli='average unemployment rate '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomyysaste']
labeli2='Unemployment rate'
else:
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(self.empstate,self.alive,unempratio=False)
unempratio_stat=100*self.empstats.unemp_stats()
if self.language=='Finnish':
labeli='keskimääräinen työttömien osuus väestöstä '+str(ka_tyottomyysaste)
ylabeli='Työttömien osuus väestöstä [%]'
labeli2='työttömien osuus väestöstä'
else:
labeli='proportion of unemployed '+str(ka_tyottomyysaste)
ylabeli='Proportion of unemployed [%]'
labeli2='proportion of unemployed'
fig,ax=plt.subplots()
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
ax.plot(x,tyottomyysaste,label=self.labels['malli'])
ax.plot(x,unempratio_stat,ls='--',label=self.labels['havainto'])
ax.legend()
if figname is not None:
plt.savefig(figname+'tyottomyysaste.eps', format='eps')
plt.show()
fig,ax=plt.subplots()
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
ax.plot(x,unempratio_stat,label=self.labels['havainto'])
ax.legend()
if grayscale:
pal=sns.light_palette("black", 8, reverse=True)
else:
pal=sns.color_palette("hls", self.n_employment) # hls, husl, cubehelix
ax.stackplot(x,tyottomyysaste,colors=pal) #,label=self.labels['malli'])
#ax.plot(x,tyottomyysaste)
plt.show()
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='Miehet'
gempstate=np.sum(self.gempstate[:,:,0:3],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
color='darkgray'
else:
gempstate=np.sum(self.gempstate[:,:,3:6],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
leg='Naiset'
color='black'
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(gempstate,alive,unempratio=unempratio)
ax.plot(x,tyottomyysaste,color=color,label='{} {}'.format(labeli2,leg))
lstyle='--'  # same dashed style for observed series in both grayscale and colour modes
if self.version in set([1,2,3,4]):
if unempratio:
ax.plot(x,100*self.empstats.unempratio_stats(g=1),ls=lstyle,label=self.labels['havainto, naiset'])
ax.plot(x,100*self.empstats.unempratio_stats(g=2),ls=lstyle,label=self.labels['havainto, miehet'])
labeli='keskimääräinen työttömyysaste '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomyysaste']
else:
ax.plot(x,100*self.empstats.unemp_stats(g=1),ls=lstyle,label=self.labels['havainto, naiset'])
ax.plot(x,100*self.empstats.unemp_stats(g=2),ls=lstyle,label=self.labels['havainto, miehet'])
labeli='keskimääräinen työttömien osuus väestöstä '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomien osuus']
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
if False:
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
ax.legend()
if figname is not None:
plt.savefig(figname+'tyottomyysaste_spk.eps', format='eps')
plt.show()
def plot_parttime_ratio(self,grayscale=True,figname=None):
'''
Plot the share of part-time workers in the population
'''
x=np.linspace(self.min_age,self.max_age,self.n_time)
labeli2='Osatyötä tekevien osuus'
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='Miehet'
g='men'
pstyle='-'
else:
g='women'
leg='Naiset'
pstyle=''
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_gempratios(gender=g,unempratio=False)
ax.plot(x,osatyoaste,'{}'.format(pstyle),label='{} {}'.format(labeli2,leg))
o_x=np.array([20,30,40,50,60,70])
f_osatyo=np.array([55,21,16,12,18,71])
m_osatyo=np.array([32,8,5,4,9,65])
if grayscale:
ax.plot(o_x,f_osatyo,ls='--',label=self.labels['havainto, naiset'])
ax.plot(o_x,m_osatyo,ls='--',label=self.labels['havainto, miehet'])
else:
ax.plot(o_x,f_osatyo,label=self.labels['havainto, naiset'])
ax.plot(o_x,m_osatyo,label=self.labels['havainto, miehet'])
labeli='osatyöaste '#+str(ka_tyottomyysaste)
ylabeli='Osatyön osuus työnteosta [%]'
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
if False:
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
ax.legend()
if figname is not None:
plt.savefig(figname+'osatyoaste_spk.eps', format='eps')
plt.show()
def plot_unemp_shares(self):
empstate_ratio=100*self.empstate/self.alive
self.plot_states(empstate_ratio,ylabel='Osuus tilassa [%]',onlyunemp=True,stack=True)
def plot_group_emp(self,grayscale=False,figname=None):
fig,ax=plt.subplots()
lstyle='--'  # same dashed style for observed series in both grayscale and colour modes
for gender in range(2):
if gender==0:
leg='Miehet'
gempstate=np.sum(self.gempstate[:,:,0:3],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
color='darkgray'
else:
gempstate=np.sum(self.gempstate[:,:,3:6],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
leg='Naiset'
color='black'
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(gempstate,alive)
x=np.linspace(self.min_age,self.max_age,self.n_time)
ax.plot(x,tyollisyysaste,color=color,label='työllisyysaste {}'.format(leg))
#ax.plot(x,tyottomyysaste,label='työttömyys {}'.format(leg))
emp_statsratio=100*self.empstats.emp_stats(g=2)
ax.plot(x,emp_statsratio,ls=lstyle,color='darkgray',label=self.labels['havainto, miehet'])
emp_statsratio=100*self.empstats.emp_stats(g=1)
ax.plot(x,emp_statsratio,ls=lstyle,color='black',label=self.labels['havainto, naiset'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
if False:
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
ax.legend()
if figname is not None:
plt.savefig(figname+'tyollisyysaste_spk.eps', format='eps')
plt.show()
def plot_pensions(self):
if self.version in set([1,2,3,4]):
self.plot_ratiostates(self.stat_pension,ylabel='Tuleva eläke [e/v]',stack=False)
def plot_career(self):
if self.version in set([1,2,3,4]):
self.plot_ratiostates(self.stat_tyoura,ylabel='Työuran pituus [v]',stack=False)
def plot_ratiostates(self,statistic,ylabel='',ylimit=None, show_legend=True, parent=False,\
unemp=False,start_from=None,stack=False,no_ve=False,figname=None,emp=False,oa_unemp=False):
self.plot_states(statistic/self.empstate,ylabel=ylabel,ylimit=ylimit,no_ve=no_ve,\
show_legend=show_legend,parent=parent,unemp=unemp,start_from=start_from,\
stack=stack,figname=figname,emp=emp,oa_unemp=oa_unemp)
def count_putki(self,emps=None):
if emps is None:
piped=np.reshape(self.empstate[:,4],(self.empstate[:,4].shape[0],1))
demog2=self.empstats.get_demog()
putkessa=self.timestep*np.nansum(piped[1:]/self.alive[1:]*demog2[1:])
return putkessa
else:
piped=np.reshape(emps[:,4],(emps[:,4].shape[0],1))
demog2=self.empstats.get_demog()
alive=np.sum(emps,axis=1,keepdims=True)
putkessa=self.timestep*np.nansum(piped[1:]/alive[1:]*demog2[1:])
return putkessa
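# Illustrative sketch (synthetic inputs): count_putki scales the simulated share in the
# "putki" state (column 4) to population level by weighting each age/time step with the
# demographic head count and multiplying by the timestep, yielding person-years.
def _demo_population_scaling(share_in_state=None, demog=None, timestep=0.25):
    import numpy as np
    if share_in_state is None:
        share_in_state = np.full(200, 0.01)     # 1 % of the simulated population in the state
    if demog is None:
        demog = np.full(200, 65000.0)           # hypothetical persons per age/time step
    return timestep * np.nansum(share_in_state * demog)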
def plot_states(self,statistic,ylabel='',ylimit=None,show_legend=True,parent=False,unemp=False,no_ve=False,
start_from=None,stack=True,figname=None,yminlim=None,ymaxlim=None,
onlyunemp=False,reverse=False,grayscale=False,emp=False,oa_unemp=False):
if start_from is None:
x=np.linspace(self.min_age,self.max_age,self.n_time)
else:
x_n = self.max_age-start_from+1
x_t = int(np.round((x_n-1)*self.inv_timestep))+2
x=np.linspace(start_from,self.max_age,x_t)
#x=np.linspace(start_from,self.max_age,self.n_time)
statistic=statistic[self.map_age(start_from):]
ura_emp=statistic[:,1]
ura_ret=statistic[:,2]
ura_unemp=statistic[:,0]
if self.version in set([1,2,3,4]):
ura_disab=statistic[:,3]
ura_pipe=statistic[:,4]
ura_mother=statistic[:,5]
ura_dad=statistic[:,6]
ura_kht=statistic[:,7]
ura_vetyo=statistic[:,9]
ura_veosatyo=statistic[:,8]
ura_osatyo=statistic[:,10]
ura_outsider=statistic[:,11]
ura_student=statistic[:,12]
ura_tyomarkkinatuki=statistic[:,13]
ura_army=statistic[:,14]
else:
ura_osatyo=0 #statistic[:,3]
if no_ve:
ura_ret[-2:-1]=None
fig,ax=plt.subplots()
if stack:
if grayscale:
pal=sns.light_palette("black", 8, reverse=True)
else:
pal=sns.color_palette("hls", self.n_employment) # hls, husl, cubehelix
reverse=True
if parent:
if self.version in set([1,2,3,4]):
ax.stackplot(x,ura_mother,ura_dad,ura_kht,
labels=('äitiysvapaa','isyysvapaa','khtuki'), colors=pal)
elif unemp:
if self.version in set([1,2,3,4]):
ax.stackplot(x,ura_unemp,ura_pipe,ura_student,ura_outsider,ura_tyomarkkinatuki,
labels=('tyött','putki','opiskelija','ulkona','tm-tuki'), colors=pal)
else:
ax.stackplot(x,ura_unemp,labels=('tyött'), colors=pal)
elif onlyunemp:
if self.version in set([1,2,3,4]):
#urasum=np.nansum(statistic[:,[0,4,11,13]],axis=1)/100
urasum=np.nansum(statistic[:,[0,4,13]],axis=1)/100
osuus=(1.0-np.array([0.84,0.68,0.62,0.58,0.57,0.55,0.53,0.50,0.29]))*100
xx=np.array([22.5,27.5,32.5,37.5,42.5,47.5,52.5,57.5,62.5])
#ax.stackplot(x,ura_unemp/urasum,ura_pipe/urasum,ura_outsider/urasum,ura_tyomarkkinatuki/urasum,
# labels=('ansiosidonnainen','lisäpäivät','työvoiman ulkopuolella','tm-tuki'), colors=pal)
ax.stackplot(x,ura_unemp/urasum,ura_pipe/urasum,ura_tyomarkkinatuki/urasum,
labels=('ansiosidonnainen','lisäpäivät','tm-tuki'), colors=pal)
ax.plot(xx,osuus,color='k')
else:
ax.stackplot(x,ura_unemp,labels=('tyött'), colors=pal)
else:
if self.version in set([1,2,3,4]):
#ax.stackplot(x,ura_emp,ura_osatyo,ura_vetyo,ura_veosatyo,ura_unemp,ura_tyomarkkinatuki,ura_pipe,ura_disab,ura_mother,ura_dad,ura_kht,ura_ret,ura_student,ura_outsider,ura_army,
# labels=('työssä','osatyö','ve+työ','ve+osatyö','työtön','tm-tuki','työttömyysputki','tk-eläke','äitiysvapaa','isyysvapaa','kh-tuki','vanhuuseläke','opiskelija','työvoiman ulkop.','armeijassa'),
# colors=pal)
ax.stackplot(x,ura_emp,ura_osatyo,ura_vetyo,ura_veosatyo,ura_unemp,ura_tyomarkkinatuki,ura_pipe,ura_ret,ura_disab,ura_mother,ura_dad,ura_kht,ura_student,ura_outsider,ura_army,
labels=('työssä','osatyö','ve+työ','ve+osatyö','työtön','tm-tuki','työttömyysputki','vanhuuseläke','tk-eläke','äitiysvapaa','isyysvapaa','kh-tuki','opiskelija','työvoiman ulkop.','armeijassa'),
colors=pal)
else:
#ax.stackplot(x,ura_emp,ura_osatyo,ura_unemp,ura_ret,
# labels=('työssä','osa-aikatyö','työtön','vanhuuseläke'), colors=pal)
ax.stackplot(x,ura_emp,ura_unemp,ura_ret,
labels=('työssä','työtön','vanhuuseläke'), colors=pal)
if start_from is None:
ax.set_xlim(self.min_age,self.max_age)
else:
ax.set_xlim(start_from,self.max_age)
if ymaxlim is None:
ax.set_ylim(0, 100)
else:
ax.set_ylim(yminlim,ymaxlim)
else:
if parent:
if self.version in set([1,2,3,4]):
ax.plot(x,ura_mother,label='äitiysvapaa')
ax.plot(x,ura_dad,label='isyysvapaa')
ax.plot(x,ura_kht,label='khtuki')
elif unemp:
ax.plot(x,ura_unemp,label='tyött')
if self.version in set([1,2,3,4]):
ax.plot(x,ura_tyomarkkinatuki,label='tm-tuki')
ax.plot(x,ura_student,label='student')
ax.plot(x,ura_outsider,label='outsider')
ax.plot(x,ura_pipe,label='putki')
elif oa_unemp:
ax.plot(x,ura_unemp,label='tyött')
if self.version in set([1,2,3,4]):
ax.plot(x,ura_tyomarkkinatuki,label='tm-tuki')
ax.plot(x,ura_student,label='student')
ax.plot(x,ura_outsider,label='outsider')
ax.plot(x,ura_pipe,label='putki')
ax.plot(x,ura_osatyo,label='osa-aika')
elif emp:
ax.plot(x,ura_emp,label='työssä')
#if self.version in set([1,2,3,4]):
ax.plot(x,ura_osatyo,label='osatyö')
else:
ax.plot(x,ura_unemp,label='tyött')
ax.plot(x,ura_ret,label='eläke')
ax.plot(x,ura_emp,label='työ')
if self.version in set([1,2,3,4]):
ax.plot(x,ura_osatyo,label='osatyö')
ax.plot(x,ura_disab,label='tk')
ax.plot(x,ura_pipe,label='putki')
ax.plot(x,ura_tyomarkkinatuki,label='tm-tuki')
ax.plot(x,ura_mother,label='äitiysvapaa')
ax.plot(x,ura_dad,label='isyysvapaa')
ax.plot(x,ura_kht,label='khtuki')
ax.plot(x,ura_vetyo,label='ve+työ')
ax.plot(x,ura_veosatyo,label='ve+osatyö')
ax.plot(x,ura_student,label='student')
ax.plot(x,ura_outsider,label='outsider')
ax.plot(x,ura_army,label='armeijassa')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabel)
if show_legend:
if not reverse:
lgd=ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
handles, labels = ax.get_legend_handles_labels()
lgd=ax.legend(handles[::-1], labels[::-1], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
if ylimit is not None:
ax.set_ylim([0,ylimit])
#fig.tight_layout()
if figname is not None:
if show_legend:
plt.savefig(figname,bbox_inches='tight',bbox_extra_artists=(lgd,), format='eps')
else:
plt.savefig(figname,bbox_inches='tight', format='eps')
plt.show()
def plot_toe(self):
if self.version in set([1,2,3,4]):
self.plot_ratiostates(self.stat_toe,'työssäolo-ehdon pituus 28 kk aikana [v]',stack=False)
def plot_sal(self):
self.plot_ratiostates(self.salaries_emp,'Keskipalkka [e/v]',stack=False)
def plot_moved(self):
siirtyneet_ratio=self.siirtyneet/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
pysyneet_ratio=self.pysyneet/self.alive*100
self.plot_states(pysyneet_ratio,ylabel='Pysyneet tilassa',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(pysyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,1]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet työhön tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,4]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet putkeen tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,0]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet työttömäksi tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,13]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet tm-tuelle tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,10]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet osa-aikatyöhön tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
# def plot_army(self):
# x=np.linspace(self.min_age,self.max_age,self.n_time)
# fig,ax=plt.subplots()
# ax.plot(x,100*self.empstate[:,14]/self.alive[:,0],label='armeijassa ja siviilipalveluksessa olevat')
# emp_statsratio=100*self.army_stats()
# ax.plot(x,emp_statsratio,label='havainto')
# ax.set_xlabel(self.labels['age'])
# ax.set_ylabel(self.labels['ratio'])
# ax.legend()
# plt.show()
#
# def plot_group_army(self):
# fig,ax=plt.subplots()
# for gender in range(2):
# if gender==0:
# leg='Armeija Miehet'
# opiskelijat=np.sum(self.gempstate[:,14,0:3],axis=1)
# alive=np.zeros((self.galive.shape[0],1))
# alive[:,0]=np.sum(self.galive[:,0:3],1)
# else:
# leg='Armeija Naiset'
# opiskelijat=np.sum(self.gempstate[:,14,3:6],axis=1)
# alive=np.zeros((self.galive.shape[0],1))
# alive[:,0]=np.sum(self.galive[:,3:6],1)
#
# opiskelijat=np.reshape(opiskelijat,(self.galive.shape[0],1))
# osuus=100*opiskelijat/alive
# x=np.linspace(self.min_age,self.max_age,self.n_time)
# ax.plot(x,osuus,label=leg)
#
# emp_statsratio=100*self.army_stats(g=1)
# ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
# emp_statsratio=100*self.army_stats(g=2)
# ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
# ax.set_xlabel(self.labels['age'])
# ax.set_ylabel(self.labels['ratio'])
# ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.show()
#
def plot_ave_stay(self):
self.plot_ratiostates(self.time_in_state,ylabel='Ka kesto tilassa',stack=False)
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
plt.plot(x,self.time_in_state[:,1]/self.empstate[:,1])
ax.set_xlabel('Aika')
ax.set_ylabel('Ka kesto työssä')
plt.show()
fig,ax=plt.subplots()
ax.set_xlabel('Aika')
ax.set_ylabel('ka kesto työttömänä')
plt.plot(x,self.time_in_state[:,0]/self.empstate[:,0])
plt.show()
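# Illustrative sketch (synthetic inputs): the average durations plotted above divide the
# accumulated time spent in a state by the number of individuals currently in that state
# at each age.
def _demo_average_stay(time_in_state=None, count_in_state=None):
    import numpy as np
    if time_in_state is None:
        time_in_state = np.array([10.0, 40.0, 90.0])   # accumulated person-years in the state
    if count_in_state is None:
        count_in_state = np.array([20, 50, 60])        # heads currently in the state
    return time_in_state / np.maximum(count_in_state, 1)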
def plot_ove(self):
self.plot_ratiostates(self.infostats_ove,ylabel='Ove',stack=False)
def plot_reward(self):
self.plot_ratiostates(self.rewstate,ylabel='Keskireward tilassa',stack=False)
self.plot_ratiostates(self.rewstate,ylabel='Keskireward tilassa',stack=False,no_ve=True)
self.plot_ratiostates(self.rewstate,ylabel='Keskireward tilassa',stack=False,oa_unemp=True)
x=np.linspace(self.min_age,self.max_age,self.n_time)
total_reward=np.sum(self.rewstate,axis=1)
fig,ax=plt.subplots()
ax.plot(x,total_reward)
ax.set_xlabel('Aika')
ax.set_ylabel('Koko reward tilassa')
ax.legend()
plt.show()
def vector_to_array(self,x):
return x[:,None]
def comp_scaled_consumption(self,x0,averaged=False,t0=1):
'''
Computes the discounted utility at each time point when net income is scaled by (1+x),
where x is given in x0[0].
If averaged is True, the result is averaged over time from t0 onwards; otherwise the value at t0 is returned.
'''
x=x0[0]
u=np.zeros(self.n_time)
for k in range(self.n_pop):
#g=self.infostats_group[k]
for t in range(1,self.n_time-1):
age=t+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v,_=self.env.log_utility((1+x)*income,employment_state,age)
if np.isnan(v):
print('NaN',v,income,employment_state,age)
v=0
u[t]+=v
#print(age,v)
t=self.n_time-1
age=t+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v0,_=self.env.log_utility(income,employment_state,age)
factor=self.poprewstate[t,k]/v0 # life expectancy
v,_=self.env.log_utility((1+x)*income,employment_state,age)
if np.isnan(v):
print('NaN',v,income,employment_state,age)
if np.isnan(factor):
print('NaN',factor,v0)
#print(age,v*factor,factor)
u[t]+=v*factor
if np.isnan(u[t]):
print('NaN',age,v,v*factor,factor,u[t],income,employment_state)
u=u/self.n_pop
w=np.zeros(self.n_time)
w[-1]=u[-1]
for t in range(self.n_time-2,0,-1):
w[t]=u[t]+self.gamma*w[t+1]
if averaged:
ret=np.mean(w[t0:])
else:
ret=w[t0]
if np.isnan(ret):
print(u,w)
u=np.zeros(self.n_time)
for k in range(self.n_pop):
#g=self.infostats_group[k]
for t in range(1,self.n_time-1):
age=t+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v,_=self.env.log_utility((1+x)*income,employment_state,age) #,g=g,pinkslip=pinkslip)
#print(t,k,v,income)
u[t]+=v
t=self.n_time-1
age=t-1+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v0,_=self.env.log_utility(income,employment_state,age) #,g=g,pinkslip=pinkslip)
factor=self.poprewstate[t,k]/v0 # life expectancy
v,_=self.env.log_utility((1+x)*income,employment_state,age) #,g=g,pinkslip=pinkslip)
#print(t,k,v,income)
u[t]+=v*factor
#print(x,ret)
return ret
def comp_presentvalue(self):
'''
Computes the discounted present value of realized rewards for each individual at every time point.
'''
u=np.zeros((self.n_time,self.n_pop))
u[self.n_time-1,:]=self.poprewstate[self.n_time-1,:]
for t in range(self.n_time-2,-1,-1):
u[t,:]=self.poprewstate[t,:]+self.gamma*u[t+1,:]
return u
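# Illustrative sketch (synthetic rewards): the present value above is the standard backward
# recursion u[t] = r[t] + gamma * u[t+1], vectorised over individuals.
def _demo_present_value(rewards=None, gamma=0.96):
    import numpy as np
    if rewards is None:
        rewards = np.ones((5, 3))               # 5 time steps, 3 hypothetical individuals
    u = np.zeros_like(rewards)
    u[-1, :] = rewards[-1, :]
    for t in range(rewards.shape[0] - 2, -1, -1):
        u[t, :] = rewards[t, :] + gamma * u[t + 1, :]
    return u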
def comp_realoptimrew(self,discountfactor=None):
'''
Computes the population-average discounted realized reward
'''
if discountfactor is None:
discountfactor=self.gamma
realrew=np.zeros(self.n_time)
for k in range(self.n_pop):
prew=np.zeros(self.n_time)
prew[-1]=self.poprewstate[-1,k]
for t in range(self.n_time-2,0,-1):
prew[t]=discountfactor*prew[t+1]+self.poprewstate[t,k]
realrew+=prew
realrew/=self.n_pop
realrew=np.mean(realrew[1:])
return realrew
def get_reward(self,discounted=False):
return self.comp_total_reward(output=False,discounted=discounted) #np.sum(self.rewstate)/self.n_pop
def comp_total_reward(self,output=False,discounted=False,discountfactor=None):
if not discounted:
total_reward=np.sum(self.rewstate)
rr=total_reward/self.n_pop
disco='undiscounted'
else:
#discount=discountfactor**np.arange(0,self.n_time*self.timestep,self.timestep)[:,None]
#total_reward=np.sum(self.poprewstate*discount)
rr=self.comp_realoptimrew(discountfactor) #total_reward/self.n_pop
disco='discounted'
#print('total rew1 {} rew2 {}'.format(total_reward,np.sum(self.poprewstate)))
#print('ave rew1 {} rew2 {}'.format(rr,np.mean(np.sum(self.poprewstate,axis=0))))
#print('shape rew2 {} pop {} alive {}'.format(self.poprewstate.shape,self.n_pop,self.alive[0]))
if output:
print(f'Ave {disco} reward {rr}')
return rr
def comp_total_netincome(self,output=True):
rr=np.sum(self.infostats_tulot_netto)/self.n_pop/(self.n_time+21.0) # 21 approximates the time in pension
eq=np.sum(self.infostats_equivalent_income)/self.n_pop/(self.n_time+21.0) # 21 approximates the time in pension
if output:
print('Ave net income {} Ave equivalent net income {}'.format(rr,eq))
return rr,eq
def plot_wage_reduction(self):
self.plot_ratiostates(self.stat_wage_reduction,ylabel='wage-reduction tilassa',stack=False)
self.plot_ratiostates(self.stat_wage_reduction,ylabel='wage-reduction tilassa',stack=False,unemp=True)
self.plot_ratiostates(self.stat_wage_reduction,ylabel='wage-reduction tilassa',stack=False,emp=True)
#self.plot_ratiostates(np.log(1.0+self.stat_wage_reduction),ylabel='log 5wage-reduction tilassa',stack=False)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,0:3],axis=2),ylabel='wage-reduction tilassa naiset',stack=False)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,3:6],axis=2),ylabel='wage-reduction tilassa miehet',stack=False)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,0:3],axis=2),ylabel='wage-reduction tilassa, naiset',stack=False,unemp=True)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,3:6],axis=2),ylabel='wage-reduction tilassa, miehet',stack=False,unemp=True)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,0:3],axis=2),ylabel='wage-reduction tilassa, naiset',stack=False,emp=True)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,3:6],axis=2),ylabel='wage-reduction tilassa, miehet',stack=False,emp=True)
def plot_distrib(self,label='',plot_emp=False,plot_bu=False,ansiosid=False,tmtuki=False,putki=False,outsider=False,max_age=500,laaja=False,max=4,figname=None):
unemp_distrib,emp_distrib,unemp_distrib_bu=self.comp_empdistribs(ansiosid=ansiosid,tmtuki=tmtuki,putki=putki,outsider=outsider,max_age=max_age,laaja=laaja)
tyoll_distrib,tyoll_distrib_bu=self.comp_tyollistymisdistribs(ansiosid=ansiosid,tmtuki=tmtuki,putki=putki,outsider=outsider,max_age=max_age,laaja=laaja)
if plot_emp:
self.plot_empdistribs(emp_distrib)
if plot_bu:
self.plot_unempdistribs_bu(unemp_distrib_bu)
else:
self.plot_unempdistribs(unemp_distrib,figname=figname)
#self.plot_tyolldistribs(unemp_distrib,tyoll_distrib,tyollistyneet=False)
if plot_bu:
self.plot_tyolldistribs_both_bu(unemp_distrib_bu,tyoll_distrib_bu,max=max)
else:
self.plot_tyolldistribs_both(unemp_distrib,tyoll_distrib,max=max,figname=figname)
def plot_irr(self,figname=''):
self.comp_aggirr()
self.comp_irr()
self.plot_irrdistrib(self.infostats_irr,figname=figname)
def plot_irrdistrib(self,irr_distrib,grayscale=True,figname=''):
if grayscale:
plt.style.use('grayscale')
plt.rcParams['figure.facecolor'] = 'white' # Or any suitable colour...
print('Nans {} out of {}'.format(np.sum(np.isnan(irr_distrib)),irr_distrib.shape[0]))
fig,ax=plt.subplots()
ax.set_xlabel('Sisäinen tuottoaste [%]')
lbl=ax.set_ylabel('Taajuus')
#ax.set_yscale('log')
#max_time=50
#nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(-5,5,100)
scaled,x2=np.histogram(irr_distrib,x)
#scaled=scaled/np.nansum(np.abs(irr_distrib))
ax.bar(x2[1:-1],scaled[1:],align='center')
if figname is not None:
plt.savefig(figname+'irrdistrib.eps', bbox_inches='tight', format='eps')
plt.show()
fig,ax=plt.subplots()
ax.hist(irr_distrib,bins=40)
plt.show()
print('Keskimääräinen irr {:.3f} %'.format(np.nanmean(irr_distrib)))
print('Mediaani irr {:.3f} %'.format(np.nanmedian(irr_distrib)))
count = (irr_distrib < 0).sum(axis=0)
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus irr<0 {} %:lla'.format(100*percent))
count = (irr_distrib <=-50).sum(axis=0)
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus irr<-50 {} %:lla'.format(100*percent))
count = (np.sum(self.infostats_paid_tyel_pension,axis=0)<0.1).sum()
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus eläke ei maksussa {} %:lla'.format(100*percent))
count1 = np.sum(self.popempstate[0:self.map_age(63),:]==15)
count = (np.sum(self.infostats_paid_tyel_pension,axis=0)<0.1).sum()-count1
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus eläke ei maksussa, ei kuollut {} %:lla'.format(100*percent))
count = np.sum(self.popempstate==15)
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus kuolleet {} %:lla'.format(100*percent))
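# Illustrative sketch (hypothetical IRR values): the shares printed above are threshold counts
# divided by the number of individuals; NaN entries compare as False and are not counted.
def _demo_share_below(irr=None, threshold=0.0):
    import numpy as np
    if irr is None:
        irr = np.array([2.1, -0.5, 3.3, np.nan, -60.0])  # hypothetical internal rates of return [%]
    count = np.sum(irr < threshold)
    return 100.0 * count / irr.shape[0]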
def get_initial_reward(self,startage=None):
real=self.comp_presentvalue()
if startage is None:
startage=self.min_age
age=max(1,startage-self.min_age)
realage=max(self.min_age+1,startage)
print('Initial discounted reward at age {}: {}'.format(realage,np.mean(real[age,:])))
return np.mean(real[age,:])
def plot_stats(self,grayscale=False,figname=None):
if grayscale:
plt.style.use('grayscale')
plt.rcParams['figure.facecolor'] = 'white' # Or any suitable colour...
self.comp_total_reward()
self.comp_total_reward(discounted=True)
self.comp_total_netincome()
#self.plot_rewdist()
#self.plot_emp(figname=figname)
if self.version in set([1,2,3,4]):
q=self.comp_budget(scale=True)
q_stat=self.stat_budget()
df1 = pd.DataFrame.from_dict(q,orient='index',columns=['e/v'])
df2 = pd.DataFrame.from_dict(q_stat,orient='index',columns=['toteuma'])
df=df1.copy()
df['toteuma']=df2['toteuma']
df['ero']=df1['e/v']-df2['toteuma']
print('Rahavirrat skaalattuna väestötasolle')
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.2f"))
q=self.comp_participants(scale=True)
q_stat=self.stat_participants_2018()
q_days=self.stat_days_2018()
df1 = pd.DataFrame.from_dict(q,orient='index',columns=['arvio (htv)'])
df2 = pd.DataFrame.from_dict(q_stat,orient='index',columns=['toteuma'])
df3 = pd.DataFrame.from_dict(q_days,orient='index',columns=['htv_tot'])
df=df1.copy()
df['toteuma (kpl)']=df2['toteuma']
df['toteuma (htv)']=df3['htv_tot']
df['ero (htv)']=df['arvio (htv)']-df['toteuma (htv)']
print('Henkilöitä tiloissa skaalattuna väestötasolle')
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.0f"))
else:
q=self.comp_participants(scale=True)
q_stat=self.stat_participants_2018()
q_days=self.stat_days_2018()
df1 = pd.DataFrame.from_dict(q,orient='index',columns=['arvio (htv)'])
df2 = pd.DataFrame.from_dict(q_stat,orient='index',columns=['toteuma'])
df3 = pd.DataFrame.from_dict(q_days,orient='index',columns=['htv_tot'])
df=df1.copy()
df['toteuma (kpl)']=df2['toteuma']
df['toteuma (htv)']=df3['htv_tot']
df['ero (htv)']=df['arvio (htv)']-df['toteuma (htv)']
print('Henkilöitä tiloissa skaalattuna väestötasolle')
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.0f"))
G=self.comp_gini()
print('Gini coefficient is {}'.format(G))
print('Real discounted reward {}'.format(self.comp_realoptimrew()))
print('vrt reward {}'.format(self.comp_scaled_consumption(np.array([0]))))
real=self.comp_presentvalue()
print('Initial discounted reward {}'.format(np.mean(real[1,:])))
self.plot_emp(figname=figname)
if self.version in set([1,2,3,4]):
self.plot_outsider()
print('Keskikestot käytettyjen ansiosidonnaisten päivärahojen mukaan')
keskikesto=self.comp_unemp_durations()
df = pd.DataFrame.from_dict(keskikesto,orient='index',columns=['0-6 kk','6-12 kk','12-18 kk','18-24kk','yli 24 kk'])
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.2f"))
print('Keskikestot viimeisimmän työttömyysjakson mukaan')
keskikesto=self.comp_unemp_durations_v2()
df = pd.DataFrame.from_dict(keskikesto,orient='index',columns=['0-6 kk','6-12 kk','12-18 kk','18-24kk','yli 24 kk'])
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.2f"))
if self.version in set([1,2,3,4]):
print('Lisäpäivillä on {:.0f} henkilöä'.format(self.count_putki()))
if self.version in set([4]):
self.plot_spouse()
if self.version==101:
self.plot_savings()
if self.version in set([3]):
self.plot_ove()
self.plot_unemp(unempratio=True,figname=figname)
self.plot_unemp(unempratio=False)
self.plot_unemp_shares()
if self.version in set([1,2,3,4]):
self.plot_group_emp(figname=figname)
self.plot_parttime_ratio(figname=figname)
self.plot_sal()
if self.version in set([1,2,3,4]):
self.plot_taxes()
self.plot_pinkslip()
self.plot_outsider()
self.plot_student()
self.plot_kassanjasen()
#self.plot_army()
self.plot_group_student()
#self.plot_group_army()
self.plot_group_disab()
self.plot_moved()
self.plot_ave_stay()
self.plot_reward()
self.plot_pensions()
self.plot_career()
self.plot_toe()
self.plot_wage_reduction()
#self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki+putki, no max age',ansiosid=True,tmtuki=True,putki=True,outsider=False)
self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki+putki, jakso päättynyt ennen 50v ikää',ansiosid=True,tmtuki=True,putki=True,outsider=False,max_age=50,figname=figname)
if self.version in set([1,2,3,4]):
#self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki+putki, jakso päättynyt ennen 50v ikää, jäljellä oleva aika',plot_bu=True,ansiosid=True,tmtuki=True,putki=True,outsider=False,max_age=50)
self.plot_distrib(label='Jakauma ansiosidonnainen+putki, jakso päättynyt ennen 50v ikää, jäljellä oleva aika',plot_bu=False,ansiosid=True,tmtuki=False,putki=True,outsider=False,max_age=50)
#self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki ilman putkea',ansiosid=True,tmtuki=True,putki=False,outsider=False)
#self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki ilman putkea, max Ikä 50v',ansiosid=True,tmtuki=True,putki=False,outsider=False,max_age=50)
self.plot_distrib(label='Jakauma tmtuki',ansiosid=False,tmtuki=True,putki=False,outsider=False)
#self.plot_distrib(label='Jakauma työvoiman ulkopuoliset',ansiosid=False,tmtuki=False,putki=False,outsider=True)
#self.plot_distrib(label='Jakauma laaja (ansiosidonnainen+tmtuki+putki+ulkopuoliset)',laaja=True)
def plot_final(self):
print('Keskikestot käytettyjen ansiosidonnaisten päivärahojen mukaan')
keskikesto=self.comp_unemp_durations()
df = pd.DataFrame.from_dict(keskikesto,orient='index',columns=['0-6 kk','6-12 kk','12-18 kk','18-24kk','yli 24 kk'])
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.2f"))
print('Keskikestot viimeisimmän työttömyysjakson mukaan')
keskikesto=self.comp_unemp_durations_v2()
df = pd.DataFrame.from_dict(keskikesto,orient='index',columns=['0-6 kk','6-12 kk','12-18 kk','18-24kk','yli 24 kk'])
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.2f"))
self.plot_emp()
if self.version in set([1,2,3,4]):
print('Lisäpäivillä on {:.0f} henkilöä'.format(self.count_putki()))
self.plot_unemp(unempratio=True)
self.plot_unemp(unempratio=False)
self.plot_unemp_shares()
if self.version in set([1,2,3,4]):
self.plot_group_emp()
self.plot_parttime_ratio()
self.plot_sal()
self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki+putki, no max age',ansiosid=True,tmtuki=True,putki=True,outsider=False)
self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki+putki, jakso päättynyt ennen 50v ikää',ansiosid=True,tmtuki=True,putki=True,outsider=False,max_age=50)
#self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki+putki, jakso päättynyt ennen 50v ikää, jäljellä oleva aika',plot_bu=True,ansiosid=True,tmtuki=True,putki=True,outsider=False,max_age=50)
self.plot_distrib(label='Jakauma ansiosidonnainen+putki, jakso päättynyt ennen 50v ikää, jäljellä oleva aika',plot_bu=False,ansiosid=True,tmtuki=False,putki=True,outsider=False,max_age=50)
#self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki ilman putkea',ansiosid=True,tmtuki=True,putki=False,outsider=False)
#self.plot_distrib(label='Jakauma ansiosidonnainen+tmtuki ilman putkea, max Ikä 50v',ansiosid=True,tmtuki=True,putki=False,outsider=False,max_age=50)
self.plot_distrib(label='Jakauma tmtuki',ansiosid=False,tmtuki=True,putki=False,outsider=False)
#self.plot_distrib(label='Jakauma työvoiman ulkopuoliset',ansiosid=False,tmtuki=False,putki=False,outsider=True)
#self.plot_distrib(label='Jakauma laaja (ansiosidonnainen+tmtuki+putki+ulkopuoliset)',laaja=True)
if self.version in set([1,2,3,4]):
self.plot_outsider()
self.plot_student()
#self.plot_army()
self.plot_group_student()
#self.plot_group_army()
self.plot_group_disab()
self.plot_moved()
self.plot_ave_stay()
self.plot_reward()
self.plot_pensions()
self.plot_career()
self.plot_toe()
self.plot_wage_reduction()
def plot_img(self,img,xlabel="eläke",ylabel="Palkka",title="Employed"):
fig, ax = plt.subplots()
im = ax.imshow(img)
heatmap = plt.pcolor(img)
plt.colorbar(heatmap)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.title(title)
plt.show()
def save_sim(self,filename):
f = h5py.File(filename, 'w')
ftype='float64'
_ = f.create_dataset('version', data=self.version, dtype=ftype)
_ = f.create_dataset('n_pop', data=self.n_pop, dtype=int)
_ = f.create_dataset('empstate', data=self.empstate, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('gempstate', data=self.gempstate, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('deceiced', data=self.deceiced, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('rewstate', data=self.rewstate, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('salaries_emp', data=self.salaries_emp, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('actions', data=self.actions, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('alive', data=self.alive, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('galive', data=self.galive, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('siirtyneet', data=self.siirtyneet, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('siirtyneet_det', data=self.siirtyneet_det, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('pysyneet', data=self.pysyneet, dtype=int,compression="gzip", compression_opts=9)
if self.dynprog:
_ = f.create_dataset('aveV', data=self.aveV, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('pop_predrew', data=self.pop_predrew, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('time_in_state', data=self.time_in_state, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('stat_tyoura', data=self.stat_tyoura, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('stat_toe', data=self.stat_toe, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('stat_pension', data=self.stat_pension, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('stat_paidpension', data=self.stat_paidpension, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('stat_unemp_len', data=self.stat_unemp_len, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('popempstate', data=self.popempstate, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('stat_wage_reduction', data=self.stat_wage_reduction, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('stat_wage_reduction_g', data=self.stat_wage_reduction_g, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('popunemprightleft', data=self.popunemprightleft, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('popunemprightused', data=self.popunemprightused, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_taxes', data=self.infostats_taxes, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_wagetaxes', data=self.infostats_wagetaxes, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_taxes_distrib', data=self.infostats_taxes_distrib, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_etuustulo', data=self.infostats_etuustulo, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_etuustulo_group', data=self.infostats_etuustulo_group, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_perustulo', data=self.infostats_perustulo, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_palkkatulo', data=self.infostats_palkkatulo, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_palkkatulo_eielakkeella', data=self.infostats_palkkatulo_eielakkeella, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_ansiopvraha', data=self.infostats_ansiopvraha, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_asumistuki', data=self.infostats_asumistuki, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_valtionvero', data=self.infostats_valtionvero, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_kunnallisvero', data=self.infostats_kunnallisvero, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_valtionvero_distrib', data=self.infostats_valtionvero_distrib, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_kunnallisvero_distrib', data=self.infostats_kunnallisvero_distrib, dtype=ftype)
_ = f.create_dataset('infostats_ptel', data=self.infostats_ptel, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_tyotvakmaksu', data=self.infostats_tyotvakmaksu, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_tyoelake', data=self.infostats_tyoelake, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_kokoelake', data=self.infostats_kokoelake, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_opintotuki', data=self.infostats_opintotuki, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_isyyspaivaraha', data=self.infostats_isyyspaivaraha, dtype=ftype)
_ = f.create_dataset('infostats_aitiyspaivaraha', data=self.infostats_aitiyspaivaraha, dtype=ftype)
_ = f.create_dataset('infostats_kotihoidontuki', data=self.infostats_kotihoidontuki, dtype=ftype)
_ = f.create_dataset('infostats_sairauspaivaraha', data=self.infostats_sairauspaivaraha, dtype=ftype)
_ = f.create_dataset('infostats_toimeentulotuki', data=self.infostats_toimeentulotuki, dtype=ftype)
_ = f.create_dataset('infostats_tulot_netto', data=self.infostats_tulot_netto, dtype=ftype)
_ = f.create_dataset('poprewstate', data=self.poprewstate, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_pinkslip', data=self.infostats_pinkslip, dtype=int)
if self.version<2:
_ = f.create_dataset('infostats_pop_pinkslip', data=self.infostats_pop_pinkslip, dtype=int)
_ = f.create_dataset('infostats_paid_tyel_pension', data=self.infostats_paid_tyel_pension, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_tyelpremium', data=self.infostats_tyelpremium, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_npv0', data=self.infostats_npv0, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_irr', data=self.infostats_irr, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_group', data=self.infostats_group, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_sairausvakuutus', data=self.infostats_sairausvakuutus, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_pvhoitomaksu', data=self.infostats_pvhoitomaksu, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_ylevero', data=self.infostats_ylevero, dtype=ftype)
_ = f.create_dataset('infostats_ylevero_distrib', data=self.infostats_ylevero_distrib, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_mother_in_workforce', data=self.infostats_mother_in_workforce, dtype=ftype)
_ = f.create_dataset('infostats_children_under3', data=self.infostats_children_under3, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_children_under7', data=self.infostats_children_under7, dtype=int,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_unempwagebasis', data=self.infostats_unempwagebasis, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_unempwagebasis_acc', data=self.infostats_unempwagebasis_acc, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_toe', data=self.infostats_toe, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_ove', data=self.infostats_ove, dtype=int)
_ = f.create_dataset('infostats_kassanjasen', data=self.infostats_kassanjasen, dtype=int)
_ = f.create_dataset('infostats_poptulot_netto', data=self.infostats_poptulot_netto, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_equivalent_income', data=self.infostats_equivalent_income, dtype=ftype)
_ = f.create_dataset('infostats_pop_wage', data=self.infostats_pop_wage, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_pop_pension', data=self.infostats_pop_pension, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('infostats_puoliso', data=self.infostats_puoliso, dtype=ftype)
_ = f.create_dataset('infostats_alv', data=self.infostats_alv)
_ = f.create_dataset('params', data=str(self.params))
if self.version==101:
_ = f.create_dataset('infostats_savings', data=self.infostats_savings, dtype=ftype,compression="gzip", compression_opts=9)
_ = f.create_dataset('sav_actions', data=self.sav_actions, dtype=ftype,compression="gzip", compression_opts=9)
f.close()
def save_to_hdf(self,filename,nimi,arr,dtype):
f = h5py.File(filename, 'w')
dset = f.create_dataset(nimi, data=arr, dtype=dtype)
f.close()
def load_hdf(self,filename,nimi):
f = h5py.File(filename, 'r')
        val=f[nimi][()]
f.close()
return val
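    # Hedged usage note (editor's addition): save_to_hdf/load_hdf round-trip a single named
    # array through HDF5, e.g. self.save_to_hdf('empstate.h5','empstate',self.empstate,int)
    # followed by self.load_hdf('empstate.h5','empstate'). Note that save_to_hdf opens the
    # file in 'w' mode, so each call overwrites any datasets previously stored in that file.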
def load_sim(self,filename,n_pop=None,print_pop=False):
f = h5py.File(filename, 'r')
if 'version' in f.keys():
version=int(f['version'][()])
else:
version=1
self.empstate=f['empstate'][()]
self.gempstate=f['gempstate'][()]
self.deceiced=f['deceiced'][()]
self.rewstate=f['rewstate'][()]
if 'poprewstate' in f.keys():
self.poprewstate=f['poprewstate'][()]
if 'pop_predrew' in f.keys():
self.pop_predrew=f['pop_predrew'][()]
self.salaries_emp=f['salaries_emp'][()]
self.actions=f['actions'][()]
self.alive=f['alive'][()]
self.galive=f['galive'][()]
self.siirtyneet=f['siirtyneet'][()]
self.pysyneet=f['pysyneet'][()]
if 'aveV' in f.keys():
self.aveV=f['aveV'][()]
self.time_in_state=f['time_in_state'][()]
self.stat_tyoura=f['stat_tyoura'][()]
self.stat_toe=f['stat_toe'][()]
self.stat_pension=f['stat_pension'][()]
self.stat_paidpension=f['stat_paidpension'][()]
self.stat_unemp_len=f['stat_unemp_len'][()]
self.popempstate=f['popempstate'][()]
self.stat_wage_reduction=f['stat_wage_reduction'][()]
self.popunemprightleft=f['popunemprightleft'][()]
self.popunemprightused=f['popunemprightused'][()]
if 'infostats_wagetaxes' in f.keys(): # older runs do not have these
self.infostats_wagetaxes=f['infostats_wagetaxes'][()]
if 'infostats_taxes' in f.keys(): # older runs do not have these
self.infostats_taxes=f['infostats_taxes'][()]
self.infostats_etuustulo=f['infostats_etuustulo'][()]
self.infostats_perustulo=f['infostats_perustulo'][()]
self.infostats_palkkatulo=f['infostats_palkkatulo'][()]
self.infostats_ansiopvraha=f['infostats_ansiopvraha'][()]
self.infostats_asumistuki=f['infostats_asumistuki'][()]
self.infostats_valtionvero=f['infostats_valtionvero'][()]
self.infostats_kunnallisvero=f['infostats_kunnallisvero'][()]
self.infostats_ptel=f['infostats_ptel'][()]
self.infostats_tyotvakmaksu=f['infostats_tyotvakmaksu'][()]
self.infostats_tyoelake=f['infostats_tyoelake'][()]
self.infostats_kokoelake=f['infostats_kokoelake'][()]
self.infostats_opintotuki=f['infostats_opintotuki'][()]
self.infostats_isyyspaivaraha=f['infostats_isyyspaivaraha'][()]
self.infostats_aitiyspaivaraha=f['infostats_aitiyspaivaraha'][()]
self.infostats_kotihoidontuki=f['infostats_kotihoidontuki'][()]
self.infostats_sairauspaivaraha=f['infostats_sairauspaivaraha'][()]
self.infostats_toimeentulotuki=f['infostats_toimeentulotuki'][()]
self.infostats_tulot_netto=f['infostats_tulot_netto'][()]
if 'infostats_etuustulo_group' in f.keys(): # older runs do not have these
self.infostats_etuustulo_group=f['infostats_etuustulo_group'][()]
if 'infostats_valtionvero_distrib' in f.keys(): # older runs do not have these
self.infostats_valtionvero_distrib=f['infostats_valtionvero_distrib'][()]
self.infostats_kunnallisvero_distrib=f['infostats_kunnallisvero_distrib'][()]
if 'infostats_taxes_distrib' in f.keys(): # older runs do not have these
self.infostats_taxes_distrib=f['infostats_taxes_distrib'][()]
self.infostats_ylevero_distrib=f['infostats_ylevero_distrib'][()]
if 'infostats_pinkslip' in f.keys(): # older runs do not have these
self.infostats_pinkslip=f['infostats_pinkslip'][()]
if 'infostats_pop_pinkslip' in f.keys():
self.infostats_pop_pinkslip=f['infostats_pop_pinkslip'][()]
if 'infostats_paid_tyel_pension' in f.keys(): # older runs do not have these
self.infostats_paid_tyel_pension=f['infostats_paid_tyel_pension'][()]
self.infostats_tyelpremium=f['infostats_tyelpremium'][()]
if 'infostats_npv0' in f.keys(): # older runs do not have these
self.infostats_npv0=f['infostats_npv0'][()]
self.infostats_irr=f['infostats_irr'][()]
if 'infostats_chilren7' in f.keys(): # older runs do not have these
self.infostats_chilren7=f['infostats_chilren7'][()]
if 'infostats_chilren18' in f.keys(): # older runs do not have these
self.infostats_chilren18=f['infostats_chilren18'][()]
if 'infostats_group' in f.keys(): # older runs do not have these
self.infostats_group=f['infostats_group'][()]
if 'infostats_sairausvakuutus' in f.keys():
self.infostats_sairausvakuutus=f['infostats_sairausvakuutus'][()]
self.infostats_pvhoitomaksu=f['infostats_pvhoitomaksu'][()]
self.infostats_ylevero=f['infostats_ylevero'][()]
if 'infostats_mother_in_workforce' in f.keys():
self.infostats_mother_in_workforce=f['infostats_mother_in_workforce'][()]
if 'stat_wage_reduction_g' in f.keys():
self.stat_wage_reduction_g=f['stat_wage_reduction_g'][()]
if 'siirtyneet_det' in f.keys():
self.siirtyneet_det=f['siirtyneet_det'][()]
if 'infostats_kassanjasen' in f.keys():
self.infostats_kassanjasen=f['infostats_kassanjasen'][()]
if 'n_pop' in f.keys():
self.n_pop=int(f['n_pop'][()])
else:
self.n_pop=np.sum(self.empstate[0,:])
if 'infostats_puoliso' in f.keys():
self.infostats_puoliso=f['infostats_puoliso'][()]
if 'infostats_ove' in f.keys():
self.infostats_ove=f['infostats_ove'][()]
if 'infostats_children_under3' in f.keys():
self.infostats_children_under3=f['infostats_children_under3'][()]
self.infostats_children_under7=f['infostats_children_under7'][()]
self.infostats_unempwagebasis=f['infostats_unempwagebasis'][()]
self.infostats_unempwagebasis_acc=f['infostats_unempwagebasis_acc'][()]
self.infostats_toe=f['infostats_toe'][()]
if 'infostats_palkkatulo_eielakkeella' in f.keys():
self.infostats_palkkatulo_eielakkeella=f['infostats_palkkatulo_eielakkeella'][()]
if 'infostats_tulot_netto' in f.keys():
self.infostats_tulot_netto=f['infostats_tulot_netto'][()]
if 'infostats_poptulot_netto' in f.keys():
self.infostats_poptulot_netto=f['infostats_poptulot_netto'][()]
if 'infostats_equivalent_income' in f.keys():
self.infostats_equivalent_income=f['infostats_equivalent_income'][()]
if 'infostats_pop_wage' in f.keys():
self.infostats_pop_wage=f['infostats_pop_wage'][()]
self.infostats_pop_pension=f['infostats_pop_pension'][()]
if 'infostats_savings' in f.keys():
self.infostats_savings=f['infostats_savings'][()]
self.sav_actions=f['sav_actions'][()]
if 'infostats_alv' in f.keys():
self.infostats_alv=f['infostats_alv'][()]
if n_pop is not None:
self.n_pop=n_pop
if 'params' in f.keys():
self.params=f['params'][()]
else:
self.params=None
if print_pop:
print('n_pop {}'.format(self.n_pop))
f.close()
def scatter_density(self,x,y,label1='',label2=''):
# Calculate the point density
xy = np.vstack([x,y])
z = gaussian_kde(xy)(xy)
# Sort the points by density, so that the densest points are plotted last
#idx = z.argsort()
#x, y, z = x[idx], y[idx], z[idx]
fig,ax=plt.subplots()
ax.scatter(x,y,c=z)
ax.set_xlabel(label1)
ax.set_ylabel(label2)
#plt.legend()
# plt.title('number of agents by pension level')
plt.show()
def plot_Vk(self,k,getV=None):
obsV=np.zeros((self.n_time))
#obsV[-1]=self.poprewstate[-1,k]
for t in range(self.n_time-2,-1,-1):
obsV[t]=self.poprewstate[t+1,k]+self.gamma*obsV[t+1]
rewerr=self.poprewstate[t+1,k]-self.pop_predrew[t+1,k]
delta=obsV[t]-self.aveV[t+1,k]
wage=self.infostats_pop_wage[t,k]
old_wage=self.infostats_pop_wage[max(0,t-1),k]
age=self.map_t_to_age(t)
old_age=int(self.map_t_to_age(max(0,t-1)))
emp=self.popempstate[t,k]
predemp=self.popempstate[max(0,t-1),k]
pen=self.infostats_pop_pension[t,k]
predwage=self.env.wage_process_mean(old_wage,old_age,state=predemp)
print(f'{age}: {obsV[t]:.4f} vs {self.aveV[t+1,k]:.4f} d {delta:.4f} re {rewerr:.6f} in state {emp} ({k},{wage:.2f},{pen:.2f}) ({predwage:.2f}) ({self.poprewstate[t+1,k]:.5f},{self.pop_predrew[t+1,k]:.5f})')
err=obsV-self.aveV[t,k]
obsV_error=np.abs(obsV-self.aveV[t,k])
def plot_Vstats(self):
obsV=np.zeros((self.n_time,self.n_pop))
obsVemp=np.zeros((self.n_time,3))
obsVemp_error=np.zeros((self.n_time,3))
obsVemp_abserror=np.zeros((self.n_time,3))
obsV[self.n_time-1,:]=self.poprewstate[self.n_time-1,:]
for t in range(self.n_time-2,0,-1):
obsV[t,:]=self.poprewstate[t,:]+self.gamma*obsV[t+1,:]
delta=obsV[t,:]-self.aveV[t,:]
for k in range(self.n_pop):
if np.abs(delta[k])>0.2:
wage=self.infostats_pop_wage[t,k]
pen=self.infostats_pop_pension[t,k]
#print(f't {t}: {obsV[t,k]} vs {self.aveV[t+1,k]} d {delta} in state {self.popempstate[t,k]} ({k},{wage:.2f},{pen:.2f})')
for state in range(2):
s=np.asarray(self.popempstate[t,:]==state).nonzero()
obsVemp[t,state]=np.mean(obsV[t,s])
obsVemp_error[t,state]=np.mean(obsV[t,s]-self.aveV[t,s])
obsVemp_abserror[t,state]=np.mean(np.abs(obsV[t,s]-self.aveV[t,s]))
err=obsV[:self.n_time]-self.aveV
obsV_error=np.abs(err)
mean_obsV=np.mean(obsV,axis=1)
mean_predV=np.mean(self.aveV,axis=1)
mean_abs_errorV=np.mean(np.abs(err),axis=1)
mean_errorV=np.mean(err,axis=1)
fig,ax=plt.subplots()
ax.plot(mean_abs_errorV[1:],label='abs. error')
ax.plot(mean_errorV[1:],label='error')
ax.plot(np.max(err,axis=1),label='max')
ax.plot(np.min(err,axis=1),label='min')
ax.set_xlabel('time')
ax.set_ylabel('error (pred-obs)')
plt.legend()
plt.show()
fig,ax=plt.subplots()
ax.plot(mean_obsV[1:],label='observed')
ax.plot(mean_predV[1:],label='predicted')
ax.set_xlabel('time')
ax.set_ylabel('V')
plt.legend()
plt.show()
fig,ax=plt.subplots()
ax.plot(obsVemp[1:,0],label='state 0')
ax.plot(obsVemp[1:,1],label='state 1')
ax.set_xlabel('time')
ax.set_ylabel('V')
plt.legend()
plt.show()
fig,ax=plt.subplots()
ax.plot(obsVemp_error[1:,0],label='state 0')
ax.plot(obsVemp_error[1:,1],label='state 1')
ax.set_xlabel('time')
ax.set_ylabel('error')
plt.legend()
plt.show()
fig,ax=plt.subplots()
ax.plot(obsVemp_abserror[1:,0],label='state 0')
ax.plot(obsVemp_abserror[1:,1],label='state 1')
ax.set_xlabel('time')
ax.set_ylabel('error')
plt.legend()
plt.show()
#self.scatter_density(self.infostats_pop_wage,obsV_error,label1='t',label2='emp obsV error')
def render(self,load=None,figname=None,grayscale=False):
if load is not None:
self.load_sim(load)
#self.plot_stats(5)
self.plot_stats(figname=figname,grayscale=grayscale)
self.plot_reward()
def stat_budget(self,scale=False):
if self.year==2018:
q={}
q['tyotulosumma']=89_134_200_000 #+4_613_400_000+1_239_900_000 # lähde: ETK, tyel + julkinen + yel + myel
q['tyotulosumma_eielakkeella']=0 #+4_613_400_000+1_239_900_000 # lähde: ETK, tyel + julkinen + yel + myel
q['etuusmeno']=0
q['verot+maksut']=0 # tuloverot 30_763_000_000 ml YLE ja kirkollisvero
q['valtionvero']=5_542_000_000
q['kunnallisvero']=18_991_000_000 # 18_791_000_000 ?
q['ptel']=5_560_000_000 # vai 6_804_000_000?
q['elakemaksut']=22_085_700_000 # Lähde: ETK
q['tyotvakmaksu']=0.019*q['tyotulosumma']
q['sairausvakuutusmaksu']=1_335_000_000+407_000_000
q['ylevero']=497_000_000
q['ansiopvraha']=3_895_333_045 # 1_930_846_464+1_964_486_581 # ansioturva + perusturva = 3 895 333 045
q['asumistuki']=1_488_900_000 + 600_100_000 # yleinen plus eläkkeensaajan
q['tyoelake']=27_865_000_000
q['kokoelake']=q['tyoelake']+2_357_000_000
q['opintotuki']=417_404_073+54_057
q['isyyspaivaraha']=104_212_164
q['aitiyspaivaraha']=341_304_991+462_228_789
q['kotihoidontuki']=245_768_701
q['sairauspaivaraha']=786_659_783
q['toimeentulotuki']=715_950_847
q['perustulo']=0
q['pvhoitomaksu']=0
q['alv']=21_364_000_000
q['etuusmeno']=q['ansiopvraha']+q['kokoelake']+q['opintotuki']+q['isyyspaivaraha']+\
q['aitiyspaivaraha']+q['sairauspaivaraha']+q['toimeentulotuki']+q['perustulo']+\
q['asumistuki']+q['kotihoidontuki']
q['verot+maksut']=q['valtionvero']+q['kunnallisvero']+q['ptel']+q['tyotvakmaksu']+\
q['ylevero']+q['sairausvakuutusmaksu']
q['ta_maksut']=0.2057*q['tyotulosumma'] # karkea
q['tulot_netto']=q['tyotulosumma']+q['etuusmeno']-q['verot+maksut']
q['muut tulot']=q['etuusmeno']-q['verot+maksut']
elif self.year==2019:
q={}
q['tyotulosumma']=89_134_200_000 #+4_613_400_000+1_239_900_000 # lähde: ETK, tyel + julkinen + yel + myel
q['tyotulosumma_eielakkeella']=0 #+4_613_400_000+1_239_900_000 # lähde: ETK, tyel + julkinen + yel + myel
q['etuusmeno']=0
q['verot+maksut']=0 # tuloverot 30_763_000_000 ml YLE ja kirkollisvero
q['valtionvero']=5_542_000_000
q['kunnallisvero']=19_376_000_000
q['ptel']=5_560_000_000 # vai 7_323_000_000
q['elakemaksut']=22_085_700_000 # Lähde: ETK
q['tyotvakmaksu']=0.019*q['tyotulosumma']
q['sairausvakuutusmaksu']=1_335_000_000+407_000_000
q['ylevero']=497_000_000
q['ansiopvraha']=3_895_333_045 # 1_930_846_464+1_964_486_581 # ansioturva + perusturva = 3 895 333 045
q['asumistuki']=1_488_900_000 + 600_100_000 # yleinen plus eläkkeensaajan
q['tyoelake']=27_865_000_000
q['kokoelake']=q['tyoelake']+2_357_000_000
q['opintotuki']=417_404_073+54_057
q['isyyspaivaraha']=104_212_164
q['aitiyspaivaraha']=341_304_991+462_228_789
q['kotihoidontuki']=245_768_701
q['sairauspaivaraha']=786_659_783
q['toimeentulotuki']=715_950_847
q['perustulo']=0
q['pvhoitomaksu']=0
q['alv']=21_974_000_000
q['etuusmeno']=q['ansiopvraha']+q['kokoelake']+q['opintotuki']+q['isyyspaivaraha']+\
q['aitiyspaivaraha']+q['sairauspaivaraha']+q['toimeentulotuki']+q['perustulo']+\
q['asumistuki']+q['kotihoidontuki']
q['verot+maksut']=q['valtionvero']+q['kunnallisvero']+q['ptel']+q['tyotvakmaksu']+\
q['ylevero']+q['sairausvakuutusmaksu']
q['ta_maksut']=0.2057*q['tyotulosumma'] # karkea
q['tulot_netto']=q['tyotulosumma']+q['etuusmeno']-q['verot+maksut']
q['muut tulot']=q['etuusmeno']-q['verot+maksut']
elif self.year==2020:
q={}
q['tyotulosumma']=89_134_200_000 #+4_613_400_000+1_239_900_000 # lähde: ETK, tyel + julkinen + yel + myel
q['tyotulosumma_eielakkeella']=0 #+4_613_400_000+1_239_900_000 # lähde: ETK, tyel + julkinen + yel + myel
q['etuusmeno']=0
q['verot+maksut']=0 # tuloverot 30_763_000_000 ml YLE ja kirkollisvero
q['valtionvero']=5_542_000_000 # vai 7_700_000_000
q['kunnallisvero']=20_480_000_000
q['ptel']=5_560_000_000
q['elakemaksut']=22_085_700_000 # Lähde: ETK
q['tyotvakmaksu']=0.019*q['tyotulosumma']
q['sairausvakuutusmaksu']=1_335_000_000+407_000_000
q['ylevero']=497_000_000
q['ansiopvraha']=3_895_333_045 # 1_930_846_464+1_964_486_581 # ansioturva + perusturva = 3 895 333 045
q['asumistuki']=1_488_900_000 + 600_100_000 # yleinen plus eläkkeensaajan
q['tyoelake']=27_865_000_000
q['kokoelake']=q['tyoelake']+2_357_000_000
q['opintotuki']=417_404_073+54_057
q['isyyspaivaraha']=104_212_164
q['aitiyspaivaraha']=341_304_991+462_228_789
q['kotihoidontuki']=245_768_701
q['sairauspaivaraha']=786_659_783
q['toimeentulotuki']=715_950_847
q['perustulo']=0
q['pvhoitomaksu']=0
q['alv']=21_775_000_000
q['etuusmeno']=q['ansiopvraha']+q['kokoelake']+q['opintotuki']+q['isyyspaivaraha']+\
q['aitiyspaivaraha']+q['sairauspaivaraha']+q['toimeentulotuki']+q['perustulo']+\
q['asumistuki']+q['kotihoidontuki']
q['verot+maksut']=q['valtionvero']+q['kunnallisvero']+q['ptel']+q['tyotvakmaksu']+\
q['ylevero']+q['sairausvakuutusmaksu']
q['ta_maksut']=0.2057*q['tyotulosumma'] # karkea
q['tulot_netto']=q['tyotulosumma']+q['etuusmeno']-q['verot+maksut']
q['muut tulot']=q['etuusmeno']-q['verot+maksut']
return q
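    # Note (editor's addition): the 2019 and 2020 branches above appear to reuse most of the
    # 2018 reference figures and only update a few items (kunnallisvero and alv), so the
    # remaining entries should be read as 2018-based placeholders rather than year-specific
    # statistics.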
def comp_budget(self,scale=True):
demog2=self.empstats.get_demog()
scalex=demog2/self.n_pop
q={}
q['tyotulosumma']=np.sum(self.infostats_palkkatulo*scalex)
q['tyotulosumma_eielakkeella']=np.sum(self.infostats_palkkatulo_eielakkeella*scalex) #np.sum(self.comp_ps_norw()*scalex)*self.timestep
        q['etuusmeno']=np.sum(self.infostats_etuustulo*scalex)
#####
# Fix data length issue.
####
# • Should serve week data with 0s during weeks where data is missing
import os
import copy
import numpy as np
import pandas as pd
from lib.classes.dataset_classes.BabyDataset import BabyDataset
from lib.classes.TargetRate import TargetRate
class SubjectDataset(BabyDataset):
"""
Represents a single subject's data. This is currently all we need in terms of functionality. Will probably
    be extended in the future.
"""
def __init__(self, master_config=None, subject_index=""):
if master_config is not None:
self.master_config = master_config
        if subject_index != "":
self.subject_index = subject_index
elif master_config is not None:
self.subject_index = master_config.Core_Config.Data_Config.subject_index
self.valid_weeks = self._create_valid_week_array()
self.valid_mask = self._create_valid_mask_array()
super().__init__()
def get_full_generator(self, batch_size=1):
data_config = self.master_config.Core_Config.Data_Config
model_config = self.master_config.Core_Config.Model_Config
feature_names = data_config.feature_names
for_training = data_config.for_training
while True:
feature_batch = self._get_feature_batch(feature_names, for_training)
label_batch = self._get_label_batch(data_config, model_config)
yield (feature_batch, label_batch)
def get_training_generator(self, batch_size=1, sample_weight_mode=None):
data_config = self.master_config.Core_Config.Data_Config
model_config = self.master_config.Core_Config.Model_Config
feature_names = data_config.feature_names
for_training = data_config.for_training
while True:
feature_batch = self._get_feature_batch(feature_names, for_training)
label_batch = self._get_label_batch(data_config, model_config)
sample_weights = self._get_sample_weights(model_config)
if sample_weight_mode is not None:
yield (feature_batch, label_batch, sample_weights)
else:
yield (feature_batch, label_batch)
def get_validation_generator(self):
yield None, None, None
def _get_feature_batch(self, feature_names, for_training):
### Get feature batch
valid_weeks_pop_list = copy.deepcopy(self.valid_weeks)
week_list = []
for mask_value in self.valid_mask:
if mask_value:
subject_location = self._create_week_filename(valid_weeks_pop_list.pop(0))
subject_dataframe = pd.read_csv(subject_location)
if feature_names == "all":
subject_np = subject_dataframe.to_numpy()
week_list.append(subject_np)
else:
subset_dataframe = subject_dataframe[feature_names]
subset_np = subset_dataframe.to_numpy()
week_list.append(subset_np)
else:
if feature_names == "all":
nan_week = np.zeros(shape=(self.get_data_length(), 43))
week_list.append(nan_week)
else:
nan_week = np.zeros(shape=(self.get_data_length(), len(feature_names)))
week_list.append(nan_week)
week_stack = np.stack(week_list)
if for_training:
week_stack = np.expand_dims(week_stack, axis=0)
feature_batch = week_stack
return feature_batch
def _get_label_batch(self, data_config, model_config):
### Get label batch
### Create target rate generator and generate labels
target_rate = TargetRate(length=self.get_n_weeks(), offset=data_config.Label.offset,
rate_type=data_config.Label.rate_type)
label_batch = target_rate.generate_series(model_config.Convolution.n_filters)
return label_batch
def get_all_feature_names(self):
"""
Return a list of all feature names
"""
subject_location = self._create_week_filename(self.valid_weeks[0])
subject_dataframe = pd.read_csv(subject_location)
return list(subject_dataframe.columns)
def get_n_weeks(self):
"""
Return number of weeks, valid and invalid
"""
final_week_index = self.valid_weeks[len(self.valid_weeks)-1]
return final_week_index
def get_total_seconds(self):
"""
Return total seconds measured in data
"""
return 300
def get_data_length(self):
"""
Return length of data
"""
subject_location = self._create_week_filename(self.valid_weeks[0])
subject_dataframe = pd.read_csv(subject_location)
return subject_dataframe.shape[0]
def _get_sample_weights(self, model_config):
"""
Returns a numpy version of valid mask with an extra dimension to represent the sample index within a batch
"""
sample_weights = np.asarray(self.valid_mask)
        sample_weights = np.expand_dims(sample_weights, axis=0)
        return sample_weights
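# Hedged usage sketch (editor's addition): pulling one batch from the generator. It assumes a
# project master_config object (not shown here) whose Core_Config.Data_Config provides
# subject_index, feature_names and for_training, and that the per-week CSV files referenced by
# _create_week_filename() exist on disk; the subject index "007" is purely illustrative.
#
# dataset = SubjectDataset(master_config=master_config, subject_index="007")
# features, labels = next(dataset.get_full_generator())
# print(features.shape, labels.shape)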
"""
Deep Q-learning in Pytorch.
Main structure of the code is as follows:
1. Class is initialized using an environment, model, optimizer and other parameters
2. Update function involves computing TD loss for Q-learning
3. Solved for a fixed nummber of frames with in which the learning is assumed to be complete
4. Model at regular intervals of episodes is saved for comparision.
An example can be seen in the notebooks folder
"""
import math, random
import pdb
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
USE_CUDA = torch.cuda.is_available()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
# this throws a warning but it also allows the code to be less clumsy with the requires_grad parameter
print('Using CUDA: {}'.format(USE_CUDA))
from collections import deque
import pickle
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from IPython.display import clear_output
import matplotlib.pyplot as plt
seed = 123
torch.manual_seed(seed)
class ReplayBuffer:
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
self.buffer.append((state, action, reward, next_state, done))
def sample(self, batch_size):
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
        return np.concatenate(state), action, reward, np.concatenate(next_state), done
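# Minimal usage sketch (editor's addition, separate from the notebooks example mentioned above):
# fill the ReplayBuffer with random transitions and draw one batch. The state size, capacity
# and batch size are illustrative values only.
if __name__ == "__main__":
    _buffer = ReplayBuffer(capacity=1000)
    for _ in range(64):
        _s = np.random.randn(4)        # stand-in for an environment observation
        _s_next = np.random.randn(4)
        _buffer.push(_s, random.randrange(2), 1.0, _s_next, False)
    _states, _actions, _rewards, _next_states, _dones = _buffer.sample(batch_size=32)
    print(_states.shape)               # -> (32, 4)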
import numpy as np
import pytest
from astropy.io import fits
from astropy.wcs import WCS
__all__ = ['BaseImviz_WCS_NoWCS', 'BaseImviz_WCS_WCS']
class BaseImviz_WCS_NoWCS:
@pytest.fixture(autouse=True)
def setup_class(self, imviz_app):
hdu = fits.ImageHDU(np.arange(100).reshape((10, 10)), name='SCI')
# Apply some celestial WCS from
# https://learn.astropy.org/rst-tutorials/celestial_coords1.html
hdu.header.update({'CTYPE1': 'RA---TAN',
'CUNIT1': 'deg',
'CDELT1': -0.0002777777778,
'CRPIX1': 1,
'CRVAL1': 337.5202808,
'NAXIS1': 10,
'CTYPE2': 'DEC--TAN',
'CUNIT2': 'deg',
'CDELT2': 0.0002777777778,
'CRPIX2': 1,
'CRVAL2': -20.833333059999998,
'NAXIS2': 10})
# Data with WCS
imviz_app.load_data(hdu, data_label='has_wcs')
# Data without WCS
imviz_app.load_data(hdu, data_label='no_wcs')
imviz_app.app.data_collection[1].coords = None
self.wcs = WCS(hdu.header)
self.imviz = imviz_app
self.viewer = imviz_app.default_viewer
# Since we are not really displaying, need this to test zoom.
self.viewer.shape = (100, 100)
self.viewer.state._set_axes_aspect_ratio(1)
class BaseImviz_WCS_WCS:
@pytest.fixture(autouse=True)
def setup_class(self, imviz_app):
        arr = np.ones((10, 10))
"""
References:
[1] <NAME> and <NAME>, Superposition of vortex cylinders for steady and unsteady simulation of rotors of finite tip-speed ratio - Wind Energy, 2014
[X] <NAME>, <NAME> - Cylindrical vortex wake model: right cylinder - Wind Energy, 2014
[X] <NAME>, <NAME> - Cylindrical vortex wake model: skewed cylinder, application to yawed or tilted rotors - Wind Energy, 2015
[X] <NAME> - Wind Turbine Aerodynamics and Vorticity Based Method, Springer, 2017
"""
#--- Legacy python 2.7
from __future__ import division
from __future__ import print_function
# --- General
import unittest
import warnings
import numpy as np
import scipy.optimize as sciopt
import scipy.integrate as sciint
def Ct_const_cutoff(CT0,r_bar_cut,vr_bar,r_bar_tip=None):
""" Returns an almost constant Ct,
linearly dropping to zero below a cut-off radius
linearly dropping to zero after a tip radius
"""
Ct=np.ones(vr_bar.shape)*CT0
I=vr_bar<r_bar_cut
Ct[I]=CT0*vr_bar[I]/r_bar_cut
if r_bar_tip is not None:
I=vr_bar>r_bar_tip
Ct[I]=CT0*(1-(vr_bar[I]-r_bar_tip)/(1-r_bar_tip))
return Ct
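# Hedged example (editor's addition): build a thrust-coefficient distribution with a root
# cut-off below r/R=0.1 and a tip roll-off beyond r/R=0.9. CT0 and the radii used here are
# illustrative values only.
if __name__ == "__main__":
    _vr_bar = np.linspace(0.0, 1.0, 50)
    _Ct = Ct_const_cutoff(CT0=0.737, r_bar_cut=0.1, vr_bar=_vr_bar, r_bar_tip=0.9)
    print(_Ct.min(), _Ct.max())        # 0.0 at root and tip, CT0 on the flat part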
def InductionsFromPrescribedCtCq_ST(vr_bar,Ct,Cq,Lambda,bSwirl):
"""
Returns the stream tube theory inductions based on a given Ct and Cq.
Based on script fGetInductions_Prescribed_CT_CQ_ST
"""
lambda_r=Lambda*vr_bar
# --- Stream Tube theory
a_ST = 1/2*(1-np.sqrt(1-Ct))
if bSwirl:
a_prime_ST = Cq/(4*(1-a_ST)*lambda_r)
# a_prime_ST = 0.5*(sqrt(1+(4*a_ST.*(1-a_ST)./lambda_r.^2))-1);
else:
a_prime_ST =0
return a_ST,a_prime_ST
def CirculationFromPrescribedCt(vr_bar,vCt,Lambda,bSwirl):
""" Finds k to match a given Ct
Solves equation (32) of [1]
Based on script fGetInductions_Prescribed_CT_CQ2.py
"""
vk = np.zeros(vr_bar.shape)
vlambda_r = Lambda * vr_bar
if bSwirl:
for ir,(r_bar,lambda_r,Ct) in enumerate(zip(vr_bar,vlambda_r,vCt)):
if Ct>0 and r_bar>0:
res=sciopt.minimize_scalar(lambda k:np.abs(k*(1+k/(4*lambda_r**2))-Ct), bounds=[0,1.8], method='bounded')
vk[ir] = res.x
else:
vk = vCt
return vk
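# Hedged example (editor's addition): recover the non-dimensional circulation k from a
# prescribed Ct and check that it satisfies Eq. (32) of [1], Ct = k*(1 + k/(4*lambda_r^2)).
# The tip-speed ratio and radial grid are illustrative values only.
if __name__ == "__main__":
    _Lambda = 7.0
    _vr_bar = np.linspace(0.05, 1.0, 40)   # avoid r_bar=0 in the check below
    _Ct = Ct_const_cutoff(0.737, 0.11, _vr_bar, r_bar_tip=0.9)
    _k = CirculationFromPrescribedCt(_vr_bar, _Ct, _Lambda, bSwirl=True)
    _Ct_back = _k*(1 + _k/(4*(_Lambda*_vr_bar)**2))
    print(np.max(np.abs(_Ct_back - _Ct)))  # should be close to zero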
def InductionsFromCirculation_VC_Cont(vr_bar,lambda_r,k,bSwirl,method='analytical',bHighThrustCorr=True,Cq=None):
"""
Based on "Continuous formulation". Based on script fInductionCoefficientsCylinders.py
    If Cq is provided, an attempt is made to use it.
    This is not recommended: it results in lower tangential inductions,
    and the circulation obtained from Cq is not smooth.
    For now the continuous results seem a bit off on the axial induction compared to the discrete ones;
    the tangential inductions appear to be the same for the continuous and discrete cases.
"""
# Critical values for high-thrust Spera correction
ac = 0.34
Ct_c = 4*ac*(1-ac)
misc={}
# Safety
if lambda_r[0]==0:
lambda_r[0]=lambda_r[1]*0.01;
# --- Finding Cylinders intensities
if method.lower()=='analytical':
if bSwirl:
a_prime = k/(4*lambda_r**2) # NOTE: entirely determined from Gamma! Eq.(38)
Ir=np.arange(len(k)-1,-1,-1)
Ct_rot = -8*sciint.cumtrapz(lambda_r[Ir]**2*a_prime[Ir]**2/vr_bar[Ir], vr_bar[Ir])# Eq.(6)
Ct_rot = np.concatenate(([0],Ct_rot))
Ct_rot=Ct_rot[Ir];
else:
a_prime = k*0
Ct_rot = k*0
Ct_KJ = k*(1+a_prime)
Ct_eff = Ct_KJ-Ct_rot
a=np.zeros(vr_bar.shape)
if bHighThrustCorr:
Icorr=Ct_eff> Ct_c
Inorm=Ct_eff<=Ct_c
# Spera correction
a[Icorr]=(Ct_eff[Icorr]-4*ac**2)/(4*(1-2*ac));
a[Inorm]=1/2*(1-np.sqrt(1-Ct_eff[Inorm]))
else:
a=1/2*(1-np.sqrt(1-Ct_eff));
# Using Cq
if Cq is not None:
a_2 = 1 - lambda_r*Cq/k
k2 = Cq*lambda_r/(1-a)
a_prime = k2/(4*lambda_r**2)
misc['k_Cq'] = k2
else:
raise NotImplementedError('')
misc['Ct_KJ'] = Ct_KJ
misc['Ct_eff'] = Ct_eff
misc['Ct_rot'] = Ct_rot
return a,a_prime,misc
def WakeVorticityFromCirculation_Cont(r_cp,Gamma_cp,R,U0,Omega,bSwirl,method='analytical',bHighThrustCorr=True):
"""
Uses continuous formulation to find wake vorticity that matches a given circulation distribution
(need to solve for pitch angle, and hence induction)
"""
r_cp = np.asarray(r_cp).ravel()
Gamma_cp = np.asarray(Gamma_cp).ravel()
if r_cp[0]==0:
r_cp[0]=r_cp[1]*0.5;
# Non dimensional parameters
k = Omega*Gamma_cp/(np.pi*U0**2)
vr_bar = r_cp/R
lambda_r = Omega*r_cp/U0
# Finding inductions
a,a_prime,misc= InductionsFromCirculation_VC_Cont(vr_bar,lambda_r,k,bSwirl,method=method,bHighThrustCorr=bHighThrustCorr)
# Computing convection
misc['Vz'] = U0*(1-2*a)
misc['h'] = misc['Vz']*2*np.pi/(Omega*(1+2*a_prime))
misc['a'] = a
misc['a_prime'] = a_prime
misc['Gamma_cp'] = Gamma_cp
misc['r_cp'] = r_cp
# Vortex intensities
Gamma_tilde = Gamma_cp - np.concatenate((Gamma_cp[1:],[0])) #Gamma_tilde = Gamma_i-Gamma_{i+1}
gamma_t = - Gamma_tilde/misc['h']
if bSwirl:
gamma_l = Gamma_tilde/(2*np.pi*r_cp)
Gamma_r = - Gamma_cp[0]
else:
gamma_l = 0
Gamma_r = 0
return gamma_t,gamma_l,Gamma_r,misc
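# Hedged example (editor's addition): wake vorticity for a rotor carrying a circulation
# distribution derived from a prescribed Ct. Rotor radius, wind speed and tip-speed ratio
# are illustrative values only.
if __name__ == "__main__":
    _R, _U0, _Lambda = 50.0, 10.0, 7.0
    _Omega = _Lambda*_U0/_R
    _r_cp = np.linspace(0.05, 1.0, 40)*_R
    _Ct = Ct_const_cutoff(0.737, 0.11, _r_cp/_R, r_bar_tip=0.9)
    _k = CirculationFromPrescribedCt(_r_cp/_R, _Ct, _Lambda, bSwirl=True)
    _Gamma = _k*np.pi*_U0**2/_Omega     # invert k = Omega*Gamma/(pi*U0^2)
    _gamma_t, _gamma_l, _Gamma_r, _misc = WakeVorticityFromCirculation_Cont(_r_cp, _Gamma, _R, _U0, _Omega, bSwirl=True)
    print(np.mean(_misc['a']), np.mean(_misc['a_prime']))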
def WakeVorticityFromCirculation_Discr(r_cp,Gamma_cp,R,U0,Omega,nB,bSwirl,method='analytical',bTanInd_k=True,bHighThrustCorr=True,r_cyl=None):
"""
Implements discrete algorithm from [1] to find gamma_t, and the inductions
"""
r_cp = np.asarray(r_cp).ravel()
Gamma_cp = np.asarray(Gamma_cp).ravel()
if r_cyl is None:
r_cyl = np.zeros(r_cp.shape)
if len(r_cyl)==1:
r_cyl=np.array([R])
else:
r_cyl[:-1] = (r_cp[1:] + r_cp[:-1])/2
r_cyl[-1] = r_cp[-1] + (r_cp[-1]-r_cp[-2])/2
else:
r_cyl = np.asarray(r_cyl).ravel()
misc={}
# --- Checks
if np.amax(r_cyl) < np.amax(r_cp):
warnings.warn('Cylinders are assumed to enclose the control points')
import pdb; pdb.set_trace()
if r_cyl[0] == 0:
raise Exception('First cylinder at 0, impossible')
## Interpolating Circulation on the control points
Gamma_cp = Gamma_cp * nB # IMPORTANT
lambda_rcp = Omega*r_cp/U0
k_r = Gamma_cp*Omega/(np.pi*U0**2) # Equation (27) of [Superp]
Ct_KJ_cp = k_r * (1+k_r / (4*lambda_rcp**2)) # Equation (32) of [Superp]
## Some init
n = len(r_cyl)
# Critical values for high-thrust Spera correction
ac = 0.34
Ct_c = 4*ac*(1-ac)
# --- Finding Cylinders intensities
if method.lower()=='analytical':
# --- Case 1: Using k / ct to determine gamma
# Using a_prime to correct Ct
# Tangential convection velocity
if not bTanInd_k:
ap_cyl_conv = 0 * Gamma_cp
else:
# Tangential convection velocity as the mean of velocity on both side of the cylinder)
# See Equation (26) of [1]: a'_c,i = (\Gamma_i+\Gamma_i+1)/(4 pi Omega R_i^2)
ap_cyl_conv = np.zeros(Gamma_cp.shape)
ap_cyl_conv[:-1] = (Gamma_cp[:-1] + Gamma_cp[1:])/(4*np.pi*Omega*r_cyl[:-1])**2
ap_cyl_conv[-1] = Gamma_cp[-1]/(4*np.pi*Omega*r_cyl[-1])**2
# --- Allocation
b = np.zeros(n) # Cummulative sum of gamma_bar
S = np.zeros(n) # Term under the square root
Sbis = np.zeros(n) # Term under the square root, account
Ctrot = np.zeros(n)
Ctrot_loc = np.zeros(n)
Cti = np.zeros(n)
Cteff = np.zeros(n)
gamma_t_bar = np.zeros(n)
# --- Init
C = Gamma_cp*Omega/(np.pi*U0**2)*(1 + ap_cyl_conv)
lambda_R = Omega * r_cyl / U0
# Looping through radii in reversed order
        for i in np.arange(n-1,-1,-1):
"""
Test DOE Driver and Generators.
"""
import unittest
import os
import os.path
import glob
import csv
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.paraboloid_distributed import DistParab
from openmdao.test_suite.groups.parallel_groups import FanInGrouped
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.general_utils import run_driver, printoptions
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class ParaboloidArray(om.ExplicitComponent):
"""
    Evaluates the equation f(x,y) = (x-3)^2 + x*y + (y+4)^2 - 3,
    where x and y are xy[0] and xy[1] respectively.
"""
def setup(self):
self.add_input('xy', val=np.array([0., 0.]))
self.add_output('f_xy', val=0.0)
def compute(self, inputs, outputs):
"""
f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
"""
x = inputs['xy'][0]
y = inputs['xy'][1]
outputs['f_xy'] = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
class ParaboloidDiscrete(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=10, tags='xx')
self.add_discrete_input('y', val=0, tags='yy')
self.add_discrete_output('f_xy', val=0, tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
discrete_outputs['f_xy'] = int(f_xy)
class ParaboloidDiscreteArray(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=np.ones((2, )), tags='xx')
self.add_discrete_input('y', val=np.ones((2, )), tags='yy')
self.add_discrete_output('f_xy', val=np.ones((2, )), tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
        discrete_outputs['f_xy'] = f_xy.astype(int)
class TestErrors(unittest.TestCase):
def test_generator_check(self):
prob = om.Problem()
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.FullFactorialGenerator)
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but a class object was found: FullFactorialGenerator")
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.Problem())
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but an instance of Problem was found.")
def test_lhc_criterion(self):
with self.assertRaises(ValueError) as err:
om.LatinHypercubeGenerator(criterion='foo')
self.assertEqual(str(err.exception),
"Invalid criterion 'foo' specified for LatinHypercubeGenerator. "
"Must be one of ['center', 'c', 'maximin', 'm', 'centermaximin', "
"'cm', 'correlation', 'corr', None].")
@use_tempdirs
class TestDOEDriver(unittest.TestCase):
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
def test_no_generator(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 0)
def test_list(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# create DOEDriver using provided list of cases
prob.driver = om.DOEDriver(cases)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_list_errors(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# data does not contain a list
cases = {'desvar': 1.0}
with self.assertRaises(RuntimeError) as err:
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
self.assertEqual(str(err.exception), "Invalid DOE case data, "
"expected a list but got a dict.")
# data contains a list of non-list
cases = [{'desvar': 1.0}]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n{'desvar': 1.0}")
# data contains a list of list, but one has the wrong length
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.y', 1., 'foo']]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n"
"[['p1.x', 1.0], ['p2.y', 1.0, 'foo']]")
# data contains a list of list, but one case has an invalid design var
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"'p2.z' is not a valid design variable:\n"
"[['p1.x', 1.0], ['p2.z', 1.0]]")
# data contains a list of list, but one case has multiple invalid design vars
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.y', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"['p1.y', 'p2.z'] are not valid design variables:\n"
"[['p1.y', 1.0], ['p2.z', 1.0]]")
def test_csv(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for (var, val) in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_csv_array(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', [0., 1.]))
model.add_subsystem('p2', om.IndepVarComp('y', [0., 1.]))
model.add_subsystem('comp1', Paraboloid())
model.add_subsystem('comp2', Paraboloid())
model.connect('p1.x', 'comp1.x', src_indices=[0])
model.connect('p2.y', 'comp1.y', src_indices=[0])
model.connect('p1.x', 'comp2.x', src_indices=[1])
model.connect('p2.y', 'comp2.y', src_indices=[1])
model.add_design_var('p1.x', lower=0.0, upper=1.0)
model.add_design_var('p2.y', lower=0.0, upper=1.0)
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = [
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 1.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 16)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['p1.x'][0], expected_case['p1.x'][0])
self.assertEqual(outputs['p2.y'][0], expected_case['p2.y'][0])
self.assertEqual(outputs['p1.x'][1], expected_case['p1.x'][1])
self.assertEqual(outputs['p2.y'][1], expected_case['p2.y'][1])
def test_csv_errors(self):
# test invalid file name
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator(1.23)
self.assertEqual(str(err.exception),
"'1.23' is not a valid file name.")
# test file not found
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator('nocases.csv')
self.assertEqual(str(err.exception),
"File not found: nocases.csv")
# create problem and a list of DOE cases
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# test CSV file with an invalid design var
header = [var for var, _ in cases[0]]
header[-1] = 'foobar'
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"'foobar' is not a valid design variable.")
# test CSV file with invalid design vars
header = [var + '_bad' for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"%s are not valid design variables." %
str(header))
# test CSV file with invalid values
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([np.ones((2, 2)) * val for _, val in case])
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= LooseVersion("1.14"):
opts = {'legacy': '1.13'}
else:
opts = {}
with printoptions(**opts):
# have to use regex to handle differences in numpy print formats for shape
msg = f"Error assigning p1.x = \[ 0. 0. 0. 0.\]: could not broadcast " \
f"input array from shape \(4.*\) into shape \(1.*\)"
with self.assertRaisesRegex(ValueError, msg):
prob.run_driver()
def test_uniform(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=5, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# all values should be between -10 and 10, check expected values for seed = 0
expected = [
{'x': np.array([0.97627008]), 'y': np.array([4.30378733])},
{'x': np.array([2.05526752]), 'y': np.array([0.89766366])},
{'x': np.array([-1.52690401]), 'y': np.array([2.91788226])},
{'x': np.array([-1.24825577]), 'y': np.array([7.83546002])},
{'x': np.array([9.27325521]), 'y': np.array([-2.33116962])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 5)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y'):
assert_near_equal(outputs[name], expected_case[name], 1e-4)
def test_full_factorial(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_full_factorial_factoring(self):
class Digits2Num(om.ExplicitComponent):
"""
            Combines two 2-element vectors into a single 4-digit number.
            For single-digit integers this always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 16)
# Testing uniqueness. If all elements are unique, it should be the same length as the
# number of cases
self.assertEqual(len(set(objs)), 16)
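        # Note: levels=2 applied to two 2-element array design variables gives
        # 2**4 = 16 combinations, which is where the expected count of 16 comes from.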
def test_full_factorial_array(self):
prob = om.Problem()
model = prob.model
model.set_input_defaults('xy', np.array([0., 0.]))
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=np.array([-10., -50.]), upper=np.array([10., 50.]))
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'xy': np.array([-10., -50.])},
{'xy': np.array([0., -50.])},
{'xy': np.array([10., -50.])},
{'xy': np.array([-10., 0.])},
{'xy': np.array([0., 0.])},
{'xy': np.array([10., 0.])},
{'xy': np.array([-10., 50.])},
{'xy': np.array([0., 50.])},
{'xy': np.array([10., 50.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['xy'][0], expected_case['xy'][0])
self.assertEqual(outputs['xy'][1], expected_case['xy'][1])
def test_full_fact_dict_levels(self):
# Specifying levels only for one DV, the other is defaulted
prob = om.Problem()
model = prob.model
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
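        # These values are consistent with the Paraboloid test component,
        # f_xy = (x - 3)**2 + x*y + (y + 4)**2 - 3; e.g. f(0, 0) = 9 + 16 - 3 = 22.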
# size = prob.comm.size
# rank = prob.comm.rank
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels={"y": 3}))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 6)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_case['x'])
self.assertEqual(outputs['y'], expected_case['y'])
self.assertEqual(outputs['f_xy'], expected_case['f_xy'])
def test_generalized_subset(self):
# All DVs have the same number of levels
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels=2, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.0]), 'y': np.array([0.0]), 'f_xy': np.array([22.0])},
{'x': np.array([1.0]), 'y': np.array([1.0]), 'f_xy': np.array([27.0])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 2)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_generalized_subset_dict_levels(self):
        # Number of levels specified individually for all DVs (scalars).
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 3, 'y': 6}, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.])},
{'x': np.array([0.]), 'y': np.array([0.4]), 'f_xy': np.array([25.36])},
{'x': np.array([0.]), 'y': np.array([0.8]), 'f_xy': np.array([29.04])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.])},
{'x': np.array([1.]), 'y': np.array([0.4]), 'f_xy': np.array([20.76])},
{'x': np.array([1.]), 'y': np.array([0.8]), 'f_xy': np.array([24.84])},
{'x': np.array([0.5]), 'y': np.array([0.2]), 'f_xy': np.array([20.99])},
{'x': np.array([0.5]), 'y': np.array([0.6]), 'f_xy': np.array([24.71])},
{'x': np.array([0.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertAlmostEqual(outputs[name][0], expected_case[name][0])
def test_generalized_subset_array(self):
# Number of levels specified individually for all DVs (arrays).
class Digits2Num(om.ExplicitComponent):
"""
            Combines two 2-element vectors into a single 4-digit number.
            For single-digit integers this always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 5, 'y': 8}, reduction=14))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 104) # The number can be verified with standalone pyDOE2
# Testing uniqueness. If all elements are unique, it should be the same length as the number of cases
self.assertEqual(len(set(objs)), 104)
def test_plackett_burman(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.PlackettBurmanGenerator())
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_box_behnken(self):
upper = 10.
center = 1
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp(), promotes=['*'])
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
indep.add_output('z', 0.0)
model.add_subsystem('comp', om.ExecComp('a = x**2 + y - z'), promotes=['*'])
model.add_design_var('x', lower=0., upper=upper)
model.add_design_var('y', lower=0., upper=upper)
model.add_design_var('z', lower=0., upper=upper)
model.add_objective('a')
prob.driver = om.DOEDriver(om.BoxBehnkenGenerator(center=center))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
# The Box-Behnken design for 3 factors involves three blocks, in each of
        # which 2 factors are varied through the 4 possible combinations of high & low.
# It also includes centre points (all factors at their central values).
# ref: https://en.wikipedia.org/wiki/Box-Behnken_design
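        # In general the design has 4 runs for each pair of factors plus the centre
        # points, i.e. 2*k*(k-1) + center runs; for k=3 and center=1 that is 12 + 1 = 13,
        # which is what the assertion below checks.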
self.assertEqual(len(cases), (3*4)+center)
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([5.]), 'z': np.array([5.])},
]
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'z'):
self.assertEqual(outputs[name], expected_case[name])
def test_latin_hypercube(self):
samples = 4
bounds = np.array([
[-1, -10], # lower bounds for x and y
[1, 10] # upper bounds for x and y
])
xlb, xub = bounds[0][0], bounds[1][0]
ylb, yub = bounds[0][1], bounds[1][1]
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=xlb, upper=xub)
model.add_design_var('y', lower=ylb, upper=yub)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.options['generator'] = om.LatinHypercubeGenerator(samples=4, seed=0)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
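        # For example, with x in [-1, 1] and samples=4 the buckets are
        # [-1, -0.5), [-0.5, 0), [0, 0.5) and [0.5, 1); the first expected x value
        # of roughly -0.199 below therefore lands in bucket 1.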
all_buckets = set(range(samples))
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'x': np.array([-0.19861831]), 'y': np.array([-6.42405317])},
{'x': np.array([0.2118274]), 'y': np.array([9.458865])},
{'x': np.array([0.71879361]), 'y': np.array([3.22947057])},
{'x': np.array([-0.72559325]), 'y': np.array([-2.27558409])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['x']
y = outputs['y']
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['x'], 1e-4)
assert_near_equal(y, expected_case['y'], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_array(self):
samples = 4
bounds = np.array([
[-10, -50], # lower bounds for x and y
[10, 50] # upper bounds for x and y
])
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('xy', np.array([50., 50.])), promotes=['*'])
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=bounds[0], upper=bounds[1])
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=4, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
all_buckets = set(range(samples))
xlb, xub = bounds[0][0], bounds[1][0]
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
ylb, yub = bounds[0][1], bounds[1][1]
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'xy': np.array([-1.98618312, -32.12026584])},
{'xy': np.array([2.118274, 47.29432502])},
{'xy': np.array([7.18793606, 16.14735283])},
{'xy': np.array([-7.25593248, -11.37792043])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['xy'][0]
y = outputs['xy'][1]
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['xy'][0], 1e-4)
assert_near_equal(y, expected_case['xy'][1], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_center(self):
samples = 4
upper = 10.
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp())
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
model.add_subsystem('comp', Paraboloid())
model.connect('indep.x', 'comp.x')
model.connect('indep.y', 'comp.y')
model.add_design_var('indep.x', lower=0., upper=upper)
model.add_design_var('indep.y', lower=0., upper=upper)
model.add_objective('comp.f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=samples, criterion='c'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), samples)
# the sample space for each variable (0 to upper) should be divided into
# equal size buckets and each variable should have a value in each bucket
bucket_size = upper / samples
all_buckets = set(range(samples))
x_buckets_filled = set()
y_buckets_filled = set()
        # with criterion of 'center', each value should be in the center of its bucket
valid_values = [round(bucket_size * (bucket + 1 / 2), 3) for bucket in all_buckets]
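        # With upper=10 and samples=4 the bucket size is 2.5, so the accepted centre
        # values are 1.25, 3.75, 6.25 and 8.75.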
for case in cases:
outputs = cr.get_case(case).outputs
x = float(outputs['indep.x'])
y = float(outputs['indep.y'])
x_buckets_filled.add(int(x/bucket_size))
y_buckets_filled.add(int(y/bucket_size))
self.assertTrue(round(x, 3) in valid_values, '%f not in %s' % (x, valid_values))
self.assertTrue(round(y, 3) in valid_values, '%f not in %s' % (y, valid_values))
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_record_bug(self):
# There was a bug that caused values to be recorded in driver_scaled form.
prob = om.Problem()
model = prob.model
ivc = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
ivc.add_output('x', val=1.)
model.add_subsystem('obj_comp', om.ExecComp('y=2*x'), promotes=['*'])
model.add_subsystem('con_comp', om.ExecComp('z=3*x'), promotes=['*'])
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.recording_options['includes'] = ['*']
model.add_design_var('x', lower=0., upper=10., ref=3.0)
model.add_constraint('z', lower=2.0, scaler=13.0)
model.add_objective('y', scaler=-1)
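        # With ref=3.0 and the scalers above the driver works with scaled values
        # (e.g. x/3), but the recorder should store the unscaled model values,
        # hence the checks of 10.0 / 20.0 / 30.0 below.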
prob.setup(check=True)
prob.run_driver()
cr = om.CaseReader("cases.sql")
final_case = cr.list_cases('driver', out_stream=None)[-1]
outputs = cr.get_case(final_case).outputs
assert_near_equal(outputs['x'], 10.0, 1e-7)
assert_near_equal(outputs['y'], 20.0, 1e-7)
assert_near_equal(outputs['z'], 30.0, 1e-7)
def test_discrete_desvar_list(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', 5), ('y', 1)],
[('x', 3), ('y', 6)],
[('x', -1), ('y', 3)],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_discrete_desvar_alltypes(self):
# Make sure we can handle any allowed type for discrete variables.
class PassThrough(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val='abc')
self.add_discrete_output('y', val='xyz')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
discrete_outputs['y'] = discrete_inputs['x']
prob = om.Problem()
model = prob.model
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 'abc')
model.add_subsystem('parab', PassThrough(), promotes=['*'])
model.add_design_var('x')
model.add_constraint('y')
my_obj = Paraboloid()
samples = [[('x', 'abc'), ],
[('x', None), ],
[('x', my_obj, ), ]
]
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = ['abc', None]
for case, expected_value in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_value)
# Can't read/write objects through SQL case.
self.assertEqual(prob['y'], my_obj)
def test_discrete_array_output(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_discrete_output('x', np.ones((2, ), dtype=int))
        indeps.add_discrete_output('y', np.ones((2, ), dtype=int))
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x', np.array([5, 1]))
model.add_design_var('y', np.array([1, 4]))
model.add_objective('f_xy')
recorder = om.SqliteRecorder("cases.sql")
prob.driver.add_recorder(recorder)
prob.add_recorder(recorder)
prob.recording_options['record_inputs'] = True
prob.setup()
prob.run_driver()
prob.record("end")
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('problem', out_stream=None)
case = cr.get_case('end')
inputs = case.inputs
outputs = case.outputs
for name in ('x', 'y'):
self.assertTrue(isinstance(inputs[name], np.ndarray))
self.assertTrue(inputs[name].shape, (2,))
self.assertTrue(isinstance(outputs[name], np.ndarray))
self.assertTrue(outputs[name].shape, (2,))
def test_discrete_arraydesvar_list(self):
prob = om.Problem()
model = prob.model
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', np.array([5, 1])), ('y', np.array([1, 4]))],
[('x', np.array([3, 2])), ('y', np.array([6, -3]))],
[('x', np.array([-1, 0])), ('y', np.array([3, 5]))],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
        prob.set_val('x', np.ones((2, ), dtype=int))
        prob.set_val('y', np.ones((2, ), dtype=int))
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': np.array([5, 1]), 'y': np.array([1, 4]), 'f_xy': np.array([31, 69])},
{'x': np.array([3, 2]), 'y': np.array([6, -3]), 'f_xy': np.array([115, -7])},
{'x': np.array([-1, 0]), 'y': np.array([3, 5]), 'f_xy': np.array([59, 87])},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name][0], expected_case[name][0])
self.assertEqual(outputs[name][1], expected_case[name][1])
def test_discrete_desvar_csv(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = '\n'.join([" x , y",
"5, 1",
"3, 6",
"-1, 3",
])
# this file contains design variable inputs in CSV format
with open('cases.csv', 'w') as f:
f.write(samples)
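        # Format note: the first row names the design variables (extra whitespace
        # around the names is tolerated) and each following row gives one value per
        # variable for a single case.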
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_desvar_indices(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x**2',
x=np.array([1., 2., 3.]),
y= | np.zeros(3) | numpy.zeros |
#
# Tests the utility functions.
#
import numpy as np
import os
import pybamm
import unittest
class TestUtil(unittest.TestCase):
"""
Test the functionality in util.py
"""
def test_load_function(self):
# Test filename ends in '.py'
with self.assertRaisesRegex(
ValueError, "Expected filename.py, but got doesnotendindotpy"
):
pybamm.load_function("doesnotendindotpy")
# Test exception if absolute file not found
with self.assertRaisesRegex(
ValueError, "is an absolute path, but the file is not found"
):
nonexistent_abs_file = os.path.join(os.getcwd(), "i_dont_exist.py")
pybamm.load_function(nonexistent_abs_file)
# Test exception if relative file not found
with self.assertRaisesRegex(
ValueError, "cannot be found in the PyBaMM directory"
):
pybamm.load_function("i_dont_exist.py")
# Test exception if relative file found more than once
with self.assertRaisesRegex(
ValueError, "found multiple times in the PyBaMM directory"
):
pybamm.load_function("__init__.py")
# Test exception if no matching function found in module
with self.assertRaisesRegex(ValueError, "No function .+ found in module .+"):
pybamm.load_function("process_symbol_bad_function.py")
# Test function load with absolute path
abs_test_path = os.path.join(
os.getcwd(),
"tests",
"unit",
"test_parameters",
"data",
"process_symbol_test_function.py",
)
self.assertTrue(os.path.isfile(abs_test_path))
func = pybamm.load_function(abs_test_path)
self.assertEqual(func(2), 246)
# Test function load with relative path
func = pybamm.load_function("process_symbol_test_function.py")
self.assertEqual(func(3), 369)
def test_rmse(self):
self.assertEqual(pybamm.rmse(np.ones(5), np.zeros(5)), 1)
self.assertEqual(pybamm.rmse(2 * np.ones(5), np.zeros(5)), 2)
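        # rmse(x, y) = sqrt(mean((x - y)**2)), so a constant offset of c between the
        # two vectors gives exactly |c|, as checked above.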
self.assertEqual(pybamm.rmse(2 * | np.ones(5) | numpy.ones |
import math
import numpy as np
import os
import random
from scipy.stats import mode
def bald(X_Pool_Dropout, num_classes, model, batch_size=32, dropout_iterations=10):
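    # BALD (Bayesian Active Learning by Disagreement) scores each pool point by an
    # estimate of the mutual information between its prediction and the model
    # parameters, I[y; w | x] ~= H[mean_t p_t(y | x)] - mean_t H[p_t(y | x)],
    # where t indexes the `dropout_iterations` stochastic forward passes; the arrays
    # below are set up to accumulate the two terms of that difference.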
    print(X_Pool_Dropout[0].shape)
score_All = np.zeros(shape=(X_Pool_Dropout[0].shape[0], num_classes))
All_Entropy_Dropout = | np.zeros(shape=X_Pool_Dropout[0].shape[0]) | numpy.zeros |
"""SMAL Renderer object responsible for visualising a SMAL sequence"""
from data.data_loader import extract_npz, DataSources
from vis.mesh_viewer import BBox
import torch
from smal_fitter.smbld_model.smbld_mesh import SMBLDMesh
from scipy import signal
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.tri import Triangulation
from vis.utils import save_animation
from tqdm import tqdm
import os
path_join = os.path.join
class SMALRenderer:
out_dir = r"C:\Users\Ollie\Videos\iib_project\KDN meshes"
def __init__(self, src, freq=30, crop=None, smooth=False):
self.src = src
self.freq = freq
data = extract_npz(path_join(DataSources.smal_outputs, src + ".npz"))
self.data = data
self.img_names = data['imgname']
self.has_bbox = data['has_bbox']
n_frames, *_ = data['pose'].shape
self.n_frames = n_frames
pose = torch.from_numpy(data['pose'].reshape(n_frames, 35, 3))
smbld = SMBLDMesh(n_batch=n_frames)
if smooth:
pose = torch.from_numpy(signal.savgol_filter(pose, window_length=9, polyorder=5, axis=0)) # smooth pose
        else:
            pass  # no smoothing requested; keep the raw pose
self.n_betas = 26
mean_betas = data['betas'].mean(axis=0).astype(np.float64)
for n, b in enumerate(mean_betas):
smbld.multi_betas[n] = b
# temp fix to add global rotation if desired
# j = 2
# smalx.global_rot[:, j] = np.pi - pose[:, 0, j] # only keep z rotation globally
smbld.joint_rot[:] = pose[:, 1:]
smbld.joint_rot[:, 0] *= 0 # rear-to-front rotation to 0 for now
v, f, J = smbld.get_verts(return_joints=True)
if crop is not None:
crop_frames = int(crop * freq)
v = v[:crop_frames]
            J = J[:crop_frames]
self.n_frames = crop_frames
self.v, self.f, self.J = v.detach().numpy(), f, J.detach().numpy()
def plot(self, azim=90, elev=0, playback=1.0, ext=""):
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
X, Y, Z = np.swapaxes(self.v[0], 0, 1)
b = BBox(X, Y, Z)
b.equal_3d_axes(ax, zoom=1.75)
ax.axis("off")
ax.view_init(azim=azim, elev=elev)
# PLOT FACES
tri = Triangulation(X, Y, triangles=self.f)
p = tqdm(total=self.n_frames)
plot = [ax.plot([], [])[0]]
def anim(i):
plot[0].remove()
X, Y, Z = | np.swapaxes(self.v[i], 0, 1) | numpy.swapaxes |
r"""
Prototype for object model backend for the libNeuroML project
"""
import numpy as np
import neuroml
class ArrayMorphology(neuroml.Morphology):
"""Core of the array-based object model backend.
    Provides the core arrays: vertices, connectivity, node_types, etc.
The connectivity array is a list of indices pointing to which
other element an element is attached. So for instance,
connectivity[3] is an integer with the index of the section
it refers to in the Backend
- EXAMPLE:
Vertices[3] and connectivity[3] refer to the vertex
and connectivity of the same node.
.. note::
The root section by convention has connectivity == -1.
"""
def __init__(
self,
vertices=[],
connectivity=[],
id=None,
node_types=None,
name=None,
physical_mask=None,
fractions_along=None,
):
super(ArrayMorphology, self).__init__()
self.connectivity = np.array(connectivity)
self.vertices = np.array(vertices)
self.id = id
if np.any(physical_mask):
self.physical_mask = np.array(physical_mask)
else:
self.physical_mask = np.zeros(len(connectivity), dtype="bool")
if np.any(node_types):
self.node_types = np.array(node_types)
else:
self.node_types = np.zeros(len(connectivity), dtype="int32")
if np.any(fractions_along):
self.fractions_along = np.array(fractions_along)
else:
self.fractions_along = np.zeros(len(connectivity), dtype="int32")
# it will need a reference to its parent?
self.segments = SegmentList(self)
assert self.valid_morphology, "invalid_morphology"
@property
def valid_morphology(self):
all_nodes = self.__all_nodes_satisfied
all_vertices = self.__all_vertices_present
return all_nodes and all_vertices
@property
def __all_vertices_present(self):
try:
all_vertices_present = self.vertices.shape[1] == 4
except:
all_vertices_present = False
num_vertices = len(self.vertices)
return all_vertices_present or num_vertices == 0
@property
def valid_ids(self):
valid_flag = True
for internal_id in self.segments.instantiated_segments.keys():
external_id = self.segments.instantiated_segments[internal_id].id
valid_flag = (internal_id == external_id) * valid_flag
return valid_flag
@property
def __all_nodes_satisfied(self):
m = self.vertices.shape[0]
n = self.connectivity.shape[0]
p = self.node_types.shape[0]
all_nodes_satisfied = m == n == p
return all_nodes_satisfied
@property
def root_index(self):
return np.where(self.connectivity == -1)[0][0]
@property
def root_vertex(self):
return self.vertices[self.root_index]
@property
def num_vertices(self):
return len(self.vertices)
@property
def physical_indices(self):
"""returns indices of vertices which are physical"""
physical_indices = np.where(self.physical_mask == 0)[0]
return physical_indices
def children(self, index):
"""Returns an array with indexes of children"""
return np.where(self.connectivity == index)
def to_root(self, index):
"""
Changes the connectivity matrix
so that the node at index becomes the root
"""
old_root_index = self.root_index
new_root_index = index
# do a tree traversal:
parent_index = self.connectivity[index]
grandparent_index = self.connectivity[parent_index]
while index != old_root_index:
self.connectivity[parent_index] = index
index = parent_index
parent_index = grandparent_index
grandparent_index = self.connectivity[parent_index]
self.connectivity[new_root_index] = -1
def parent_id(self, index):
"""Return the parent index for the given index"""
return self.connectivity[index]
def vertex(self, index):
"""Return vertex corresponding to index in morphology"""
return self.vertices[index]
def __len__(self):
return len(self.connectivity)
def pop(self, index):
"""
TODO:This is failing tests (understandably) - need to fix!
Deletes a node from the morphology, its children become
children of the deleted node's parent.
"""
        self.vertices = np.delete(self.vertices, index, axis=0)
self.node_types = np.delete(self.node_types, index)
self.connectivity = np.delete(self.connectivity, index)
k = 0
for i in self.connectivity:
if i >= index:
self.connectivity[k] = i - 1
k += 1
pass
def to_neuroml_morphology(self, id=""):
morphology = neuroml.Morphology()
morphology.id = id
# need to traverse the tree:
for index in range(self.num_vertices - 1):
seg = self.segment_from_vertex_index(index)
morphology.segments.append(seg)
return morphology
def segment_from_vertex_index(self, index):
parent_index = self.connectivity[index]
node_x = self.vertices[index][0]
node_y = self.vertices[index][1]
node_z = self.vertices[index][2]
node_d = self.vertices[index][3]
parent_x = self.vertices[parent_index][0]
parent_y = self.vertices[parent_index][1]
parent_z = self.vertices[parent_index][2]
parent_d = self.vertices[parent_index][3]
p = neuroml.Point3DWithDiam(x=node_x, y=node_y, z=node_z, diameter=node_d)
d = neuroml.Point3DWithDiam(
x=parent_x, y=parent_y, z=parent_z, diameter=parent_d
)
seg = neuroml.Segment(proximal=p, distal=d, id=index)
if index > 1:
parent = neuroml.SegmentParent(segments=parent_index)
seg.parent = parent
return seg
class SegmentList(object):
"""
This class is a proxy, it returns a segment either
from the arraymorph or if it has already been instantiated
it returns the relevant segment.
"""
def __init__(self, arraymorph):
self.arraymorph = arraymorph
self.instantiated_segments = {}
def __vertex_index_from_segment_index__(self, index):
"""
        The existence of a physical mask means that segment
        and vertex indices fall out of sync. This function returns the
index of the proximal vertex in the vertices array of the arraymorph
which corresponds to the segment index.
"""
physical_mask = self.arraymorph.physical_mask
segment_distal_vertex_indexes = np.where(physical_mask == False)[0] + 1
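        # e.g. with physical_mask = [0, 1, 0, 0] the candidate vertex indices are
        # [0, 2, 3] + 1 = [1, 3, 4], and segment_index simply indexes into that list.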
return segment_distal_vertex_indexes[index]
def __len__(self):
"""
        Override the __len__ magic method to give the total number of
        segments, which is the number of vertices - 1, minus all
floating segments.
"""
num_vertices = self.arraymorph.num_vertices
num_floating = np.sum(self.arraymorph.physical_mask)
num_segments = num_vertices - num_floating - 1
if num_segments < 0:
num_segments = 0
return int(num_segments)
def __iadd__(self, segment_list):
for segment in segment_list:
self.append(segment)
return self
def __getitem__(self, segment_index):
if segment_index in self.instantiated_segments:
neuroml_segment = self.instantiated_segments[segment_index]
else:
vertex_index = self.__vertex_index_from_segment_index__(segment_index)
neuroml_segment = self.arraymorph.segment_from_vertex_index(vertex_index)
self.instantiated_segments[segment_index] = neuroml_segment
return neuroml_segment
def __setitem__(self, index, user_set_segment):
self.instantiated_segments[index] = user_set_segment
def append(self, segment):
"""
Adds a new segment
TODO: Correct connectivity is currently being ignored -
The new segment is always connected to the root node.
"""
dist_vertex_index = len(self.arraymorph.vertices)
prox_vertex_index = dist_vertex_index + 1
prox_x = segment.proximal.x
prox_y = segment.proximal.y
prox_z = segment.proximal.z
prox_diam = segment.proximal.diameter
dist_x = segment.distal.x
dist_y = segment.distal.y
dist_z = segment.distal.z
distal_diam = segment.distal.diameter
prox_vertex = [prox_x, prox_y, prox_z, prox_diam]
dist_vertex = [dist_x, dist_y, dist_z, distal_diam]
if len(self.arraymorph.vertices) > 0:
self.arraymorph.vertices = np.append(
self.arraymorph.vertices, [dist_vertex, prox_vertex], axis=0
)
else:
self.arraymorph.vertices = np.array([dist_vertex, prox_vertex])
self.arraymorph.connectivity = np.append(
self.arraymorph.connectivity, [-1, dist_vertex_index]
)
if len(self.arraymorph.physical_mask) == 0:
self.arraymorph.physical_mask = | np.array([0, 0]) | numpy.array |
# import python libraries
import glob
import numpy as np
import cv2
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from skimage.feature import hog
from scipy.ndimage.measurements import label
### grab images from a directory
### returns np array of images
def read_image_dir(path):
Images = np.array([plt.imread(file) for file in glob.glob(path + '*')])
return Images
### apply color thresholds to image
### returns binarized image
def binarize(img, sobelx_thresh, sobely_thresh, saturation_thresh, value_thresh):
# grab S channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]
# grab v channel
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
v_channel = hsv[:, :, 2]
# grab grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # take the image gradient in the x direction (Sobel x)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    # take the image gradient in the y direction (Sobel y)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
# take absolute value of the derivatives
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# rescale back to 255
scaled_sobelx = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
scaled_sobely = np.uint8(255 * abs_sobely / np.max(abs_sobely))
# translate pixels in each filtered image to binary values
sobelx_binary = | np.zeros_like(scaled_sobelx) | numpy.zeros_like |
import os
import pickle
import sys
import matplotlib.pyplot as plt
import numpy as np
from utils.dumpLoaders import RFdataLoader
# sample size
n_sample = 100
n_classes = 1000
exts = ("png", "pdf")
def main(
analysis_root,
rf_root,
model_names,
labels,
key_layer_and_chmax_blockmax=None,
vmin=None,
vmax=None,
out_dir=None,
):
resnet34_model_names, plainnet34_model_names = model_names
labels_resnets, labels_plainnets = labels
keywords = {"channel", "config", "config-script"}
rfdataloader = RFdataLoader(analysis_root, rf_root, keywords=keywords)
data_resnets = {}
data_plainnets = {}
if out_dir is None:
out_dir = "analysis_numberInClass"
# ResNet34
if key_layer_and_chmax_blockmax is None:
key_layer_and_chmax_blockmax = [
("maxpool", 64, None),
("layer1", 64, 3),
("layer2", 128, 4),
("layer3", 256, 6),
("layer4", 512, 3),
]
for key_layer, ch_max, block_max in key_layer_and_chmax_blockmax:
def get_top_act_classes(model_name, key_layer, block_id):
rfdataloader.set_vis_layer(model_name, key_layer, block_id)
top_act_classes = []
for ch in range(ch_max):
rfdataloader.set_ch_data(ch)
activation_indeces = rfdataloader.activation_indeces
if activation_indeces is not None:
img_idxs = activation_indeces[0][:n_sample]
top_act_classes.append(
np.array(rfdataloader.dataset.targets)[img_idxs]
)
top_act_classes = np.asarray(top_act_classes)
return top_act_classes.copy()
block_list = (
range(block_max)
if block_max is not None
else [
None,
]
)
for block_id in block_list:
print(key_layer, block_id)
for model_name in resnet34_model_names:
res_top_act_classes = get_top_act_classes(
model_name, key_layer, block_id
)
res_uniques = [
np.unique(x, return_counts=True) for x in res_top_act_classes
]
res_unique_len = np.asarray([len(unique) for unique, _ in res_uniques])
key = "{}-{}-{}".format(model_name, key_layer, block_id)
data_resnets[key] = [res_top_act_classes, res_uniques, res_unique_len]
for model_name in plainnet34_model_names:
res_top_act_classes = get_top_act_classes(
model_name, key_layer, block_id
)
res_uniques = [
np.unique(x, return_counts=True) for x in res_top_act_classes
]
res_unique_len = np.asarray([len(unique) for unique, _ in res_uniques])
key = "{}-{}-{}".format(model_name, key_layer, block_id)
data_plainnets[key] = [res_top_act_classes, res_uniques, res_unique_len]
show(
data_resnets,
data_plainnets,
labels_resnets,
labels_plainnets,
key_layer,
block_id,
block_max,
ch_max,
vmin=vmin,
vmax=vmax,
out_dir=out_dir,
)
# save datas
if len(resnet34_model_names) > 0:
config = {
"model_names": resnet34_model_names,
}
save_datas(data_resnets, config, "data_resnets.pkl", out_dir=out_dir)
if len(plainnet34_model_names) > 0:
config = {
"model_names": plainnet34_model_names,
}
save_datas(data_plainnets, config, "data_plainnets.pkl", out_dir=out_dir)
def show(
data_resnets,
data_plainnets,
labels_resnets,
labels_plainnets,
key_layer,
block_id,
block_max,
ch_max,
show_flag=False,
out_dir="analysis_numberInClass",
vmin=None,
vmax=None,
):
res_unique_lens = []
for _, item in data_resnets.items():
res_unique_lens.append(item[2])
res_unique_lens = np.asarray(res_unique_lens)
plain_unique_lens = []
for _, item in data_plainnets.items():
plain_unique_lens.append(item[2])
plain_unique_lens = np.asarray(plain_unique_lens)
if vmin is None or vmax is None:
vmin = n_classes + 1
vmax = -1
for _, item in data_resnets.items():
_vmin = item[2].min()
_vmax = item[2].max()
if _vmin < vmin:
vmin = _vmin
if _vmax > vmax:
vmax = _vmax
for _, item in data_plainnets.items():
_vmin = item[2].min()
_vmax = item[2].max()
if _vmin < vmin:
vmin = _vmin
if _vmax > vmax:
vmax = _vmax
block_list = (
range(block_max)
if block_max is not None
else [
None,
]
)
for block_id in block_list:
res_unique_lens = []
for key, item in data_resnets.items():
if "{}-{}".format(key_layer, block_id) in key:
res_unique_lens.append(item[2])
res_unique_lens = | np.asarray(res_unique_lens) | numpy.asarray |
import os as os
import matplotlib.pyplot as plt
import numpy as np
def get_histogram(plume, bins):
n, bins = np.histogram(plume, bins=bins, density=True)
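    # density=True normalises the counts so the histogram integrates to 1 over the
    # bin range, i.e. it returns a probability density rather than raw counts.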
return n
def plot_side_byside_plumes(ax, x_vals, plume_list, label_list, style_array, color_array, lw,
show_legend=True, legend_loc='best'):
loop_array = range(len(plume_list))
for i in loop_array:
ax.plot(x_vals, plume_list[i], label=label_list[i], color=color_array[i],
linestyle=style_array[i], lw=lw)
if show_legend:
ax.legend(loc=legend_loc, fontsize=13)
else:
ax.set_ylabel('particle density')
ax.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2, 2))
ax.set_xlabel(r'$x/l_Y$')
# specify data and figure save directories
paper_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_data_folder = os.path.join(paper_folder, 'data', 'stencil_dtmvp_comparison')
fig_save_folder = os.path.join(paper_folder, 'plots')
plt.rcParams.update({'font.size': 20})
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rcParams.update({'figure.autolayout': True})
plt.rc('text', usetex=True)
plt.rc('font',**{'family':'serif','serif':['Stix']})
legend_size = 13
fmt='pdf'
dt_coefficients = [1, 4, 10, 20]
for struct in ['exp', 'gauss']:
# load data
save_path = os.path.join(git_data_folder, struct + '_stencil_long_compare.npz')
data = | np.load(save_path) | numpy.load |
# -- VERY PRELIMINARY!!!! -----
import numpy as np
try:
from matplotlib import path
except(ImportError):
pass
region_types = ['circle', 'ellipse', 'polygon', 'box']
def load_regfile(regfile):
reglist = []
f = open(regfile, 'r')
for line in f:
istype = [r in line for r in region_types]
if True in istype:
rtype = region_types[np.where(istype)[0][0]]
numstring = line[line.find("(")+1:line.find(")")].replace('"', '')
if rtype == 'ellipse':
reglist.append(Ellipse(numstring))
elif rtype == 'circle':
reglist.append(Circle(numstring))
elif rtype == 'polygon':
reglist.append(Polygon(numstring))
f.close()
return reglist
def get_image_angle(wcs, at_coord=None):
"""
:returns angle:
        In radians, measured East of North
"""
if at_coord is None:
lon, lat = wcs.wcs.crval[:2]
else:
lon, lat = at_coord
x, y = world2pix(wcs, lon, lat)
lat_offset = lat + 1./3600.
xoff, yoff = world2pix(wcs, lon, lat_offset)
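    # The world coordinate is nudged by 1 arcsec in latitude and mapped back to
    # pixels; the arctan2 of that pixel displacement gives the position angle of
    # celestial North in the pixel frame.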
angle = np.arctan2(xoff-x, yoff-y)
try:
return angle[0]
except(TypeError, IndexError):
return angle
def world2pix(wcs, lon, lat, **extras):
"""Wrap the wcs world2pix method to be less finicky and take our defaults.
"""
try:
x, y = wcs.wcs_world2pix(lon, lat, 0)
except(TypeError):
x, y, _ = wcs.wcs_world2pix(lon, lat, np.array([0]), 0)
return x, y
class Region(object):
def __init__(self, defstring):
self.parse(defstring)
def parse(self, defstring):
raise(NotImplementedError)
@property
def unparse(self):
raise(NotImplementedError)
def plate_scale(self, wcs):
try:
pscale = 3600. * np.sqrt((wcs.wcs.cd[:2, :2]**2).sum(axis=1))
except (AttributeError):
pscale = 3600. * np.abs(wcs.wcs.cdelt[:2])
return pscale.mean()
def print_to(self, fileobj=None, color='green', label=''):
fstring = 'fk5;{0}({1}) # color={2} text={{{3}}}\n'
line = fstring.format(self.shape, self.unparse, color, label)
if fileobj is None:
print(line)
else:
fileobj.write(line)
class Circle(Region):
shape = 'circle'
def parse(self, defstring):
bits = defstring.split(',')
self.ra = float(bits[0])
self.dec = float(bits[1])
self.radius = float(bits[2])
@property
def unparse(self):
return ','.join([str(a) for a in [self.ra, self.dec, self.radius]])
def contains(self, points=None, x=None, y=None, wcs=None, **extras):
if wcs is not None:
# Should actually do the plate scale separately for x and y
# and use great circle distances?
plate_scale = self.plate_scale(wcs)
r = self.radius / plate_scale
cx, cy = world2pix(wcs, self.ra, self.dec)
else:
raise ValueError('No WCS given!!!')
if points is not None:
x, y = points.T
dx, dy = x-cx, y-cy
return (dx**2 + dy**2) <= r**2
class Ellipse(Region):
shape = 'ellipse'
def parse(self, defstring):
bits = defstring.split(',')
self.ra = float(bits[0]) #degrees
self.dec = float(bits[1]) #degrees
self.a = float(bits[2]) #arcsec
self.b = float(bits[3]) #arcsec
self.pa = float(bits[4]) #degrees East of North for a
#swap a and b if poorly defined
if self.b > self.a:
self.a, self.b = self.b, self.a
self.pa = self.pa + 90.
@property
def unparse(self):
return ','.join([str(a) for a in [self.ra, self.dec, self.a, self.b, self.pa]])
def contains(self, points=None, x=None, y=None, wcs=None, **extras):
"""
Determine whether pixel locations x,y are within the ellipse.
Requires that a WCS be given. Assumes tangent projection, and
will fail for large ellipses/images. Assumes N is up and E to
the left.
x, and y are zero indexed, and can be produced by flattened versions of
`y, x = np.indices(image)`
"""
if wcs is not None:
cx, cy = world2pix(wcs, self.ra, self.dec)
# should actually do the plate scale separately for x and y
# and use great circle distances?
plate_scale = self.plate_scale(wcs)
a, b = self.a / plate_scale, self.b / plate_scale
theta_image = get_image_angle(wcs)
else:
raise ValueError('No WCS given!!!')
if points is not None:
x, y = points.T
# --- Translate ---
dx, dy = x - cx, y - cy
# --- Rotate ---
# convert to radians from positive x
theta = | np.deg2rad(self.pa) | numpy.deg2rad |
import os,json
import numpy as np
from mpi4py import MPI
from bond_lattice import bond_lattice
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Read input file
if rank == 0:
with open("input.json", "r") as f:
input_data = json.load(f)
else:
input_data = None
input_data = comm.bcast(input_data, root=0)
procs_per_worker = input_data['workers_per_value']
if size % procs_per_worker!=0:
comm.Barrier()
print("NONFACTORIZING PROCS OR WORKERS! EXITING!")
MPI.Finalize()
exit()
bins = input_data['md']['bins']
N = input_data['md']['n']
THERM = input_data['md']['therm']
STEPS = input_data['md']['steps']
D0 = input_data['potential']['D0']
a0 = input_data['potential']['a0']
AL = input_data['potential']['AL']
rlim = [input_data['potential']['r_min'], input_data['potential']['r_max']]
am_array = input_data['am_array']
T_array = input_data['t_array']
RT_array = input_data['RT_array']
seed = int(1000.0*np.random.uniform())
# Histgram properties
dump_folder = input_data['dump_folder']
potname = "M"
if rank==0:
os.system("mkdir -p %s" % dump_folder)
kb = 8.617e-5
# job splitting. Will iterate over all T,am,AL combinations
E_array=[]
am_RT_T_array = []
for am in am_array:
for RT in RT_array:
for T in T_array:
am_RT_T_array.append([am,RT,T])
color = rank // procs_per_worker
lrank = rank % procs_per_worker
nworkers = size // procs_per_worker
njobs = len(am_RT_T_array)
jobs_per_worker = njobs // nworkers
c=0
jl=np.arange(nworkers+1) * jobs_per_worker
jl[-1] = njobs
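# Example of the split: with size=16 ranks and procs_per_worker=4 there are 4 worker
# groups; color = rank // 4 assigns each rank to a group, and that group handles the
# parameter combinations am_RT_T_array[jl[color]:jl[color+1]].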
if lrank==0:
for i in np.arange(jl[color],jl[color+1]):
am,RT,T = am_RT_T_array[i]
lcomm = comm.Split(color,lrank)
comm.Barrier()
"""
CorrelationType = 0 : Just |b_perp|,b_para,|b| marginals
CorrelationType = 1 : += |b_perp|,b_para joint distribution
CorrelationType = 2 : += b_para joint dist. for (l,l+7) and (l,l+1)
CorrelationType > 0 can give quite large files, GB of data in a batch
"""
CorrelationType = 2 * input_data["JointHist"]
for ji in np.arange(jl[color],jl[color+1]):
am,RT,T = am_RT_T_array[ji]
sim_pbc = bond_lattice(a0=a0,D=D0,AL=AL,RT=RT,N=N,fcc=True,CorrelationType=CorrelationType,\
min_r=rlim[0],max_r=rlim[1],\
bins=bins,am=1.0,steps=STEPS,therm_steps=THERM,seed=seed+rank,rank=rank,\
CentralForce=False)
dump_string = "N%d_S%dk_RT%f_T%f_a%f_%s" % (N,STEPS//1000,RT,T,am,potname)
# collect data from each worker
if CorrelationType>0:
r,H,UmU,CH = sim_pbc.run(T=T*kb,am=am)
else:
r,H,UmU = sim_pbc.run(T=T*kb,am=am)
# Take value and square value for ensemble averages
Ums = np.zeros(12)
Ums[:6] = UmU[-6:]
Ums[-6:] = UmU[-6:]**2
BondPotential = UmU[:-6]
lcomm.Barrier()
data = lcomm.gather(H, root=0)
Udata = lcomm.gather(Ums, root=0)
if CorrelationType>0:
cdata = lcomm.gather(CH, root=0)
if lrank == 0:
H = data[0]
meanstdU = Udata[0] / procs_per_worker
for i in range(1,procs_per_worker):
meanstdU += Udata[i] / procs_per_worker
H += data[i]
meanstdU[-6:] = | np.sqrt(meanstdU[-6:]-meanstdU[:6]**2) | numpy.sqrt |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 4 18:13:18 2018
@author: matteo
"""
"""References
PGPE: Sehnke, Frank, et al. "Policy gradients with parameter-based exploration for
control." International Conference on Artificial Neural Networks. Springer,
Berlin, Heidelberg, 2008.
"""
import numpy as np
from baselines import logger
import warnings
from contextlib import contextmanager
import time
from baselines.common import colorize
@contextmanager
def timed(msg):
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
def eval_trajectory(env, pol, gamma, horizon, feature_fun):
ret = disc_ret = 0
t = 0
ob = env.reset()
done = False
while not done and t<horizon:
s = feature_fun(ob) if feature_fun else ob
a = pol.act(s)
ob, r, done, _ = env.step(a)
ret += r
disc_ret += gamma**t * r
t+=1
return ret, disc_ret, t
#BINARY line search
def line_search_binary(pol, newpol, actor_params, rets, alpha, natgrad,
normalize=True,
use_rmax=True,
use_renyi=True,
max_search_ite=30, rmax=None, delta=0.2, reassign=None):
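    # Bisection on the step-size multiplier epsilon: while the bound keeps improving,
    # low is moved up to epsilon (and epsilon doubles while high is still unset);
    # once a step fails to improve the bound, high is pulled down to epsilon and the
    # interval is halved.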
rho_init = newpol.eval_params()
bound_init = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
n_bounds = len(bound_init)
low = np.zeros(n_bounds)
high = np.nan * np.ones(n_bounds)
#old_delta_bound = 0.
rho_opt = rho_init
i_opt = 0.
delta_bound_opt = np.zeros(n_bounds)
epsilon_opt = np.zeros(n_bounds)
epsilon = np.ones(n_bounds)
if max_search_ite<=0:
rho = rho_init + alpha*natgrad
newpol.set_params(rho)
delta_bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta) - bound_init
return rho, np.ones(len(epsilon)), delta_bound, 0
for i in range(max_search_ite):
rho = rho_init + reassign(epsilon) * natgrad * alpha
newpol.set_params(rho)
bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
delta_bound = bound - bound_init
cond = np.logical_or(delta_bound<=delta_bound_opt, np.isnan(bound))
cond = np.logical_not(cond)
if np.any(np.isnan(bound)):
warnings.warn('Got NaN bound value')
delta_bound = np.where(np.isnan(delta_bound), -np.inf*np.ones(n_bounds), delta_bound)
high = np.where(cond, high, epsilon)
low = np.where(cond, epsilon, low)
rho_opt = np.where(reassign(cond), rho, rho_opt)
if np.any(delta_bound>delta_bound_opt):
i_opt = i
delta_bound_opt = np.where(cond, delta_bound, delta_bound_opt)
epsilon_opt = np.where(cond, epsilon, epsilon_opt)
old_epsilon = epsilon
epsilon = np.where(np.isnan(high), 2*epsilon, (low + high)/2)
if np.linalg.norm(old_epsilon - epsilon) < 1e-6:
break
return rho_opt, epsilon_opt, delta_bound_opt, i_opt+1
def line_search_parabola(pol, newpol, actor_params, rets, alpha, natgrad,
normalize=True,
use_rmax=True,
use_renyi=True,
max_search_ite=30, rmax=None, delta=0.2, reassign=None):
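    # Parabolic line search: each iteration fits a parabola to the observed bound
    # improvement as a function of the step multiplier and moves towards its
    # maximiser, epsilon_old**2 / (2*(epsilon_old - delta_bound)), while capping the
    # growth of the step at max_increase per iteration.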
rho_init = newpol.eval_params()
bound_init = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
n_bounds = len(bound_init)
epsilon = np.ones(n_bounds)
epsilon_old = np.zeros(n_bounds)
max_increase=2.
delta_bound_tol=1e-4
delta_bound_old = -np.inf * np.ones(n_bounds)
rho_old = rho_init
if max_search_ite<=0:
rho = rho_init + alpha*natgrad
newpol.set_params(rho)
delta_bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta) - bound_init
return rho, np.ones(len(epsilon)), delta_bound, 0
for i in range(max_search_ite):
stepsize = alpha*reassign(epsilon)
stepsize = np.where(np.isnan(stepsize), np.zeros(len(stepsize)), stepsize)
rho = rho_init + stepsize * natgrad
newpol.set_params(rho)
bound = newpol.eval_bound(actor_params, rets, pol, rmax,
normalize, use_rmax, use_renyi, delta)
if np.any(np.isnan(bound)):
warnings.warn('Got NaN bound value!')
if np.all(np.isnan(bound)):
return rho_old, epsilon_old, delta_bound_old, i + 1
epsilon_old = epsilon
delta_bound = bound - bound_init
epsilon = np.where(delta_bound > (1. - 1. / (2 * max_increase)) * epsilon_old,
epsilon_old*max_increase,
epsilon_old ** 2 / (2 * (epsilon_old - delta_bound)))
if np.all(delta_bound <= delta_bound_old + delta_bound_tol):
if np.all(delta_bound_old < 0.):
return rho_init, np.zeros(n_bounds), | np.zeros(n_bounds) | numpy.zeros |
# Training the whole Full World Model
# Importing the libraries
from mpi4py import MPI
import numpy as np
import json
import os
import subprocess
import sys
from model import make_model, simulate
from es import CMAES, SimpleGA, OpenES, PEPG
import argparse
import time
# Setting the Hyperparameters
num_episode = 1
eval_steps = 25
retrain_mode = True
cap_time_mode = True
num_worker = 8
num_worker_trial = 1
population = num_worker * num_worker_trial
gamename = 'carracing'
optimizer = 'cma'
antithetic = True
batch_mode = 'mean'
filebase = None
model = None
num_params = -1
es = None
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
PRECISION = 10000
SOLUTION_PACKET_SIZE = (5+num_params)*num_worker_trial
RESULT_PACKET_SIZE = 4*num_worker_trial
# Setting seed for reproducibility
seed_start = 0
# Making a function that initializes all the settings of the different optimizers
def initialize_settings(sigma_init=0.1, sigma_decay=0.9999):
global population, filebase, game, model, num_params, es, PRECISION, SOLUTION_PACKET_SIZE, RESULT_PACKET_SIZE
population = num_worker * num_worker_trial
filebase = gamename+'.'+optimizer+'.'+str(num_episode)+'.'+str(population)
model = make_model()
num_params = model.param_count
print("size of model", num_params)
if optimizer == 'ses':
ses = PEPG(num_params,
sigma_init=sigma_init,
sigma_decay=sigma_decay,
sigma_alpha=0.2,
sigma_limit=0.02,
elite_ratio=0.1,
weight_decay=0.005,
popsize=population)
es = ses
elif optimizer == 'ga':
ga = SimpleGA(num_params,
sigma_init=sigma_init,
sigma_decay=sigma_decay,
sigma_limit=0.02,
elite_ratio=0.1,
weight_decay=0.005,
popsize=population)
es = ga
elif optimizer == 'cma':
cma = CMAES(num_params,
sigma_init=sigma_init,
popsize=population)
es = cma
elif optimizer == 'pepg':
pepg = PEPG(num_params,
sigma_init=sigma_init,
sigma_decay=sigma_decay,
sigma_alpha=0.20,
sigma_limit=0.02,
learning_rate=0.01,
learning_rate_decay=1.0,
learning_rate_limit=0.01,
weight_decay=0.005,
popsize=population)
es = pepg
else:
oes = OpenES(num_params,
sigma_init=sigma_init,
sigma_decay=sigma_decay,
sigma_limit=0.02,
learning_rate=0.01,
learning_rate_decay=1.0,
learning_rate_limit=0.01,
antithetic=antithetic,
weight_decay=0.005,
popsize=population)
es = oes
PRECISION = 10000
SOLUTION_PACKET_SIZE = (5+num_params)*num_worker_trial
RESULT_PACKET_SIZE = 4*num_worker_trial
# Making a function that prints the training information in the standard output (this function is created because we are using parallel execution for each worker)
def sprint(*args):
print(args)
sys.stdout.flush()
# Building a class that acts as a seeder into the process of choosing data for batches
class OldSeeder:
# Initializing all the parameters and variables of the OldSeeder class
def __init__(self, init_seed=0):
self._seed = init_seed
# Making a method that returns the next seed by just incrementing the current seed
def next_seed(self):
result = self._seed
self._seed += 1
return result
# Making a method that produces ids for the next batch
def next_batch(self, batch_size):
result = np.arange(self._seed, self._seed+batch_size).tolist()
self._seed += batch_size
return result
# Building a class that acts as a seeder into the process of choosing data for batches
class Seeder:
# Initializing all the parameters and variables of the Seeder class
def __init__(self, init_seed=0):
np.random.seed(init_seed)
self.limit = np.int32(2**31-1)
# Making a method that returns a random next seed
def next_seed(self):
result = np.random.randint(self.limit)
return result
# Making a method that produces ids for the next batch
def next_batch(self, batch_size):
result = np.random.randint(self.limit, size=batch_size).tolist()
return result
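# Hedged usage sketch (not part of the original script): the master process would draw one
# seed per rollout from a Seeder before packing solutions, e.g.
#   seeder = Seeder(init_seed=seed_start)
#   seeds = seeder.next_batch(population)   # one random seed per trial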
# Making a function that encodes and packs solutions from the workers into packets (a packet is a set of data sent as one element, to fit the format of the MPI framework)
def encode_solution_packets(seeds, solutions, train_mode=1, max_len=-1):
n = len(seeds)
result = []
worker_num = 0
for i in range(n):
worker_num = int(i / num_worker_trial) + 1
result.append([worker_num, i, seeds[i], train_mode, max_len])
result.append(np.round(np.array(solutions[i])*PRECISION,0))
result = np.concatenate(result).astype(np.int32)
result = np.split(result, num_worker)
return result
# Making a function that decodes and unpacks received packets of solutions from the workers
def decode_solution_packet(packet):
packets = np.split(packet, num_worker_trial)
result = []
for p in packets:
result.append([p[0], p[1], p[2], p[3], p[4], p[5:].astype(np.float)/PRECISION])
return result
# Making a function that encodes and packs results obtained by the workers into packets
def encode_result_packet(results):
r = np.array(results)
r[:, 2:4] *= PRECISION
return r.flatten().astype(np.int32)
# Making a function that decodes and unpacks received packets of results obtained by the workers
def decode_result_packet(packet):
r = packet.reshape(num_worker_trial, 4)
workers = r[:, 0].tolist()
jobs = r[:, 1].tolist()
fits = r[:, 2].astype(np.float)/PRECISION
fits = fits.tolist()
times = r[:, 3].astype(np.float)/PRECISION
times = times.tolist()
result = []
n = len(jobs)
for i in range(n):
result.append([workers[i], jobs[i], fits[i], times[i]])
return result
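# Hedged round-trip sketch (assumption, valid when num_worker_trial == 1): a result row is
# [worker_id, job_id, fitness, time]; fitness/time are multiplied by PRECISION so they survive
# the int32 packet format required by MPI, and divided back out on decode.
#   packet = encode_result_packet([[1, 0, 903.27, 1000.0]])
#   decode_result_packet(packet)   # -> [[1, 0, 903.27, 1000.0]] up to 1/PRECISION rounding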
# Making a function that creates a worker with a specific set of weights (parameters) for the model, runs the simulation with them, and returns the average reward after the simulation is played
def worker(weights, seed, train_mode_int=1, max_len=-1):
train_mode = (train_mode_int == 1)
model.set_model_params(weights)
reward_list, t_list = simulate(model,
train_mode=train_mode, render_mode=False, num_episode=num_episode, seed=seed, max_len=max_len)
if batch_mode == 'min':
reward = np.min(reward_list)
else:
reward = np.mean(reward_list)
t = np.mean(t_list)
return reward, t
# Making a function that creates a slave (a slave is an instance of the model with specific solutions and weights) for the main model, gets the possible solutions of the environment, creates a worker for each solution, and returns the reward
def slave():
model.make_env()
    packet = np.empty(SOLUTION_PACKET_SIZE, dtype=np.int32)
from graspologic.cluster import GaussianCluster
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import csv
import random
from sklearn.metrics import adjusted_rand_score
def brute_graspy_cluster(
Ns, x, covariance_types, ks, c_true, savefigs=None, graphList=None
):
if graphList != None and "all_bics" in graphList:
_, ((ax0, ax1), (ax2, ax3)) = plt.subplots(
2, 2, sharey="row", sharex="col", figsize=(10, 10)
)
titles = ["full", "tied", "diag", "spherical"]
best_bic = -np.inf
for N in Ns:
bics = np.zeros([len(ks), len(covariance_types), N])
aris = np.zeros([len(ks), len(covariance_types), N])
for i in np.arange(N):
graspy_gmm = GaussianCluster(
min_components=ks[0],
max_components=ks[len(ks) - 1],
covariance_type=covariance_types,
random_state=i,
)
c_hat = graspy_gmm.fit_predict(x, y=c_true)
ari = adjusted_rand_score(c_hat, c_true)
bic_values = -graspy_gmm.bic_.values
ari_values = graspy_gmm.ari_.values
bics[:, :, i] = bic_values
aris[:, :, i] = ari_values
bic = bic_values.max()
if bic > best_bic:
                idx = np.argmax(bic_values)
#!/usr/bin/env python
"""
This file is part of IMSIS
Licensed under the MIT license:
http://www.opensource.org/licenses/MIT-license
This module contains image processing methods
"""
import os
import sys
import cv2 as cv
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.misc
import scipy.stats as st
from matplotlib import pyplot as plt
import numpy.random as random
from matplotlib.colors import hsv_to_rgb
from datetime import datetime
class Image(object):
@staticmethod
def load(filename, verbose=True):
"""Load image
Supported file formats: PNG, TIF, BMP
        note: images are loaded unchanged (original bit depth and number of channels are preserved).
        :Parameters: filename, verbose=True
:Returns: image
"""
img = None
if (os.path.isfile(filename)):
img = cv.imread(filename, -1)
if (verbose == True):
print("load file ", filename, img.shape, img.dtype)
else:
print('Error, file does not exist. ', filename)
sys.exit()
try:
q = img.shape
except:
print('Error, File could not be read. ', filename)
sys.exit()
return img
@staticmethod
def crop_rectangle(img, rect):
"""Crop an image using rectangle shape as input [(x0,y0),(x1,y1)]
:Parameters: image, rectangle
:Returns: image
"""
if len(rect) > 0:
out = Image.crop(img, rect[0][0], rect[0][1], rect[1][0], rect[1][1])
else:
print("Error: rectangle not defined.")
out = img
return out
@staticmethod
def crop(img, x0, y0, x1, y1):
"""Crop an image using pixels at x0,y0,x1,y1
:Parameters: image, x0, y0, x1, y1
:Returns: image
"""
res = img[y0:y1, x0:x1] # Crop from y0:y1,x0:x1
# print("Cropped region: (" , x0,y0,x1,y1,")")
return res
@staticmethod
def crop_percentage(img, scale=1.0):
"""Crop an image centered
:Parameters: image, scale=1.0
:Returns: image
"""
center_x, center_y = img.shape[1] / 2, img.shape[0] / 2
width_scaled, height_scaled = img.shape[1] * scale, img.shape[0] * scale
left_x, right_x = center_x - width_scaled / 2, center_x + width_scaled / 2
top_y, bottom_y = center_y - height_scaled / 2, center_y + height_scaled / 2
img_cropped = img[int(top_y):int(bottom_y), int(left_x):int(right_x)]
return img_cropped
@staticmethod
def resize(img, factor=0.5):
"""Resize image
:Parameters: image, factor
:Returns: image
"""
small = cv.resize(img, (0, 0), fx=factor, fy=factor)
return small
@staticmethod
def _blur_edge(img, d=31):
"""blur edge
:Parameters: image, d
:Returns: image
"""
h, w = img.shape[:2]
img_pad = cv.copyMakeBorder(img, d, d, d, d, cv.BORDER_WRAP)
img_blur = cv.GaussianBlur(img_pad, (2 * d + 1, 2 * d + 1), -1)[d:-d, d:-d]
y, x = np.indices((h, w))
dist = np.dstack([x, w - x - 1, y, h - y - 1]).min(-1)
w = np.minimum(np.float32(dist) / d, 1.0)
return img * w + img_blur * (1 - w)
@staticmethod
def _motion_kernel(angle, d, sz=65):
"""determine motion kernel value
:Parameters: angle, d, size
:Returns: kernel
"""
kern = np.ones((1, d), np.float32)
c, s = np.cos(angle), np.sin(angle)
A = np.float32([[c, -s, 0], [s, c, 0]])
sz2 = sz // 2
A[:, 2] = (sz2, sz2) - np.dot(A[:, :2], ((d - 1) * 0.5, 0))
        kern = cv.warpAffine(kern, A, (sz, sz), flags=cv.INTER_CUBIC)
return kern
@staticmethod
def _defocus_kernel(d, sz=65):
"""determine defocus kernel value
:Parameters: d, size
:Returns: kernel
"""
kern = np.zeros((sz, sz), np.uint8)
cv.circle(kern, (sz, sz), d, 255, -1, cv.LINE_AA, shift=1)
kern = np.float32(kern) / 255.0
return kern
@staticmethod
def _image_stats(image):
# compute the mean and standard deviation of each channel
(l, a, b) = cv.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return (lMean, lStd, aMean, aStd, bMean, bStd)
@staticmethod
def save(img, fn):
"""Save image (PNG,TIF)
:Parameters: image, filename
"""
try:
if (os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn), exist_ok=True) #mkdir if not empty
cv.imwrite(fn, img)
print("file saved. ", fn)
except:
print("Error: cannot save file {}".format(fn))
@staticmethod
def save_withuniquetimestamp(img):
"""Save PNG image with unique timestamp.
:Parameters: image
"""
path = "./output/"
os.makedirs(os.path.dirname(path), exist_ok=True)
sttime = datetime.now().strftime('Image_%Y%m%d%H%M%S')
fn = path + sttime + '.png'
print("file saved. ", fn)
cv.imwrite(fn, img)
'''
@staticmethod
def PSNR(img1, img2):
"""Return peaksignal to noise ratio
:Parameters: image1, image2
:Returns: float
"""
mse = np.mean((img1 - img2) ** 2)
if mse == 0:
return 100
PIXEL_MAX = 255.0
# print(np.sqrt(mse))
n = np.sqrt(mse)
# n=255/3.525
return 20 * np.log10(PIXEL_MAX / n)
'''
# implemented twice remove the 2nd one
@staticmethod
def cut(img, center=[0, 0], size=[0, 0]):
"""return a image cut out
:Parameters: image, center=[0, 0], size=[0, 0]
:Returns: image
"""
x0 = center[0] - round(size[0] * 0.5)
x1 = center[0] + round(size[0] * 0.5)
y0 = center[1] - round(size[1] * 0.5)
y1 = center[1] + round(size[1] * 0.5)
if x0 < 0:
x0 = 0
if y0 < 0:
y0 = 0
template = Image.crop(img, int(x0), int(y0), int(x1), int(y1))
return template
@staticmethod
def _multipleof2(number):
"""Rounds the given number to the nearest multiple of two."""
remainder = number % 2
if remainder > 1:
number += (2 - remainder)
else:
number -= remainder
return int(number)
@staticmethod
def subtract(img0, img1):
"""subtract 2 images
:Parameters: image1, image2
:Returns: image
"""
out = cv.subtract(img0, img1)
return out
'''
@staticmethod
def add(img0, img1):
"""add 2 images
:Parameters: image1, image2
:Returns: image
"""
out = cv.addWeighted(img0, 0.5, img1, 0.5, 0.0)
return out
'''
@staticmethod
def add(img0, img1, alpha=0.5):
"""add 2 images weighted (default alpha=0.5)
:Parameters: image1, image2, alpha
:Returns: image
"""
a = img0
b = img1
beta = 1 - alpha
        out = cv.addWeighted(a, alpha, b, beta, 0.0)
return out
@staticmethod
def new(height, width):
"""Create a new blank image
:Parameters: height,width
:Returns: image
"""
img = np.zeros((height, width), np.uint8)
return img
@staticmethod
def gaussiankernel(kernlen=21, nsig=3):
"""returns a 2D gaussian kernel
:Parameters: kernelsize, nsig
:Returns: image
"""
x = np.linspace(-nsig, nsig, kernlen + 1)
kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d / kern2d.sum()
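    # Hedged usage sketch (not in the original library): the kernel is normalized to sum to 1,
    # e.g. for manual convolution experiments.
    #   k = Image.gaussiankernel(kernlen=21, nsig=3)
    #   assert abs(k.sum() - 1.0) < 1e-6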
@staticmethod
def info(img):
"""get image properties
:Parameters: img
"""
print(img.shape)
print(img.size)
print(img.dtype)
@staticmethod
def unique_colours(image):
"""get number of unique colors in an image
:Parameters: img
"""
print(image.shape)
if (len(image.shape) == 3):
out = len(np.unique(image.reshape(-1, image.shape[2]), axis=0))
# b, g, r = cv.split(image)
# out_in_32U_2D = np.int32(b) << 16 + np.int32(g) << 8 + np.int32(r) # bit wise shift 8 for each channel.
# out_in_32U_1D = out_in_32U_2D.reshape(-1) # convert to 1D
# np.unique(out_in_32U_1D)
# out = len(np.unique(out_in_32U_1D))
else:
out_in_32U_2D = np.int32(image) # bit wise shift 8 for each channel.
out_in_32U_1D = out_in_32U_2D.reshape(-1) # convert to 1D
np.unique(out_in_32U_1D)
out = len(np.unique(out_in_32U_1D))
print(out)
return out
@staticmethod
def video_to_imagesondisk(file_in='video.avi', path_out='images'):
"""video to image
:Parameters: video_filename
:Returns: images
"""
video_file = file_in
output_folder = path_out
vidcap = cv.VideoCapture(video_file)
success, image = vidcap.read()
count = 0
success = True
while success:
fn = output_folder + "/" + "frame%d.png" % count
cv.imwrite(fn, image) # save frame as JPEG file
success, image = vidcap.read()
print('Read a new frame: ', success, fn)
count += 1
print("ready.")
@staticmethod
def imagesfromdisk_to_video(path_in, file_out='video.avi', framerate=15):
"""images from file to video
:Parameters: path with list of frames
:Returns: video
"""
image_folder = path_in
video_name = file_out
output_folder = "output"
fn = image_folder + "/" + output_folder + "/"
print(fn)
os.makedirs(os.path.dirname(fn), exist_ok=True)
images = [img for img in os.listdir(image_folder) if (img.endswith(".tif") or img.endswith(".png"))]
frame = cv.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv.VideoWriter(fn + video_name, 0, framerate, (width, height))
for image in images:
video.write(cv.imread(os.path.join(image_folder, image)))
cv.destroyAllWindows()
video.release()
'''
@staticmethod
def zoom(image0, factor=2):
"""
zoom image, resize with factor n, crop in center to same size as original image
:Parameters: image0, zoom factor
:Returns: image
"""
h = image0.shape[0]
w = image0.shape[1]
img = Image.resize(image0,factor)
x0 = int(factor*w/4)
y0 = int(factor*h/4)
x1 = x0+w
y1 = y0+h
print(x0,y0,x1,y1,w,h,img.shape[0],img.shape[1])
img = Image.crop(img,x0,y0,x1,y1)
return img
'''
@staticmethod
def zoom(image0, factor=2, cx=0.5, cy=0.5):
"""
zoom image, resize with factor n, crop in center to same size as original image
:Parameters: image0, zoom factor
:Returns: image
"""
h = image0.shape[0]
w = image0.shape[1]
img = Image.resize(image0, factor)
x0 = int(factor * w * cx * 0.5)
y0 = int(factor * h * cy * 0.5)
x1 = x0 + w
y1 = y0 + h
# print(x0, y0, x1, y1, w, h, img.shape[0], img.shape[1])
img = Image.crop(img, x0, y0, x1, y1)
return img
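    # Hedged usage sketch (assumption, not part of the library): a 2x zoom centered on the
    # middle of the frame returns an image of the same size as the input.
    #   zoomed = Image.zoom(img, factor=2, cx=0.5, cy=0.5)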
class Process:
@staticmethod
def directionalsharpness(img, ksize=-1):
"""
DirectionalSharpness
Measure sharnpess in X and Y seperately
Note: Negative slopes are missed when converting to unaryint8, therefore convert to float
:Parameters: image, kernel
:Returns: gradientx , gradienty, gradientxy, theta
"""
sobelx64f = cv.Sobel(img, cv.CV_64F, 1, 0, ksize=ksize)
sobely64f = cv.Sobel(img, cv.CV_64F, 0, 1, ksize=ksize)
grad = np.power(np.power(sobelx64f, 2.0) + np.power(sobely64f, 2.0), 0.5)
theta = np.arctan2(sobely64f, sobelx64f)
Gx = np.absolute(sobelx64f)
Gy = np.absolute(sobely64f)
mx = cv.mean(Gx)[0]
my = cv.mean(Gy)[0]
return mx, my, grad, theta
@staticmethod
def gradient_image(img, kx=11, ky=3):
"""Create a gradient image
Method used: gradient by bi-directional sobel filter
:Parameters: image, blurkernelx, blurkernely
:Returns: image
"""
# Calculate gradient
gx = cv.Sobel(img, cv.CV_32F, 1, 0, ksize=1)
gy = cv.Sobel(img, cv.CV_32F, 0, 1, ksize=1)
# mag, angle = cv.cartToPolar(gx, gy, angleInDegrees=True)
blurredgx = cv.GaussianBlur(gx, (kx, ky), 1)
blurredgy = cv.GaussianBlur(gy, (kx, ky), 1)
mag, angle = cv.cartToPolar(blurredgx, blurredgy)
return mag, angle
@staticmethod
def gradient_image_nonmaxsuppressed(img, blur=5, threshold=40):
"""Apply non maximum suppressed gradient filter sequence
        Note: the threshold parameter is currently unused.
:Parameters: image, blur=5, threshold=40
:Returns: image, angle
"""
def nonmaxsuppression(im, grad):
# Non-maximum suppression
gradSup = grad.copy()
for r in range(im.shape[0]):
for c in range(im.shape[1]):
# Suppress pixels at the image edge
if r == 0 or r == im.shape[0] - 1 or c == 0 or c == im.shape[1] - 1:
gradSup[r, c] = 0
continue
tq = thetaQ[r, c] % 4
if tq == 0: # 0 is E-W (horizontal)
if grad[r, c] <= grad[r, c - 1] or grad[r, c] <= grad[r, c + 1]:
gradSup[r, c] = 0
if tq == 1: # 1 is NE-SW
if grad[r, c] <= grad[r - 1, c + 1] or grad[r, c] <= grad[r + 1, c - 1]:
gradSup[r, c] = 0
if tq == 2: # 2 is N-S (vertical)
if grad[r, c] <= grad[r - 1, c] or grad[r, c] <= grad[r + 1, c]:
gradSup[r, c] = 0
if tq == 3: # 3 is NW-SE
if grad[r, c] <= grad[r - 1, c - 1] or grad[r, c] <= grad[r + 1, c + 1]:
gradSup[r, c] = 0
return gradSup
img = Image.Convert.toGray(img)
im = np.array(img, dtype=float) # Convert to float to prevent clipping values
# Gaussian Blur
im2 = cv.GaussianBlur(im, (blur, blur), 0)
# Find gradients
im3h = cv.filter2D(im2, -1, np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]))
im3v = cv.filter2D(im2, -1, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# Get gradient and direction
grad = np.power(np.power(im3h, 2.0) + np.power(im3v, 2.0), 0.5)
theta = np.arctan2(im3v, im3h)
        thetaQ = (np.round(theta * (4.0 / np.pi)) + 4) % 4  # Quantize direction into the 4 bins used below
gradSup = nonmaxsuppression(im, grad)
return gradSup, thetaQ
@staticmethod
def nonlocalmeans(img, h=10, templatewindowsize=7, searchwindowsize=21):
"""Apply a non-local-means filter with filtering strength (h), template windowsize (blocksize), searchwindowsize
:Parameters: image, h=10, templatewindowsize=7, searchwindowsize=21
:Returns: image
"""
# img = cv.pyrDown(img)
dst = cv.fastNlMeansDenoising(img, None, h, templatewindowsize, searchwindowsize)
return dst
@staticmethod
def deconvolution_wiener(img, d=3, noise=11):
"""Apply Wiener deconvolution
grayscale images only
:Parameters: image, d, noise
:Returns: kernel
"""
img = Image.Convert.toGray(img)
noise = 10 ** (-0.1 * noise)
img = np.float32(img) / 255.0
IMG = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT)
psf = Image._defocus_kernel(d)
psf /= psf.sum()
psf_pad = np.zeros_like(img)
kh, kw = psf.shape
psf_pad[:kh, :kw] = psf
PSF = cv.dft(psf_pad, flags=cv.DFT_COMPLEX_OUTPUT, nonzeroRows=kh)
PSF2 = (PSF ** 2).sum(-1)
iPSF = PSF / (PSF2 + noise)[..., np.newaxis]
RES = cv.mulSpectrums(IMG, iPSF, 0)
res = cv.idft(RES, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT)
res = np.roll(res, -kh // 2, 0)
res = np.roll(res, -kw // 2, 1)
return res
@staticmethod
def median(image, kernel=5):
"""Apply a median filter
:Parameters: image
:Returns: image
"""
out = cv.medianBlur(image, kernel)
return out
@staticmethod
def cannyedge_auto(image, sigma=0.33):
"""Apply a Canny Edge filter automatically
:Parameters: image, sigma
:Returns: image
"""
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv.Canny(image, lower, upper)
return edged
# smooth, threshold
@staticmethod
def gaussian_blur(img, smooth=3):
"""Gaussian blur image with kernel n
:Parameters: image, kernel
:Returns: image
"""
# img = cv.pyrDown(img)
imout = cv.GaussianBlur(img, (smooth, smooth), 0)
return imout
@staticmethod
def unsharp_mask(img, kernel_size=5, sigma=1.0, amount=1.0, threshold=0):
"""Unsharp mask filter
:Parameters: image, kernel_size=5, sigma=1.0, amount=1.0, threshold=0
:Returns: image
"""
        blurred = cv.GaussianBlur(img, (kernel_size, kernel_size), sigma)
sharpened = float(amount + 1) * img - float(amount) * blurred
sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
sharpened = sharpened.round().astype(np.uint8)
if threshold > 0:
low_contrast_mask = np.absolute(img - blurred) < threshold
np.copyto(sharpened, img, where=low_contrast_mask)
return sharpened
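    # Hedged usage sketch (not in the original library; assumes the nested class layout):
    # moderate sharpening with a low-contrast mask so flat regions are left untouched.
    #   sharp = Image.Process.unsharp_mask(img, kernel_size=5, sigma=1.0, amount=1.0, threshold=10)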
@staticmethod
def FFT(img):
"""Apply a fourier transform
generate a discrete fourier transform shift matrix and a magnitude spectrum image for viewing
:Parameters: image
:Returns: dft_shift, specimage
"""
# img = Image.Convert.toGray(img)
# do dft saving as complex output
dft = np.fft.fft2(img, axes=(0, 1))
# apply shift of origin to center of image
dft_shift = np.fft.fftshift(dft)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
# magnitude_spectrum[np.isneginf(magnitude_spectrum)] = 0
return dft_shift, spec
@staticmethod
def IFFT(fft_img):
"""Apply an inverse fourier transform
:Parameters: image_fft
:Returns: image
"""
back_ishift = np.fft.ifftshift(fft_img)
img_back = np.fft.ifft2(back_ishift, axes=(0, 1))
img_back = np.abs(img_back).clip(0, 255).astype(np.uint8)
return img_back
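    # Hedged round-trip sketch (assumption): FFT followed by IFFT reproduces the input
    # up to uint8 clipping.
    #   dft_shift, spec = Image.Process.FFT(gray_img)
    #   restored = Image.Process.IFFT(dft_shift)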
@staticmethod
def FD_bandpass_filter(img, D0=5, w=10, bptype=0):
gray = Image.Convert.toGray(img)
kernel = Image.FilterKernels.ideal_bandpass_kernel(gray, D0, w)
if bptype == 1:
kernel = Image.FilterKernels.gaussian_bandpass_kernel(gray, D0, w)
elif bptype == 2:
kernel = Image.FilterKernels.butterworth_bandpass_kernel(gray, D0, w)
        gray = np.float64(gray)
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
import os
np.random.seed(0)
tf.set_random_seed(0)
'''
Version 3.0 - Nov15/2019
<NAME>
<EMAIL>
<NAME>
'''
class DataSet(object):
def __init__(self,
data,
labels,
one_hot=False,
seed=None):
"""Construct a DataSet.
        If `one_hot` is True, labels are converted to one-hot vectors based on
        the unique label values found in `labels`. The seed arg provides for
        convenient deterministic testing.
"""
if one_hot:
vect_class, idx_class = np.unique(labels, return_inverse=True)
labels = self.dense_to_one_hot(idx_class, len(vect_class))
self.one_hot = one_hot
self._num_examples = data.shape[0]
self._num_features = data.shape[1]
self._data = data
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def data(self):
return self._data
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def num_features(self):
return self._num_features
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, shuffle=True):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = np.arange(self._num_examples)
np.random.shuffle(perm0)
self._data = self.data[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
data_rest_part = self._data[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
# Shuffle the data
if shuffle:
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._data = self.data[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
data_new_part = self._data[start:end]
labels_new_part = self._labels[start:end]
return np.concatenate((data_rest_part, data_new_part), axis=0), np.concatenate(
(labels_rest_part, labels_new_part), axis=0)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._data[start:end], self._labels[start:end]
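    # Hedged usage sketch (not part of the original module): iterate mini-batches until one
    # epoch has been consumed.
    #   ds = DataSet(data, labels, one_hot=True)
    #   while ds.epochs_completed == 0:
    #       batch_x, batch_y = ds.next_batch(128)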
def dense_to_one_hot(self, labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
import os
import datetime
import numpy as np
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential, clone_model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.summary import create_file_writer
from cpprb import ReplayBuffer, PrioritizedReplayBuffer
gamma = 0.99
batch_size = 1024
N_iteration = int(1e+5)
target_update_freq = 1000
eval_freq = 100
egreedy = 0.1
# Log
dir_name = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join("logs", dir_name)
writer = create_file_writer(logdir + "/metrics")
writer.set_as_default()
# Env
env = gym.make('CartPole-v1')
eval_env = gym.make('CartPole-v1')
# For CartPole: input 4, output 2
model = Sequential([Dense(64,activation='relu',
input_shape=(env.observation_space.shape)),
Dense(64,activation='relu'),
Dense(env.action_space.n)])
target_model = clone_model(model)
# Loss Function
@tf.function
def Huber_loss(absTD):
return tf.where(absTD > 1.0, absTD, tf.math.square(absTD))
loss_func = Huber_loss
optimizer = Adam()
buffer_size = 1e+6
env_dict = {"obs":{"shape": env.observation_space.shape},
"act":{"shape": 1,"dtype": np.ubyte},
"rew": {},
"next_obs": {"shape": env.observation_space.shape},
"done": {}}
# Nstep
nstep = 3
# nstep = False
if nstep:
Nstep = {"size": nstep, "rew": "rew", "next": "next_obs"}
discount = tf.constant(gamma ** nstep)
else:
Nstep = None
discount = tf.constant(gamma)
rb = PrioritizedReplayBuffer(buffer_size,env_dict,Nstep=Nstep,eps=0)
@tf.function
def Q_func(model,obs,act,act_shape):
return tf.reduce_sum(model(obs) * tf.one_hot(act,depth=act_shape), axis=1)
@tf.function
def DQN_target_func(model,target,next_obs,rew,done,gamma,act_shape):
return gamma*tf.reduce_max(target(next_obs),axis=1)*(1.0-done) + rew
@tf.function
def Double_DQN_target_func(model,target,next_obs,rew,done,gamma,act_shape):
"""
Double DQN: https://arxiv.org/abs/1509.06461
"""
act = tf.math.argmax(model(next_obs),axis=1)
return gamma*tf.reduce_sum(target(next_obs)*tf.one_hot(act,depth=act_shape), axis=1)*(1.0-done) + rew
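# Hedged note (not from the original script): the only difference from DQN_target_func above is
# that the greedy next action is picked with the online model and evaluated with the target
# network, which reduces the overestimation bias of the max operator in plain DQN.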
target_func = Double_DQN_target_func
def evaluate(model,env):
obs = env.reset()
total_rew = 0
while True:
Q = tf.squeeze(model(obs.reshape(1,-1)))
act = np.argmax(Q)
obs, rew, done, _ = env.step(act)
total_rew += rew
if done:
return total_rew
# Start Experiment
observation = env.reset()
# Warming up
for n_step in range(100):
action = env.action_space.sample() # Random Action
next_observation, reward, done, info = env.step(action)
rb.add(obs=observation,
act=action,
rew=reward,
next_obs=next_observation,
done=done)
observation = next_observation
if done:
env.reset()
rb.on_episode_end()
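# Hedged note (assumption): on_episode_end() matters when Nstep is enabled, so the replay
# buffer can flush the partially accumulated n-step transitions at episode boundaries.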
n_episode = 0
observation = env.reset()
for n_step in range(N_iteration):
if np.random.rand() < egreedy:
action = env.action_space.sample()
else:
Q = tf.squeeze(model(observation.reshape(1,-1)))
        action = np.argmax(Q)
import numpy as np
import math
def snr_plot(model, snrs, lbl, test_idx, X_test, Y_test, classes):
# Plot confusion matrix
acc = {}
for snr in snrs:
# extract classes @ SNR
test_SNRs = list(map(lambda x: lbl[x][1], test_idx))
test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]
# estimate classes
test_Y_i_hat = model.predict(test_X_i)
conf = np.zeros([len(classes), len(classes)])
confnorm = np.zeros([len(classes), len(classes)])
for i in range(0, test_X_i.shape[0]):
j = list(test_Y_i[i, :]).index(1)
k = int(np.argmax(test_Y_i_hat[i, :]))
conf[j, k] = conf[j, k] + 1
for i in range(0, len(classes)):
confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])
cor = np.sum(np.diag(conf))
ncor = np.sum(conf) - cor
print(snr, "dB, Overall Accuracy: ", cor / (cor + ncor))
acc[snr] = 1.0 * cor / (cor + ncor)
return acc
def SNR_singlech(S, SN):
    S = S - np.mean(S)  # remove the DC component
    S = S / np.max(np.abs(S))  # normalize the amplitude
    mean_S = (np.sum(S)
#%%
import pandas as pd
import numpy as np
import warnings
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import normalized_mutual_info_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_regression
from sklearn.feature_selection import chi2
from sklearn.exceptions import DataConversionWarning
from preprocess import data_frame_min_max_norm
from preprocess import data_frame_reg_to_cls
from DQN_s_a_value import DQN1
from DQN_a_index import DQN2
from evaluation import downstream_task
from evaluation import test_task
name = "AirfoilSelfNoise"
# name = "ionosphere"
# data = pd.read_csv("AirfoilSelfNoise.csv")
data = pd.read_csv(name+".csv")
# data = pd.read_csv("ionosphere.csv")
# data = pd.read_csv("ionosphere_data.csv")
# data =data.replace({'column_ai': {'g': 1,'b':0}})
# def add_noise(data,ratio_0,ration_times):
# ratio_len = int(data.shape[0]*data.shape[1]*ratio_0)
# x = np.random.randint(0, data.shape[0], ratio_len)
# y = np.random.randint(0, data.shape[1]-1, ratio_len)
# for item in zip(x,y):
# data.iloc[item[0],item[1]] = 8*np.random.random()
# columns = data.columns
# noise = pd.DataFrame(np.random.random(data.shape)*ration_times)
# noise = pd.DataFrame(np.random.normal(0,10,size=data.shape))
# noise = -data
# noise.iloc[:,-1] = 0
# data = data.to_numpy() + noise.to_numpy()
# return pd.DataFrame(data,columns=columns)
# data = add_noise(data,0.9,10)
# data.to_csv('new_'+name+'.csv',index=False)
# support TASK = 'cls' METRIC = 'acc' and TASK = 'reg' METRIC = 'mae'
TASK = 'cls'
METRIC = 'acc'
# TASK = 'reg'
# METRIC = 'rae'
MIN_SIZE_TIME = 1.5
MAX_SIZE_TIME = 2.0
Original_size = data.shape[1] - 1
# normalize D0:
D0 = data_frame_min_max_norm(data)
# regression dataset to binary classification dataset
D0 = data_frame_reg_to_cls(D0)
# record the optimal dataset and performance
D_OPT = D0
Perf_OPT,_,_,_ = downstream_task(D0, task=TASK, metric=METRIC)
# Perf_OPT = np.Inf
#ignore warnings
warnings.filterwarnings(action='ignore', category=UserWarning)
# O1 = ['sqrt', 'square','sin','cos']
O1 = ['sqrt', 'square','sin','cos','tanh']
O2 = ['+','-','*']
operation_set = O1+O2
EPISODES = 200
STEPS = 15
#the parameters of DQN1
STATE_DIM = 64
ACTION_DIM = 8
EPSILON = 0.95
MEMORY_CAPACITY=16
#the parameters of DQN2
N_STATES = STATE_DIM + ACTION_DIM
N_ACTIONS = len(operation_set)
dqn_meta = DQN1(STATE_DIM=STATE_DIM, ACTION_DIM=ACTION_DIM, MEMORY_CAPACITY=MEMORY_CAPACITY)
dqn_operation = DQN2(N_STATES=N_STATES, N_ACTIONS=N_ACTIONS,MEMORY_CAPACITY=MEMORY_CAPACITY)
def Feature_DB(X):
feature_matrix = []
for i in range(8):
feature_matrix = feature_matrix + list(X.describe().iloc[i,:].describe().values)
return feature_matrix
def choose_action(q_val_list):
if np.random.uniform() < EPSILON:
return np.argmax(np.array(q_val_list))
else:
return np.random.randint(0,len(q_val_list))
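# Hedged usage sketch (not in the original script): epsilon-greedy selection over Q-values.
#   choose_action([0.1, 0.7, 0.2])   # -> 1 with probability EPSILON, a random index otherwise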
def select_meta_feature_from_data(Dg):
Dg = Dg.drop(Dg.columns[-1],axis=1)
state = Feature_DB(Dg)
q_val_list = []
for column in Dg.columns:
feature = Dg[column]
action = np.array(feature.describe())
q_val_list.append(dqn_meta.get_q_value(state,action).detach().numpy()[0])
act_index = choose_action(q_val_list)
return Dg.iloc[:,act_index], state
def select_new_feature_from_next_Dg(Dg_new):
Dg_new = Dg_new.drop(Dg_new.columns[-1],axis=1)
state_ = Feature_DB(Dg_new)
q_val_list = []
for column in Dg_new.columns:
feature = Dg_new[column]
action = np.array(feature.describe())
q_val_list.append(dqn_meta.get_q_value(state_,action).detach().numpy()[0])
    act_index = np.argmax(q_val_list)
import cv2
import numpy as np
import argparse
import glob
def jacobian(x_shape, y_shape):
x = np.array(range(x_shape))
y = np.array(range(y_shape))
x, y = np.meshgrid(x, y)
ones = np.ones((y_shape, x_shape))
zeros = np.zeros((y_shape, x_shape))
row1 = np.stack((x, zeros, y, zeros, ones, zeros), axis=2)
row2 = np.stack((zeros, x, zeros, y, zeros, ones), axis=2)
jacob = np.stack((row1, row2), axis=2)
return jacob
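# Hedged note (assumption): for an affine warp with parameters p1..p6, the Jacobian dW/dp at
# pixel (x, y) is [[x, 0, y, 0, 1, 0], [0, x, 0, y, 0, 1]], so jacobian(cols, rows) returns an
# array of shape (rows, cols, 2, 6).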
def adjust_gamma(image, gamma=1.0):
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table)
def CLAHE(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(image)
return cl1
def shift_data_Z_score(tmp_mean, img):
tmp_mean_matrix = np.full((img.shape), tmp_mean)
img_mean_matrix = np.full((img.shape), np.mean(img))
std_ = np.std(img)
z_score = np.true_divide((img.astype(int) - tmp_mean_matrix.astype(int)), std_)
dmean = np.mean(img)-tmp_mean
if dmean < 10:
shifted_img = -(z_score*std_).astype(int) + img_mean_matrix.astype(int)
else:
shifted_img = (z_score*std_).astype(int) + img_mean_matrix.astype(int)
return shifted_img.astype(dtype = np.uint8)
def InverseLK(img, tmp, parameters, rect, p):
# Initialization
rows, cols = tmp.shape
lr, iteration = parameters
# Calculate gradient of template
grad_x = cv2.Sobel(tmp, cv2.CV_64F, 1, 0, ksize=5)
grad_y = cv2.Sobel(tmp, cv2.CV_64F, 0, 1, ksize=5)
# Calculate Jacobian
jacob = jacobian(cols, rows)
p=np.zeros(6)
# Set gradient of a pixel into 1 by 2 vector
grad = np.stack((grad_x, grad_y), axis=2)
grad = np.expand_dims((grad), axis=2)
steepest_descents = np.matmul(grad, jacob)
steepest_descents_trans = np.transpose(steepest_descents, (0, 1, 3, 2))
# Compute Hessian matrix
hessian_matrix = np.matmul(steepest_descents_trans, steepest_descents).sum((0, 1))
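    # Hedged note (assumption): in the inverse compositional formulation the steepest-descent
    # images and the Hessian depend only on the template, so they are computed once above and
    # reused in every iteration below; only the parameter update delta-p is solved per iteration.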
for _ in range(iteration):
# Calculate warp image
warp_mat = np.array([[1 + p[0], p[2], p[4]], [p[1], 1 + p[3], p[5]]])
warp_img = cv2.warpAffine(img, warp_mat, (0, 0))
warp_img = warp_img[rect[0][1]:rect[1][1], rect[0][0]:rect[1][0]]
# Compute the error term
error = tmp.astype(float) - warp_img.astype(float)
error = error.reshape((rows, cols, 1, 1))
update = (steepest_descents_trans * error).sum((0, 1))
# print("uodate", update)
d_p = np.matmul(np.linalg.pinv(hessian_matrix), update).reshape((-1))
# Update p
d_p_deno = (1 + d_p[0]) * (1 + d_p[3]) - d_p[1] * d_p[2]
d_p_0 = (-d_p[0] - d_p[0] * d_p[3] + d_p[1] * d_p[2]) / d_p_deno
d_p_1 = (-d_p[1]) / d_p_deno
d_p_2 = (-d_p[2]) / d_p_deno
d_p_3 = (-d_p[3] - d_p[0] * d_p[3] + d_p[1] * d_p[2]) / d_p_deno
d_p_4 = (-d_p[4] - d_p[3] * d_p[4] + d_p[2] * d_p[5]) / d_p_deno
d_p_5 = (-d_p[5] - d_p[0] * d_p[5] + d_p[1] * d_p[4]) / d_p_deno
p[0] += lr * (d_p_0 + p[0] * d_p_0 + p[2] * d_p_1)
p[1] += lr * (d_p_1 + p[1] * d_p_0 + p[3] * d_p_1)
p[2] += lr * (d_p_2 + p[0] * d_p_2 + p[2] * d_p_3)
p[3] += lr * (d_p_3 + p[1] * d_p_2 + p[3] * d_p_3)
p[4] += lr * (d_p_4 + p[0] * d_p_4 + p[2] * d_p_5)
p[5] += lr * (d_p_5 + p[1] * d_p_4 + p[3] * d_p_5)
cv2.imshow('Gammacorrect_img', warp_img)
k=cv2.waitKey(1)
return p
ROIs={0:(73,53,104,89),390:(225 , 76 , 64 , 51),280:(190 , 63 ,68 , 55),480:(202,70,68,52),595:(228 ,61 , 85 , 65)
,170:(110,66 ,81 ,65),209:(139 ,67 ,74 ,55)} # Dataset:(x,y,w,h)
filepath="/home/arjun/Desktop/LKT/Car4/img/0001.jpg"
frame=cv2.imread(filepath)
x,y,w,h=ROIs[0]
color_template = frame[y:y+h,x:x+w]
rect=(x,y,w,h)
refPt=[[x, y], [x+w, y+h]]
template = cv2.cvtColor(color_template, cv2.COLOR_BGR2GRAY)
T = np.float32(template)/255
p=np.zeros(6)
images=[]
for img in glob.glob((r"/home/arjun/Desktop/LKT/Car4/img/*.jpg")):
images.append(img)
images.sort()
images=images[1:]
parameters = [2, 200]
count=2
for img in images:
if(count==170 or count ==209 or count==280
or count==390 or count ==480 or count ==595) :
frame=cv2.imread(img)
if count ==170 or count ==209:
frame = adjust_gamma(frame,gamma=1.5)
if count ==390 or count ==480 or count ==595 :
parameters = [2.5, 300]
x,y,w,h=ROIs[count]
rect=(x,y,w,h)
color_template = frame[y:y+h,x:x+w]
template = cv2.cvtColor(color_template, cv2.COLOR_BGR2GRAY)
T = np.float32(template)/255
refPt=[[x, y], [x+w, y+h]]
p = np.zeros(6)
## @package lstm_benchmark
# Module caffe2.python.lstm_benchmark
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, workspace, core, utils, rnn_cell
import argparse
import numpy as np
import time
import logging
logging.basicConfig()
log = logging.getLogger("lstm_bench")
log.setLevel(logging.DEBUG)
def generate_data(T, shape, num_labels):
'''
Fill a queue with input data
'''
log.info("Generating T={} sequence batches".format(T))
generate_input_init_net = core.Net('generate_input_init')
queue = generate_input_init_net.CreateBlobsQueue(
[], "inputqueue", num_blobs=1, capacity=T,
)
label_queue = generate_input_init_net.CreateBlobsQueue(
[], "labelqueue", num_blobs=1, capacity=T,
)
workspace.RunNetOnce(generate_input_init_net)
generate_input_net = core.Net('generate_input')
generate_input_net.EnqueueBlobs([queue, "scratch"], ["scratch"])
generate_input_net.EnqueueBlobs([label_queue, "label_scr"], ["label_scr"])
np.random.seed(2603)
for t in range(T):
if (t % (max(10, T // 10)) == 0):
print("Generating data {}/{}".format(t, T))
# Randomize the seqlength
random_shape = (
[np.random.randint(1, shape[0])] + shape[1:]
if t > 0 else shape
)
X = np.random.rand(*random_shape).astype(np.float32)
batch_size = random_shape[1]
L = num_labels * batch_size
        labels = (np.random.rand(random_shape[0])
import json
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
HYPERPARAMETERS = {
'num_features': 7,
'features': ('lokacija2', 'kvadratura', 'sprat', 'broj_soba', 'parking', 'lift', 'terasa'),
'learning_rate': [0.001, 0.003, 0.01, 0.03], # 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1
'mini_batch_size': [16, 32, 64, 128],
'random_seed': 1,
'outer_cv_fold': 10,
'inner_cv_fold': 10,
'iterations': 50
}
def plotting(it, J_train, J_val):
epochs = list(range(it))
plt.plot(epochs, J_train, '-b+', label='J train')
plt.plot(epochs, J_val, '-r', label='J val')
plt.legend()
plt.show()
def train_main():
# ------------------------------------------PREPROCESSING DATA----------------------------------------------------------
# I Create and import data and output
with open("../data/data_flats_sale.json", 'r') as infile:
json_data = json.load(infile)
m = len(json_data)
data = np.zeros([m, HYPERPARAMETERS['num_features']])
    output = np.zeros([m, 1])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dependency
----------------------------------
Dependency analysis class
Created on Nov 8, 2018
Last edited on Nov 8, 2018
@author: <NAME>
"""
import os
import io
import sys
import datetime
import numpy as np
from IPython import embed
import pandas as pd
import logging
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
import re
import subprocess
import yaml
from glob import glob
import scipy
from statsmodels.robust.scale import mad
from collections import Counter
from collections import defaultdict as ddict
from sklearn.metrics import roc_curve, average_precision_score, f1_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection._search import ParameterGrid
import torch
import paths
from biovnn_model import BioVNNmodel
from utils import compute_AUC_bootstrap, plot_pred_true_r_by_gene_MAD, plot_pred_true_r_by_gene_mean, gene_level_cor, \
individual_auc, plot_ROC, plot_top_ROC, plot_hist_cor, plot_hist_auc
disease_mapping = {'Bladder Cancer': 'BLCA',
'Breast Cancer': 'BRCA',
'breast': 'BRCA',
'Cervical Cancer': 'CESC',
'Colon Cancer': 'COAD',
'Colon/Colorectal Cancer': 'COAD',
'colorectal': 'COAD',
'GBM/Brain Cancer': 'GBM',
'glioblastoma': 'GBM',
'Head and Neck Cancer': 'HNSC',
'upper_aerodigestive': 'HNSC',
'Liver Cancer': 'LIHC',
'liver': 'LIHC',
'Ovarian Cancer': 'OV',
'ovary': 'OV',
'Skin Cancer': 'SKCM',
'skin': 'SKCM',
'Gastric Cancer': 'STAD',
'Soft Tissue/ Thyroid Cancer': 'THCA',
'Thyroid Cancer': 'THCA',
'Endometrial Cancer': 'UCEC',
'Endometrial/Uterine Cancer': 'UCEC',
'uterus': 'UCEC',
'Esophageal Cancer': 'ESCA',
'esophagus': 'ESCA',
'Pancreatic Cancer': 'PAAD',
'pancreas': 'PAAD',
'Non-Small Cell Lung Cancer (NSCLC), Adenocarcinoma': 'LUAD',
'Non-Small Cell Lung Cancer (NSCLC), Squamous Cell Carcinoma': 'LUSC',
'Renal Carcinoma, clear cell': 'KIRC',
'Glioblastoma': 'GBM',
'Acute Myelogenous Leukemia (AML)': 'LAML',
'AML': 'LAML'}
def load_params(output_dir=None, param_f=None):
if param_f is None:
param_f = os.path.join(output_dir, 'param.yaml')
with open(param_f, 'r') as stream:
params = yaml.safe_load(stream)
return params
def save_params(output_dir, params):
with io.open(os.path.join(output_dir, 'param.yaml'), 'w', encoding='utf8') as outfile:
yaml.dump(params, outfile, default_flow_style=False, allow_unicode=True)
assert params == load_params(output_dir)
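# Hedged usage sketch (not part of the original module): persist the run parameters and read
# them back for a later analysis run.
#   save_params(output_dir, params)
#   params = load_params(output_dir)          # or load_params(param_f='path/to/param.yaml')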
class Dependency(object):
def __init__(self, cancer_type, data_dir, result_dir, run_name, params,
depmap_ver='19Q3', use_hierarchy=True):
self.method = 'BioVNN'
self.cancer_type = cancer_type
self.n_cluster = None
self.run_name = run_name
self.patient_list = []
self.cancer_type_to_patients = ddict(list)
self.rna_dir = os.path.join(data_dir, 'DepMap', depmap_ver)
self.data_dir = data_dir
if 'ref_groups' in params and params['ref_groups'] == 'GO':
self.community_file = os.path.join(data_dir, 'GO', 'goa_human_20201212.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'GO', 'go_20201212_relation.txt')
else:
self.community_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'Reactome', 'ReactomePathwaysRelation.txt')
self.gene_id_file = os.path.join(self.data_dir, 'Reactome', 'Homo_sapiens_9606.gene_info')
self.gene_id_dict = pd.read_csv(self.gene_id_file, sep='\t', index_col=1)['Symbol'].to_dict()
self.Reactome_name_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.txt')
self.Reactome_name_dict = pd.read_csv(self.Reactome_name_file, sep='\t', index_col=0, header=None)[1].to_dict()
self.Reactome_reaction_file = os.path.join(self.data_dir, 'Reactome', 'NCBI2Reactome_PE_Reactions_human.txt')
self.Reactome_reaction_df = pd.read_csv(self.Reactome_reaction_file, sep='\t', index_col=None, header=None)
self.Reactome_gene_reaction_dict = ddict(list)
self.Reactome_reaction_gene_dict = ddict(list)
for i, row in self.Reactome_reaction_df.iterrows():
if 'HSA' in row[1] and 'HSA' in row[3]: # Make sure they are from human
if row[0] in self.gene_id_dict:
symbol = self.gene_id_dict[row[0]]
else:
symbol = row[2].split(' [')[0]
self.Reactome_gene_reaction_dict[symbol].append(row[3])
self.Reactome_reaction_gene_dict[row[3]].append(symbol)
self.community_dict = {}
self.community_hierarchy = []
self.community_hierarchy_all = None
self.community_hierarchy_random = []
self.community_hierarchy_random_all = None
self.community_hierarchy_ones = []
self.community_hierarchy_ones_all = None
self.community_hierarchy_dicts_all = {}
self.use_hierarchy = use_hierarchy
self.community_matrix = None
self.result_path = os.path.join(result_dir, self.__class__.__name__, run_name)
self.temp_path = os.path.join(result_dir, self.__class__.__name__, 'temp')
os.makedirs(self.result_path, exist_ok=True)
os.makedirs(self.temp_path, exist_ok=True)
self._dependency_classes = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Prospective',
'Timestamped', 'Interpret', 'Interpret_ts']
self._dependency_classes_plot = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Timestamped']
self.params = params
self.load_result = params.get('load_result', False)
self.load_result_dir_name = params.get('load_result_dir_name', False)
if self.load_result and self.load_result_dir_name:
if 'load_result_dir_suffix' in params:
if 'load_result_dir_full' in params:
if params['load_result_dir_full']:
self.load_result_dir = params['load_result_dir_suffix']
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = '/'.join(self.result_path.split('/')[:-1] + [self.load_result_dir_name])
params = load_params(self.load_result_dir)
if 'run_mode' in self.params:
run_mode = self.params['run_mode']
else:
run_mode = None
self.params.update(params)
params = self.params
if run_mode:
params['run_mode'] = run_mode
self.params['run_mode'] = run_mode
self.use_cuda = params.get('use_cuda', True)
self.data_types = params.get('data_types', ['rna'])
self.use_all_gene = params.get('use_all_gene', True)
self.exp_ratio_min = params.get('exp_ratio_min', 0.01)
self.feature_max = params.get('feature_max', 99999)
self.feature_per_group_max = params.get('feature_per_group_max', 100)
self.repeat_n = params.get('repeat_n', 1)
self.fold_n = params.get('fold_n', 5)
self.cv_fold = params.get('cv_fold', 0)
self.model_v = params.get('model_v', 'clh_v1')
self.cv_fold_only_run = params.get('cv_fold_only_run', 1)
self.other_cancer_types = params.get('other_cancer_types', [])
self.rna_top_n_std = params.get('rna_top_n_std', 10000)
self.community_affected_size_min = params.get('community_affected_size_min', 5)
self.community_affected_size_max = params.get('community_affected_size_max', 999999)
self.require_label_gene_in_gene_group = params.get('require_label_gene_in_gene_group', True)
self.clip_Xval_Xtest = params.get('clip_Xval_Xtest', [-1, 1])
self.use_MinMaxScaler = params.get('use_MinMaxScaler', False)
self.use_StandardScaler = params.get('use_StandardScaler', True)
self.use_tanh_feature = params.get('use_tanh_feature', False)
self.use_sigmoid_feature = params.get('use_sigmoid_feature', False)
self.use_community_filter = params.get('use_community_filter', True)
self.test_run = params.get('test_run', False)
self.select_genes_in_label = params.get('select_genes_in_label', 'dgidb_w_interaction')
self.use_classification = params.get('use_classification', True)
self.use_binary_dependency = params.get('use_binary_dependency', True)
self.use_class_weights = params.get('use_class_weights', True)
self.use_normalized_class_weights = params.get('use_normalized_class_weights', False)
self.use_sample_class_weights = params.get('use_sample_class_weights', False)
self.use_normalized_sample_class_weights = params.get('use_normalized_sample_class_weights', True)
self.use_all_dependency_gene = params.get('use_all_dependency_gene', True)
self.use_all_feature_for_random_group = params.get('use_all_feature_for_random_group', False)
self.use_all_feature_for_fully_net = params.get('use_all_feature_for_fully_net', False)
self.use_deletion_vector = params.get('use_deletion_vector', True)
self.use_consistant_groups_for_labels = params.get('use_consistant_groups_for_labels', False)
self.run_mode = params.get('run_mode',
'ref') # Could be ref, random_predictor, random, expression_control or full
self.random_group_permutation_ratio = params.get('random_group_permutation_ratio', 1)
self.random_group_hierarchy_permutation_ratio = params.get('random_group_hierarchy_permutation_ratio', 1)
self.random_group_permutation_seed = params.get('random_group_permutation_seed', 9527)
self.leaf_group_gene_in_label_max = params.get('leaf_group_gene_in_label_max', 50)
self.split_by_cancer_type = params.get('split_by_cancer_type', True)
self.save_model_ckpt = params.get('save_model_ckpt', True)
self.output_pred_small = ['RPS20', 'MYC', 'MYCN', 'PIK3CA']
self.GSP_min = params.get('GSP_min', 6)
self.GSN_min = params.get('GSN_min', 6)
self.gene_list = None
self.gene_list_name = None
self.accuracy = None
self.f1 = None
self.confusion_mat = None
self.mcc = None
self.pearson_r = None
self.spearman_rho = None
self.mse = None
self.feature_importance = []
metrics = ['accuracy', 'confusion_mat', 'f1', 'mcc', 'pearson_r', 'spearman_rho', 'mse', 'pearson_r2',
'AUC', 'PR']
data_splits = ['train', 'val', 'test']
for x in metrics:
self.__dict__[x] = ddict(dict)
for z in range(self.repeat_n + 1):
self.__dict__[x][z] = ddict(dict)
for y in data_splits:
self.__dict__[x][z][y] = ddict(list)
for x in ['pred', 'idx']:
self.__dict__[x] = ddict(dict)
for y in data_splits:
self.__dict__[x][y] = ddict(list)
self.metric_output = {}
for y in data_splits:
self.metric_output[y] = pd.DataFrame()
self.save_load_data = ['rna']
self.depmap_ver = depmap_ver
os.makedirs(self.rna_dir, exist_ok=True)
self.save_load_data = ['rna', 'dependency']
self.hdf5_df_file = os.path.join(self.temp_path,
'df_{}_depmap_{}.hdf5'.format('_'.join(sorted(self.data_types)),
self.depmap_ver))
def prepare_data(self):
self.load_communities()
self.load_known_genes()
self.load_selected_gene_list()
if not self.load_data():
self.load_dependency()
if 'rna' in self.data_types:
self.load_rna()
self.save_data()
self.align_data()
def load_selected_gene_list(self):
if isinstance(self.select_genes_in_label, str):
if self.select_genes_in_label.lower() == 'dgidb_w_interaction':
dgidb_file = os.path.join(self.data_dir, 'DGIdb_genes_w_interactions.txt')
else:
raise ValueError("Cannot recongnize select_genes_in_label {}".format(self.select_genes_in_label))
self.select_genes_in_label = pd.read_csv(dgidb_file, header=None)[0].tolist()
elif 'ref_leaf_group' in self.run_mode:
if isinstance(self.select_genes_in_label, list):
leaf_communities, df = self.load_leaf_communities()
initial_select = set(self.select_genes_in_label)
initial_n = len(initial_select)
logging.info("Selected genes {} were used to find additional genes in the same leaf gene groups".format(
self.select_genes_in_label))
leaf_communities_with_genes = {}
for group in leaf_communities:
if len(initial_select.intersection(self.community_dict[group])) > 0:
leaf_communities_with_genes[group] = len(self.community_dict[group])
# Select leaf groups from small to large groups until it reaches the self.leaf_group_gene_in_label_max
for group, size in sorted(leaf_communities_with_genes.items(), key=lambda x: x[1]):
if len(initial_select | set(self.community_dict[group])) < self.leaf_group_gene_in_label_max:
initial_select |= set(self.community_dict[group])
logging.info("{} gene group was added as genes in labels".format(group))
self.select_genes_in_label = sorted(list(initial_select))
logging.info(
"Additional {} genes in the same leaf gene groups with selected genes were added".format(
len(self.select_genes_in_label) - initial_n))
def save_label_genes(self, genes):
"""Save label genes to file."""
fout = open(os.path.join(self.result_path, 'dependency_genes.tsv'), 'w')
for x in genes:
fout.write('{}\n'.format(x))
fout.close()
def save_communities(self, d=None):
"""Save community genes to file."""
if d is None:
fout = open(os.path.join(self.result_path, 'community_list.tsv'), 'w')
d = self.community_dict
s = ''
else:
fout = open(os.path.join(self.result_path, 'community_random_list.tsv'), 'w')
s = '_random'
for k, v in d.items():
fout.write('{}\n'.format('\t'.join([k + s] + v)))
fout.close()
def save_data(self):
hf = pd.HDFStore(self.hdf5_df_file)
for x in self.save_load_data:
if x in self.__dict__:
hf[x] = self.__dict__[x]
hf['data_types'] = pd.DataFrame(self.data_types)
# hf['other_cancer_types'] = pd.DataFrame(self.other_cancer_types)
# hf['cancer_type'] = pd.DataFrame([self.cancer_type])
# for ct in set(self.cancer_type_to_patients.keys()) | set(self.other_cancer_types) | set([self.cancer_type]):
# hf[ct] = pd.DataFrame(self.cancer_type_to_patients[ct])
# if 'cancer_type_to_patients_target' in self.__dict__:
# for ct in set(self.cancer_type_to_patients_target.keys()) | set(self.other_cancer_types) | set(
# [self.cancer_type]):
# hf[ct + '_target'] = pd.DataFrame(self.cancer_type_to_patients_target[ct])
hf.close()
def load_data(self):
if os.path.isfile(self.hdf5_df_file):
hf = pd.HDFStore(self.hdf5_df_file)
try:
for x in self.save_load_data:
if x in hf:
self.__dict__[x] = hf[x]
self.__dict__[x + '_all'] = self.__dict__[x].copy()
logging.info("Loaded data from existing hdf5 file.")
hf.close()
return True
except:
logging.info(
"Current Data types, Cancer type or Other cancer types do not match that of existing hdf5 file.")
hf.close()
return False
else:
return False
def load_communities(self, load_original=True):
"""Parses out a geneset from file."""
if self.load_result and not load_original:
lines = open('{}/community_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
else:
lines = open('{}'.format(self.community_file)).readlines()
if 'pathway' in self.community_file.lower():
ind_key = 1
ind_gene = 3
elif self.community_file.lower().endswith('.gmt'):
ind_key = 1
ind_gene = 3
else:
ind_key = 0
ind_gene = 1
self.community_genes = set()
self.community_dict = {}
self.gene_community_dict = ddict(list)
self.community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
self.community_dict[line[ind_key]] = line[ind_gene:]
self.community_size_dict[line[ind_key]] = len(line[ind_gene:])
self.community_genes |= set(line[ind_gene:])
for g in line[ind_gene:]:
self.gene_community_dict[g].append(line[ind_key])
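    # Hedged note (restating the indices above): ind_key is the column holding the group
    # identifier and ind_gene is the first gene column; they differ between the previously
    # saved community_list.tsv (0 and 1) and the pathway/.gmt inputs (1 and 3).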
def load_random_communities(self, load_original=True):
"""Parses out a geneset from file."""
lines = open('{}/community_random_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
self.random_community_genes = set()
self.community_dict_random = {}
self.random_community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
group = line[ind_key].split('_')[0]
self.community_dict_random[group] = line[ind_gene:]
self.random_community_size_dict[group] = len(line[ind_gene:])
self.random_community_genes |= set(line[ind_gene:])
def load_leaf_communities(self):
f = self.community_hierarchy_file
# The first column 0 is the parent and the second column 1 is the child
df = pd.read_csv(f, sep='\t', header=None)
if 'Reactome' in f:
df = df.loc[df[0].str.contains('HSA')] # Get human-only pathways
# Make root as the parent of those gene groups without parents
df_root = pd.DataFrame(columns=df.columns)
for x in set(df[0]) - set(df[1]):
if x in self.community_dict or 'GO:' in x:
df_root = pd.concat([df_root, pd.DataFrame(['root', x]).T])
# Remove those relationship of groups not in the analysis
df = df.loc[df[1].isin(self.community_dict.keys()) & df[0].isin(self.community_dict.keys())]
df = pd.concat([df, df_root])
leaf_communities = sorted(list((set(df[1]) - set(df[0])) & set(self.community_dict.keys())))
return leaf_communities, df
def load_random_hierarchy(self):
f = '{}/random_group_hierarchy.tsv'.format(self.load_result_dir)
df = pd.read_csv(f, sep='\t', header=None)
return df
def load_known_genes(self, depmap_ver=None):
if depmap_ver is None:
depmap_ver = self.depmap_ver
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
depmap_dir = os.environ.get('DEPMAP_DIR')
if depmap_ver not in depmap_dir:
depmap_dir = regex.sub(depmap_ver, depmap_dir)
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
depmap_cell_line_file = os.path.join(depmap_dir, 'sample_info.csv')
else:
depmap_cell_line_file = os.path.join(depmap_dir, 'DepMap-20{}-celllines.csv'.format(depmap_ver.lower()))
self.cell_line_metadata = pd.read_csv(depmap_cell_line_file)
self.cell_line_metadata = self.cell_line_metadata.set_index('DepMap_ID')
try:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE_Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['Primary Disease'].to_dict()
except:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['lineage'].to_dict()
try:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['Subtype Disease'].to_dict()
except:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['lineage_subtype'].to_dict()
self.cell_line_id_mapping = ddict(lambda: None, self.cell_line_id_mapping)
self.cell_line_id_pri_dis = ddict(lambda: None, self.cell_line_id_pri_dis)
self.cell_line_id_sub_dis = ddict(lambda: None, self.cell_line_id_sub_dis)
def load_dependency(self, depmap_ver=None, dep_data_type='Dependency'):
depmap_genetic_vulnerabilities_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
if depmap_ver not in depmap_genetic_vulnerabilities_dir:
depmap_genetic_vulnerabilities_dir = regex.sub(depmap_ver, depmap_genetic_vulnerabilities_dir)
if dep_data_type == 'CERES':
depmap_file = 'Achilles_gene_effect.csv'
elif dep_data_type == 'Dependency':
depmap_file = 'Achilles_gene_dependency.csv'
self.dependency = pd.read_csv(os.path.join(depmap_genetic_vulnerabilities_dir, depmap_file), header=0,
index_col=0)
self.dependency.columns = [x.split(' (')[0] for x in self.dependency.columns]
self.dependency = self.dependency[sorted(self.dependency.columns)]
# Map cell line id to name
self.dependency.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
self.dependency.index]
self.dependency = self.dependency.loc[sorted(self.dependency.index)]
self.dependency = self.dependency.fillna(0)
def load_rna(self, depmap_ver=None):
depmap_genomic_characterization_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
        if depmap_ver in ('19Q2', '19Q3', '19Q4') or '20Q' in depmap_ver:
if depmap_ver not in depmap_genomic_characterization_dir:
depmap_genomic_characterization_dir = regex.sub(depmap_ver, depmap_genomic_characterization_dir)
depmap_file = 'CCLE_expression.csv'
if '20Q2' in depmap_ver:
sep_str = '\t'
else:
sep_str = ','
self.rna = pd.read_csv(os.path.join(depmap_genomic_characterization_dir, depmap_file), header=0,
index_col=0, sep=sep_str)
self.rna.columns = [x.split(' (')[0] for x in self.rna.columns]
# Merge columns with the same gene symbol
dup_genes = [item for item, count in Counter(self.rna.columns).items() if count > 1]
unique_genes = list(set(self.rna.columns).difference(dup_genes))
RNAseq_gene = self.rna[unique_genes]
for col in set(dup_genes):
RNAseq_gene[col] = self.rna[col].sum(axis=1)
# Map cell line id to name
RNAseq_gene.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
RNAseq_gene.index]
for cell in set(self.dependency.index).intersection(RNAseq_gene.index):
cell_type = self.cell_line_id_pri_dis[cell]
cell_subtype = self.cell_line_id_sub_dis[cell]
if cell_type in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_type]]:
self.cancer_type_to_patients[disease_mapping[cell_type]].append(cell)
elif cell_subtype in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_subtype]]:
self.cancer_type_to_patients[disease_mapping[cell_subtype]].append(cell)
if cell not in self.cancer_type_to_patients[cell_type]:
self.cancer_type_to_patients[cell_type].append(cell)
self.rna = RNAseq_gene
self.rna = self.rna[sorted(self.rna.columns)]
self.rna = self.rna.loc[sorted(self.rna.index)]
self.rna_all = self.rna.copy()
def _subset_samples(self):
# Get overlapping patients among data types
overlapping_patients = set(self.dependency.index)
for x in self.data_types:
# Get patient ID
overlapping_patients &= set(self.__dict__[x].index)
if self.cancer_type == 'PANC':
selected_samples = sorted(list(overlapping_patients))
else:
selected_samples = sorted(list(set(self.cancer_type_to_patients[self.cancer_type])))
overlapping_patients &= set(selected_samples)
overlapping_patients = sorted(list(overlapping_patients))
for x in self.data_types:
self.__dict__[x] = self.__dict__[x].loc[overlapping_patients]
self.dependency = self.dependency.loc[overlapping_patients]
logging.info("Total {} samples have {} and dependency data".format(
len(overlapping_patients), " ".join(self.data_types)))
def _subset_target_genes(self):
try:
self.genes_in_label = pd.read_csv(self.load_result_dir + '/dependency_genes.tsv', sep='\t', header=None)
self.genes_in_label = list(self.genes_in_label.values.T[0])
except:
if self.use_all_dependency_gene:
self.genes_in_label = sorted(list(set(self.community_genes).intersection(self.dependency.columns)))
else:
self.genes_in_label = sorted(list(set(self.genes).intersection(self.dependency.columns)))
if len(self.select_genes_in_label) > 0:
self.genes_in_label = sorted(list(set(self.genes_in_label).intersection(self.select_genes_in_label)))
genes_not_found = set(self.select_genes_in_label).difference(self.genes_in_label)
logging.debug("Genes not found: {}".format(genes_not_found))
if 'Timestamped' not in self.__class__.__name__:
logging.info("{} out of {} selected genes are in dependency data.".format(
len(self.genes_in_label) - len(genes_not_found),
len(self.select_genes_in_label)))
gsp_total = (self.dependency[self.genes_in_label] >= 0.5).sum()
cond = (gsp_total >= self.GSP_min) & (self.dependency.shape[0] - gsp_total >= self.GSN_min)
cond_col = sorted([y for x, y in zip(cond, cond.index) if x])
logging.info("{} genes have at least {} gold standard positives and {} negatives".format(len(cond_col),
self.GSP_min,
self.GSN_min))
self.dependency = self.dependency[cond_col]
self.genes_in_label = cond_col
self.gsp_n = (self.dependency >= 0.5).sum().sum()
self.gsn_n = (self.dependency < 0.5).sum().sum()
if self.use_classification:
logging.info("Positive:negative samples = {}:{}".format(self.gsp_n, self.gsn_n))
def _select_feature_genes(self):
overlapping_genes = set(self.community_genes)
try:
self.rna_mad = pd.read_csv(self.load_result_dir + '/RNA_mad.tsv', sep='\t', index_col=0)
self.rna_mad.columns = [0]
except:
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
expressed_genes = ((self.rna >= 1).sum() > (self.rna.shape[0]) * self.exp_ratio_min)
self.rna_mad = self.rna.apply(mad)
self.rna_mad = pd.DataFrame(self.rna_mad, index=self.rna.columns)
self.rna_mad = self.rna_mad.loc[expressed_genes]
self.rna_mad = self.rna_mad.sort_values(by=0, ascending=False)
self.rna_mad.to_csv(os.path.join(self.result_path, 'RNA_mad.tsv'), sep='\t')
top_mad_genes = self.rna_mad.head(min(self.rna_top_n_std, self.rna_mad.shape[0])).index
self.output_pred_small += list(top_mad_genes)[0:20]
self.output_pred_small += list(top_mad_genes)[
int(self.rna_top_n_std / 2 - 10):int(self.rna_top_n_std / 2 + 10)]
self.output_pred_small += list(top_mad_genes)[-20:]
self.rna = self.rna[top_mad_genes]
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
logging.info("Total {} genes have top {} mad and gene group data".format(
len(overlapping_genes), self.rna.shape[1]))
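    # Note on the MAD ranking used above: the median absolute deviation of a
    # gene's expression across samples, mad(x) = median(|x - median(x)|), possibly
    # rescaled depending on the `mad` implementation imported elsewhere in this
    # module. A tiny illustrative computation:
    #   x = np.array([1.0, 2.0, 2.0, 4.0, 9.0])
    #   np.median(np.abs(x - np.median(x)))   # -> 1.0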
def _filter_community(self):
com_to_drop = []
modeled_com_genes = set()
modeled_genes = set()
for data_type in self.data_types:
modeled_genes |= set(self.__dict__[data_type].columns)
for com, members in self.community_dict.items():
if self.use_all_dependency_gene:
self.community_dict[com] = sorted(
list((set(modeled_genes) & set(members)) | (set(members) & set(self.genes_in_label))))
else:
self.community_dict[com] = sorted(list(set(modeled_genes).intersection(members)))
if len(self.community_dict[com]) < self.community_affected_size_min:
com_to_drop.append(com)
elif len(self.community_dict[com]) > self.community_affected_size_max:
com_to_drop.append(com)
elif len(set(members) & set(self.genes_in_label)) < 1:
if self.require_label_gene_in_gene_group:
com_to_drop.append(com)
else:
modeled_com_genes |= set(self.community_dict[com])
else:
modeled_com_genes |= set(self.community_dict[com])
for com in com_to_drop:
self.community_dict.pop(com, None)
def _run_create_filter(self):
self.feature_genes = set()
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
self.community_filter = self.__create_filter(self.gene_community_dict, self.community_dict,
self.community_size_dict, random=False)
def __create_filter(self, gene_community_dict, community_dict, community_size_dict, random=False):
community_filter = ddict(set)
if not random:
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
i = 0
for g in self.genes_in_label:
coms = gene_community_dict[g]
coms = list(set(coms) & (community_dict.keys()))
com_size = [community_size_dict[x] for x in coms]
community_filter[g] |= set([g])
for s, com in sorted(zip(com_size, coms)):
genes = set(community_dict[com])
                # Choose top n genes so that not too many features are used per gene group
if 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
if len(added_genes) == 0:
continue
if isinstance(self.feature_per_group_max, int):
choose_n = min(self.feature_per_group_max, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
                        top_n = int(np.ceil(len(genes) * self.feature_per_group_max))
choose_n = min(top_n, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
else:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
variable_genes = self.mut_freq.loc[list(added_genes)].sort_values(0, ascending=False)
if isinstance(self.feature_per_group_max, int):
top_genes = variable_genes.head(self.feature_per_group_max).index
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
                        top_n = int(np.ceil(len(genes) * self.feature_per_group_max))
top_genes = variable_genes.head(top_n).index
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
community_filter[g] |= set(top_genes)
if len(community_filter[g]) >= self.feature_max:
break
if not random:
if len(community_filter[g]) > 0:
self.genes_in_label_idx[g] = i
self.idx_genes_in_label[i] = g
i += 1
else:
logging.info("Gene {} could not find feature genes".format(g))
if not random:
logging.info(
"The dependency of total {} genes will be predicted".format(len(self.genes_in_label_idx.keys())))
return community_filter
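    # Illustrative sketch of the returned structure (gene names are hypothetical):
    # community_filter maps each label gene to the set of feature genes drawn from
    # its gene groups, smallest groups first, and always includes the gene itself:
    #   community_filter['KRAS'] -> {'KRAS', 'EGFR', 'BRAF', ...}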
def _build_hierarchy(self):
leaf_communities, df = self.load_leaf_communities()
child = leaf_communities
# The layer having only gene children
level = 1
self.community_level_dict = dict()
self.level_community_dict = dict()
count_dict = ddict(int)
for x in child:
self.community_level_dict[x] = level
count_dict[x] += 1
self.level_community_dict[level] = child
# logging.info("Layer {} has {} gene groups".format(level, len(child)))
while 1:
df_level = df.loc[df[1].isin(child)]
if df_level.shape[0] == 0:
break
level += 1
parent = sorted(list(set(df_level[0])))
for parent_group in parent:
self.community_level_dict[parent_group] = level
count_dict[parent_group] += 1
self.level_community_dict[level] = parent
child = parent
# Make the layer number of each community unique
self.level_community_dict = ddict(list)
for g, level in self.community_level_dict.items():
self.level_community_dict[level].append(g)
for level, groups in sorted(self.level_community_dict.items()):
logging.info("Layer {} has {} gene groups".format(level, len(groups)))
gene_groups_all = sorted(list(self.community_dict.keys())) + ['root']
logging.info(
"Total {} layers of {} gene groups in the hierarchy including the root".format(level, len(gene_groups_all)))
feature_genes_all = []
self.feature_n = []
        # Seed the global NumPy RNG used by the random feature selection below
        np.random.seed(self.params['seeds'][0])
for data_type in self.data_types:
feat_n = len(self.__dict__[data_type].columns)
self.feature_n.append(feat_n)
# Randomly reselect features for each feature matrix
if 'full' in self.run_mode and self.use_all_feature_for_fully_net:
feat_pool = sorted(list(self.__dict__[data_type + '_all'].columns))
feature_genes_all += feat_pool
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, feat_pool]
logging.info(
"Use all {} genes from {} as features to form fully connected networks".format(feat_n, data_type))
elif 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
feat_pool = list(self.__dict__[data_type + '_all'].columns)
# Require gene labels in the features
pre_select = set(feat_pool) & set(self.genes_in_label)
feat_pool = sorted(list(set(feat_pool) - set(self.genes_in_label)))
random_feat = sorted(list(np.random.choice(feat_pool, feat_n - len(pre_select), replace=False)))
feature_genes_all += random_feat + list(pre_select)
feature_genes_all = sorted(feature_genes_all)
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, random_feat]
logging.info(
"Randomly select {} genes including {} gene of prediction from {} as features to form random gene groups".format(
feat_n, len(self.genes_in_label), data_type))
else:
feature_genes_all += sorted(list(self.__dict__[data_type].columns))
del_genes_all = sorted(list(self.genes_in_label_idx.keys()))
self.feature_n.append(len(del_genes_all))
self.genes_in_label = del_genes_all
self.save_label_genes(self.genes_in_label)
self.y = self.dependency[self.genes_in_label]
self.y_binary = ((self.y >= 0.5) + 0).astype(int)
        # The order of indexed genes and gene groups:
if self.use_deletion_vector:
entity_all = feature_genes_all + del_genes_all + gene_groups_all
else:
entity_all = feature_genes_all + gene_groups_all
self.idx_name = {i: k for i, k in enumerate(entity_all)}
name_idx = ddict(list)
for k, v in self.idx_name.items():
name_idx[v].append(k)
if len(self.data_types) > 1:
self.mut_genes_idx = {}
self.rna_genes_idx = {}
for k, v in name_idx.items():
for idx in v:
if idx < self.feature_n[0]:
self.mut_genes_idx[k] = idx
elif self.feature_n[0] <= idx < self.feature_n[0] + self.feature_n[1]:
self.rna_genes_idx[k] = idx
self.feature_genes_idx = {x: min(name_idx[x]) for x in feature_genes_all}
self.del_genes_idx = {x: max(name_idx[x]) for x in del_genes_all}
self.gene_group_idx = {x: name_idx[x][0] for x in gene_groups_all}
self.community_hierarchy_dicts_all = {'idx_name': self.idx_name,
'feature_genes_idx': self.feature_genes_idx,
'del_genes_idx': self.del_genes_idx,
'gene_group_idx': self.gene_group_idx}
self.child_map_all = []
self.child_map_all_random = []
self.child_map_all_ones = []
feature_only_genes = set(feature_genes_all) - set(del_genes_all)
dep_only_genes = set(del_genes_all) - set(feature_genes_all)
feature_dep_both_genes = set(feature_genes_all) & set(del_genes_all)
gene_pool = sorted(list(set(feature_genes_all) | set(del_genes_all)))
self.community_filter_random = ddict(list)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
self.load_random_communities()
random_hierarchy = self.load_random_hierarchy()
else:
self.community_dict_random = {}
random_hierarchy = pd.DataFrame()
self.gene_community_dict_random = ddict(list)
self.community_size_dict_random = {}
prng = np.random.RandomState(self.params['seeds'][0])
logging.info("Building gene group hierarchy")
if self.run_mode == 'random':
idx_gene_pool = {i: g for i, g in enumerate(gene_pool)}
gene_pool_idx = {g: i for i, g in enumerate(gene_pool)}
partially_shuffled_membership = self.__partially_shuffle_gene_group(gene_pool, gene_pool_idx)
idx_gene_group = {i: g for g, i in self.gene_group_idx.items()}
partially_shuffled_relation = self.__partially_shuffle_gene_group_hierarchy(df, idx_gene_group)
else:
partially_shuffled_membership = None
partially_shuffled_relation = None
idx_gene_group = None
idx_gene_pool = None
min_group_idx = min(self.gene_group_idx.values())
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
genes = self.community_dict[group]
gene_idx = self._genes_to_feat_del_idx(genes)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
genes_random = self.community_dict_random[group]
else:
if partially_shuffled_membership is not None:
genes_random_idx = partially_shuffled_membership[idx - min_group_idx].nonzero()[0]
genes_random = sorted([idx_gene_pool[x] for x in genes_random_idx])
else:
if self.use_consistant_groups_for_labels:
gene_pool = sorted(list(set(gene_pool) - set(self.genes_in_label)))
pre_select = set(genes) & set(self.genes_in_label)
if len(set(genes) & set(self.genes_in_label)) > 0:
random_feat = list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False))
genes_random = sorted(random_feat + list(pre_select))
else:
genes_random = sorted(
list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False)))
else:
genes_random = sorted(list(prng.choice(gene_pool, len(genes), replace=False)))
self.community_dict_random[group] = genes_random
for g in genes_random:
self.gene_community_dict_random[g].append(group)
self.community_size_dict_random[group] = len(genes_random)
feat_genes = set(genes_random) & set(self.feature_genes_idx.keys())
del_genes = set(genes_random) & set(self.del_genes_idx.keys())
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
else:
feat_gene_idx = [self.feature_genes_idx[x] for x in feat_genes]
if self.use_deletion_vector:
del_gene_idx = [self.del_genes_idx[x] for x in del_genes]
else:
del_gene_idx = []
gene_idx_random = feat_gene_idx + del_gene_idx
else:
gene_idx = []
gene_idx_random = []
child = sorted(df.loc[df[0] == group, 1].tolist())
child_idx = sorted([self.gene_group_idx[x] for x in child if x in self.gene_group_idx])
self.child_map_all.append(sorted(gene_idx + child_idx))
if len(self.child_map_all[-1]) == 0:
logging.info("Gene group {} does not have children".format(group))
# Build random group hierarchy
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
child_random = sorted(random_hierarchy.loc[random_hierarchy[0] == group, 1].tolist())
child_idx_random = sorted([self.gene_group_idx[x] for x in child_random if x in self.gene_group_idx])
else:
if partially_shuffled_relation is not None:
child_idx_random = partially_shuffled_relation[idx - min_group_idx, :].nonzero()[0]
child_idx_random = [x + min_group_idx for x in child_idx_random]
child_random = sorted([idx_gene_group[x] for x in child_idx_random])
else:
child_idx_random = []
child_random = []
for c in child:
child_level = self.community_level_dict[c]
random_child = prng.choice(self.level_community_dict[child_level], 1, replace=False)[0]
child_random.append(random_child)
random_c_idx = self.gene_group_idx[random_child]
child_idx_random.append(random_c_idx)
for rc in sorted(child_random):
random_hierarchy = pd.concat([random_hierarchy, pd.DataFrame([group, rc]).T], axis=0)
self.child_map_all_random.append(sorted(gene_idx_random + child_idx_random))
try:
assert len(gene_idx) == len(gene_idx_random), "Random gene number does not match"
except AssertionError:
pass
# Children for fully connected neural networks
if group in leaf_communities:
gene_idx_ones = list(self.feature_genes_idx.values())
else:
gene_idx_ones = []
parent_level = self.community_level_dict[group]
child_level = parent_level - 1
if child_level in self.level_community_dict:
child_ones = self.level_community_dict[child_level]
else:
child_ones = []
child_idx_ones = [self.gene_group_idx[x] for x in child_ones if x in self.gene_group_idx]
self.child_map_all_ones.append(sorted(gene_idx_ones + child_idx_ones))
self.save_communities(self.community_dict_random)
# Save random hierarchy as file
random_hierarchy.to_csv(os.path.join(self.result_path, 'random_group_hierarchy.tsv'),
index=None, sep='\t', header=None)
self.community_filter_random = self.__create_filter(self.gene_community_dict_random, self.community_dict_random,
self.community_size_dict_random, random=True)
self.community_filter_map = []
self.community_filter_map_random = []
feature_n = len(feature_genes_all)
for g in del_genes_all:
feat_genes = set(self.community_filter[g])
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
feat_gene_idx = sorted(feat_gene_idx)
else:
feat_gene_idx = sorted([self.feature_genes_idx[x] for x in feat_genes if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_gene_idx] = 1
self.community_filter_map.append(feat_genes_array)
feat_genes_random = set(self.community_filter_random[g])
if len(self.data_types) > 1:
feat_genes_random_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_genes_random_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_genes_random_idx.append(self.rna_genes_idx[g])
feat_genes_random_idx = sorted(feat_genes_random_idx)
else:
feat_genes_random_idx = sorted(
[self.feature_genes_idx[x] for x in feat_genes_random if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_genes_random_idx] = 1
self.community_filter_map_random.append(feat_genes_array)
def __partially_shuffle_gene_group(self, gene_pool, gene_pool_idx):
group_gene_membership_matrix = np.zeros([len(self.gene_group_idx), len(gene_pool)])
min_group_idx = min(self.gene_group_idx.values())
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
idx -= min_group_idx
genes = self.community_dict[group]
gene_idx = [gene_pool_idx[gene] for gene in genes]
group_gene_membership_matrix[idx, gene_idx] = 1
all_idx = group_gene_membership_matrix.nonzero()
        prng = np.random.RandomState(self.random_group_permutation_seed)
"""
Author: <NAME>
Created in: September 19, 2019
Python version: 3.6
"""
from Least_SRMTL import Least_SRMTL
import libmr
from matplotlib import pyplot, cm
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import Axes3D, art3d
import numpy as np
import numpy.matlib
import sklearn.metrics
class EVeC(object):
"""
Extreme Value evolving Classifier
    Rule-based classifier with an EVM defining the antecedent of the rules.
1. Create a new instance and provide the model parameters;
2. Call the predict(x) method to make predictions based on the given input;
3. Call the train(x, y) method to evolve the model based on the new input-output pair.
"""
# version
NO_REGRESSION = 0
LS = 1
LEAST_SRMTL = 2
LOGISTIC_SRMTL = 3
# Model initialization
def __init__(self, L, sigma=0.5, delta=50, N=np.Inf, version=0, rho=None):
# Setting EVeC algorithm parameters
self.L = L
self.sigma = sigma
self.tau = 99999
self.delta = delta
self.N = N
self.version = version
self.rho = rho
self.mr_x = list()
self.x0 = list()
self.X = list()
self.step = list()
self.last_update = list()
self.c = 0
        # Version 0 (NO_REGRESSION) stores class labels; the other versions fit a regression consequent
if version == EVeC.NO_REGRESSION:
self.label = list()
else:
self.y = list()
self.theta = list()
if self.rho is not None:
self.init_theta = 2
self.srmtl = Least_SRMTL(rho)
self.R = None
# Initialization of a new instance of rule.
def add_rule(self, x0, step, label=None, y0=None):
self.mr_x.append(libmr.MR())
self.x0.append(x0)
self.X.append(x0)
self.step.append(step)
self.last_update.append(np.max(step))
self.c = self.c + 1
if self.version == 0:
self.label.append(label)
else:
self.y.append(y0.reshape(1, -1))
if self.rho is None:
self.theta.append(np.linalg.lstsq(np.insert(self.X[-1], 0, 1, axis=1), self.y[-1], rcond=None)[0])
else:
self.theta.append(np.zeros((x0.shape[0], x0.shape[1] + 1)))
self.init_theta = 2
# Add the sample(s) (X, label) as covered by the extreme vector. Remove repeated points.
def add_sample_to_rule(self, index, X, step, y=None):
self.X[index] = np.concatenate((self.X[index], X))
self.step[index] = np.concatenate((self.step[index], step))
if self.version != 0:
self.y[index] = np.concatenate((self.y[index], y))
if self.X[index].shape[0] > self.N:
indexes = np.argsort(-self.step[index].reshape(-1))
self.X[index] = self.X[index][indexes[: self.N], :]
self.step[index] = self.step[index][indexes[: self.N]]
if self.version != 0:
self.y[index] = self.y[index][indexes[: self.N]]
self.x0[index] = np.average(self.X[index], axis=0).reshape(1, -1)
self.last_update[index] = np.max(self.step[index])
if self.version != 0 and self.rho is None:
self.theta[index] = np.linalg.lstsq(np.insert(self.X[index], 0, 1, axis=1), self.y[index], rcond=None)[0]
def delete_from_list(self, list_, indexes):
for i in sorted(indexes, reverse=True):
del list_[i]
return list_
# Calculate the firing degree of the sample to the psi curve
def firing_degree(self, index, x):
return self.mr_x[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.x0[index], x).reshape(-1))
# Fit the psi curve of the EVs according to the external samples
def fit(self, index, X_ext):
self.fit_x(index, sklearn.metrics.pairwise.pairwise_distances(self.x0[index], X_ext)[0])
# Fit the psi curve to the extreme values with distance D to the center of the EV
def fit_x(self, index, D):
self.mr_x[index].fit_low(1/2 * D, min(D.shape[0], self.tau))
# Get the distance from the origin of the input rule which has the given probability to belong to the curve
def get_distance_input(self, percentage, index=None):
if index is None:
return [self.mr_x[i].inv(percentage) for i in range(self.c)]
else:
return self.mr_x[index].inv(percentage)
# For version zero, obtain the samples that do not belong to the given rule and have the same label; for version one, return all the samples that do not belong to the given rule
def get_external_samples(self, index=None):
if index is None:
return np.concatenate(self.X)
else:
if self.version == 0:
indexes = [i for i in range(len(self.label)) if self.label[i] == self.label[index] and i != index]
if len(indexes) > 0:
return np.concatenate(list(map(self.X.__getitem__, indexes)))
if self.c > 1:
# if there is no other rule besides the current rule with the same label, return the remaining rules regardless the label content
return np.concatenate(self.X[:index] + self.X[index + 1 :])
return np.array([])
# Merge two rules of different clusters whenever the origin of one is inside the sigma probability of inclusion of the psi curve of the other
def merge(self):
self.sort_rules()
index = 0
while index < self.c:
if index + 1 < self.c:
x0 = np.concatenate(self.x0[index + 1 : ])
S_index = self.firing_degree(index, x0)
index_to_merge = np.where(S_index > self.sigma)[0] + index + 1
if index_to_merge.size > 0:
self.init_theta = 2
for i in reversed(range(len(index_to_merge))):
if self.version != 0:
self.add_sample_to_rule(index, self.X[index_to_merge[i]], self.step[index_to_merge[i]], self.y[index_to_merge[i]])
self.remove_rule([index_to_merge[i]])
elif self.label[index] == self.label[index_to_merge[i]]:
self.add_sample_to_rule(index, self.X[index_to_merge[i]], self.step[index_to_merge[i]])
self.remove_rule([index_to_merge[i]])
index = index + 1
# Plot the granules that form the antecedent part of the rules
def plot(self, name_figure_input, name_figure_output, step):
# Input fuzzy granules plot
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(left=-2, right=2)
ax.axes.set_ylim3d(bottom=-2, top=2)
z_bottom = -0.3
ax.set_zticklabels("")
colors = cm.get_cmap('Dark2', self.c)
for i in range(self.c):
self.plot_rule_input(i, ax, '.', colors(i), z_bottom)
# Plot axis' labels
ax.set_xlabel('u(t)', fontsize=15)
ax.set_ylabel('y(t)', fontsize=15)
        ax.set_zlabel(r'$\mu_x$', fontsize=15)
# Save figure
fig.savefig(name_figure_input)
# Close plot
pyplot.close(fig)
# Plot the probability of sample inclusion (psi-model) together with the samples associated with the rule for the input fuzzy granules
def plot_rule_input(self, index, ax, marker, color, z_bottom):
        # Plot the input samples in the XY plane
        ax.scatter(self.X[index][:, 0], self.X[index][:, 1], z_bottom * np.ones((self.X[index].shape[0], 1)), marker=marker, color=color)
import cv2
import sys, os, glob, re
import json
from os.path import join, dirname, abspath, realpath, isdir
from os import makedirs
import numpy as np
from shutil import rmtree
from ipdb import set_trace
from .bench_utils.bbox_helper import rect_2_cxy_wh, cxy_wh_2_rect
def center_error(rects1, rects2):
"""Center error.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
errors = np.sqrt(np.sum(np.power(centers1 - centers2, 2), axis=-1))
return errors
def _intersection(rects1, rects2):
r"""Rectangle intersection.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
def rect_iou(rects1, rects2, bound=None):
r"""Intersection over union.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
bound (numpy.ndarray): A 4 dimensional array, denotes the bound
            (min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.
    """
"""
assert rects1.shape == rects2.shape
if bound is not None:
# bounded rects1
rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
# bounded rects2
rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
rects_inter = _intersection(rects1, rects2)
areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
areas1 = np.prod(rects1[..., 2:], axis=-1)
areas2 = np.prod(rects2[..., 2:], axis=-1)
areas_union = areas1 + areas2 - areas_inter
eps = np.finfo(float).eps
ious = areas_inter / (areas_union + eps)
ious = np.clip(ious, 0.0, 1.0)
return ious
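# Hedged usage sketch for rect_iou (box values are made-up numbers):
#   a = np.array([[0., 0., 10., 10.]])
#   b = np.array([[5., 5., 10., 10.]])
#   rect_iou(a, b)   # ~0.143: 25 px overlap / (100 + 100 - 25) px union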
def overlap_ratio(rect1, rect2):
'''
Compute overlap ratio between two rects
- rect: 1d array of [x,y,w,h] or
2d array of N x [x,y,w,h]
'''
if rect1.ndim==1:
rect1 = rect1[None,:]
if rect2.ndim==1:
rect2 = rect2[None,:]
left = np.maximum(rect1[:,0], rect2[:,0])
right = np.minimum(rect1[:,0]+rect1[:,2], rect2[:,0]+rect2[:,2])
top = np.maximum(rect1[:,1], rect2[:,1])
bottom = np.minimum(rect1[:,1]+rect1[:,3], rect2[:,1]+rect2[:,3])
intersect = np.maximum(0,right - left) * np.maximum(0,bottom - top)
union = rect1[:,2]*rect1[:,3] + rect2[:,2]*rect2[:,3] - intersect
iou = np.clip(intersect / union, 0, 1)
return iou
def calc_curves(ious, center_errors, nbins_iou, nbins_ce):
ious = np.asarray(ious, float)[:, np.newaxis]
center_errors = np.asarray(center_errors, float)[:, np.newaxis]
thr_iou = np.linspace(0, 1, nbins_iou)[np.newaxis, :]
thr_ce = np.arange(0, nbins_ce)[np.newaxis, :]
bin_iou = np.greater(ious, thr_iou)
bin_ce = np.less_equal(center_errors, thr_ce)
succ_curve = np.mean(bin_iou, axis=0)
    prec_curve = np.mean(bin_ce, axis=0)
    return succ_curve, prec_curve
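# Hedged note: succ_curve[i] is the fraction of frames whose IoU exceeds the i-th
# threshold in linspace(0, 1, nbins_iou); prec_curve[j] is the fraction of frames
# whose center error is at most j pixels. Typical benchmark settings (an
# assumption here, not fixed by this file) use nbins_iou=21 and nbins_ce=51.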
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of DDEC charges based on data parsed by cclib."""
import copy
import random
import numpy
import logging
import math
import os
import sys
from cclib.method.calculationmethod import Method
from cclib.method.volume import electrondensity_spin
from cclib.parser.utils import convertor
from cclib.parser.utils import find_package
from typing import List
class MissingInputError(Exception):
pass
class DDEC6(Method):
"""DDEC6 charges."""
# All of these are required for DDEC6 charges.
required_attrs = ("homos", "mocoeffs", "nbasis", "gbasis")
def __init__(
self, data, volume, proatom_path=None, progress=None, loglevel=logging.INFO, logname="Log"
):
# Inputs are:
# data -- ccData object that describe target molecule.
# volume -- Volume object that describe target Cartesian grid.
# proatom_path -- path to proatom densities
# (directory containing atoms.h5 in horton or c2_001_001_000_400_075.txt in chargemol)
super(DDEC6, self).__init__(data, progress, loglevel, logname)
self.volume = volume
self.fragresults = None
self.proatom_path = proatom_path
if numpy.sum(self.data.coreelectrons) != 0:
# TODO: Pseudopotentials should be added back
pass
# Check whether proatom_path is a valid directory or not.
assert os.path.isdir(
proatom_path
), "Directory that contains proatom densities should be added as an input."
# Read in reference charges.
self.proatom_density = []
self.radial_grid_r = []
for atom_number in self.data.atomnos:
density, r = self._read_proatom(proatom_path, atom_number, 0)
self.proatom_density.append(density)
self.radial_grid_r.append(r)
def __str__(self):
"""Return a string representation of the object."""
return "DDEC6 charges of {}".format(self.data)
def __repr__(self):
"""Return a representation of the object."""
return "DDEC6({})".format(self.data)
def _check_required_attributes(self):
super(DDEC6, self)._check_required_attributes()
def _cartesian_dist(self, pt1, pt2):
""" Small utility function that calculates Euclidian distance between two points
pt1 and pt2 are numpy arrays representing a point in Cartesian coordinates. """
return numpy.sqrt(numpy.dot(pt1 - pt2, pt1 - pt2))
def _read_proatom(
self, directory, atom_num, charge # type = str # type = int # type = float
):
# type: (...) -> numpy.ndarray, numpy.ndarray
"""Return a list containing proatom reference densities."""
        # TODO: Treat calculations with pseudopotentials
# TODO: Modify so that proatom densities are read only once for horton
# [https://github.com/cclib/cclib/pull/914#discussion_r464039991]
# File name format:
# ** Chargemol **
# c2_[atom number]_[nuclear charge]_[electron count]_[cutoff radius]_[# shells]
# ** Horton **
# atoms.h5
# File format:
# Starting from line 13, each line contains the charge densities for each shell
# If `charge` is not an integer, proatom densities have to be linearly interpolated between
# the densities of the ion/atom with floor(charge) and ceiling(charge)
charge_floor = int(math.floor(charge))
charge_ceil = int(math.ceil(charge))
chargemol_path_floor = os.path.join(
directory,
"c2_{:03d}_{:03d}_{:03d}_500_100.txt".format(
atom_num, atom_num, atom_num - charge_floor
),
)
chargemol_path_ceil = os.path.join(
directory,
"c2_{:03d}_{:03d}_{:03d}_500_100.txt".format(
atom_num, atom_num, atom_num - charge_ceil
),
)
horton_path = os.path.join(directory, "atoms.h5")
if os.path.isfile(chargemol_path_floor) or os.path.isfile(chargemol_path_ceil):
# Use chargemol proatom densities
# Each shell is .05 angstroms apart (uniform).
# *scalefactor* = 10.58354497764173 bohrs in module_global_parameter.f08
if atom_num <= charge_floor:
density_floor = numpy.array([0])
else:
density_floor = numpy.loadtxt(chargemol_path_floor, skiprows=12, dtype=float)
if atom_num >= charge_ceil:
density_ceil = numpy.array([0])
else:
density_ceil = numpy.loadtxt(chargemol_path_ceil, skiprows=12, dtype=float)
density = (charge_ceil - charge) * density_floor + (
charge - charge_floor
) * density_ceil
radiusgrid = numpy.arange(1, len(density) + 1) * 0.05
elif os.path.isfile(horton_path):
# Use horton proatom densities
assert find_package("h5py"), "h5py is needed to read in proatom densities from horton."
import h5py
with h5py.File(horton_path, "r") as proatomdb:
if atom_num <= charge_floor:
density_floor = numpy.array([0])
radiusgrid = numpy.array([0])
else:
keystring_floor = "Z={}_Q={:+d}".format(atom_num, charge_floor)
density_floor = numpy.asanyarray(list(proatomdb[keystring_floor]["rho"]))
# gridspec is specification of integration grid for proatom densities in horton.
# Example -- ['PowerRTransform', '1.1774580743206259e-07', '20.140888089596444', '41']
# is constructed using PowerRTransform grid
# with rmin = 1.1774580743206259e-07
# rmax = 20.140888089596444
# and ngrid = 41
# PowerRTransform is default in horton-atomdb.py.
gridtype, gridmin, gridmax, gridn = (
proatomdb[keystring_floor].attrs["rtransform"].split()
)
gridmin = convertor(float(gridmin), "bohr", "Angstrom")
gridmax = convertor(float(gridmax), "bohr", "Angstrom")
gridn = int(gridn)
# Convert byte to string in Python3
if sys.version[0] == "3":
gridtype = gridtype.decode("UTF-8")
# First verify that it is one of recognized grids
assert gridtype in [
"LinearRTransform",
"ExpRTransform",
"PowerRTransform",
], "Grid type not recognized."
if gridtype == "LinearRTransform":
# Linear transformation. r(t) = rmin + t*(rmax - rmin)/(npoint - 1)
gridcoeff = (gridmax - gridmin) / (gridn - 1)
radiusgrid = gridmin + numpy.arange(1, gridn + 1) * gridcoeff
elif gridtype == "ExpRTransform":
# Exponential transformation. r(t) = rmin*exp(t*log(rmax/rmin)/(npoint - 1))
gridcoeff = math.log(gridmax / gridmin) / (gridn - 1)
radiusgrid = gridmin * numpy.exp(numpy.arange(1, gridn + 1) * gridcoeff)
elif gridtype == "PowerRTransform":
# Power transformation. r(t) = rmin*t^power
# with power = log(rmax/rmin)/log(npoint)
gridcoeff = math.log(gridmax / gridmin) / math.log(gridn)
radiusgrid = gridmin * numpy.power(numpy.arange(1, gridn + 1), gridcoeff)
if atom_num <= charge_ceil:
density_ceil = numpy.array([0])
else:
keystring_ceil = "Z={}_Q={:+d}".format(atom_num, charge_ceil)
density_ceil = numpy.asanyarray(list(proatomdb[keystring_ceil]["rho"]))
density = (charge_ceil - charge) * density_floor + (
charge - charge_floor
) * density_ceil
del h5py
else:
raise MissingInputError("Pro-atom densities were not found in the specified path.")
if charge == charge_floor:
density = density_floor
return density, radiusgrid
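    # Worked example of the interpolation above (charge value is hypothetical):
    # for charge = +0.3, charge_floor = 0 and charge_ceil = +1, so
    #   density = 0.7 * density(charge=0) + 0.3 * density(charge=+1)
    # i.e. (charge_ceil - charge) weights the floor density and
    # (charge - charge_floor) weights the ceiling density.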
def calculate(self, indices=None, fupdate=0.05):
"""
Calculate DDEC6 charges based on doi: 10.1039/c6ra04656h paper.
Cartesian, uniformly spaced grids are assumed for this function.
"""
# Obtain charge densities on the grid if it does not contain one.
if not numpy.any(self.volume.data):
self.logger.info("Calculating charge densities on the provided empty grid.")
if len(self.data.mocoeffs) == 1:
self.chgdensity = electrondensity_spin(
self.data, self.volume, [self.data.mocoeffs[0][: self.data.homos[0]]]
)
self.chgdensity.data *= 2
else:
self.chgdensity = electrondensity_spin(
self.data,
self.volume,
[
self.data.mocoeffs[0][: self.data.homos[0]],
self.data.mocoeffs[1][: self.data.homos[1]],
],
)
# If charge densities are provided beforehand, log this information
# `Volume` object does not contain (nor rely on) information about the constituent atoms.
else:
self.logger.info("Using charge densities from the provided Volume object.")
self.chgdensity = self.volume
# STEP 1
# Carry out step 1 of DDEC6 algorithm [Determining ion charge value]
# Refer to equations 49-57 in doi: 10.1039/c6ra04656h
self.logger.info("Creating first reference charges.")
ref, loc, stock = self.calculate_refcharges()
self.refcharges = [ref]
self._localizedcharges = [loc]
self._stockholdercharges = [stock]
# STEP 2
# Load new proatom densities.
self.logger.info("Creating second reference charges.")
self.proatom_density = []
self.radial_grid_r = []
for i, atom_number in enumerate(self.data.atomnos):
density, r = self._read_proatom(
self.proatom_path, atom_number, float(self.refcharges[0][i])
)
self.proatom_density.append(density)
self.radial_grid_r.append(r)
# Carry out step 2 of DDEC6 algorithm [Determining ion charge value again]
ref, loc, stock = self.calculate_refcharges()
self.refcharges.append(ref)
self._localizedcharges.append(loc)
self._stockholdercharges.append(stock)
# STEP 3
# Load new proatom densities.
self.proatom_density = []
self.radial_grid_r = []
for i, atom_number in enumerate(self.data.atomnos):
density, r = self._read_proatom(
self.proatom_path, atom_number, float(self.refcharges[1][i])
)
self.proatom_density.append(density)
self.radial_grid_r.append(r)
# Carry out step 3 of DDEC6 algorithm [Determine conditioned charge density and tau]
self.logger.info("Conditioning charge densities.")
self.condition_densities()
def calculate_refcharges(self):
""" Calculate reference charges from proatom density and molecular density
[STEP 1 and 2]
"""
# Generator object to iterate over the grid
xshape, yshape, zshape = self.chgdensity.data.shape
atoms = len(self.data.atomnos)
indices = (
(i, x, y, z)
for i in range(atoms)
for x in range(xshape)
for y in range(yshape)
for z in range(zshape)
)
stockholder_w = numpy.zeros((atoms, xshape, yshape, zshape))
localized_w = numpy.zeros((atoms, xshape, yshape, zshape))
self.closest_r_index = numpy.zeros((atoms, xshape, yshape, zshape), dtype=int)
for atomi, xindex, yindex, zindex in indices:
# Distance of the grid from atom grid
dist_r = self._cartesian_dist(
self.data.atomcoords[-1][atomi],
self.chgdensity.coordinates([xindex, yindex, zindex]),
)
self.closest_r_index[atomi][xindex][yindex][zindex] = numpy.abs(
self.radial_grid_r[atomi] - dist_r
).argmin()
# Equation 54 in doi: 10.1039/c6ra04656h
stockholder_w[atomi][xindex][yindex][zindex] = self.proatom_density[atomi][
self.closest_r_index[atomi][xindex][yindex][zindex]
]
# Equation 55 in doi: 10.1039/c6ra04656h
localized_w = numpy.power(stockholder_w, 4)
# Equation 53 in doi: 10.1039/c6ra04656h
stockholder_bigW = numpy.sum(stockholder_w, axis=0)
localized_bigW = numpy.sum(localized_w, axis=0)
refcharges = numpy.zeros((atoms))
localizedcharges = numpy.zeros((atoms))
stockholdercharges = numpy.zeros((atoms))
for atomi in range(atoms):
# Equation 52 and 51 in doi: 10.1039/c6ra04656h
localizedcharges[atomi] = self.data.atomnos[atomi] - self.chgdensity.integrate(
weights=(localized_w[atomi] / localized_bigW)
)
stockholdercharges[atomi] = self.data.atomnos[atomi] - self.chgdensity.integrate(
weights=(stockholder_w[atomi] / stockholder_bigW)
)
# In DDEC6, weights of 1/3 and 2/3 are assigned for stockholder and localized charges.
# (Equation 50 and 58 in doi: 10.1039/c6ra04656h)
refcharges[atomi] = (stockholdercharges[atomi] / 3.0) + (
localizedcharges[atomi] * 2.0 / 3.0
)
return refcharges, localizedcharges, stockholdercharges
def condition_densities(self):
""" Calculate conditioned densities
[STEP 3]
"""
# Generator object to iterate over the grid
xshape, yshape, zshape = self.chgdensity.data.shape
atoms = len(self.data.atomnos)
indices = (
(i, x, y, z)
for i in range(atoms)
for x in range(xshape)
for y in range(yshape)
for z in range(zshape)
)
self._rho_ref = numpy.zeros((xshape, yshape, zshape))
for atomi, xindex, yindex, zindex in indices:
# rho_ref -- Equation 41 in doi: 10.1039/c6ra04656h
self._rho_ref[xindex][yindex][zindex] += self.proatom_density[atomi][
self.closest_r_index[atomi][xindex][yindex][zindex]
]
self._candidates_bigPhi = []
self._candidates_phi = []
# Initial conditions are detailed in Figure S1 in doi: 10.1039/c6ra04656h
phiAI = numpy.zeros_like(self.data.atomnos, dtype=float)
bigphiAI = numpy.zeros_like(self.data.atomnos, dtype=float)
self._y_a = []
self._cond_density = []
for atomi in range(len(self.data.atomnos)):
# y_a -- equation 40 in doi: 10.1039/c6ra04656h
self._y_a.append(self._ya(self.proatom_density, atomi))
# rho_A^cond -- equation S97 in doi: 10.1039/c6ra04656h
self._cond_density.append(
self._y_a[atomi] + bigphiAI[atomi] * numpy.sqrt(self._y_a[atomi])
)
# Monotonic Decrease Condition (as expressed in equation S99)
self._cond_density[atomi] = numpy.minimum.accumulate(self._cond_density[atomi])
# phi_A^I -- Equation S100 in doi: 10.1039/c6ra04656h
phiAI[atomi] = (
self._integrate_from_radial([self._cond_density[atomi]], [atomi])
- self.data.atomnos[atomi]
+ self.refcharges[-1][atomi]
)
self._candidates_bigPhi.append([bigphiAI[atomi]])
self._candidates_phi.append([phiAI[atomi]])
fitphi = True # when convergence is reached, this is modified to False.
while phiAI[atomi] <= 0:
# Iterative algorithm until convergence
# Refer to S101 in doi: 10.1039/c6ra04656h
bigphiAI[atomi] = 2 * bigphiAI[atomi] - phiAI[atomi] / self._integrate_from_radial(
[numpy.sqrt(self._y_a[atomi])], [atomi]
)
# When Phi is updated, related quantities are updated as well
# Refer to S100 in doi: 10.1039/c6ra04656h
phiAI[atomi], self._cond_density[atomi] = self._phiai(
self._y_a[atomi], bigphiAI[atomi], atomi
)
self._candidates_phi[atomi].append(phiAI[atomi])
self._candidates_bigPhi[atomi].append(bigphiAI[atomi])
# lowerbigPhi is largest negative Phi.
# upperbigPhi is smallest positive Phi.
if fitphi:
self._candidates_phi[atomi] = numpy.array(self._candidates_phi[atomi], dtype=float)
self._candidates_bigPhi[atomi] = numpy.array(
self._candidates_bigPhi[atomi], dtype=float
)
if numpy.count_nonzero(self._candidates_phi[atomi] < 0) > 0:
lower_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] < 0].max()
)[0][0]
lowerbigPhi = self._candidates_bigPhi[atomi][lower_ind]
lowerphi = self._candidates_phi[atomi][lower_ind]
else: # assign some large negative number
lowerbigPhi = numpy.NINF
lowerphi = numpy.NINF
if numpy.count_nonzero(self._candidates_phi[atomi] > 0) > 0:
upper_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] > 0].min()
)[0][0]
upperbigPhi = self._candidates_bigPhi[atomi][upper_ind]
upperphi = self._candidates_phi[atomi][upper_ind]
else: # assign some large positive number
upperbigPhi = numpy.PINF
upperphi = numpy.PINF
iter = 0
while fitphi and iter < 50:
# Flow diagram on Figure S1 in doi: 10.1039/c6ra04656h details the procedure.
iter = iter + 1
midbigPhi = (lowerbigPhi + upperbigPhi) / 2.0
midphi = self._phiai(self._y_a[atomi], midbigPhi, atomi)[0]
# Exit conditions
if abs(lowerphi) < 1e-10:
bigphiAI[atomi] = lowerbigPhi
fitphi = False
elif abs(upperphi) < 1e-10:
bigphiAI[atomi] = upperbigPhi
fitphi = False
elif abs(midphi) < 1e-10:
bigphiAI[atomi] = midbigPhi
fitphi = False
else:
# Parabolic fitting as described on Figure S1 in doi: 10.1039/c6ra04656h
# Type casting here converts from size 1 numpy.ndarray to float
xpts = numpy.array(
[float(lowerbigPhi), float(midbigPhi), float(upperbigPhi)], dtype=float
)
ypts = numpy.array(
[float(lowerphi), float(midphi), float(upperphi)], dtype=float
)
fit = numpy.polyfit(xpts, ypts, 2)
roots = numpy.roots(fit) # max two roots (bigPhi)
belowphi = self._phiai(self._y_a[atomi], roots.min(), atomi)[0]
abovephi = self._phiai(self._y_a[atomi], roots.max(), atomi)[0]
if abs(abovephi) < 1e-10:
bigphiAI[atomi] = roots.min()
fitphi = False
elif abs(belowphi) < 1e-10:
bigphiAI[atomi] = roots.max()
fitphi = False
else:
if 3 * abs(abovephi) < abs(belowphi):
corbigPhi = roots.max() - 2.0 * abovephi * (
roots.max() - roots.min()
) / (abovephi - belowphi)
elif 3 * abs(belowphi) < abs(abovephi):
corbigPhi = roots.min() - 2.0 * belowphi * (
roots.max() - roots.min()
) / (abovephi - belowphi)
else:
corbigPhi = (roots.max() + roots.min()) / 2.0
# New candidates
corphi = self._phiai(self._y_a[atomi], corbigPhi, atomi)[0]
self._candidates_bigPhi[atomi] = numpy.array(
[
lowerbigPhi,
midbigPhi,
upperbigPhi,
roots.max(),
roots.min(),
corbigPhi,
],
dtype=float,
)
self._candidates_phi[atomi] = numpy.array(
[lowerphi, midphi, upperphi, abovephi, belowphi, corphi], dtype=float
)
# Update upperphi and lowerphi
lower_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] < 0].max()
)[0][0]
upper_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] > 0].min()
)[0][0]
lowerphi = self._candidates_phi[atomi][lower_ind]
upperphi = self._candidates_phi[atomi][upper_ind]
if abs(lowerphi) < 1e-10:
bigphiAI[atomi] = self._candidates_bigPhi[atomi][lower_ind]
fitphi = False
elif abs(upperphi) < 1e-10:
bigphiAI[atomi] = self._candidates_bigPhi[atomi][upper_ind]
fitphi = False
else:
# Fitting needs to continue in this case.
lowerbigPhi = self._candidates_bigPhi[atomi][lower_ind]
lowerphi = self._candidates_phi[atomi][lower_ind]
upperbigPhi = self._candidates_bigPhi[atomi][upper_ind]
upperphi = self._candidates_phi[atomi][upper_ind]
assert not fitphi, "Iterative conditioning failed to converge."
# Set final conditioned density using chosen Phi
self._cond_density[atomi] = self._phiai(self._y_a[atomi], bigphiAI[atomi], atomi)[1]
self.logger.info("Calculating tau and combined conditioned densities.")
# Calculate tau(r) and rho^cond(r)
# Refer to equation 65 and 66 in doi: 10.1039/c6ra04656h
# Assign rho^cond on grid using generator object
self.rho_cond = copy.deepcopy(self.chgdensity)
self.rho_cond.data = numpy.zeros_like(self.rho_cond.data, dtype=float)
rho_cond_sqrt = numpy.zeros_like(self.rho_cond.data, dtype=float)
# Generator object to iterate over the grid
xshape, yshape, zshape = self.chgdensity.data.shape
atoms = len(self.data.atomnos)
indices = (
(i, x, y, z)
for i in range(atoms)
for x in range(xshape)
for y in range(yshape)
for z in range(zshape)
)
self._leftterm = numpy.zeros((atoms, xshape, yshape, zshape), dtype=float)
self.tau = []
# rho_cond -- equation 65 in doi: 10.1039/c6ra04656h
for atomi, xindex, yindex, zindex in indices:
self.rho_cond.data[xindex][yindex][zindex] += self._cond_density[atomi][
self.closest_r_index[atomi][xindex][yindex][zindex]
]
rho_cond_sqrt = numpy.sqrt(self.rho_cond.data)
for atomi in range(len(self.data.atomnos)):
self.tau.append(numpy.zeros_like(self.proatom_density[atomi], dtype=float))
grid = ((x, y, z) for x in range(xshape) for y in range(yshape) for z in range(zshape))
for xindex, yindex, zindex in grid:
# leftterm is the first spherical average term in equation 66.
# <rho^cond_A(r_A) / sqrt(rho^cond(r))>
self._leftterm[atomi][xindex][yindex][zindex] = self._cond_density[atomi][
self.closest_r_index[atomi][xindex][yindex][zindex]
]
self._leftterm[atomi] = self._leftterm[atomi] / rho_cond_sqrt
for radiusi in range(len(self.tau[atomi])):
grid_filter = self.closest_r_index[atomi] == radiusi
num_grid_filter = numpy.count_nonzero(grid_filter)
if num_grid_filter < 1:
self.tau[atomi][radiusi] = 0.0
else:
leftaverage = numpy.sum(grid_filter * self._leftterm[atomi]) / num_grid_filter
                        rightaverage = numpy.sum(grid_filter * rho_cond_sqrt) / num_grid_filter
"""
Tests for the correlation function calculations.
"""
import numpy as np
from numpy.testing import (
run_module_suite,
assert_,
assert_array_almost_equal,
assert_raises,
)
from matsubara.correlation import (
sum_of_exponentials,
biexp_fit,
bath_correlation,
underdamped_brownian,
nonmatsubara_exponents,
matsubara_exponents,
matsubara_zero_analytical,
coth,
spectrum,
spectrum_matsubara,
spectrum_non_matsubara,
_S,
_A,
)
def test_sum_of_exponentials():
"""
correlation: Test the sum of exponentials.
"""
tlist = [0.0, 0.5, 1.0, 1.5, 2.0]
# Complex coefficients and frequencies.
ck1 = [0.5 + 2j, -0.1]
vk1 = [-0.5 + 3j, 1 - 1j]
corr1 = sum_of_exponentials(ck1, vk1, tlist)
y1 = np.array(
[
0.4 + 2.0j,
-1.67084356 + 0.57764922j,
-0.61828702 - 0.92938927j,
0.84201641 + 0.0170242j,
0.68968912 + 1.32694318j,
]
)
assert_array_almost_equal(corr1, y1)
# Real coefficients and frequencies.
ck2 = [0.5, -0.3]
vk2 = [-0.9, -0.3]
corr2 = sum_of_exponentials(ck2, vk2, tlist)
y2 = np.array([0.2, 0.060602, -0.018961, -0.061668, -0.081994])
assert_array_almost_equal(corr2, y2)
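    # Hedged note on the model under test: sum_of_exponentials evaluates
    #   C(t) = sum_k ck[k] * exp(vk[k] * t)
    # on tlist, so the expected arrays above can be reproduced by hand, e.g. at
    # t = 0 the first case gives 0.5 + 2j - 0.1 = 0.4 + 2j.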
def test_biexp_fit():
"""
correlation: Tests biexponential fitting.
"""
tlist = np.linspace(0.0, 10, 100)
ck = [-0.21, -0.13]
vk = [-0.4, -1.5]
corr = sum_of_exponentials(ck, vk, tlist)
ck_fit, vk_fit = biexp_fit(tlist, corr)
corr_fit = sum_of_exponentials(ck_fit, vk_fit, tlist)
max_error = np.max(np.abs(corr - corr_fit))
max_amplitude = np.max(np.abs(corr))
assert max_error < max_amplitude / 1e3
def test_bath_correlation():
"""
correlation: Tests for bath correlation function.
"""
tlist = [0.0, 0.5, 1.0, 1.5, 2.0]
lam, gamma, w0 = 0.4, 0.4, 1.0
beta = np.inf
w_cutoff = 10.0
corr = bath_correlation(
underdamped_brownian, tlist, [lam, gamma, w0], beta, w_cutoff
)
y = np.array(
[
0.07108,
0.059188 - 0.03477j,
0.033282 - 0.055529j,
0.003295 - 0.060187j,
-0.022843 - 0.050641j,
]
)
assert_array_almost_equal(corr, y)
sd = np.arange(0, 10, 2)
assert_raises(TypeError, bath_correlation, [sd, tlist, [0.1], beta, w_cutoff])
def test_exponents():
"""
correlation: Tests the Matsubara and non Matsubara exponents.
"""
lam, gamma, w0 = 0.2, 0.05, 1.0
tlist = np.linspace(0, 100, 1000)
# Finite temperature cases
beta = 0.1
N_exp = 200
ck_nonmats = [0.190164 - 0.004997j, 0.21017 + 0.004997j]
vk_nonmats = [-0.025 + 0.999687j, -0.025 - 0.999687j]
ck1, vk1 = nonmatsubara_exponents(lam, gamma, w0, beta)
assert_array_almost_equal(ck1, ck_nonmats)
assert_array_almost_equal(vk1, vk_nonmats)
ck2, vk2 = matsubara_exponents(lam, gamma, w0, beta, N_exp)
corr_fit = sum_of_exponentials(
        np.concatenate([ck1, ck2]), np.concatenate([vk1, vk2]), tlist
    )
"""
@author: <NAME>
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from scipy.interpolate import griddata
from mpl_toolkits.mplot3d import Axes3D
from pyDOE import lhs
import time
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
###############################################################################
############################## Helper Functions ###############################
###############################################################################
def initialize_NN(layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
weights.append(W)
biases.append(b)
return weights, biases
def xavier_init(size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev, dtype=tf.float32), dtype=tf.float32)
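# Hedged note: this follows the Xavier/Glorot normal scheme, drawing weights from
# a truncated normal with stddev = sqrt(2 / (fan_in + fan_out)); for example a
# 50 -> 50 layer gets stddev = sqrt(2/100) ~= 0.141.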
def neural_net(X, weights, biases):
num_layers = len(weights) + 1
H = X
for l in range(0,num_layers-2):
W = weights[l]
b = biases[l]
H = tf.sin(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
###############################################################################
################################ DeepHPM Class ################################
###############################################################################
class DeepHPM:
def __init__(self, t, x, u,
x0, u0, tb, X_f,
u_layers, pde_layers,
layers,
lb_idn, ub_idn,
lb_sol, ub_sol):
# Domain Boundary
self.lb_idn = lb_idn
self.ub_idn = ub_idn
self.lb_sol = lb_sol
self.ub_sol = ub_sol
# Init for Identification
self.idn_init(t, x, u, u_layers, pde_layers)
# Init for Solution
self.sol_init(x0, u0, tb, X_f, layers)
# tf session
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
init = tf.global_variables_initializer()
self.sess.run(init)
###########################################################################
############################# Identifier ##################################
###########################################################################
def idn_init(self, t, x, u, u_layers, pde_layers):
# Training Data for Identification
self.t = t
self.x = x
self.u = u
# Layers for Identification
self.u_layers = u_layers
self.pde_layers = pde_layers
# Initialize NNs for Identification
self.u_weights, self.u_biases = initialize_NN(u_layers)
self.pde_weights, self.pde_biases = initialize_NN(pde_layers)
# tf placeholders for Identification
self.t_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.u_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.terms_tf = tf.placeholder(tf.float32, shape=[None, pde_layers[0]])
# tf graphs for Identification
self.idn_u_pred = self.idn_net_u(self.t_tf, self.x_tf)
self.pde_pred = self.net_pde(self.terms_tf)
self.idn_f_pred = self.idn_net_f(self.t_tf, self.x_tf)
# loss for Identification
self.idn_u_loss = tf.reduce_sum(tf.square(self.idn_u_pred - self.u_tf))
self.idn_f_loss = tf.reduce_sum(tf.square(self.idn_f_pred))
# Optimizer for Identification
self.idn_u_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.idn_u_loss,
var_list = self.u_weights + self.u_biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.idn_f_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.idn_f_loss,
var_list = self.pde_weights + self.pde_biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.idn_u_optimizer_Adam = tf.train.AdamOptimizer()
self.idn_u_train_op_Adam = self.idn_u_optimizer_Adam.minimize(self.idn_u_loss,
var_list = self.u_weights + self.u_biases)
self.idn_f_optimizer_Adam = tf.train.AdamOptimizer()
self.idn_f_train_op_Adam = self.idn_f_optimizer_Adam.minimize(self.idn_f_loss,
var_list = self.pde_weights + self.pde_biases)
def idn_net_u(self, t, x):
X = tf.concat([t,x],1)
H = 2.0*(X - self.lb_idn)/(self.ub_idn - self.lb_idn) - 1.0
u = neural_net(H, self.u_weights, self.u_biases)
return u
def net_pde(self, terms):
pde = neural_net(terms, self.pde_weights, self.pde_biases)
return pde
def idn_net_f(self, t, x):
u = self.idn_net_u(t, x)
u_t = tf.gradients(u, t)[0]
u_x = tf.gradients(u, x)[0]
u_xx = tf.gradients(u_x, x)[0]
terms = tf.concat([u,u_x,u_xx],1)
f = u_t - self.net_pde(terms)
return f
def idn_u_train(self, N_iter):
tf_dict = {self.t_tf: self.t, self.x_tf: self.x, self.u_tf: self.u}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.idn_u_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.idn_u_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.idn_u_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.idn_u_loss],
loss_callback = self.callback)
def idn_f_train(self, N_iter):
tf_dict = {self.t_tf: self.t, self.x_tf: self.x}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.idn_f_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.idn_f_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.idn_f_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.idn_f_loss],
loss_callback = self.callback)
def idn_predict(self, t_star, x_star):
tf_dict = {self.t_tf: t_star, self.x_tf: x_star}
u_star = self.sess.run(self.idn_u_pred, tf_dict)
f_star = self.sess.run(self.idn_f_pred, tf_dict)
return u_star, f_star
def predict_pde(self, terms_star):
tf_dict = {self.terms_tf: terms_star}
pde_star = self.sess.run(self.pde_pred, tf_dict)
return pde_star
###########################################################################
############################### Solver ####################################
###########################################################################
def sol_init(self, x0, u0, tb, X_f, layers):
# Training Data for Solution
X0 = np.concatenate((0*x0, x0), 1) # (0, x0)
X_lb = np.concatenate((tb, 0*tb + self.lb_sol[1]), 1) # (tb, lb[1])
X_ub = np.concatenate((tb, 0*tb + self.ub_sol[1]), 1) # (tb, ub[1])
self.X_f = X_f # Collocation Points
self.t0 = X0[:,0:1] # Initial Data (time)
self.x0 = X0[:,1:2] # Initial Data (space)
self.t_lb = X_lb[:,0:1] # Boundary Data (time) -- lower boundary
self.x_lb = X_lb[:,1:2] # Boundary Data (space) -- lower boundary
self.t_ub = X_ub[:,0:1] # Boundary Data (time) -- upper boundary
self.x_ub = X_ub[:,1:2] # Boundary Data (space) -- upper boundary
self.t_f = X_f[:,0:1] # Collocation Points (time)
self.x_f = X_f[:,1:2] # Collocation Points (space)
self.u0 = u0 # Boundary Data
# Layers for Solution
self.layers = layers
# Initialize NNs for Solution
self.weights, self.biases = initialize_NN(layers)
# tf placeholders for Solution
self.t0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.u0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_lb_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_lb_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_ub_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_ub_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_f_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_f_tf = tf.placeholder(tf.float32, shape=[None, 1])
# tf graphs for Solution
self.u0_pred, _ = self.sol_net_u(self.t0_tf, self.x0_tf)
self.u_lb_pred, self.u_x_lb_pred = self.sol_net_u(self.t_lb_tf, self.x_lb_tf)
self.u_ub_pred, self.u_x_ub_pred = self.sol_net_u(self.t_ub_tf, self.x_ub_tf)
self.sol_f_pred = self.sol_net_f(self.t_f_tf, self.x_f_tf)
# loss for Solution
self.sol_loss = tf.reduce_sum(tf.square(self.u0_tf - self.u0_pred)) + \
tf.reduce_sum(tf.square(self.u_lb_pred - self.u_ub_pred)) + \
tf.reduce_sum(tf.square(self.u_x_lb_pred - self.u_x_ub_pred)) + \
tf.reduce_sum(tf.square(self.sol_f_pred))
# Optimizer for Solution
self.sol_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.sol_loss,
var_list = self.weights + self.biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.sol_optimizer_Adam = tf.train.AdamOptimizer()
self.sol_train_op_Adam = self.sol_optimizer_Adam.minimize(self.sol_loss,
var_list = self.weights + self.biases)
def sol_net_u(self, t, x):
X = tf.concat([t,x],1)
H = 2.0*(X - self.lb_sol)/(self.ub_sol - self.lb_sol) - 1.0
u = neural_net(H, self.weights, self.biases)
u_x = tf.gradients(u, x)[0]
return u, u_x
def sol_net_f(self, t, x):
u, _ = self.sol_net_u(t,x)
u_t = tf.gradients(u, t)[0]
u_x = tf.gradients(u, x)[0]
u_xx = tf.gradients(u_x, x)[0]
terms = tf.concat([u,u_x,u_xx],1)
f = u_t - self.net_pde(terms)
return f
def callback(self, loss):
print('Loss: %e' % (loss))
def sol_train(self, N_iter):
tf_dict = {self.t0_tf: self.t0, self.x0_tf: self.x0,
self.u0_tf: self.u0,
self.t_lb_tf: self.t_lb, self.x_lb_tf: self.x_lb,
self.t_ub_tf: self.t_ub, self.x_ub_tf: self.x_ub,
self.t_f_tf: self.t_f, self.x_f_tf: self.x_f}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.sol_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.sol_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.sol_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.sol_loss],
loss_callback = self.callback)
def sol_predict(self, t_star, x_star):
u_star = self.sess.run(self.u0_pred, {self.t0_tf: t_star, self.x0_tf: x_star})
f_star = self.sess.run(self.sol_f_pred, {self.t_f_tf: t_star, self.x_f_tf: x_star})
return u_star, f_star
###############################################################################
################################ Main Function ################################
###############################################################################
if __name__ == "__main__":
    # Domain bounds
lb_idn = np.array([0.0, -8.0])
ub_idn = np.array([10.0, 8.0])
lb_sol = np.array([0.0, -8.0])
ub_sol = np.array([10.0, 8.0])
### Load Data ###
data_idn = scipy.io.loadmat('../Data/burgers_sine.mat')
t_idn = data_idn['t'].flatten()[:,None]
x_idn = data_idn['x'].flatten()[:,None]
Exact_idn = np.real(data_idn['usol'])
T_idn, X_idn = np.meshgrid(t_idn,x_idn)
keep = 2/3
index = int(keep*t_idn.shape[0])
T_idn = T_idn[:,0:index]
X_idn = X_idn[:,0:index]
Exact_idn = Exact_idn[:,0:index]
t_idn_star = T_idn.flatten()[:,None]
x_idn_star = X_idn.flatten()[:,None]
X_idn_star = np.hstack((t_idn_star, x_idn_star))
u_idn_star = Exact_idn.flatten()[:,None]
#
data_sol = scipy.io.loadmat('../Data/burgers_sine.mat')
t_sol = data_sol['t'].flatten()[:,None]
x_sol = data_sol['x'].flatten()[:,None]
Exact_sol = np.real(data_sol['usol'])
T_sol, X_sol = np.meshgrid(t_sol,x_sol)
t_sol_star = T_sol.flatten()[:,None]
x_sol_star = X_sol.flatten()[:,None]
    X_sol_star = np.hstack((t_sol_star, x_sol_star))
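    # Illustrative continuation (a sketch of the usual DeepHPM workflow, not the
    # original script): subsample the identification data, build the model with
    # assumed layer sizes, run the identification and solution stages defined by
    # the class above, and predict on the full solution grid. The initial,
    # boundary and collocation arrays (x0, u0, tb, X_f) would still have to be
    # constructed from the loaded data before this can run.
    #
    #   N_train = 10000
    #   idx = np.random.choice(t_idn_star.shape[0], N_train, replace=False)
    #   model = DeepHPM(t_idn_star[idx], x_idn_star[idx], u_idn_star[idx],
    #                   x0, u0, tb, X_f,
    #                   u_layers=[2, 50, 50, 50, 1],
    #                   pde_layers=[3, 100, 100, 1],
    #                   layers=[2, 50, 50, 50, 1],
    #                   lb_idn=lb_idn, ub_idn=ub_idn,
    #                   lb_sol=lb_sol, ub_sol=ub_sol)
    #   model.idn_u_train(N_iter=10000)  # fit u(t, x) to the measurements
    #   model.idn_f_train(N_iter=10000)  # learn the hidden PDE N(u, u_x, u_xx)
    #   model.sol_train(N_iter=10000)    # solve the learned PDE
    #   u_pred, f_pred = model.sol_predict(t_sol_star, x_sol_star)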
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Implement many useful :class:`Augmentation`.
"""
import numpy as np
import sys
import random
import copy
import math
from PIL import Image
from detectron2.data.detection_utils import adjust_scale, rand_by_rate, get_inv_tuple_matrix, get_random_color, get_paste_point
from .augmentation import Augmentation, _transform_to_aug
from .transform import *
__all__ = [
"RandomApply",
"RandomBrightness",
"RandomContrast",
"RandomCrop",
"RandomExtent",
"RandomFlip",
"RandomSaturation",
"RandomLighting",
"RandomRotation",
"Resize",
"ResizeShortestEdge",
"RandomCrop_CategoryAreaConstraint",
"EdgeFilter",
"ResizeRatio",
"BoxShear",
"BoxContrast",
"BoxErase",
"Noise",
"Mosaic",
"BoxAttentionCrop",
"BoxMove",
]
class RandomApply(Augmentation):
"""
Randomly apply an augmentation with a given probability.
"""
def __init__(self, tfm_or_aug, prob=0.5):
"""
Args:
tfm_or_aug (Transform, Augmentation): the transform or augmentation
to be applied. It can either be a `Transform` or `Augmentation`
instance.
prob (float): probability between 0.0 and 1.0 that
the wrapper transformation is applied
"""
super().__init__()
self.aug = _transform_to_aug(tfm_or_aug)
        assert 0.0 <= prob <= 1.0, f"Probability must be between 0.0 and 1.0 (given: {prob})"
self.prob = prob
def get_transform(self, *args):
do = self._rand_range() < self.prob
if do:
return self.aug.get_transform(*args)
else:
return NoOpTransform()
def __call__(self, aug_input):
do = self._rand_range() < self.prob
if do:
return self.aug(aug_input)
else:
return NoOpTransform()
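# Usage sketch (illustrative, not part of the original file): RandomApply wraps
# any augmentation so it only fires with the given probability. The helper below
# is an assumption about typical usage, not an API defined in this module.
def _demo_random_apply():
    # Flip roughly 30% of images horizontally; the rest get a NoOpTransform.
    return RandomApply(RandomFlip(prob=1.0, horizontal=True), prob=0.3)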
class RandomFlip(Augmentation):
"""
Flip the image horizontally or vertically with the given probability.
"""
def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
"""
Args:
prob (float): probability of flip.
horizontal (boolean): whether to apply horizontal flipping
vertical (boolean): whether to apply vertical flipping
"""
super().__init__()
if horizontal and vertical:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horizontal and not vertical:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
do = self._rand_range() < self.prob
if do:
if self.horizontal:
return HFlipTransform(w)
elif self.vertical:
return VFlipTransform(h)
else:
return NoOpTransform()
class Resize(Augmentation):
""" Resize image to a fixed target size"""
def __init__(self, shape, interp=Image.BILINEAR):
"""
Args:
shape: (h, w) tuple or a int
interp: PIL interpolation method
"""
if isinstance(shape, int):
shape = (shape, shape)
shape = tuple(shape)
self._init(locals())
def get_transform(self, image):
return ResizeTransform(
image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
)
class ResizeShortestEdge(Augmentation):
"""
Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
"""
def __init__(
self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
):
"""
Args:
short_edge_length (list[int]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the shortest edge length.
If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
max_size (int): maximum allowed longest edge length.
sample_style (str): either "range" or "choice".
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
if self.is_range:
assert len(short_edge_length) == 2, (
"short_edge_length must be two values using 'range' sample style."
f" Got {short_edge_length}!"
)
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
if self.is_range:
size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
else:
size = np.random.choice(self.short_edge_length)
if size == 0:
return NoOpTransform()
scale = size * 1.0 / min(h, w)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return ResizeTransform(h, w, newh, neww, self.interp)
class RandomRotation(Augmentation):
"""
This method returns a copy of this image, rotated the given
number of degrees counter clockwise around the given center.
"""
def __init__(self, prob, angle, expand=True, center=None, sample_style="range", interp=None):
"""
Args:
angle (list[float]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the angle (in degrees).
If ``sample_style=="choice"``, a list of angles to sample from
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (list[[float, float]]): If ``sample_style=="range"``,
a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
[0, 0] being the top left of the image and [1, 1] the bottom right.
If ``sample_style=="choice"``, a list of centers to sample from
Default: None, which means that the center of rotation is the center of the image
center has no effect if expand=True because it only affects shifting
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(angle, (float, int)):
angle = (angle, angle)
if center is not None and isinstance(center[0], (float, int)):
center = (center, center)
self._init(locals())
def get_transform(self, image):
do = self._rand_range() < self.prob
if do:
h, w = image.shape[:2]
center = None
if self.is_range:
angle = np.random.uniform(self.angle[0], self.angle[1])
if self.center is not None:
center = (
np.random.uniform(self.center[0][0], self.center[1][0]),
np.random.uniform(self.center[0][1], self.center[1][1]),
)
else:
angle = np.random.choice(self.angle)
if self.center is not None:
center = np.random.choice(self.center)
if center is not None:
center = (w * center[0], h * center[1]) # Convert to absolute coordinates
if angle % 360 == 0:
return NoOpTransform()
return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
else:
return NoOpTransform()
class RandomCrop(Augmentation):
"""
Randomly crop a subimage out of an image.
"""
def __init__(self, crop_type: str, crop_size):
"""
Args:
crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
See `config/defaults.py` for explanation.
crop_size (tuple[float]): the relative ratio or absolute pixels of
height and width
"""
super().__init__()
assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
croph, cropw = self.get_crop_size((h, w))
assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
h0 = np.random.randint(h - croph + 1)
w0 = np.random.randint(w - cropw + 1)
return CropTransform(w0, h0, cropw, croph)
def get_crop_size(self, image_size):
"""
Args:
image_size (tuple): height, width
Returns:
crop_size (tuple): height, width in absolute pixels
"""
h, w = image_size
if self.crop_type == "relative":
ch, cw = self.crop_size
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "relative_range":
crop_size = np.asarray(self.crop_size, dtype=np.float32)
ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "absolute":
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == "absolute_range":
assert self.crop_size[0] <= self.crop_size[1]
ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
return ch, cw
else:
            raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
class RandomCrop_CategoryAreaConstraint(Augmentation):
"""
Similar to :class:`RandomCrop`, but find a cropping window such that no single category
occupies a ratio of more than `single_category_max_area` in semantic segmentation ground
    truth, which can cause instability in training. The function attempts to find such a valid
cropping window for at most 10 times.
"""
def __init__(
self,
crop_type: str,
crop_size,
single_category_max_area: float = 1.0,
ignored_category: int = None,
):
"""
Args:
crop_type, crop_size: same as in :class:`RandomCrop`
single_category_max_area: the maximum allowed area ratio of a
category. Set to 1.0 to disable
ignored_category: allow this category in the semantic segmentation
ground truth to exceed the area ratio. Usually set to the category
that's ignored in training.
"""
self.crop_aug = RandomCrop(crop_type, crop_size)
self._init(locals())
def get_transform(self, image, sem_seg):
if self.single_category_max_area >= 1.0:
return self.crop_aug.get_transform(image)
else:
h, w = sem_seg.shape
for _ in range(10):
crop_size = self.crop_aug.get_crop_size((h, w))
y0 = np.random.randint(h - crop_size[0] + 1)
x0 = np.random.randint(w - crop_size[1] + 1)
sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
labels, cnt = np.unique(sem_seg_temp, return_counts=True)
if self.ignored_category is not None:
cnt = cnt[labels != self.ignored_category]
                if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
                    break
            crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0])
            return crop_tfm
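# Usage sketch (illustrative, not part of the original file): constrain random
# crops so that no single semantic class dominates the cropped region. The
# values below are arbitrary assumptions; 255 stands in for a typical "ignore"
# label in the semantic segmentation ground truth.
def _demo_category_area_crop():
    return RandomCrop_CategoryAreaConstraint(
        "relative", (0.5, 0.5), single_category_max_area=0.75,
        ignored_category=255)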
import tempfile, os, glob
from scipy.stats import norm as ndist
from traitlets import (HasTraits,
Integer,
Unicode,
Float,
Integer,
Instance,
Dict,
Bool,
default)
import numpy as np
import regreg.api as rr
from selection.algorithms.lasso import lasso, lasso_full, lasso_full_modelQ
from selection.algorithms.sqrt_lasso import choose_lambda
from selection.truncated.gaussian import truncated_gaussian_old as TG
from selection.randomized.lasso import lasso as random_lasso_method, form_targets
from selection.randomized.modelQ import modelQ as randomized_modelQ
from utils import BHfilter
from selection.randomized.base import restricted_estimator
# Rpy
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
methods = {}
class generic_method(HasTraits):
need_CV = False
selectiveR_method = False
wide_ok = True # ok for p>= n?
# Traits
q = Float(0.2)
method_name = Unicode('Generic method')
model_target = Unicode()
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
(self.X,
self.Y,
self.l_theory,
self.l_min,
self.l_1se,
self.sigma_reid) = (X,
Y,
l_theory,
l_min,
l_1se,
sigma_reid)
def select(self):
raise NotImplementedError('abstract method')
@classmethod
def register(cls):
methods[cls.__name__] = cls
def selected_target(self, active, beta):
C = self.feature_cov[active]
Q = C[:,active]
return np.linalg.inv(Q).dot(C.dot(beta))
def full_target(self, active, beta):
return beta[active]
def get_target(self, active, beta):
if self.model_target not in ['selected', 'full']:
raise ValueError('Gaussian methods only have selected or full targets')
if self.model_target == 'full':
return self.full_target(active, beta)
else:
return self.selected_target(active, beta)
# Knockoff selection
class knockoffs_mf(generic_method):
method_name = Unicode('Knockoffs')
knockoff_method = Unicode('Second order')
model_target = Unicode("full")
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_mf.register()
class knockoffs_sigma(generic_method):
factor_method = 'asdp'
method_name = Unicode('Knockoffs')
knockoff_method = Unicode("ModelX (asdp)")
model_target = Unicode("full")
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
numpy2ri.activate()
# see if we've factored this before
have_factorization = False
if not os.path.exists('.knockoff_factorizations'):
os.mkdir('.knockoff_factorizations')
factors = glob.glob('.knockoff_factorizations/*npz')
for factor_file in factors:
factor = np.load(factor_file)
feature_cov_f = factor['feature_cov']
if ((feature_cov_f.shape == feature_cov.shape) and
(factor['method'] == cls.factor_method) and
np.allclose(feature_cov_f, feature_cov)):
have_factorization = True
print('found factorization: %s' % factor_file)
cls.knockoff_chol = factor['knockoff_chol']
if not have_factorization:
print('doing factorization')
cls.knockoff_chol = factor_knockoffs(feature_cov, cls.factor_method)
numpy2ri.deactivate()
def select(self):
numpy2ri.activate()
rpy.r.assign('chol_k', self.knockoff_chol)
rpy.r('''
knockoffs = function(X) {
mu = rep(0, ncol(X))
mu_k = X # sweep(X, 2, mu, "-") %*% SigmaInv_s
X_k = mu_k + matrix(rnorm(ncol(X) * nrow(X)), nrow(X)) %*%
chol_k
return(X_k)
}
''')
numpy2ri.deactivate()
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=knockoffs)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_sigma.register()
def factor_knockoffs(feature_cov, method='asdp'):
numpy2ri.activate()
rpy.r.assign('Sigma', feature_cov)
rpy.r.assign('method', method)
rpy.r('''
# Compute the Cholesky -- from create.gaussian
diag_s = diag(switch(method, equi = create.solve_equi(Sigma),
sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))
if (is.null(dim(diag_s))) {
diag_s = diag(diag_s, length(diag_s))
}
SigmaInv_s = solve(Sigma, diag_s)
Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s
chol_k = chol(Sigma_k)
''')
knockoff_chol = np.asarray(rpy.r('chol_k'))
SigmaInv_s = np.asarray(rpy.r('SigmaInv_s'))
diag_s = np.asarray(rpy.r('diag_s'))
np.savez('.knockoff_factorizations/%s.npz' % (os.path.split(tempfile.mkstemp()[1])[1],),
method=method,
feature_cov=feature_cov,
knockoff_chol=knockoff_chol)
return knockoff_chol
class knockoffs_sigma_equi(knockoffs_sigma):
knockoff_method = Unicode('ModelX (equi)')
factor_method = 'equi'
knockoffs_sigma_equi.register()
class knockoffs_orig(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Candes & Barber')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, statistic=stat.glmnet_lambdadiff, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
V = np.asarray(V, np.int)
return V, V
except:
return [], []
knockoffs_orig.register()
class knockoffs_fixed(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Fixed')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_fixed.register()
# Liu, Markovic, Tibs selection
class parametric_method(generic_method):
confidence = Float(0.95)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
generic_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self._fit = False
def select(self):
if not self._fit:
self.method_instance.fit()
self._fit = True
active_set, pvalues = self.generate_pvalues()
if len(pvalues) > 0:
selected = [active_set[i] for i in BHfilter(pvalues, q=self.q)]
return selected, active_set
else:
return [], active_set
class liu_theory(parametric_method):
sigma_estimator = Unicode('relaxed')
method_name = Unicode("Liu")
lambda_choice = Unicode("theory")
model_target = Unicode("full")
dispersion = Float(0.)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
n, p = X.shape
if n < p:
self.method_name = 'ROSI'
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
if not self._fit:
self.method_instance.fit()
self._fit = True
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
if len(L.active) > 0:
if self.sigma_estimator == 'reid' and n < p:
dispersion = self.sigma_reid**2
elif self.dispersion != 0:
dispersion = self.dispersion
else:
dispersion = None
S = L.summary(compute_intervals=compute_intervals, dispersion=dispersion)
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
            active_set = np.array(S['variable'])
import pytest
import numpy as np
def test_randlanet_torch():
import torch
import open3d.ml.torch as ml3d
net = ml3d.models.RandLANet(num_points=5000, num_classes=10, dim_input=6)
net.device = 'cpu'
data = {
'point':
np.array(np.random.random((1000, 3)), dtype=np.float32),
'feat':
np.array(np.random.random((1000, 3)), dtype=np.float32),
'label':
np.array([np.random.randint(10) for i in range(1000)],
dtype=np.int32)
}
attr = {'split': 'train'}
data = net.preprocess(data, attr)
inputs = net.transform(data, attr)
inputs = {
'xyz': [torch.from_numpy(np.array([item])) for item in inputs['xyz']],
'neigh_idx': [
torch.from_numpy(np.array([item])) for item in inputs['neigh_idx']
],
'sub_idx': [
torch.from_numpy(np.array([item])) for item in inputs['sub_idx']
],
'interp_idx': [
torch.from_numpy(np.array([item])) for item in inputs['interp_idx']
],
'features': torch.from_numpy(np.array([inputs['features']])),
'labels': torch.from_numpy(np.array([inputs['labels']]))
}
out = net(inputs).detach().numpy()
assert out.shape == (1, 5000, 10)
def test_randlanet_tf():
import tensorflow as tf
import open3d.ml.tf as ml3d
net = ml3d.models.RandLANet(num_points=5000,
num_classes=10,
dim_input=6,
num_layers=4)
data = {
'point':
np.array(np.random.random((1000, 3)), dtype=np.float32),
'feat':
np.array(np.random.random((1000, 3)), dtype=np.float32),
'label':
np.array([np.random.randint(10) for i in range(1000)],
dtype=np.int32)
}
attr = {'split': 'train'}
data = net.preprocess(data, attr)
pc, feat, label, _ = ml3d.datasets.utils.trans_crop_pc(
data['point'], data['feat'], data['label'], data['search_tree'], 0,
5000)
inputs = net.transform(tf.convert_to_tensor(pc), tf.convert_to_tensor(feat),
tf.convert_to_tensor(label))
for i in range(18): # num_layers * 4 + 2
inputs[i] = tf.expand_dims(inputs[i], 0)
out = net(inputs).numpy()
assert out.shape == (1, 5000, 10)
def test_kpconv_torch():
import torch
import open3d.ml.torch as ml3d
net = ml3d.models.KPFCNN(lbl_values=[0, 1, 2, 3, 4, 5],
num_classes=4,
ignored_label_inds=[0],
in_features_dim=5)
net.device = 'cpu'
data = {
'point':
np.array(np.random.random((1000, 3)), dtype=np.float32),
'feat':
np.array(np.random.random((1000, 3)), dtype=np.float32),
'label':
np.array([np.random.randint(5) for i in range(1000)],
dtype=np.int32)
}
attr = {'split': 'train'}
batcher = ml3d.dataloaders.ConcatBatcher('cpu')
data = net.preprocess(data, attr)
inputs = {'data': net.transform(data, attr), 'attr': attr}
inputs = batcher.collate_fn([inputs])
out = net(inputs['data']).detach().numpy()
assert out.shape[1] == 5
def test_kpconv_tf():
import tensorflow as tf
import open3d.ml.tf as ml3d
net = ml3d.models.KPFCNN(lbl_values=[0, 1, 2, 3, 4, 5],
num_classes=4,
ignored_label_inds=[0],
in_features_dim=5)
data = {
'point':
            np.array(np.random.random((10000, 3)), dtype=np.float32),
        'feat':
            np.array(np.random.random((10000, 3)), dtype=np.float32),
        'label':
            np.array([np.random.randint(5) for i in range(10000)],
                     dtype=np.int32)
    }
import pytest
import numpy as np
import scipy.stats as sts
from .context import viroconcom
from viroconcom.distributions import (WeibullDistribution, NormalDistribution,
LognormalDistribution)
from viroconcom.params import ConstantParam
# Weibull tests
@pytest.fixture(params=[1, 5, 100])
def weibull_shape(request):
return ConstantParam(request.param)
@pytest.fixture(params=[0, 1, 100])
def weibull_loc(request):
return ConstantParam(request.param)
@pytest.fixture(params=[1, 5, 100])
def weibull_scale(request):
return ConstantParam(request.param)
@pytest.fixture(params=[100, 1000, 5000])
def weibull_number(request):
return request.param
def test_weibull_cdf(weibull_shape, weibull_loc, weibull_scale):
x = np.linspace(0, 20)
ref_cdf = sts.weibull_min.cdf(x, weibull_shape(None), weibull_loc(None), weibull_scale(None))
dist = WeibullDistribution(weibull_shape, weibull_loc, weibull_scale)
my_cdf = dist.cdf(x, x, (None, None, None))
assert np.allclose(ref_cdf, my_cdf)
def test_weibull_i_cdf(weibull_shape, weibull_loc, weibull_scale):
x = np.linspace(0, 1)
ref_cdf = sts.weibull_min.ppf(x, weibull_shape(None), weibull_loc(None), weibull_scale(None))
dist = WeibullDistribution(weibull_shape, weibull_loc, weibull_scale)
my_cdf = dist.i_cdf(x, x, (None, None, None))
assert np.allclose(ref_cdf, my_cdf)
def test_weibull_draw_sample(weibull_number, weibull_shape, weibull_loc, weibull_scale):
ref_points = weibull_number
dist = WeibullDistribution(weibull_shape, weibull_loc, weibull_scale)
my_points = dist.draw_sample(weibull_number)
my_points = my_points.size
assert ref_points == my_points
@pytest.fixture(params=["shape", "loc", "scale"])
def weibull_param_name(request):
return request.param
def test_weibull_param_out_of_bounds(weibull_param_name):
dist = WeibullDistribution()
setattr(dist, weibull_param_name, ConstantParam(-np.inf))
with pytest.raises(ValueError):
dist.cdf([0, 100], [0, 100], (None, None, None))
dist = WeibullDistribution()
setattr(dist, weibull_param_name, ConstantParam(np.inf))
with pytest.raises(ValueError):
dist.cdf([0, 100], [0, 100], (None, None, None))
# Normal tests
@pytest.fixture(params=[0, 1, 100, -10])
def normal_loc(request):
return ConstantParam(request.param)
@pytest.fixture(params=[1, 5, 100])
def normal_scale(request):
return ConstantParam(request.param)
@pytest.fixture(params=[100, 1000, 5000])
def normal_number(request):
return request.param
def test_normal_cdf(normal_loc, normal_scale):
x = np.linspace(-20, 20)
ref_cdf = sts.norm.cdf(x, normal_loc(None), normal_scale(None))
dist = NormalDistribution(None, normal_loc, normal_scale)
my_cdf = dist.cdf(x, x, (None, None, None))
assert np.allclose(ref_cdf, my_cdf)
def test_normal_i_cdf(normal_loc, normal_scale):
x = np.linspace(0, 1)
ref_cdf = sts.norm.ppf(x, normal_loc(None), normal_scale(None))
dist = NormalDistribution(None, normal_loc, normal_scale)
my_cdf = dist.i_cdf(x, x, (None, None, None))
assert np.allclose(ref_cdf, my_cdf)
def test_normal_draw_sample(normal_number, normal_loc, normal_scale):
ref_points = normal_number
dist = NormalDistribution(normal_loc, normal_scale)
my_points = dist.draw_sample(normal_number)
my_points = my_points.size
assert ref_points == my_points
@pytest.fixture(params=["shape", "loc", "scale"])
def normal_param_name(request):
return request.param
def test_normal_param_out_of_bounds(normal_param_name):
dist = NormalDistribution()
setattr(dist, normal_param_name, ConstantParam(-np.inf))
with pytest.raises(ValueError):
dist.cdf([0, 100], [0, 100], (None, None, None))
dist = NormalDistribution()
setattr(dist, normal_param_name, ConstantParam(np.inf))
with pytest.raises(ValueError):
dist.cdf([0, 100], [0, 100], (None, None, None))
# Lognormal tests
@pytest.fixture(params=[1, 5, 100])
def lognormal_shape(request):
return ConstantParam(request.param)
@pytest.fixture(params=[1, 5, 100])
def lognormal_scale(request):
return ConstantParam(request.param)
@pytest.fixture(params=[100, 1000, 5000])
def lognormal_number(request):
return request.param
def test_lognormal_cdf(lognormal_shape, lognormal_scale):
x = np.linspace(0, 20)
ref_cdf = sts.lognorm.cdf(x, s=lognormal_shape(None), scale=lognormal_scale(None))
dist = LognormalDistribution(lognormal_shape, None, lognormal_scale)
my_cdf = dist.cdf(x, x, (None, None, None))
    assert np.allclose(ref_cdf, my_cdf)
import unittest
import numpy
import pytest
import cupy
from cupy.cuda import driver
from cupy.cuda import runtime
from cupy import testing
def _get_hermitian(xp, a, UPLO):
assert UPLO in 'UL'
A = xp.triu(a) if UPLO == 'U' else xp.tril(a)
A = A + A.swapaxes(-2, -1).conj()
n = a.shape[-1]
# Note: there is no "cupy.s_()", but we're just constructing slice objects
# here, so it's fine to call "numpy.s_()".
diag = numpy.s_[..., xp.arange(n), xp.arange(n)]
A[diag] -= a[diag]
return A
@testing.parameterize(*testing.product({
'UPLO': ['U', 'L'],
}))
@testing.gpu
@pytest.mark.skipif(
runtime.is_hip and driver.get_build_version() < 402,
reason='eigensolver not added until ROCm 4.2.0')
class TestEigenvalue(unittest.TestCase):
@testing.for_all_dtypes(no_float16=True, no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4, contiguous_check=False)
def test_eigh(self, xp, dtype):
a = xp.array([[1, 0, 3], [0, 5, 0], [7, 0, 9]], dtype)
w, v = xp.linalg.eigh(a, UPLO=self.UPLO)
# NumPy, cuSOLVER, rocSOLVER all sort in ascending order,
# so they should be directly comparable
return w, v
@testing.for_all_dtypes(no_bool=True, no_float16=True, no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4, contiguous_check=False)
def test_eigh_batched(self, xp, dtype):
a = xp.array([[[1, 0, 3], [0, 5, 0], [7, 0, 9]],
[[3, 0, 3], [0, 7, 0], [7, 0, 11]]], dtype)
w, v = xp.linalg.eigh(a, UPLO=self.UPLO)
# NumPy, cuSOLVER, rocSOLVER all sort in ascending order,
# so w's should be directly comparable. However, both cuSOLVER
# and rocSOLVER pick a different convention for constructing
        # eigenvectors, so v's are not directly comparable and we verify
# them through the eigen equation A*v=w*v.
A = _get_hermitian(xp, a, self.UPLO)
for i in range(a.shape[0]):
testing.assert_allclose(
A[i].dot(v[i]), w[i]*v[i], rtol=1e-5, atol=1e-5)
return w
def test_eigh_float16(self):
        # NumPy's eigh does not support float16
a = cupy.array([[1, 0, 3], [0, 5, 0], [7, 0, 9]], 'e')
w, v = cupy.linalg.eigh(a, UPLO=self.UPLO)
assert w.dtype == numpy.float16
assert v.dtype == numpy.float16
na = numpy.array([[1, 0, 3], [0, 5, 0], [7, 0, 9]], 'f')
        nw, nv = numpy.linalg.eigh(na, UPLO=self.UPLO)
# -*- coding: utf-8 -*-
"""
"""
import os
import sys
import runpy
from threading import Thread
import pytest
import numpy as np
from cslug import ptr
from hirola import HashTable, exceptions
from hirola._hash_table import slug
from tests import random_ids, ignore_almost_full_warnings, warnings_as_errors
DATA = np.arange(120, dtype=np.int8).data
DTYPES = [
np.int16,
np.float64,
np.dtype([("vertex", np.float32, 5)]),
np.dtype(np.bytes_) * 3,
np.dtype(np.bytes_) * 15,
]
def test_modulo():
for i in range(20, -20, -1):
assert slug.dll.euclidean_modulo(i, 5) == i % 5
def test_hash():
x = np.array([123, 4234, 213], dtype=np.int32)
out = np.int32(0)
old = np.seterr(over="ignore")
for i in range(3):
out ^= x[i] * np.int32(0x10001)
out *= np.int32(0x0B070503)
np.seterr(**old)
assert slug.dll.hash(ptr(x), 12) == out
@ignore_almost_full_warnings
def test_walk_through():
data = np.array([100, 101, 100, 103, 104, 105, 103, 107], dtype=np.float32)
self = HashTable(5, dtype=data.dtype)
assert self.dtype == data.dtype
assert np.all(self._hash_owners == -1)
assert self.key_size == 4
assert self.length == 0
assert self.max == 5
hash = slug.dll.hash(ptr(data), self.key_size)
for i in range(2):
assert slug.dll.HT_hash_for(self._raw._ptr, ptr(data), False) \
== hash % self.max
assert self._add(data) == 0
assert self.length == 1
assert len(self) == 1
assert np.array_equal(self.keys, [100])
assert self._hash_owners[hash % self.max] == 0
assert self._get(data) == 0
assert self._add(data[1]) == 1
assert self._add(data[2]) == 0
assert self._add(data[3]) == 2
assert self._add(data[4]) == 3
assert self._add(data[5]) == 4
assert self._add(data[6]) == 2
assert self._add(data[7]) == -1
assert self.add(data[:7]).tolist() == [0, 1, 0, 2, 3, 4, 2]
assert self.get(data).tolist() == [0, 1, 0, 2, 3, 4, 2, -1]
assert self[data[:-1]].tolist() == [0, 1, 0, 2, 3, 4, 2]
assert isinstance(self.add(data[0]), int)
assert isinstance(self.get(data[0]), int)
with pytest.raises(exceptions.HashTableFullError,
match=r".* add keys\[7\] = 107\.0 to .* and 107\.0 is"):
self.add(data)
with pytest.raises(exceptions.HashTableFullError,
match=r".* add keys\[1, 3\] = 107\.0 to .* and 107\.0 "):
self.add(data.reshape((2, 4)))
with pytest.raises(exceptions.HashTableFullError,
match=r".* add key = 107\.0 to .* and 107\.0 is"):
self.add(data[7])
SHAPES = [(), (10,), (0, 10), (10, 0), (10, 10), (10, 10, 10)]
def test_dtype_normalisation_simple():
self = HashTable(10, np.int16)
assert isinstance(self.dtype, np.dtype)
assert self._base_dtype == np.dtype(np.int16)
assert self._dtype_shape == ()
for shape in SHAPES:
keys, shape_ = self._norm_input_keys(np.empty(shape, dtype=np.int16))
assert shape_ == shape
assert self._norm_input_keys(np.empty(10, dtype=np.int16))[1] == (10,)
with pytest.raises(TypeError, match="Expecting int16 but got float64."):
self.get(np.arange(10, dtype=np.float64))
def test_dtype_normalisation_multidimensional():
self = HashTable(10, np.dtype(np.float32) * 3)
assert self.key_size == 12
assert self._base_dtype == np.dtype(np.float32)
assert self._dtype_shape == (3,)
with pytest.raises(TypeError):
self._norm_input_keys(np.empty(10, np.int32))
with pytest.raises(ValueError):
self._norm_input_keys(np.empty(10, np.float32))
with pytest.raises(ValueError):
self._norm_input_keys(np.empty((10, 4), np.float32))
assert self._norm_input_keys(np.empty(3, np.float32))[1] == ()
for shape in SHAPES:
_, shape_ = self._norm_input_keys(np.empty(shape + (3,), np.float32))
assert shape_ == shape
def test_dtype_normalisation_records():
dtype = np.dtype([("a", np.int16, 4), ("b", np.uint64)])
self = HashTable(10, dtype)
assert self._base_dtype == dtype
assert self._dtype_shape == ()
assert self.key_size == 16
def test_invalid_array():
with pytest.raises(TypeError):
HashTable(10, object)
assert HashTable(0, int).max == 1
assert HashTable(-10, int).max == 1
def test_string_like():
self = HashTable(10, "U5")
# Molly-coddle the types.
assert self.add(np.array("bear", self.dtype)) == 0
# Should convert from Python str without complaint.
assert self.get("bear\x00") == 0
# Should implicitly add trailing NULLs.
assert self.get("bear") == 0
# Should be case sensitive.
assert self.get("Bear") == -1
# NumPy implicitly truncates overlong strings. Accept this behaviour.
assert self._check_dtype("tigers") == "tiger"
assert self.add(["tigers"]) == 1
assert self.keys[1] == "tiger"
# Make absolutely darn certain that hirola's C code never comes into
# contact with strings of random lengths.
normed, shape = self._norm_input_keys(["cat", "dog", "hippopotamus"])
assert normed.dtype == "U5"
assert shape == (3,)
assert normed.tolist() == ["cat", "dog", "hippo"]
    # NumPy implicitly converts non-strings to string. Accept this too,
    # although it is probably a bad idea for floats.
for (i, key) in enumerate((1, 100, 10000000, .123, 1 / 9), start=len(self)):
key_ = self._check_dtype(key)
assert key_ == str(key)[:5]
assert self.add(key) == i
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("batch", [False, True])
def test(dtype, sort, batch):
unique_ = np.unique(np.frombuffer(DATA, dtype=dtype))
ids_ = random_ids(len(unique_), 100, at_least_once=True, sort=sort)
values = unique_[ids_]
self = HashTable(len(values), dtype)
if batch:
assert np.all(self.get(values) == -1)
if batch:
ids = self.add(values)
else:
ids = np.empty_like(ids_)
for (i, value) in enumerate(values):
value = np.array(value)
ids[i] = self._add(value)
assert np.all(self.keys[ids] == values)
if sort:
assert np.all(self.keys == unique_)
assert np.all(ids == ids_)
for (i, value) in enumerate(values):
assert self._get(value) == ids[i]
assert np.all(self.get(values) == ids)
assert np.all(self.add(values) == ids)
def test_non_int_max():
max = HashTable(3.5, int).max
assert isinstance(max, int)
assert max == 3
def test_getting():
"""Test HashTable().get() both with and without defaults and
HashTable().__getitem__()."""
self = HashTable(10, (str, 10))
assert self.add(["duck", "goose", "chicken"]).tolist() == [0, 1, 2]
# The default default of -1.
assert self.get("pigeon") == -1
assert self.get(["goose", "pigeon", "parrot"]).tolist() == [1, -1, -1]
# User defined integer default.
assert self.get("pigeon", default=10) == 10
assert self.get(["pigeon", "goose", "parrot"], default=5).tolist() \
== [5, 1, 5]
# User defined random object default.
default = object()
assert self.get("toad", default=default) is default
assert self.get(["chicken", "toad"], default=default).tolist() \
== [2, default]
assert self.get("toad", default=None) is None
# Defaulting disabled. Currently a private option.
with pytest.raises(KeyError, match=r"key = 'troll' is not"):
self.get("troll", default=self._NO_DEFAULT)
# __getitem__() disables the default.
assert self["chicken"] == 2
assert self[["chicken", "duck"]].tolist() == [2, 0]
with pytest.raises(KeyError):
self["toad"]
def test_blame_key_multidimensional():
"""Test that the custom KeyErrors work for non scalar keys. """
# Create a hash table for float triplets.
self = HashTable(10, dtype=(float, 3))
keys = np.arange(24, dtype=float).reshape((-1, 3))
# Add all but the last key.
self.add(keys[:-1])
# Try getting the last key. The resultant key errors should always point to
# the correct one being missing.
with pytest.raises(KeyError, match=r"key = array\(\[21., 22., 23.\]\) is"):
self[keys[-1]]
with pytest.raises(KeyError, match=r"keys\[7\] = array\(\[21"):
self[keys]
with pytest.raises(KeyError, match=r"keys\[3, 1\] = array\(\[21"):
self[keys.reshape((4, 2, 3))]
def test_blame_key_structured():
"""Similar to test_blame_key_multidimensional() but for struct dtypes."""
self = HashTable(10, dtype=[("name", str, 10), ("age", int)])
keys = np.array([("bill", 10), ("bob", 12), ("ben", 13)], self.dtype)
self.add(keys[:-1])
with pytest.raises(KeyError, match=r"key = \('ben', 13\) is"):
self[keys[-1]]
with pytest.raises(KeyError, match=r"keys\[2\] = \('ben', 13\) is"):
self[keys]
def test_destroy():
self = HashTable(10, float)
self.add([.3, .5, .8])
# Release self.keys so that it can be written to.
keys = self.destroy()
assert keys.flags.writeable
assert np.shares_memory(keys, self.keys)
# destroy() should be re-callable without complaint (although it's now
# functionless).
assert np.shares_memory(keys, self.destroy())
# Now that self.keys has been made accessibly writeable, it is no longer
# safe to use the table.
with pytest.raises(exceptions.HashTableDestroyed, match=".*"):
self.add(.8)
with pytest.raises(exceptions.HashTableDestroyed):
self.get(.5)
@ignore_almost_full_warnings
def test_resize():
self = HashTable(5, int)
self.add([4, 3, 2, 9])
with pytest.raises(ValueError, match=".* size 3 is .* fit 4 keys"):
self.resize(3)
for new_size in [4, 10]:
smaller = self.resize(new_size)
assert smaller.length == self.length
assert np.array_equal(smaller.keys, self.keys)
assert smaller.max == new_size
# Test inplace resizing.
assert self is self.resize(6, in_place=True)
assert self.max == 6
assert self.get([5, 4, 3, 2, 3]).tolist() == [-1, 0, 1, 2, 1]
assert self.add(8) == 4
assert self.add(11) == 5
def test_almost_full():
"""Test table.almost_full set to warn, ignore or raise errors when the hash
table's fullness crosses a given threshold."""
# Test the default - to warn at 90% full.
self = HashTable(5, int)
assert self.almost_full == (.9, "warn")
# The internal integer threshold should always round up.
assert self._raw.panic_at == 5
# Nothing should happen until we hit the 5th key.
with warnings_as_errors():
self.add(range(4))
with pytest.warns(exceptions.AlmostFull, match="is 100% full"):
self.add(4)
    # Test raising an error when nearly full.
self = HashTable(10, int, almost_full=(.45, "raise"))
assert self.almost_full == (.45, "raise")
with pytest.raises(exceptions.AlmostFull, match="is 50% full"):
self.add(range(8))
assert len(self) == 5
self.almost_full = .7, "warn"
with pytest.warns(exceptions.AlmostFull, match="is 70% full"):
self.add(range(10))
assert len(self) == 10
def test_disabled_almost_full():
"""Verify that no almost full warnings are produced if disabled."""
self = HashTable(10, int, almost_full=None)
with warnings_as_errors():
self.add(range(10))
def test_almost_full_input_guards():
"""Test the various exceptions raised by the input validators of
HashTable.almost_full's setter."""
with pytest.raises(ValueError, match=".* first .* be >0 and <=1"):
HashTable(10, int, almost_full=(2, "warn"))
with pytest.raises(ValueError, match=".* first .* >0 and <=1"):
HashTable(10, int, almost_full=(0, "warn"))
with pytest.raises(ValueError,
match="Valid near-full actions are .* Not 'bean'."):
HashTable(10, int, almost_full=(.6, "bean"))
with pytest.raises(TypeError, match=".* second parameter to almost_full"):
HashTable(10, int, almost_full=(.6, range))
with pytest.raises(TypeError):
HashTable(10, int, almost_full="hello")
def test_infinite_resizing_check():
"""If automatic resizing is enabled but the resize factor is <= 1 or so
close to 1 that ``int(table.max * resize_factor)`` truncates to
``table.max`` then we could end up in an infinite loop of no-op resizes.
Verify that the HashTable.almost_full setter blocks such resize factors.
"""
self = HashTable(10, int, almost_full=(1, 1.1))
assert self.add(range(20)).tolist() == list(range(20))
with pytest.raises(ValueError, match="resize factor of 1.09 would"):
HashTable(10, int, almost_full=(1, 1.09))
HashTable(100, int, almost_full=(1, 1.01))
with pytest.raises(ValueError, match="resize factor of 1.01 would"):
HashTable(99, int, almost_full=(1, 1.01))
with pytest.raises(ValueError, match="resize factor"):
HashTable(10, int, almost_full=(1, .8))
with pytest.raises(ValueError, match="resize factor"):
HashTable(10, int, almost_full=(1, -1))
def test_automatic_resize():
"""Test setting self.almost_full to automatically resize the hash table."""
# Upsize by x1.5 when 60% full.
self = HashTable(10, int, almost_full=(.6, 1.5))
# 5 out of 10 is less than 60%. Nothing should have changed.
self.add(range(5))
assert self.max == 10
# Adding one extra value brings us up to 60% which should trigger a resize.
self.add(5)
assert self.max == 15
# No information should have been lost in the resize.
assert np.array_equal(self.keys, np.arange(6))
# Adding loads of new keys should call resize as many times as needed.
self.add(np.arange(30))
assert self.max == 73
def test_copy():
self = HashTable(10, int)
self.add(range(3, 8))
copy = self.copy()
assert copy._destroyed is False
assert copy.keys.tolist() == self.keys.tolist()
self.add(9)
assert 9 in self.keys
assert 9 not in copy.keys
copy.add(0)
keys = self.destroy()
copy = self.copy(usable=False)
assert copy._destroyed is True
assert copy.keys.tolist() == self.keys.tolist()
keys[0] = 5
assert copy.keys[0] == 3
copy = self.copy(usable=True)
assert copy._destroyed is False
assert copy.keys.tolist() == [5, 4, 6, 7, 9]
def test_in():
"""Test HashTable().contains() and ``x in table``."""
self = HashTable(10, int)
self.add([20, 5, 50, 3, 4])
assert self.contains(50) is True
assert self.contains(51) is False
assert self.contains([20, 4, 10, 99, 12]).tolist() == \
[True, True, False, False, False]
assert self.contains([[3, 5], [2, 1]]).tolist() == \
[[True, True], [False, False]]
assert 3 in self
assert not 9 in self
with pytest.raises(ValueError):
# Not allowed by Python.
[1, 2] in self
def test_PyInstaller_hook():
if getattr(sys, "frozen", False):
pytest.skip("")
from hirola import _PyInstaller_hook_dir
hook_dir, = _PyInstaller_hook_dir()
assert os.path.isdir(hook_dir)
hook = os.path.join(hook_dir, "hook-hirola.py")
assert os.path.isfile(hook)
namespace = runpy.run_path(hook)
assert len(namespace["datas"]) == 2
def test_thread_safety_add():
"""Run table.add() asynchronously and verify that all keys make it in."""
# Unfortunately, a lot of data is needed to see the effects asynchronous
# manipulations.
self = HashTable(200000, int, almost_full=None)
chunk_size = self.max // 4
threads = []
chunks = []
for i in range(0, self.max, chunk_size):
chunk = np.arange(i, i + chunk_size)
chunks.append(chunk)
threads.append(Thread(target=self.add, args=(chunk,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# All keys should have made it into the table.
assert len(self) == self.max
# The grouping into chunks and ordering within chunks should have been
# preserved but there is no guarantee which order those chunks will occur
# in.
args = np.argsort(self.keys[::chunk_size])
keys = np.concatenate([chunks[i] for i in args]).tolist()
assert self.keys.tolist() == keys
def test_thread_safety_resize():
"""Run table.resize() asynchronously. Without proper thread locking in
place, this can lead to segfaults."""
    keys = np.arange(10000)
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
import os
from functools import partial
import pandapower as pp
from pandapower.test.consistency_checks import consistency_checks
from pandapower.test.toolbox import add_grid_connection, create_test_line
import numpy as np
try:
from julia import Main
julia_installed = True
except ImportError:
julia_installed = False
@pytest.fixture
def net_3w_trafo_opf():
net = pp.create_empty_network()
# create buses
bus1 = pp.create_bus(net, vn_kv=220.)
bus2 = pp.create_bus(net, vn_kv=110.)
bus3 = pp.create_bus(net, vn_kv=110.)
bus4 = pp.create_bus(net, vn_kv=110.)
bus5 = pp.create_bus(net, vn_kv=110.)
pp.create_bus(net, vn_kv=110., in_service=False)
# create 220/110 kV transformer
pp.create_transformer3w_from_parameters(net, bus1, bus2, bus5, vn_hv_kv=220, vn_mv_kv=110,
vn_lv_kv=110, vk_hv_percent=10., vk_mv_percent=10.,
vk_lv_percent=10., vkr_hv_percent=0.5,
vkr_mv_percent=0.5, vkr_lv_percent=0.5, pfe_kw=100,
i0_percent=0.1, shift_mv_degree=0, shift_lv_degree=0,
sn_hv_mva=100, sn_mv_mva=50, sn_lv_mva=50)
# create 110 kV lines
pp.create_line(net, bus2, bus3, length_km=70., std_type='149-AL1/24-ST1A 110.0')
pp.create_line(net, bus3, bus4, length_km=50., std_type='149-AL1/24-ST1A 110.0')
pp.create_line(net, bus4, bus2, length_km=40., std_type='149-AL1/24-ST1A 110.0')
pp.create_line(net, bus4, bus5, length_km=30., std_type='149-AL1/24-ST1A 110.0')
# create loads
pp.create_load(net, bus2, p_mw=60, controllable=False)
pp.create_load(net, bus3, p_mw=70, controllable=False)
pp.create_sgen(net, bus3, p_mw=10, controllable=False)
# create generators
pp.create_ext_grid(net, bus1, min_p_mw=0, max_p_mw=1000, max_q_mvar=0.01, min_q_mvar=0)
pp.create_gen(net, bus3, p_mw=80, min_p_mw=0, max_p_mw=80, vm_pu=1.01)
pp.create_gen(net, bus4, p_mw=80, min_p_mw=0, max_p_mw=80, vm_pu=1.01)
net.gen["controllable"] = False
return net
@pytest.mark.skipif(julia_installed == False, reason="requires julia installation")
def test_compare_pwl_and_poly(net_3w_trafo_opf):
net = net_3w_trafo_opf
pp.create_pwl_cost(net, 0, 'ext_grid', [(0, 1, 1)])
pp.create_pwl_cost(net, 0, 'gen', [(0, 30, 3), (30, 80, 3)])
pp.create_pwl_cost(net, 1, 'gen', [(0, 100, 2)])
pp.runpm_ac_opf(net)
consistency_checks(net)
p_gen = net.res_gen.p_mw.values
q_gen = net.res_gen.q_mvar.values
vm_bus = net.res_bus.vm_pu.values
va_bus = net.res_bus.va_degree.values
net.pwl_cost.drop(net.pwl_cost.index, inplace=True)
pp.create_poly_cost(net, 0, 'ext_grid', cp1_eur_per_mw=1)
pp.create_poly_cost(net, 0, 'gen', cp1_eur_per_mw=3)
pp.create_poly_cost(net, 1, 'gen', cp1_eur_per_mw=2)
pp.runpm_ac_opf(net)
consistency_checks(net)
np.allclose(p_gen, net.res_gen.p_mw.values)
np.allclose(q_gen, net.res_gen.q_mvar.values)
np.allclose(vm_bus, net.res_bus.vm_pu.values)
np.allclose(va_bus, net.res_bus.va_degree.values)
pp.runpm_dc_opf(net)
consistency_checks(net)
np.allclose(p_gen, net.res_gen.p_mw.values)
np.allclose(va_bus, net.res_bus.va_degree.values)
@pytest.mark.skipif(julia_installed == False, reason="requires julia installation")
def test_pwl():
net = pp.create_empty_network()
# create buses
bus1 = pp.create_bus(net, vn_kv=110., min_vm_pu=0.9, max_vm_pu=1.1)
bus2 = pp.create_bus(net, vn_kv=110., min_vm_pu=0.9, max_vm_pu=1.1)
bus3 = pp.create_bus(net, vn_kv=110., min_vm_pu=0.9, max_vm_pu=1.1)
# create 110 kV lines
pp.create_line(net, bus1, bus2, length_km=50., std_type='149-AL1/24-ST1A 110.0')
pp.create_line(net, bus2, bus3, length_km=50., std_type='149-AL1/24-ST1A 110.0')
# create loads
pp.create_load(net, bus2, p_mw=80, controllable=False)
# create generators
g1 = pp.create_gen(net, bus1, p_mw=80, min_p_mw=0, max_p_mw=80, vm_pu=1.01, slack=True)
g2 = pp.create_gen(net, bus3, p_mw=80, min_p_mw=0, max_p_mw=80, vm_pu=1.01)
# net.gen["controllable"] = False
pp.create_pwl_cost(net, g1, 'gen', [(0, 2, 2), (2, 80, 5)])
pp.create_pwl_cost(net, g2, 'gen', [(0, 2, 2), (2, 80, 5)])
pp.runpm_ac_opf(net)
consistency_checks(net, rtol=1e-3)
assert np.isclose(net.res_gen.p_mw.iloc[0], net.res_gen.p_mw.iloc[1])
assert np.isclose(net.res_gen.q_mvar.iloc[0], net.res_gen.q_mvar.iloc[1])
net.pwl_cost.drop(net.pwl_cost.index, inplace=True)
g3 = pp.create_gen(net, bus1, p_mw=80, min_p_mw=0, max_p_mw=80, vm_pu=1.01)
pp.create_pwl_cost(net, g1, 'gen', [(0, 2, 1.), (2, 80, 8.)])
pp.create_pwl_cost(net, g2, 'gen', [(0, 3, 2.), (3, 80, 14)])
pp.create_pwl_cost(net, g3, 'gen', [(0, 1, 3.), (1, 80, 10.)])
net.load.p_mw = 1
pp.runpm_ac_opf(net)
consistency_checks(net, rtol=1e-3)
    assert np.isclose(net.res_gen.p_mw.at[g2], 0)
# Author : <NAME>
# E-mail : <EMAIL>
import gym
import numpy as np
dict = {0: "LEFT", 1: "DOWN", 2: "RIGHT", 3: "UP"}
class agent:
def __init__(self, step_size, gamma, epsilon, env):
self.q_table = np.random.rand(16, 4)
self.q_table[15] = [0, 0, 0, 0]
self.step_size = step_size
self.gamma = gamma
self.epsilon = epsilon
self.env = env
self.init_state = 0
env.render()
def epsilon_greedy(self):
# print("Selecting action")
        rass = np.random.random()
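        # Illustrative continuation (an assumption, not the original code):
        # explore with probability epsilon, otherwise exploit the greedy action
        # from the Q-table. Which state attribute the original tracked is
        # unknown; init_state is assumed here.
        if rass < self.epsilon:
            return self.env.action_space.sample()              # explore
        return int(np.argmax(self.q_table[self.init_state]))   # exploit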
#%%
from flask import Flask
import joblib
import numpy as np
import pandas as pd
from sklearn import ensemble, preprocessing
MODEL = joblib.load("../models/tuned-random-forest-regression-model.pkl")
def preprocess_features(df):
_numeric_features = ["GHI(W/m2)",
"mslp(hPa)",
"rain(mm)",
"rh(%)",
"t2(C)",
"td2(C)",
"wind_dir(Deg)",
"wind_speed(m/s)"]
_ordinal_features = ["AOD",
"day",
"month",
"year"]
standard_scalar = preprocessing.StandardScaler()
Z0 = standard_scalar.fit_transform(df.loc[:, _numeric_features])
ordinal_encoder = preprocessing.OrdinalEncoder()
Z1 = ordinal_encoder.fit_transform(df.loc[:, _ordinal_features])
transformed_features = np.hstack((Z0, Z1))
return transformed_features
def feature_engineering(df):
_dropped_cols = ["SWDIR(W/m2)", "SWDNI(W/m2)", "SWDIF(W/m2)"]
_year = (df.index
.year)
_month = (df.index
.month)
_day = (df.index
.dayofyear)
_hour = (df.index
.hour)
features = (df.drop(_dropped_cols, axis=1, inplace=False)
.assign(year=_year, month=_month, day=_day, hour=_hour)
.groupby(["year", "month", "day", "hour"])
.mean()
.unstack(level=["hour"])
.reset_index(inplace=False)
.sort_index(axis=1)
.drop("year", axis=1, inplace=False))
# create the proxy for our solar power target
efficiency_factor = 0.5
target = (features.loc[:, ["GHI(W/m2)"]]
.mul(efficiency_factor)
.shift(-1)
.rename(columns={"GHI(W/m2)": "target(W/m2)"}))
# combine to create the input data
input_data = (features.join(target)
.dropna(how="any", inplace=False)
.sort_index(axis=1))
return input_data
# load data
neom_data = (pd.read_csv("../data/raw/neom-data.csv", parse_dates=[0], nrows=100)
.rename(columns={"Unnamed: 0": "Timestamp"})
.set_index("Timestamp", drop=True, inplace=False))
app = Flask(__name__)
@app.route('/',methods=['GET',])
def home():
#return '<pre>'+get_shell_script_output_using_check_output()+'</pre>'
# perform feature engineering
input_data = feature_engineering(neom_data)
# simulate online learning by sampling features from the input data
    _prng = np.random.RandomState(42)
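    # Illustrative continuation (a sketch, not the original route): sample one
    # engineered observation with the seeded RNG, split off the target proxy,
    # and return the model's prediction as plain text. The column handling and
    # the feature layout expected by MODEL are assumptions.
    row = input_data.sample(n=1, random_state=_prng)
    features = row.drop(columns="target(W/m2)", level=0)
    prediction = MODEL.predict(features.to_numpy())
    return "Predicted solar power proxy (W/m2): {}".format(prediction[0])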
import os
import os.path as op
import numpy as np
import pandas as pd
import mne
import matplotlib.pyplot as plt
from scipy import stats
from params import DATA_DIR as data_dir
from params import BIDS_ROOT as bids_root
from params import SUBJECTS as subjects
from params import TASK as task
from params import TEMPLATE as template
from params import ATLAS as aseg
from params import ALPHA as alpha
from params import LEFT_HANDED_SUBJECTS as lh_sub
from params import FREQUENCIES as freqs
freqs = np.array([0] + list(freqs)) # add evoked
def swarm(x, bins): # plot helper function
counts = np.ones((bins.size))
y = np.zeros((len(x)))
for i, this_x in enumerate(x):
idx = np.where(this_x < bins)[0][0] - 1
y[i] = counts[idx] // 2 if counts[idx] % 2 else -counts[idx] // 2
counts[idx] += 1
return y
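# NOTE: illustrative usage of `swarm` only; `example_bins` is a made-up name.
#   example_bins = np.linspace(0, 1, 11)
#   y_offsets = swarm([0.11, 0.12, 0.13], example_bins)
#   # -> array([ 0., -1.,  1.]), so the three nearby values no longer overlap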
fig_dir = op.join(data_dir, 'derivatives', 'plots')
if not op.isdir(fig_dir):
os.makedirs(fig_dir)
# get plotting information
subjects_dir = op.join(bids_root, 'derivatives')
brain_kwargs = dict(cortex='low_contrast', alpha=0.2, background='white',
subjects_dir=subjects_dir, units='m')
colors = mne._freesurfer.read_freesurfer_lut()[1]
cmap = plt.get_cmap('viridis')
template_trans = mne.coreg.estimate_head_mri_t(template, subjects_dir)
ch_pos = pd.read_csv(op.join(data_dir, 'derivatives',
'elec_contacts_info.tsv'), sep='\t')
# get svm information
source_dir = op.join(data_dir, 'derivatives', 'pca_svm_classifier')
scores = pd.read_csv(op.join(source_dir, 'scores.tsv'), sep='\t')
# remove nans for positions and scores
idx = ~np.logical_or(np.logical_or(np.isnan(
ch_pos['x']), np.isnan(ch_pos['y'])), np.isnan(ch_pos['z']))
ch_pos = ch_pos[idx].reset_index()
scores = scores[idx].reset_index()
with np.load(op.join(source_dir, 'event_images.npz')) as images:
images = {k: v for k, v in images.items()}
spec_shape = images[list(images.keys())[0]].shape
times = np.linspace(-0.5, 0.5, spec_shape[1])
with np.load(op.join(source_dir, 'null_images.npz')) as null_images:
null_images = {k: v for k, v in null_images.items()}
# compute significant indices pooled across subjects
sig_thresh = np.quantile(scores['null_scores'], 1 - alpha)
not_sig = [i for i, score in enumerate(scores['event_scores'])
if score <= sig_thresh]
sig = [i for i, score in enumerate(scores['event_scores'])
if score > sig_thresh]
# compute null distribution thresholds per subject and per image
image_thresh = np.quantile(
abs(np.array(list(null_images.values()))), 1 - alpha, axis=0)
# feature map computation
feature_maps = np.zeros((4,) + spec_shape)
for sub, elec_name, number, score in zip(
scores['sub'], scores['elec_name'], scores['number'],
scores['event_scores']):
if score > sig_thresh:
image = images[f'sub-{sub}_ch-{elec_name}{int(number)}']
feature_maps[0] += abs(image) > image_thresh # count
feature_maps[1] += image > image_thresh
feature_maps[2] += abs(image)
feature_maps[3] += (abs(image) > image_thresh) * score
# normalize
feature_maps[1] /= feature_maps[0] # scale by count
feature_maps[3] /= feature_maps[0] # scale by count
feature_maps[0] /= feature_maps[0].max()
feature_maps[2] /= feature_maps[2].max()
# time-frequency areas of interest
prop_thresh = 0.5
areas = {'Pre-Movement Beta': (1, 25, 37, -0.4, -0.1),
'Delta': (1, 1, 5, -0.5, 0.5),
'Evoked Potential': (1, 0, 0, -0.5, 0.5),
'High-Beta Rebound': (1, 27, 40, 0, 0.25),
'Low-Beta Rebound': (1, 14, 23, 0.05, 0.25),
'Post-Movement Gamma': (1, 45, 160, 0.08, 0.23),
'Pre-Movement Alpha': (0, 7, 14, -0.3, 0)}
area_contacts = {area: dict() for area in areas}
for name, image in images.items():
sub, ch = [phrase.split('-')[1] for phrase in
name.split('_')[0:2]]
elec_name = ''.join([letter for letter in ch if not letter.isdigit()])
number = ''.join([letter for letter in ch if letter.isdigit()])
if not len(ch_pos[(ch_pos['sub'].astype(str) == sub) &
(ch_pos['elec_name'] == elec_name) &
(ch_pos['number'].astype(int).astype(str) == number)]):
continue # no channel position, skip
mask = (abs(image) > image_thresh) * np.sign(image)
for area, (fm_idx, fmin, fmax, tmin, tmax) in areas.items():
fmin_idx = np.argmin(abs(freqs - fmin))
fmax_idx = np.argmin(abs(freqs - fmax))
tmin_idx = np.argmin(abs(times - tmin))
tmax_idx = np.argmin(abs(times - tmax))
this_area = mask[slice(fmin_idx, fmax_idx + 1),
slice(tmin_idx, tmax_idx + 1)]
area_contacts[area][(int(sub), elec_name, int(number))] = \
this_area.sum() / this_area.size
# Plots
# Figure 1: Task figure
sr = 800 / 1200 # screen ratio
fig, ax = plt.subplots(figsize=(6, 2))
fig.suptitle('Forced Two-Choice Task Design')
ax.axis('off')
# fixation 700 + blank 700 + go 1200 + iti 4000 = 6600
ax.axis([-0.02, 6.62, -1, 1])
# main experimental design
ax.plot([0, 0, 0, 6.6, 6.6, 6.6], [0.2, -0.2, 0, 0, -0.2, 0.2], color='black')
# fixation
for t in (0.3, 0.4, 0.5, 0.6, 0.7):
ax.plot([t, t], [-0.2, 0.2], color=(0.5, 0.5, 0.5))
ax.plot([0, 0.35, 0.7], [0.2, 0.35, 0.2], color=(0.5, 0.5, 0.5)) # zoom
ax.fill([0, 0.7, 0.7, 0, 0], 0.37 + np.array([0, 0, 0.7 * sr, 0.7 * sr, 0]))
from __future__ import absolute_import
from tabulate import tabulate
import logging
from numpy import asarray
import numpy as np
from numpy import dot
from limix_qep.lik import Bernoulli
from limix_qep.lik import Binomial
from limix_math.linalg import qs_decomposition
from limix_math.linalg import _QS_from_K_split
from .util import gower_kinship_normalization
import scipy.stats as st
from limix_qep.ep import BernoulliEP
from limix_qep.ep import BinomialEP
def _get_offset_covariate(covariate, n):
if covariate is None:
covariate = np.ones((n, 1))
return covariate
class LRT(object):
def __init__(self, y, Q0, Q1, S0, outcome_type=Bernoulli(), full=False,
covariate=None, null_model_only=False):
self._logger = logging.getLogger(__name__)
if not (isinstance(outcome_type, Bernoulli) or
isinstance(outcome_type, Binomial)):
raise Exception("Unrecognized outcome type.")
outcome_type.assert_outcome(y)
self._full = full
self._y = y
self._Q0 = Q0
self._Q1 = Q1
self._S0 = S0
self._covariate = _get_offset_covariate(covariate, y.shape[0])
self._outcome_type = outcome_type
self._null_model_ready = False
self._alt_model_ready = False
self._genetic_variance = None
self._instrumental_variance = None
self._environmental_variance = None
self._pvals = None
self._lrs = None
self._ep = None
self._betas = None
self._lml_null = np.nan
self._X = None
self._null_model_only = null_model_only
self._lml_alt = None
@property
def genetic_variance(self):
return self._genetic_variance
@property
def instrumental_variance(self):
return self._instrumental_variance
@property
def environmental_variance(self):
return self._environmental_variance
@property
def candidate_markers(self):
return self._X
@candidate_markers.setter
def candidate_markers(self, X):
if self._X is None:
self._X = X
self._alt_model_ready = False
elif np.any(self._X != X):
self._X = X
self._alt_model_ready = False
def _compute_statistics(self):
self._logger.info('Statistics computation has started.')
self._compute_null_model()
if self._null_model_only:
return
if self._full:
self._compute_alt_models_full()
else:
self._compute_alt_models()
def _compute_alt_models(self):
if self._alt_model_ready:
return
self._logger.info('Alternative model computation has started.')
X = self._X
covariate = self._covariate
lml_null = self._lml_null
ep = self._ep
ep.pause = True
fp_lml_alt = np.full(X.shape[1], -np.inf)
fep = ep.fixed_ep()
t = self._lml_alts(fep, X, covariate)
fp_lml_alt[:] = np.maximum(fp_lml_alt, t)
self._lml_alt = fp_lml_alt
fp_lrs = -2 * lml_null + 2 * fp_lml_alt
chi2 = st.chi2(df=1)
fp_pvals = chi2.sf(fp_lrs)
self._pvals = fp_pvals
self._lrs = fp_lrs
self._alt_model_ready = True
def _compute_null_model(self):
if self._null_model_ready:
return
self._logger.info('Null model computation has started.')
y = self._y
Q0, Q1 = self._Q0, self._Q1
S0 = self._S0
covariate = self._covariate
outcome_type = self._outcome_type
if isinstance(outcome_type, Binomial):
ep = BinomialEP(y, outcome_type.ntrials, covariate, Q0, Q1, S0)
elif isinstance(outcome_type, Bernoulli):
ep = BernoulliEP(y, covariate, Q0, Q1, S0)
ep.optimize()
self._lml_null = ep.lml()
self._ep = ep
self._genetic_variance = ep.genetic_variance
self._instrumental_variance = ep.instrumental_variance
self._environmental_variance = ep.environmental_variance
self._null_model_ready = True
def _compute_alt_models_full(self):
if self._alt_model_ready:
return
X = self._X
covariate = self._covariate
ep = self._ep
lml_alts = []
for i in range(X.shape[1]):
ep.M = np.hstack((covariate, X[:, i][:, np.newaxis]))
assert False, 'fix me'
# ep.optimize(only_step2=True)
lml_alts.append(ep.lml())
lml_alts = np.asarray(lml_alts, float)
lrs = -2 * self._lml_null + 2 * lml_alts
chi2 = st.chi2(df=1)
pvals = chi2.sf(lrs)
self._pvals = pvals
self._lrs = lrs
self._alt_model_ready = True
def _lml_alts(self, fep, X, covariate=None):
if covariate is None:
covariate = np.ones((X.shape[0], 1))
"""Training - mitosis detection"""
import argparse
from datetime import datetime
import json
import math
import os
import pickle
import shutil
import sys
import numpy as np
import tensorflow as tf
import tensorboard as tb
import resnet
import resnet50
# data
def get_image(filename, patch_size):
"""Get image from filename.
Args:
filename: String filename of an image.
patch_size: Integer length to which the square image will be
resized.
Returns:
TensorFlow tensor containing the decoded and resized image with
type float32 and values in [0, 1).
"""
image_string = tf.read_file(filename)
# shape (h,w,c), uint8 in [0, 255]:
image = tf.image.decode_png(image_string, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32) # float32 [0, 1)
# TODO: remove this
#image = tf.image.resize_images(image, [patch_size, patch_size]) # float32 [0, 1)
#with tf.control_dependencies(
# [tf.assert_type(image, tf.float32, image.dtype),
# tf.verify_tensor_all_finite(image, "image tensor contains NaN or INF values"]):
return image
def get_label(filename):
"""Get label from filename.
Args:
filename: String in format "**/train|val/mitosis|normal/name.{ext}",
where the label is either "mitosis" or "normal".
Returns:
TensorFlow float binary label equal to 1 for mitosis or 0 for
normal.
"""
# note file name format:
# lab is a single digit, case and region are two digits with padding if needed
splits = tf.string_split([filename], "/")
label_str = splits.values[-2]
# check that label string is valid
is_valid = tf.logical_or(tf.equal(label_str, 'normal'), tf.equal(label_str, 'mitosis'))
assert_op = tf.Assert(is_valid, [label_str])
with tf.control_dependencies([assert_op]): # test for correct label extraction
#label = tf.to_int32(tf.equal(label_str, 'mitosis'))
label = tf.to_float(tf.equal(label_str, 'mitosis')) # required because model produces float
return label
def preprocess(filename, patch_size):
"""Get image and label from filename.
Args:
filename: String filename of an image.
patch_size: Integer length to which the square image will be
resized, if necessary.
Returns:
Tuple of a float32 image Tensor with shape (h,w,c) and values in
[0, 1), a binary label, and a filename.
"""
# return image_resized, label
label = get_label(filename)
#label = tf.expand_dims(label, -1) # make each scalar label a vector of length 1 to match model
image = get_image(filename, patch_size) # float32 in [0, 1)
return image, label, filename
def normalize(image, model_name):
"""Normalize an image tensor.
Note: due to broadcasting, this works with a single image, or a batch
of images.
Args:
image: A Tensor of shape (...,h,w,c) with values in [0, 1].
model_name: String indicating the model to use.
Returns:
A normalized image Tensor of shape (...,h,w,c).
"""
# NOTE: don't use in-place updates to avoid side-effects
if model_name in ("vgg", "vgg19", "resnet"):
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
image = image[..., ::-1] # rbg -> bgr
image = image * 255 # float32 in [0, 255]
image = image - means # mean centering using imagenet means
else:
# normalize to [-1, 1]
#image = image / 255
image = image - 0.5
image = image * 2
return image
def unnormalize(image, model_name):
"""Unnormalize an image tensor.
Note: due to broadcasting, this works with a single image, or a batch
of images.
Args:
image: A Tensor of shape (...,h,w,c) with normalized values.
model_name: String indicating the model to use.
Returns:
An unnormalized image Tensor of shape (...,h,w,c) with values in
[0, 1].
"""
# NOTE: don't use in-place updates to avoid side-effects
if model_name in ("vgg", "vgg19", "resnet"):
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
image = image + means # mean centering using imagenet means
image = image / 255 # float32 in [0, 1]
image = image[..., ::-1] # bgr -> rgb
else:
image = image / 2
image = image + 0.5
return image
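# NOTE: the helper below is an illustrative sanity check only (its name is made up
# and it is not called anywhere in this script): `unnormalize` should approximately
# invert `normalize` for any supported model name.
def _example_normalize_roundtrip(model_name="resnet"):
  """Return a scalar Tensor with the max absolute round-trip error of
  unnormalize(normalize(image)) for a random batch of images in [0, 1)."""
  image = tf.random_uniform([4, 64, 64, 3])  # (n,h,w,c) float32 in [0, 1)
  roundtrip = unnormalize(normalize(image, model_name), model_name)
  return tf.reduce_max(tf.abs(image - roundtrip))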
def augment(image, patch_size, seed=None):
"""Apply random data augmentation to the given image.
Args:
image: A Tensor of shape (h,w,c) with values in [0, 1].
patch_size: The patch size to which to randomly crop the image.
seed: An integer used to create a random seed.
Returns:
A data-augmented image with values in [0, 1].
"""
# NOTE: these values currently come from the Google pathology paper:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al.
# Detecting Cancer Metastases on Gigapixel Pathology Images. arXiv.org. 2017.
# TODO: convert these hardcoded values into hyperparameters!!
# NOTE: if the seed is None, these ops will be seeded with a completely random seed, rather than
# a deterministic one based on the graph seed. This appears to only happen within the map
# functions of the Dataset API, based on the `test_num_parallel_calls` and
# `test_image_random_op_seeds` tests. For now, we will pass in a seed from the user and use it
# at the op level.
# NOTE: Additionally, if the Dataset.map() function that calls this function is using
# `num_parallel_calls` > 1, the results will be non-reproducible.
# TODO: https://github.com/tensorflow/tensorflow/issues/13932
# NOTE: ouch! It turns out that a reinitializable iterator for a Dataset will cause any ops with
# random seeds, such as these, to be reset, and thus each epoch will be evaluated exactly the
# same. The desired behavior would be to seed these ops once at the very beginning, so that an
# entire training run can be deterministic, but not with the exact same random augmentation during
# each epoch. Oh TensorFlow...
shape = tf.shape(image) # (h, w, c)
# random zoom
# TODO: possibly re-enable random zooms enabled via flag
#lb = shape[0] # lower bound on the resize is the current size of the image
#ub = lb + tf.to_int32(tf.to_float(lb)*0.25) # upper bound is 25% larger
#new_size = tf.random_uniform([2], minval=lb, maxval=ub, dtype=tf.int32, seed=seed)
#image = tf.image.resize_images(image, new_size) # random resize
#image = tf.random_crop(image, shape, seed=seed) # random cropping back to original size
# mirror padding if needed
size = int(math.ceil((patch_size + 30) * (math.cos(math.pi/4) + math.sin(math.pi/4))))
#pad_h = tf.maximum(0, size - shape[0])
#pad_w = tf.maximum(0, size - shape[1])
#pad_h_before = tf.to_int32(tf.floor(pad_h / 2))
#pad_w_before = tf.to_int32(tf.floor(pad_w / 2))
#pad_h_after = pad_h - pad_h_before
#pad_w_after = pad_w - pad_w_before
#paddings = tf.reshape(
# tf.stack([pad_h_before, pad_h_after, pad_w_before, pad_w_after, 0, 0], 0),
# [3, 2]) # h, w, z before/after paddings
pad = tf.to_int32(tf.ceil(tf.maximum(0, size - shape[0]) / 2))
paddings = tf.reshape(tf.stack([pad, pad, pad, pad, 0, 0], 0), [3, 2]) # h, w, z before/after
image = tf.pad(image, paddings, mode="REFLECT")
# random rotation
angle = tf.random_uniform([], minval=0, maxval=2*np.pi, seed=seed)
image = tf.contrib.image.rotate(image, angle, "BILINEAR")
# crop to bounding box to allow for random translation crop, if the input image is large enough
# note: translation distance: 7 µm = 30 pixels = max allowable euclidean pred distance from
# actual mitosis
# note: the allowable region is a circle with radius 30, but we are cropping to a square, so we
# can't crop from a square with side length of patch_size+30 or we run the risk of moving the
# mitosis to a spot in the corner of the resulting image, which would be outside of the circle
# radius, and thus we would be incorrect to label that image as positive. We also want to impose
# some amount of buffer into our learned model, so we place an upper bound of `c` pixels on the
# distance. We sample a distance along the height axis, compute a valid distance along the width
# axis that is upper bounded by a Euclidean translation distance of `c` in the worst case, crop
# the center of the image to this height and width, and then perform a random crop, yielding a
# patch for which the center is at most `c` pixels from the true center in terms of Euclidean
# distance.
# NOTE: In the dataset, all normal samples must be > 60 pixels from the center of a mitotic figure
# to avoid random crops that end up incorrectly within a mitotic region.
# c = 25 = sqrt(a**2 + b**2) = 6.25 µm
c = 25 # TODO: set this as a hyperparameter
a = tf.random_uniform([], minval=0, maxval=c, dtype=tf.int32, seed=seed)
b = tf.to_int32(tf.floor(tf.sqrt(tf.to_float(c**2 - a**2))))
crop_h = tf.minimum(shape[0], patch_size + a)
crop_w = tf.minimum(shape[1], patch_size + b)
image = tf.image.resize_image_with_crop_or_pad(image, crop_h, crop_w)
# random central crop == random translation augmentation
image = tf.random_crop(image, [patch_size, patch_size, 3], seed=seed)
image = tf.image.random_flip_up_down(image, seed=seed)
image = tf.image.random_flip_left_right(image, seed=seed)
image = tf.image.random_brightness(image, 64/255, seed=seed)
image = tf.image.random_contrast(image, 0.25, 1, seed=seed)
image = tf.image.random_saturation(image, 0.75, 1, seed=seed)
image = tf.image.random_hue(image, 0.04, seed=seed)
image = tf.clip_by_value(image, 0, 1)
return image
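# Worked example of the (a, b) translation bound used in `augment` above
# (illustrative numbers): with c = 25, a draw of a = 20 gives
# b = floor(sqrt(25**2 - 20**2)) = 15, and sqrt(a**2 + b**2) = sqrt(400 + 225) = 25,
# so every sampled (a, b) pair stays within the 25-pixel (6.25 µm) bound, which is
# itself inside the 30-pixel (7 µm) labeling tolerance described above.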
def create_augmented_batch(image, batch_size, patch_size):
"""Create a batch of augmented versions of the given image.
This will sample `batch_size/4` augmented images deterministically,
and yield four rotated variants for each augmented image (0, 90, 180,
270 degrees).
Args:
image: A Tensor of shape (h,w,c).
batch_size: Number of augmented versions to generate.
patch_size: The patch size to which to randomly crop the image.
Returns:
A Tensor of shape (batch_size,h,w,c) containing a batch of
data-augmented versions of the given image.
"""
assert batch_size % 4 == 0 or batch_size == 1, "batch_size must be 1 or divisible by 4"
# TODO rewrite this function to just draw `batch_size` examples from the `augment` function
def rots_batch(image):
rot0 = image
rot90 = tf.image.rot90(image)
rot180 = tf.image.rot90(image, k=2)
rot270 = tf.image.rot90(image, k=3)
rots = tf.stack([rot0, rot90, rot180, rot270])
return rots
if batch_size >= 4:
image_crop = tf.image.resize_image_with_crop_or_pad(image, patch_size, patch_size)
images = rots_batch(image_crop)
for i in range(round(batch_size/4)-1):
aug_image = augment(image, patch_size, i)
aug_image_rots = rots_batch(aug_image)
images = tf.concat([images, aug_image_rots], axis=0)
else:
images = tf.expand_dims(image, 0)
return images
def marginalize(x):
"""Marginalize over injected noise at test time.
This implements noise marginalization by averaging over a batch of
values. Typically, this would be used with logits for a batch of
augmented versions of a single image, or for the associated batch
of labels. This is only performed at test time when
`tf.keras.backend.learning_phase() == 0`.
Args:
x: A Tensor of shape (n,...).
Returns:
A Tensor of shape (1, ...) containing the average over the batch
dimension.
"""
avg_x = tf.reduce_mean(x, axis=0, keepdims=True, name="avg_x")
x = tf.cond(tf.logical_not(tf.keras.backend.learning_phase()), lambda: avg_x, lambda: x)
return x
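# NOTE: illustrative numpy analogue of the test-time branch of `marginalize` above;
# the helper name is made up and it is not called anywhere in this script.
def _example_marginalize_numpy(logits_batch):
  """Average an (n, 1) array of logits for augmented copies of a single image into
  one (1, 1) noise-marginalized logit."""
  return np.mean(np.asarray(logits_batch), axis=0, keepdims=True)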
def process_dataset(dataset, model_name, patch_size, augmentation, marginalization, marg_batch_size,
threads, seed=None):
"""Process a Dataset.
Args:
dataset: Dataset of filenames.
model_name: String indicating the model to use.
patch_size: Integer length to which the square patches will be
resized.
augmentation: Boolean for whether or not to apply random augmentation
to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`marg_batch_size` must be divisible by 4, or equal to 1 for a special
debugging case of no augmentation.
marg_batch_size: Integer training batch size.
threads: Integer number of threads for dataset buffering.
seed: Integer random seed.
Returns:
A labeled Dataset of augmented, normalized images, possibly with
marginalization.
"""
dataset = dataset.map(lambda filename: preprocess(filename, patch_size),
num_parallel_calls=threads)
# augment (typically at training time)
if augmentation:
dataset = dataset.map(
lambda image, label, filename: (augment(image, patch_size, seed), label, filename),
num_parallel_calls=threads)
else:
# we are now generating larger original images to allow for random rotations & translations
# during augmentation, and thus if we don't apply augmentation, we need to ensure that the
# images are center cropped to the correct size.
dataset = dataset.map(lambda image, label, filename:
(tf.image.resize_image_with_crop_or_pad(image, patch_size, patch_size), label, filename),
num_parallel_calls=threads)
# TODO: should this be in an `elif` block before the above `else` block? in particular, the
# patch sizes will be messed up
# marginalize (typically at eval time)
if marginalization:
dataset = dataset.map(lambda image, label, filename:
(create_augmented_batch(image, marg_batch_size, patch_size),
tf.tile(tf.expand_dims(label, -1), [marg_batch_size]),
tf.tile(tf.expand_dims(filename, -1), [marg_batch_size])),
num_parallel_calls=threads)
# normalize
dataset = dataset.map(lambda image, label, filename:
(normalize(image, model_name), label, filename), num_parallel_calls=threads)
return dataset
def create_dataset(path, model_name, patch_size, batch_size, shuffle, augmentation, marginalization,
oversampling, threads, prefetch_batches, seed=None):
"""Create a dataset.
Args:
path: String path to the generated image patches. This should
contain folders for each class.
model_name: String indicating the model to use.
patch_size: Integer length to which the square patches will be
resized.
batch_size: Integer training batch size.
shuffle: Boolean for whether or not to shuffle filenames.
augmentation: Boolean for whether or not to apply random augmentation
to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`batch_size` must be divisible by 4, or equal to 1 for a special
debugging case of no augmentation.
oversampling: Boolean for whether or not to oversample the minority
mitosis class via class-aware sampling. Not compatible with
marginalization.
threads: Integer number of threads for dataset buffering.
prefetch_batches: Integer number of batches to prefetch.
seed: Integer random seed.
Returns:
A Dataset object.
"""
# read & process images
if oversampling:
# oversample the minority mitosis class via class-aware sampling, in which we sample the mitosis
# and normal samples separately in order to yield class-balanced mini-batches.
mitosis_dataset = tf.data.Dataset.list_files(os.path.join(path, "mitosis", "*.png"))
normal_dataset = tf.data.Dataset.list_files(os.path.join(path, "normal", "*.png"))
# zipping will stop once the normal dataset is empty
mitosis_dataset = mitosis_dataset.repeat(-1).shuffle(int(1e6))
normal_dataset = normal_dataset.shuffle(int(1e6))
mitosis_dataset = process_dataset(mitosis_dataset, model_name, patch_size, augmentation, False,
batch_size, threads, seed)
normal_dataset = process_dataset(normal_dataset, model_name, patch_size, augmentation, False,
batch_size, threads, seed)
# zip together the datasets, then flatten and batch so that each mini-batch contains an even
# number of mitosis and normal samples
# NOTE: the number of elements in the zipped dataset is limited to the lesser of the mitosis and
# normal datasets, and since the mitosis dataset is set to repeat indefinitely, this zipped
# dataset will be limited to the number of normal samples
dataset = tf.data.Dataset.zip((mitosis_dataset, normal_dataset))
dataset = dataset.flat_map(lambda mitosis, normal:
tf.data.Dataset.from_tensors(mitosis).concatenate(tf.data.Dataset.from_tensors(normal)))
dataset = dataset.batch(batch_size)
# note that batch norm could be affected by very small final batches, but right now this would
# also affect evaluation tasks, so we will wait to enable this until we move to tf Estimators
#dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
else:
dataset = tf.data.Dataset.list_files(os.path.join(path, "*", "*.png"))
if shuffle:
dataset = dataset.shuffle(int(1e7))
dataset = process_dataset(dataset, model_name, patch_size, augmentation, marginalization,
batch_size, threads, seed)
# batch if necessary
if not marginalization:
dataset = dataset.batch(batch_size)
# note that batch norm could be affected by very small final batches, but right now this would
# also affect evaluation tasks, so we will wait to enable this until we move to tf Estimators
#dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
# prefetch
dataset = dataset.prefetch(prefetch_batches)
return dataset
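# NOTE: illustrative call only; the path and hyperparameters below are made up, and
# the real values come from the `train` function later in this script.
def _example_train_dataset():
  """Build a shuffled, augmented, class-balanced training dataset of 64x64 patches."""
  return create_dataset(
      path="patches/train", model_name="vgg", patch_size=64, batch_size=32,
      shuffle=True, augmentation=True, marginalization=False, oversampling=True,
      threads=4, prefetch_batches=2, seed=42)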
# model
def create_model(model_name, input_shape, images):
"""Create a model.
Args:
model_name: String indicating the model to use in ("vgg", "vgg19",
"resnet", "logreg").
input_shape: 3-Tuple containing the shape of a single image.
images: An image Tensor of shape (n,h,w,c).
Returns:
An unfrozen Keras Model in which `images` is the input tensor, and
another Model object representing the base model when using
pretrained models.
"""
if model_name == "logreg":
# logistic regression classifier
model_base = None
inputs = tf.keras.layers.Input(shape=input_shape, tensor=images)
x = tf.keras.layers.Flatten()(inputs)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg":
# create a model by replacing the classifier of a VGG16 model with a new classifier specific
# to the breast cancer problem
# recommend fine-tuning last 4 layers
#with tf.device("/cpu"):
model_base = tf.keras.applications.VGG16(
include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc1')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=keras.regularizers.l2(l2))(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc2')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=keras.regularizers.l2(l2))(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg_new":
# train a new vgg16-like model from scratch on inputs in [-1, 1].
#with tf.device("/cpu"):
model_base = tf.keras.applications.VGG16(
include_top=False, input_shape=input_shape, input_tensor=images, weights=None)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc1')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=tf.keras.regularizers.l2(l2))(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc2')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=tf.keras.regularizers.l2(l2))(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg19":
# create a model by replacing the classifier of a VGG19 model with a new classifier specific
# to the breast cancer problem
# recommend fine-tuning last 4 layers
#with tf.device("/cpu"):
#inputs = tf.keras.layers.Input(shape=input_shape)
model_base = tf.keras.applications.VGG19(
include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet":
# create a model by replacing the classifier of a ResNet50 model with a new classifier
# specific to the breast cancer problem
# recommend fine-tuning last 11 (stage 5 block c), 21 (stage 5 blocks b & c), or 33 (stage
# 5 blocks a,b,c) layers
#with tf.device("/cpu"):
# NOTE: there is an issue in keras with using batch norm with model templating, i.e.,
# defining a model with generic inputs and then calling it on a tensor. the issue stems from
# batch norm not being well defined for shared settings, but it makes it quite annoying in
# this context. to "fix" it, we define it by directly passing in the `images` tensor
# https://github.com/fchollet/keras/issues/2827
model_base = resnet50.ResNet50(include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet_new":
# train a new resnet50-like model from scratch on inputs in [-1, 1].
#with tf.device("/cpu"):
# NOTE: there is an issue in keras with using batch norm with model templating, i.e.,
# defining a model with generic inputs and then calling it on a tensor. the issue stems from
# batch norm not being well defined for shared settings, but it makes it quite annoying in
# this context. to "fix" it, we define it by directly passing in the `images` tensor
# https://github.com/fchollet/keras/issues/2827
model_base = resnet50.ResNet50(
include_top=False, input_shape=input_shape, input_tensor=images, weights=None)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet_custom":
model_base = None
model_tower = resnet.ResNet(images, input_shape)
else:
raise Exception("model name unknown: {}".format(model_name))
# TODO: add this when it's necessary, and move to a separate function
## Multi-GPU exploitation via a linear combination of GPU loss functions.
#ins = []
#outs = []
#for i in range(num_gpus):
# with tf.device("/gpu:{}".format(i)):
# x = tf.keras.layers.Input(shape=input_shape) # split of batch
# out = resnet50(x) # run split on shared model
# ins.append(x)
# outs.append(out)
#model = tf.keras.Model(inputs=ins, outputs=outs) # multi-GPU, data-parallel model
model = model_tower
# unfreeze all model layers.
for layer in model.layers[1:]: # don't include input layer
layer.trainable = True
return model, model_base
# based on `keras.utils.multi_gpu_model`
def multi_gpu_model(model, gpus):
"""Replicates a model on different GPUs.
Specifically, this function implements single-machine
multi-GPU data parallelism. It works in the following way:
- Divide the model's input(s) into multiple sub-batches.
- Apply a model copy on each sub-batch. Every model copy
is executed on a dedicated GPU.
- Concatenate the results (on CPU) into one big batch.
E.g. if your `batch_size` is 64 and you use `gpus=2`,
then we will divide the input into 2 sub-batches of 32 samples,
process each sub-batch on one GPU, then return the full
batch of 64 processed samples.
This induces quasi-linear speedup on up to 8 GPUs.
This function is only available with the TensorFlow backend
for the time being.
# Arguments
model: A Keras model instance. To avoid OOM errors,
this model could have been built on CPU, for instance.
gpus: Integer >= 2 or list of integers, number of GPUs or
list of GPU IDs on which to create model replicas.
# Returns
A Keras `Model` instance which can be used just like the initial
`model` argument, but which distributes its workload on multiple GPUs.
"""
if isinstance(gpus, (list, tuple)):
num_gpus = len(gpus)
target_gpu_ids = gpus
else:
num_gpus = gpus
target_gpu_ids = range(num_gpus)
def get_slice(data, i, parts):
shape = tf.shape(data)
batch_size = shape[:1]
input_shape = shape[1:]
step = batch_size // parts
if i == num_gpus - 1:
size = batch_size - step * i
else:
size = step
size = tf.concat([size, input_shape], axis=0)
stride = tf.concat([step, input_shape * 0], axis=0)
start = stride * i
return tf.slice(data, start, size)
all_outputs = []
for i in range(len(model.outputs)):
all_outputs.append([])
# Place a copy of the model on each GPU,
# each getting a slice of the inputs.
for i, gpu_id in enumerate(target_gpu_ids):
with tf.device('/cpu:0'):
inputs = []
# Retrieve a slice of the input on the CPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_i = tf.keras.layers.Lambda(
get_slice, output_shape=input_shape, arguments={'i': i, 'parts': num_gpus})(x)
inputs.append(slice_i)
with tf.device('/gpu:%d' % gpu_id):
with tf.name_scope('replica_%d' % gpu_id):
# Apply model on slice (creating a model replica on the target device).
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save the outputs for merging back together later.
for o in range(len(outputs)):
all_outputs[o].append(outputs[o])
# Merge outputs on CPU.
with tf.device('/cpu:0'):
merged = []
for name, outputs in zip(model.output_names, all_outputs):
merged.append(tf.keras.layers.concatenate(outputs, axis=0, name=name))
return tf.keras.Model(model.inputs, merged)
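# Worked example of the slicing above (illustrative numbers): with a batch of 64 and
# gpus=2, `get_slice` gives each replica step = 64 // 2 = 32 samples. The last
# replica instead takes batch_size - step * i, which also absorbs any remainder when
# the batch does not divide evenly (e.g. a batch of 65 is split into 32 and 33).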
def compute_data_loss(labels, logits):
"""Compute the mean logistic loss.
Args:
labels: A Tensor of shape (n, 1) containing a batch of labels.
logits: A Tensor of shape (n, 1) containing a batch of pre-sigmoid
prediction values.
Returns:
A scalar Tensor representing the mean logistic loss.
"""
# TODO: this is a binary classification problem so optimizing a loss derived from a Bernoulli
# distribution is appropriate. however, would the dynamics of the training algorithm be more
# stable if we treated this as a multi-class classification problem and derived a loss from a
# Multinomial distribution with two classes (and a single trial)? it would be
# over-parameterized, but then again, the deep net itself is already heavily parameterized.
# Bernoulli-derived loss
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.reshape(labels, [-1, 1]), logits=logits))
# Multinomial-derived loss
#labels = tf.one_hot(indices=labels, depth=2, on_value=1, off_value=0)
#loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
#loss = tf.reduce_mean(
# tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, features=logits))
return loss
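# NOTE: illustrative numpy cross-check of the Bernoulli-derived loss above; the
# helper name is made up and it is not called during training.
def _example_logistic_loss_numpy(labels, logits):
  """Mean of -[z*log(p) + (1-z)*log(1-p)] with p = sigmoid(logit), which is the
  quantity tf.nn.sigmoid_cross_entropy_with_logits computes in a numerically
  stable way."""
  z = np.asarray(labels, dtype=np.float64).reshape(-1)
  x = np.asarray(logits, dtype=np.float64).reshape(-1)
  p = 1.0 / (1.0 + np.exp(-x))
  return float(np.mean(-(z * np.log(p) + (1 - z) * np.log(1 - p))))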
def compute_l2_reg_loss(model, include_frozen=False, reg_final=True, reg_biases=False):
"""Compute L2 loss of trainable model weights.
This places a Gaussian prior with mean 0 std 1 on each of the model
parameters.
Args:
model: A Keras Model object.
include_frozen: Boolean for whether or not to ignore frozen layers.
reg_final: Boolean for whether or not to regularize the final
logits-producing layer.
reg_biases: Boolean for whether or not to regularize biases.
Returns:
The L2 regularization loss of all trainable (i.e., unfrozen) model
weights, unless `include_frozen` is True, in which case all weights
are used.
"""
weights = []
if reg_final:
end = None
else: # don't regularize the final function that produces logits
end = -1 if not model.layers[-1].name.startswith("flatten") else -2
for layer in model.layers[:end]:
if layer.trainable or include_frozen:
if hasattr(layer, 'kernel'): # conv, dense
weights.append(layer.kernel)
elif hasattr(layer, 'gamma'): # batch norm scale
weights.append(1.0 - layer.gamma) # Gaussian prior centered at 1 for batch norm gamma value
if reg_biases:
# TODO: generally, we don't regularize the biases, but could we determine a probabilistic
# motivation to do this?
if hasattr(layer, 'bias'):
weights.append(layer.bias)
elif hasattr(layer, 'beta'):
weights.append(layer.beta)
l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in weights])
return l2_loss
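# Worked relationship (illustrative): tf.nn.l2_loss(w) is sum(w**2) / 2, so adding
# l2 * l2_loss to the data loss is, up to an additive constant, equivalent to placing
# a Gaussian prior N(0, sigma**2) with sigma**2 = 1/l2 on each regularized weight:
#   -log N(w | 0, sigma**2) = w**2 / (2 * sigma**2) + const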
def compute_metrics(loss, labels, preds, probs, num_thresholds):
"""Compute metrics.
This creates ops that compute metrics in a streaming fashion.
Args:
loss: A Tensor representing the current batch mean loss.
labels: A Tensor of shape (n, 1) containing a batch of labels.
preds: A Tensor of shape (n, 1) containing a batch of binary
prediction values.
probs: A Tensor of shape (n, 1) containing a batch of probabilistic
prediction values.
num_thresholds: An integer indicating the number of thresholds to
use to compute PR curves.
Returns:
A tuple of mean loss, accuracy, positive predictive value
(precision), sensitivity (recall), F1, PR curve data, F1 list
based on the PR curve data, a grouped metrics update op, and a
group metrics reset op.
"""
# TODO: think about converting this to a class
mean_loss, mean_loss_update_op, mean_loss_reset_op = create_resettable_metric(tf.metrics.mean,
'mean_loss', values=loss)
acc, acc_update_op, acc_reset_op = create_resettable_metric(tf.metrics.accuracy,
'acc', labels=labels, predictions=preds)
ppv, ppv_update_op, ppv_reset_op = create_resettable_metric(tf.metrics.precision,
'ppv', labels=labels, predictions=preds)
sens, sens_update_op, sens_reset_op = create_resettable_metric(tf.metrics.recall,
'sens', labels=labels, predictions=preds)
f1 = 2 * (ppv * sens) / (ppv + sens)
pr, pr_update_op, pr_reset_op = create_resettable_metric(
tf.contrib.metrics.precision_recall_at_equal_thresholds,
'pr', labels=tf.cast(labels, dtype=tf.bool), predictions=probs, num_thresholds=num_thresholds)
f1s = 2 * (pr.precision * pr.recall) / (pr.precision + pr.recall)
# combine all reset & update ops
metric_update_ops = tf.group(
mean_loss_update_op, acc_update_op, ppv_update_op, sens_update_op, pr_update_op)
metric_reset_ops = tf.group(
mean_loss_reset_op, acc_reset_op, ppv_reset_op, sens_reset_op, pr_reset_op)
return mean_loss, acc, ppv, sens, f1, pr, f1s, metric_update_ops, metric_reset_ops
#return mean_loss, acc, ppv, sens, f1, metric_update_ops, metric_reset_ops
# utils
def create_resettable_metric(metric, scope, **metric_kwargs): # prob safer to only allow kwargs
"""Create a resettable metric.
Args:
metric: A tf.metrics metric function.
scope: A String scope name to enclose the metric variables within.
metric_kwargs: Kwargs for the metric.
Returns:
The metric op, the metric update op, and a metric reset op.
"""
# started with an implementation from https://github.com/tensorflow/tensorflow/issues/4814
with tf.variable_scope(scope) as scope:
metric_op, update_op = metric(**metric_kwargs)
scope_name = tf.contrib.framework.get_name_scope() # in case nested name/variable scopes
local_vars = tf.contrib.framework.get_variables(scope_name,
collection=tf.GraphKeys.LOCAL_VARIABLES) # get all local variables in this scope
reset_op = tf.variables_initializer(local_vars)
return metric_op, update_op, reset_op
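# NOTE: illustrative usage of the helper above, mirroring how the metrics are driven
# in the training loop below; 'example_acc' is a made-up scope name.
#   acc, acc_update, acc_reset = create_resettable_metric(
#       tf.metrics.accuracy, 'example_acc', labels=labels, predictions=preds)
#   sess.run(acc_update)   # once per batch, to accumulate
#   sess.run(acc)          # at the end of the epoch, to read the streaming value
#   sess.run(acc_reset)    # before the next epoch, to clear the accumulators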
def initialize_variables(sess):
"""Initialize variables for training.
This initializes all tensor variables in the graph.
Args:
sess: A TensorFlow Session.
"""
# NOTE: Keras keeps track of the variables that are initialized, and any call to
# `tf.keras.backend.get_session()`, which is even used internally, will include logic to
# initialize variables. There is a situation in which resuming from a previous checkpoint and
# then saving the model after the first epoch will result in part of the model being
# reinitialized. The problem is that calling `tf.keras.backend.get_session()` here is too soon
# to initialize any variables, the resume branch skips any variable initialization, and then the
# `model.save` code path ends up calling `tf.keras.backend.get_session()`, thus causing part of
# the model to be reinitialized. Specifically, the model base is fine because it is initialized
# when the pretrained weights are added in, but the new dense classifier will not be marked as
# initialized by Keras. The non-resume branch will initialize any variables not initialized by
# Keras yet, and thus will avoid this issue. It could be possible to use
# `tf.keras.backend.manual_variable_initialization(True)` and then manually initialize
# all variables, but this would cause any pretrained weights to be removed. Instead, we should
# initialize all variables first with the equivalent of the logic in
# `tf.keras.backend.get_session()`, and then call resume.
# NOTE: the global variables initializer will erase the pretrained weights, so we instead only
# initialize the other variables
# NOTE: reproduced from the old tf.keras.backend._initialize_variables() function
# EDIT: this was updated in the master branch in commit
# https://github.com/fchollet/keras/commit/9166733c3c144739868fe0c30d57b861b4947b44
# TODO: given the change in master, reevaluate whether or not this is actually necessary anymore
variables = tf.global_variables()
uninitialized_variables = []
for v in variables:
if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
uninitialized_variables.append(v)
v._keras_initialized = True
global_init_op = tf.variables_initializer(uninitialized_variables)
local_init_op = tf.local_variables_initializer()
sess.run([global_init_op, local_init_op])
# training
def train(train_path, val_path, exp_path, model_name, model_weights, patch_size, train_batch_size,
val_batch_size, clf_epochs, finetune_epochs, clf_lr, finetune_lr, finetune_momentum,
finetune_layers, l2, reg_biases, reg_final, augmentation, marginalization, oversampling,
num_gpus, threads, prefetch_batches, log_interval, checkpoint, resume, seed):
"""Train a model.
Args:
train_path: String path to the generated training image patches.
This should contain folders for each class.
val_path: String path to the generated validation image patches.
This should contain folders for each class.
exp_path: String path in which to store the model checkpoints, logs,
etc. for this experiment
model_name: String indicating the model to use.
model_weights: Optional string path to an HDF5 file containing the
initial weights of the model. If None, then pretrained imagenet
weights will be used.
patch_size: Integer length to which the square patches will be
resized.
train_batch_size: Integer training batch size.
val_batch_size: Integer validation batch size.
clf_epochs: Integer number of epochs for which to train the new
classifier layers.
finetune_epochs: Integer number of epochs for which to fine-tune the
model.
clf_lr: Float learning rate for training the new classifier layers.
finetune_lr: Float learning rate for fine-tuning the model.
finetune_momentum: Float momentum rate for fine-tuning the model.
finetune_layers: Integer number of layers at the end of the
pretrained portion of the model to fine-tune. The new classifier
layers will still be trained during fine-tuning as well.
l2: Float L2 global regularization value.
reg_biases: Boolean for whether or not to regularize biases.
reg_final: Boolean for whether or not to regularize the final
logits-producing layer.
augmentation: Boolean for whether or not to apply random
augmentation to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`val_batch_size` must be divisible by 4, or equal to 1 for a
special debugging case of no augmentation.
oversampling: Boolean for whether or not to oversample the minority
mitosis class via class-aware sampling.
num_gpus: Integer number of GPUs to use for data parallelism.
threads: Integer number of threads for dataset buffering.
prefetch_batches: Integer number of batches to prefetch.
log_interval: Integer number of steps between logging during
training.
checkpoint: Boolean flag for whether or not to save a checkpoint
after each epoch.
resume: Boolean flag for whether or not to resume training from a
checkpoint.
seed: Integer random seed.
"""
# TODO: break this out into:
# * data gen func
# * inference func
# * loss func
# * metrics func
# * logging func
# * train func
# set random seed
# NOTE: At the moment, this is fairly useless because if the augmentation ops are seeded, they will
# be evaluated in the exact same deterministic manner on every epoch, which is not desired.
# Additionally, the multithreading needed to process the data will cause non-deterministic
# results. The one benefit is that the classification layers will be created deterministically.
np.random.seed(seed)
tf.set_random_seed(seed)
# create session, force tf.Keras to use it
config = tf.ConfigProto(allow_soft_placement=True)#, log_device_placement=True)
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# debugger
#from tensorflow.python import debug as tf_debug
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# data
with tf.name_scope("data"):
# NOTE: seed issues to be fixed in tf
train_dataset = create_dataset(train_path, model_name, patch_size, train_batch_size, True,
augmentation, False, oversampling, threads, prefetch_batches) #, seed)
val_dataset = create_dataset(val_path, model_name, patch_size, val_batch_size, False,
False, marginalization, False, threads, prefetch_batches) #, seed)
# note that batch norm could be affected by very small final batches, but right now the fix,
# which requires this change as well, would also affect evaluation tasks, so we will wait to
# enable that (and this change too) until we move to tf Estimators
#output_shapes = (tf.TensorShape([None, patch_size, patch_size, 3]),
# tf.TensorShape([None]),
# tf.TensorShape([None]))
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
train_dataset.output_shapes)
#output_shapes)
train_init_op = iterator.make_initializer(train_dataset)
val_init_op = iterator.make_initializer(val_dataset)
images, labels, filenames = iterator.get_next()
input_shape = (patch_size, patch_size, 3)
# models
with tf.name_scope("model"):
# replicate model on each GPU to allow for data parallelism
if num_gpus > 1:
with tf.device("/cpu:0"):
model_tower, model_base = create_model(model_name, input_shape, images)
#model_tower, model_base = create_model(model_name, input_shape, images)
model = multi_gpu_model(model_tower, num_gpus)
else:
model_tower, model_base = create_model(model_name, input_shape, images)
model = model_tower
if model_weights is not None:
model_tower.load_weights(model_weights)
# compute logits and predictions, possibly with marginalization
# NOTE: tf prefers to feed logits into a combined sigmoid and logistic loss function for
# numerical stability
if marginalization:
logits = marginalize(model.output) # will marginalize at test time
labels = tf.cond(tf.keras.backend.learning_phase(), lambda: labels, lambda: labels[0:1])
else:
logits = model.output
# for Bernoulli-derived loss
probs = tf.nn.sigmoid(logits, name="probs")
preds = tf.round(probs, name="preds") # implicit threshold at 0.5
# for Multinomial-derived loss
#probs = tf.nn.softmax(logits, name="probs") # possible improved numerical stability
#preds = tf.argmax(probs, axis=1, name="preds")
# loss
with tf.name_scope("loss"):
with tf.control_dependencies([tf.assert_equal(tf.shape(labels)[0], tf.shape(logits)[0])]):
data_loss = compute_data_loss(labels, logits)
reg_loss = compute_l2_reg_loss(
model_tower, include_frozen=True, reg_final=reg_final, reg_biases=reg_biases)
loss = data_loss + l2*reg_loss
# TODO: enable this and test it
# use l2 reg during training, but not during validation. Otherwise, more fine-tuning will
# lead to an apparent lower validation loss, even though it may just be due to more layers
# that can be adjusted in order to lower the regularization portion of the loss.
#loss = tf.cond(tf.keras.backend.learning_phase(), lambda: data_loss + l2*reg_loss, lambda: data_loss)
# optim
# TODO: extract this into a function with tests
with tf.name_scope("optim"):
global_step_op = tf.train.get_or_create_global_step()
global_epoch_op = tf.Variable(0, trainable=False, name="global_epoch", dtype=tf.int32)
global_epoch_increment_op = tf.assign_add(global_epoch_op, 1, name="global_epoch_increment")
# TODO: rework the `finetune_layers` param to include starting from the beg/end
# classifier
# - freeze all pre-trained model layers.
if model_base:
for layer in model_base.layers:
layer.trainable = False
var_list = model_tower.trainable_weights
else:
var_list = None # i.e., use all available variables if we are not using transfer learning
# add any weight regularization to the base loss for unfrozen layers:
clf_reg_loss = compute_l2_reg_loss(model_tower, reg_final=reg_final, reg_biases=reg_biases)
clf_loss = data_loss + l2*clf_reg_loss
clf_opt = tf.train.AdamOptimizer(clf_lr)
clf_grads_and_vars = clf_opt.compute_gradients(clf_loss, var_list=var_list)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
clf_model_update_ops = model_tower.updates
with tf.control_dependencies(clf_model_update_ops):
clf_train_op = clf_opt.apply_gradients(clf_grads_and_vars, global_step=global_step_op)
# finetuning
# - unfreeze a portion of the pre-trained model layers.
# note, could make this arbitrary, but for now, fine-tune some number of layers at the *end* of
# the pretrained portion of the model
if model_base:
if finetune_layers != 0:
for layer in model_base.layers[-finetune_layers:]:
layer.trainable = True
var_list = model_tower.trainable_weights
else:
var_list = None # i.e., use all available variables if we are not using transfer learning
# add any weight regularization to the base loss for unfrozen layers:
finetune_reg_loss = compute_l2_reg_loss(model_tower, reg_final=reg_final, reg_biases=reg_biases)
finetune_loss = data_loss + l2*finetune_reg_loss
# TODO: enable this, or use `tf.train.piecewise_constant` with `global_epoch`
#lr = tf.train.exponential_decay(
# finetune_lr, global_step_op,
# decay_steps=decay_steps, decay_rate=decay_rate,
# staircase=True)
finetune_opt = tf.train.MomentumOptimizer(finetune_lr, finetune_momentum, use_nesterov=True)
finetune_grads_and_vars = finetune_opt.compute_gradients(finetune_loss, var_list=var_list)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
finetune_model_update_ops = model_tower.updates
with tf.control_dependencies(finetune_model_update_ops):
finetune_train_op = finetune_opt.apply_gradients(
finetune_grads_and_vars, global_step=global_step_op)
# metrics
with tf.name_scope("metrics"):
num_thresholds = 11
mean_loss, acc, ppv, sens, f1, pr, f1s, metric_update_ops, metric_reset_ops = compute_metrics(
loss, labels, preds, probs, num_thresholds)
f1_max = tf.reduce_max(f1s)
thresh_max = pr.thresholds[tf.argmax(f1s)]
# tensorboard summaries
# TODO: extract this into a function
# NOTE: tensorflow is annoying when it comes to name scopes, so sometimes the name needs to be
# hardcoded as a prefix instead of a proper name scope if that name was used as a name scope
# earlier. otherwise, a numeric suffix will be appended to the name.
# general minibatch summaries
with tf.name_scope("summary"):
# data
actual_batch_size = tf.shape(images)[0]
percent_pos = tf.reduce_mean(labels) # positive labels are 1
pos_mask = tf.cast(labels, tf.bool)
neg_mask = tf.logical_not(pos_mask)
mitosis_images = tf.boolean_mask(images, pos_mask)
normal_images = tf.boolean_mask(images, neg_mask)
mitosis_filenames = tf.boolean_mask(filenames, pos_mask)
normal_filenames = tf.boolean_mask(filenames, neg_mask)
num_preds = tf.shape(preds)[0]
# false-positive & false-negative cases
pos_preds_mask = tf.cast(tf.squeeze(preds, axis=1), tf.bool)
#pos_preds_mask = tf.cast(preds, tf.bool)
neg_preds_mask = tf.logical_not(pos_preds_mask)
fp_mask = tf.logical_and(pos_preds_mask, neg_mask)
fn_mask = tf.logical_and(neg_preds_mask, pos_mask)
fp_images = tf.boolean_mask(images, fp_mask)
fn_images = tf.boolean_mask(images, fn_mask)
fp_filenames = tf.boolean_mask(filenames, fp_mask)
fn_filenames = tf.boolean_mask(filenames, fn_mask)
with tf.name_scope("images"):
tf.summary.image("mitosis", unnormalize(mitosis_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("normal", unnormalize(normal_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("false-positive", unnormalize(fp_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("false-negative", unnormalize(fn_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
with tf.name_scope("data/filenames"):
tf.summary.text("mitosis", mitosis_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("normal", normal_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("false-positive", fp_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("false-negative", fn_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("data/images", images, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("data/labels", labels, collections=["minibatch", "minibatch_val"])
for layer in model_tower.layers:
for weight in layer.weights:
tf.summary.histogram(weight.name, weight, collections=["minibatch", "minibatch_val"])
if hasattr(layer, 'output'):
layer_name = "model/{}/out".format(layer.name)
tf.summary.histogram(layer_name, layer.output, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("model/probs", probs, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("model/preds", preds, collections=["minibatch", "minibatch_val"])
with tf.name_scope("minibatch"):
tf.summary.scalar("loss", loss, collections=["minibatch"])
tf.summary.scalar("batch_size", actual_batch_size, collections=["minibatch", "minibatch_val"])
tf.summary.scalar("num_preds", num_preds, collections=["minibatch", "minibatch_val"])
tf.summary.scalar("percent_positive", percent_pos, collections=["minibatch"])
tf.summary.scalar("learning_phase", tf.to_int32(tf.keras.backend.learning_phase()),
collections=["minibatch", "minibatch_val"])
# TODO: gradient histograms
# TODO: first layer convolution kernels as images
minibatch_summaries = tf.summary.merge_all("minibatch")
minibatch_val_summaries = tf.summary.merge_all("minibatch_val")
# epoch summaries
with tf.name_scope("epoch"):
tf.summary.scalar("loss", mean_loss, collections=["epoch"])
tf.summary.scalar("acc", acc, collections=["epoch"])
tf.summary.scalar("ppv", ppv, collections=["epoch"])
tf.summary.scalar("sens", sens, collections=["epoch"])
tf.summary.scalar("f1", f1, collections=["epoch"])
tf.summary.scalar("f1_max", f1_max, collections=["epoch"])
tf.summary.scalar("thresh_max", thresh_max, collections=["epoch"])
tb.summary.pr_curve_raw_data_op(
name='pr_curve',
true_positive_counts=pr.tp,
false_positive_counts=pr.fp,
true_negative_counts=pr.tn,
false_negative_counts=pr.fn,
precision=pr.precision,
recall=pr.recall,
num_thresholds=num_thresholds,
display_name='pr curve',
description="PR curve for {num_thresholds} thresholds.".format(num_thresholds=num_thresholds),
collections=["epoch"])
epoch_summaries = tf.summary.merge_all("epoch")
# use train and val writers so that plots can be on same graph
train_writer = tf.summary.FileWriter(os.path.join(exp_path, "train"), tf.get_default_graph())
val_writer = tf.summary.FileWriter(os.path.join(exp_path, "val"))
# save ops
checkpoint_filename = os.path.join(exp_path, "model.ckpt")
saver = tf.train.Saver()
# initialize stuff
initialize_variables(sess)
if resume:
saver.restore(sess, checkpoint_filename)
# TODO: extract this into a function with tests
# training loop for new classifier layers and fine-tuning
for train_op, epochs in [(clf_train_op, clf_epochs), (finetune_train_op, finetune_epochs)]:
global_epoch_start = sess.run(global_epoch_op)
for _ in range(global_epoch_start, global_epoch_start+epochs): # allow for resuming of training
global_epoch = sess.run(global_epoch_op)
# training
sess.run(train_init_op)
while True:
global_step = sess.run(global_step_op)
try:
if log_interval > 0 and global_step % log_interval == 0:
# train, update metrics, & log stuff
_, _, loss_val, summary_str = sess.run([train_op, metric_update_ops, loss,
minibatch_summaries], feed_dict={tf.keras.backend.learning_phase(): 1})
mean_loss_val, f1_val = sess.run([mean_loss, f1])
train_writer.add_summary(summary_str, global_step)
print("train", global_epoch, global_step, loss_val, mean_loss_val, f1_val)
else:
# train & update metrics
_, _ = sess.run(
[train_op, metric_update_ops], feed_dict={tf.keras.backend.learning_phase(): 1})
except tf.errors.OutOfRangeError:
break
# log average training metrics for epoch & reset
op_values = sess.run([f1, f1_max, thresh_max, ppv, sens, acc, mean_loss, epoch_summaries])
(f1_val, f1_max_val, thresh_max_val, ppv_val, sens_val, acc_val, mean_loss_val,
summary_str) = op_values
print("---epoch {global_epoch}, train f1 (@ 0.5): {f1_val}, train max f1 "\
"(@ {thresh_max_val}): {f1_max_val}, train ppv: {ppv_val}, train sens: {sens_val}, "\
"train acc: {acc_val}, train avg loss: {mean_loss_val}"\
.format(global_epoch=global_epoch, f1_val=f1_val, acc_val=acc_val,
mean_loss_val=mean_loss_val, thresh_max_val=thresh_max_val,
f1_max_val=f1_max_val, ppv_val=ppv_val, sens_val=sens_val))
train_writer.add_summary(summary_str, global_epoch)
sess.run(metric_reset_ops)
# validation
sess.run(val_init_op)
vi = 0 # validation step
while True:
try:
# evaluate & update metrics
if log_interval > 0 and vi % log_interval == 0:
_, loss_val, summary_str = sess.run(
[metric_update_ops, loss, minibatch_val_summaries],
feed_dict={tf.keras.backend.learning_phase(): 0})
mean_loss_val, f1_val = sess.run([mean_loss, f1])
print("val", global_epoch, vi, loss_val, mean_loss_val, f1_val)
val_writer.add_summary(summary_str, vi)
else:
_ = sess.run(metric_update_ops, feed_dict={tf.keras.backend.learning_phase(): 0})
vi += 1
except tf.errors.OutOfRangeError:
break
# log average validation metrics for epoch & reset
op_values = sess.run([f1, f1_max, thresh_max, ppv, sens, acc, mean_loss, epoch_summaries])
(f1_val, f1_max_val, thresh_max_val, ppv_val, sens_val, acc_val, mean_loss_val,
summary_str) = op_values
print("---epoch {global_epoch}, val f1 (@ 0.5): {f1_val}, val max f1 (@ {thresh_max_val}): "\
"{f1_max_val}, val ppv: {ppv_val}, val sens: {sens_val}, val acc: {acc_val}, "\
"val avg loss: {mean_loss_val}"\
.format(global_epoch=global_epoch, f1_val=f1_val,
thresh_max_val=thresh_max_val, f1_max_val=f1_max_val, ppv_val=ppv_val,
sens_val=sens_val, acc_val=acc_val, mean_loss_val=mean_loss_val))
val_writer.add_summary(summary_str, global_epoch)
sess.run(metric_reset_ops)
sess.run(global_epoch_increment_op) # global_epoch += 1
# save model
if checkpoint:
keras_filename = os.path.join(exp_path, "checkpoints",
"{f1_max_val:.5}_f1max_{f1_val:.5}_f1_{mean_loss_val:.5}_loss_{global_epoch}_"\
"epoch_model.hdf5"\
.format(f1_max_val=f1_max_val, f1_val=f1_val, mean_loss_val=mean_loss_val,
global_epoch=global_epoch))
model_tower.save(keras_filename, include_optimizer=False) # keras model
saver.save(sess, checkpoint_filename) # full TF graph
print("Saved model file to {}".format(keras_filename))
val_writer.flush()
#train_writer.flush()
def main(argv=None):
"""Command line interface for this script. Can optionally pass in a
list of strings to simulate command line usage.
"""
# parse args
parser = argparse.ArgumentParser()
parser.add_argument("--patches_path", default=os.path.join("data", "mitoses", "patches"),
help="path to the generated image patches containing `train` & `val` folders "\
"(default: %(default)s)")
parser.add_argument("--exp_parent_path", default=os.path.join("experiments", "mitoses"),
help="parent path in which to store experiment folders (default: %(default)s)")
parser.add_argument("--exp_name", default=None,
help="path within the experiment parent path in which to store the model checkpoints, "\
"logs, etc. for this experiment; an existing path can be used to resume training "\
"(default: %%y-%%m-%%d_%%H:%%M:%%S_{model})")
parser.add_argument("--exp_name_suffix", default=None,
help="suffix to add to experiment name (default: all parameters concatenated together)")
parser.add_argument("--exp_full_path", default=None,
help="full path in which to store the experiment. either this or the --exp_parent_path, "\
"--exp_name, --exp_name_suffix flags as a group can be used. typically, this would "\
"be used to resume an existing experiment (default: %(default)s)")
parser.add_argument("--model", default="vgg",
choices=["logreg", "vgg", "vgg_new", "vgg19", "resnet", "resnet_new", "resnet_custom"],
help="name of the model to use in ['logreg', 'vgg', 'vgg_new', 'vgg19', 'resnet', "\
"'resnet_new', 'resnet_custom'] (default: %(default)s)")
parser.add_argument("--model_weights", default=None,
help="optional hdf5 file containing the initial weights of the model. if not supplied, the "\
"model will start with pretrained weights from imagenet. (default: %(default)s)")
parser.add_argument("--patch_size", type=int, default=64,
help="integer length to which the square patches will be resized (default: %(default)s)")
parser.add_argument("--train_batch_size", type=int, default=32,
help="training batch size (default: %(default)s)")
parser.add_argument("--val_batch_size", type=int, default=32,
help="validation batch size (default: %(default)s)")
parser.add_argument("--clf_epochs", type=int, default=1,
help="number of epochs for which to train the new classifier layers "\
"(default: %(default)s)")
parser.add_argument("--finetune_epochs", type=int, default=0,
help="number of epochs for which to fine-tune the unfrozen layers (default: %(default)s)")
parser.add_argument("--clf_lr", type=float, default=1e-3,
help="learning rate for training the new classifier layers (default: %(default)s)")
parser.add_argument("--finetune_lr", type=float, default=1e-4,
help="learning rate for fine-tuning the unfrozen layers (default: %(default)s)")
parser.add_argument("--finetune_momentum", type=float, default=0.9,
help="momentum rate for fine-tuning the unfrozen layers (default: %(default)s)")
parser.add_argument("--finetune_layers", type=int, default=0,
help="number of layers at the end of the pretrained portion of the model to fine-tune "\
"(note: the new classifier layers will still be trained during fine-tuning as well) "\
"(default: %(default)s)")
parser.add_argument("--l2", type=float, default=1e-3,
help="amount of l2 weight regularization (default: %(default)s)")
parser.add_argument("--reg_biases", default=False, action="store_true",
help="whether or not to regularize biases. (default: %(default)s)")
parser.add_argument("--skip_reg_final", dest="reg_final", action="store_false",
help="whether or not to skip regularization of the logits-producing layer "\
"(default: %(default)s)")
parser.set_defaults(reg_final=True)
augment_parser = parser.add_mutually_exclusive_group(required=False)
augment_parser.add_argument("--augment", dest="augment", action="store_true",
help="apply random augmentation to the training images (default: True)")
augment_parser.add_argument("--no_augment", dest="augment", action="store_false",
help="do not apply random augmentation to the training images (default: False)")
parser.set_defaults(augment=True)
parser.add_argument("--marginalize", default=False, action="store_true",
help="use noise marginalization when evaluating the validation set. if this is set, then "\
"the validation batch_size must be divisible by 4, or equal to 1 for no augmentation "\
"(default: %(default)s)")
parser.add_argument("--oversample", default=False, action="store_true",
help="oversample the minority mitosis class during training via class-aware sampling "\
"(default: %(default)s)")
parser.add_argument("--num_gpus", type=int, default=1,
help="num_gpus: Integer number of GPUs to use for data parallelism. (default: %(default)s)")
parser.add_argument("--threads", type=int, default=5,
help="number of threads for dataset parallel processing; note: this will cause "\
"non-reproducibility (default: %(default)s)")
# TODO: update this to default to `None` to take advantage of auto prefetch buffer size tuning
# https://github.com/tensorflow/tensorflow/commit/d355f4e2644b68ea643f573c564936ec23b93787
parser.add_argument("--prefetch_batches", type=int, default=100,
help="number of batches to prefetch (default: %(default)s)")
parser.add_argument("--log_interval", type=int, default=100,
help="number of steps between logging during training (default: %(default)s)")
checkpoint_parser = parser.add_mutually_exclusive_group(required=False)
checkpoint_parser.add_argument("--checkpoint", dest="checkpoint", action="store_true",
help="save a checkpoint after each epoch (default: True)")
checkpoint_parser.add_argument("--no_checkpoint", dest="checkpoint", action="store_false",
help="do not save a checkpoint after each epoch (default: False)")
parser.set_defaults(checkpoint=True)
parser.add_argument("--resume", default=False, action="store_true",
help="resume training from a checkpoint (default: %(default)s)")
parser.add_argument("--seed", type=int, help="random seed (default: %(default)s)")
args = parser.parse_args(argv)
# set train/val paths
train_path = os.path.join(args.patches_path, "train")
val_path = os.path.join(args.patches_path, "val")
if args.exp_full_path is None:
if args.exp_name is None:
date = datetime.strftime(datetime.today(), "%y%m%d_%H%M%S")
args.exp_name = date + "_" + args.model
if args.exp_name_suffix is None:
args.exp_name_suffix = "patch_size_{args.patch_size}_batch_size_{args.train_batch_size}_"\
"clf_epochs_{args.clf_epochs}_ft_epochs_{args.finetune_epochs}_"\
"clf_lr_{args.clf_lr}_ft_lr_{args.finetune_lr}_"\
"ft_mom_{args.finetune_momentum}_ft_layers_{args.finetune_layers}_"\
"l2_{args.l2}_rb_{args.reg_biases}_aug_{args.augment}_"\
"marg_{args.marginalize}_over_{args.oversample}".format(args=args)
full_exp_name = args.exp_name + "_" + args.exp_name_suffix
args.exp_full_path = os.path.join(args.exp_parent_path, full_exp_name)
# make an experiment folder
if not os.path.exists(args.exp_full_path):
os.makedirs(os.path.join(args.exp_full_path, "checkpoints"))
print("experiment directory: {}".format(args.exp_full_path))
# create a random seed if needed
if args.seed is None:
args.seed = np.random.randint(1e9)
# save args to a file in the experiment folder, appending if it exists
with open(os.path.join(args.exp_full_path, 'args.txt'), 'a') as f:
json.dump(args.__dict__, f)
print("", file=f)
# can be read in later with
#with open('args.txt', 'r') as f:
# args = json.load(f)
# save command line invocation to txt file for ease of rerunning the exact experiment
with open(os.path.join(args.exp_full_path, 'invoke.txt'), 'a') as f:
# NOTE: since we sometimes call this `main` function via the hyperparam search script, we can't
# always use `sys.argv` because it would always refer to the outer invocation, i.e., the
# invocation of the hyperparam search script.
if argv is not None: # called from hpo script
fname = os.path.basename(__file__)
f.write("python3 {fname} ".format(fname=fname) + " ".join(argv) + "\n")
else: # called directly
f.write("python3 " + " ".join(sys.argv) + "\n")
# copy this script to the experiment folder
shutil.copy2(os.path.realpath(__file__), args.exp_full_path)
# train!
train(train_path=train_path, val_path=val_path, exp_path=args.exp_full_path,
model_name=args.model, model_weights=args.model_weights, patch_size=args.patch_size,
train_batch_size=args.train_batch_size, val_batch_size=args.val_batch_size,
clf_epochs=args.clf_epochs, finetune_epochs=args.finetune_epochs, clf_lr=args.clf_lr,
finetune_lr=args.finetune_lr, finetune_momentum=args.finetune_momentum,
finetune_layers=args.finetune_layers, l2=args.l2, reg_biases=args.reg_biases,
reg_final=args.reg_final, augmentation=args.augment, marginalization=args.marginalize,
oversampling=args.oversample, num_gpus=args.num_gpus, threads=args.threads,
prefetch_batches=args.prefetch_batches, log_interval=args.log_interval,
checkpoint=args.checkpoint, resume=args.resume, seed=args.seed)
if __name__ == "__main__":
main()
# ---
# tests
# TODO: eventually move these to a separate file.
# `py.test train_mitoses.py`
# TODO: use this fixture when we move these tests to a test module
#import pytest
#
#@pytest.fixture(autouse=True)
#def reset():
# """Ensure that the TensorFlow graph/session are clean."""
# tf.reset_default_graph()
# tf.keras.backend.clear_session()
# yield # run test
def reset():
"""Ensure that the TensorFlow graph/session are clean."""
tf.reset_default_graph()
tf.keras.backend.clear_session()
# data
def test_get_image(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
filename = os.path.join(str(tmpdir), "x.png")
x = np.random.randint(0, 255, dtype=np.uint8, size=(64,64,3))
Image.fromarray(x).save(filename)
image_op = get_image(filename, 64)
sess = tf.keras.backend.get_session()
image = sess.run(image_op)
assert image.shape == (64, 64, 3)
assert image.dtype == np.float32
assert np.min(image) >= 0
assert np.max(image) < 1
assert np.allclose(x.astype(np.float32) / 255, image)
assert np.allclose((x / 255).astype(np.float32), image)
def test_get_label():
import pytest
# mitosis
reset()
filename = "train/mitosis/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
assert label == 1
# normal
reset()
filename = "train/normal/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
assert label == 0
# wrong label name
with pytest.raises(tf.errors.InvalidArgumentError):
reset()
filename = "train/unknown/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
def test_preprocess(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
folder = os.path.join(str(tmpdir), "this/train/mitosis")
os.makedirs(folder)
filename_orig = os.path.join(folder, "x.png")
x = np.random.randint(0, 255, dtype=np.uint8, size=(64,64,3))
Image.fromarray(x).save(filename_orig)
image_op, label_op, filename_op = preprocess(tf.constant(filename_orig), 64)
sess = tf.keras.backend.get_session()
image, label, filename = sess.run([image_op, label_op, filename_op])
assert image.shape == (64, 64, 3)
assert image.dtype == np.float32
assert np.min(image) >= 0
assert np.max(image) < 1
assert label == 1.0
assert filename.decode("utf-8") == filename_orig
def test_normalize_unnormalize():
reset()
sess = tf.keras.backend.get_session()
input_shape = (64, 64, 3)
x_np = np.random.rand(*input_shape).astype(np.float32) # uniform sampling in [0, 1)
x_batch_np = np.random.rand(2, *input_shape).astype(np.float32) # uniform sampling in [0, 1)
# imagenet preprocessing
model_name = "vgg"
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
x_norm_correct_np = x_np[..., ::-1] * 255 - means
x_batch_norm_correct_np = x_batch_np[..., ::-1] * 255 - means
assert x_np.dtype == np.float32
assert x_np.dtype == x_batch_np.dtype == x_norm_correct_np.dtype == x_batch_norm_correct_np.dtype
# single example
def test(x_norm, x_unnorm):
"""Test normalized and unnormalized versions of x."""
# NOTE: closes over x_np & x_norm_correct_np
assert x_norm.dtype == x_norm_correct_np.dtype
assert x_unnorm.dtype == x_np.dtype
assert np.allclose(x_norm, x_norm_correct_np)
assert not np.allclose(x_norm, x_np)
assert np.all(np.max(x_norm, axis=(0,1)) > 1)
assert np.all(np.max(x_norm, axis=(0,1)) < 255 - means)
assert np.all(np.min(x_norm, axis=(0,1)) < 0)
assert np.all(np.min(x_norm, axis=(0,1)) > 0 - means)
assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)
# batch of examples
def test_batch(x_batch_norm, x_batch_unnorm):
"""Test normalized and unnormalized versions of x_batch."""
# NOTE: closes over x_batch_np & x_batch_norm_correct_np
assert x_batch_norm.dtype == x_batch_norm_correct_np.dtype
assert x_batch_unnorm.dtype == x_batch_np.dtype
assert np.allclose(x_batch_norm, x_batch_norm_correct_np)
assert not np.allclose(x_batch_norm, x_batch_np)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) > 1)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) < 255 - means)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) < 0)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) > 0 - means)
assert np.allclose(x_batch_unnorm, x_batch_unnorm_np, atol=1e-7)
## numpy
x_norm_np = normalize(x_np, model_name)
x_unnorm_np = unnormalize(x_norm_np, model_name)
test(x_norm_np, x_unnorm_np)
x_batch_norm_np = normalize(x_batch_np, model_name)
x_batch_unnorm_np = unnormalize(x_batch_norm_np, model_name)
test_batch(x_batch_norm_np, x_batch_unnorm_np)
## tensorflow
x = tf.placeholder(tf.float32, [*input_shape])
x_norm = normalize(x, model_name)
x_unnorm = unnormalize(x_norm, model_name)
x_norm_np, x_unnorm_np = sess.run([x_norm, x_unnorm], feed_dict={x: x_np})
test(x_norm_np, x_unnorm_np)
x_batch = tf.placeholder(tf.float32, [None, *input_shape])
x_batch_norm = normalize(x_batch, model_name)
x_batch_unnorm = unnormalize(x_batch_norm, model_name)
x_batch_norm_np, x_batch_unnorm_np = sess.run([x_batch_norm, x_batch_unnorm],
feed_dict={x_batch: x_batch_np})
test_batch(x_batch_norm_np, x_batch_unnorm_np)
# image standardization preprocessing
reset()
sess = tf.keras.backend.get_session()
model_name = "not_vgg"
x_norm_correct_np = x_np * 2 - 1
x_batch_norm_correct_np = x_batch_np * 2 - 1
# single example
def test(x_norm, x_unnorm):
"""Test normalized and unnormalized versions of x."""
# NOTE: closes over x_np & x_norm_correct_np
assert x_norm.dtype == x_norm_correct_np.dtype
assert x_unnorm.dtype == x_np.dtype
assert np.allclose(x_norm, x_norm_correct_np)
assert not np.allclose(x_norm, x_np)
assert np.all(np.max(x_norm, axis=(0,1)) <= 1)
assert np.all(np.max(x_norm, axis=(0,1)) > 0)
assert np.all(np.min(x_norm, axis=(0,1)) >= -1)
assert np.all(np.min(x_norm, axis=(0,1)) < 0)
assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)
# batch of examples
def test_batch(x_batch_norm, x_batch_unnorm):
"""Test normalized and unnormalized versions of x_batch."""
# NOTE: closes over x_batch_np & x_batch_norm_correct_np
assert x_batch_norm.dtype == x_batch_norm_correct_np.dtype
assert x_batch_unnorm.dtype == x_batch_np.dtype
assert np.allclose(x_batch_norm, x_batch_norm_correct_np)
assert not np.allclose(x_batch_norm, x_batch_np)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) <= 1)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) > 0)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) >= -1)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) < 0)
assert np.allclose(x_batch_unnorm, x_batch_unnorm_np) #, atol=1e-7)
## numpy
x_norm_np = normalize(x_np, model_name)
x_unnorm_np = unnormalize(x_norm_np, model_name)
test(x_norm_np, x_unnorm_np)
x_batch_norm_np = normalize(x_batch_np, model_name)
x_batch_unnorm_np = unnormalize(x_batch_norm_np, model_name)
test_batch(x_batch_norm_np, x_batch_unnorm_np)
## tensorflow
x = tf.placeholder(tf.float32, [*input_shape])
x_norm = normalize(x, model_name)
x_unnorm = unnormalize(x_norm, model_name)
x_norm_np, x_unnorm_np = sess.run([x_norm, x_unnorm], feed_dict={x: x_np})
test(x_norm_np, x_unnorm_np)
x_batch = tf.placeholder(tf.float32, [None, *input_shape])
x_batch_norm = normalize(x_batch, model_name)
x_batch_unnorm = unnormalize(x_batch_norm, model_name)
x_batch_norm_np, x_batch_unnorm_np = sess.run([x_batch_norm, x_batch_unnorm],
feed_dict={x_batch: x_batch_np})
test_batch(x_batch_norm_np, x_batch_unnorm_np)
def test_augment(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
filename = os.path.join(str(tmpdir), "x.png")
patch_size = 64
x = np.random.randint(0, 255, dtype=np.uint8, size=(patch_size, patch_size,3))
Image.fromarray(x).save(filename)
image_op = get_image(filename, 64)
aug_image_op = augment(image_op, patch_size)
sess = tf.keras.backend.get_session()
image, aug_image = sess.run([image_op, aug_image_op])
assert aug_image.shape == (64, 64, 3)
assert aug_image.dtype == np.float32
assert np.min(aug_image) >= 0
assert np.max(aug_image) <= 1
assert not np.allclose(aug_image, x/255)
assert not np.allclose(aug_image, image)
# seeds
reset()
image_op = get_image(filename, 64)
aug_image_op1 = augment(image_op, patch_size, 1)
aug_image_op2 = augment(image_op, patch_size, 2)
sess = tf.keras.backend.get_session()
aug_image1a, aug_image2a = sess.run([aug_image_op1, aug_image_op2])
reset()
image_op = get_image(filename, 64)
aug_image_op1 = augment(image_op, patch_size, 1)
aug_image_op2 = augment(image_op, patch_size, 2)
sess = tf.keras.backend.get_session()
aug_image1b, aug_image2b = sess.run([aug_image_op1, aug_image_op2])
assert np.allclose(aug_image1a, aug_image1b)
assert np.allclose(aug_image2a, aug_image2b)
assert not np.allclose(aug_image1a, aug_image2a)
def test_create_augmented_batch():
import pytest
reset()
sess = tf.keras.backend.get_session()
patch_size = 64
image = np.random.rand(patch_size, patch_size, 3).astype(np.float32)
# wrong sizes
with pytest.raises(AssertionError):
create_augmented_batch(image, 3, patch_size)
create_augmented_batch(image, 31, patch_size)
# correct sizes
def test(batch_size):
aug_images_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images = sess.run(aug_images_tf)
assert aug_images.shape == (batch_size,64,64,3)
test(32)
test(4)
test(1)
# deterministic behavior
def test2(batch_size):
# different session runs
aug_images_1_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_1 = sess.run(aug_images_1_tf)
aug_images_2_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_2 = sess.run(aug_images_2_tf)
assert np.array_equal(aug_images_1, aug_images_2)
# same session run
aug_images_1_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_2_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_1, aug_images_2 = sess.run([aug_images_1_tf, aug_images_2_tf])
assert np.array_equal(aug_images_1, aug_images_2)
test2(32)
test2(4)
test2(1)
def test_marginalize():
import pytest
reset()
sess = tf.keras.backend.get_session()
shape = (32, 1)
logits = np.random.randn(*shape) # will be embedded directly in tf graph
marg_logits = marginalize(logits) # tf ops
# forgot tf.keras.backend.learning_phase()
# NOTE: this is no longer an error due to a Keras change that sets a default value of 0 for
# `tf.keras.backend.learning_phase()`.
#with pytest.raises(tf.errors.InvalidArgumentError):
# l = sess.run(marg_logits)
# train time
l = sess.run(marg_logits, feed_dict={tf.keras.backend.learning_phase(): 1})
assert l.shape == shape
assert np.array_equal(l, logits)
# test time
l = sess.run(marg_logits, feed_dict={tf.keras.backend.learning_phase(): 0})
assert l.shape == (1, 1)
assert np.allclose(l.squeeze(), np.mean(logits))
# equal labels
reset()
sess = tf.keras.backend.get_session()
labels = np.full(shape, 1)
marg_labels = marginalize(labels)
# train time
l = sess.run(marg_labels, feed_dict={tf.keras.backend.learning_phase(): 1})
assert l.shape == shape
assert np.array_equal(l, labels)
# test time
l = sess.run(marg_labels, feed_dict={tf.keras.backend.learning_phase(): 0})
assert l.shape == (1, 1)
assert np.allclose(l.squeeze(), 1)
# model
def test_compute_l2_reg_loss():
reset()
# create model with a mix of pretrained and new weights
# NOTE: the pretrained layers will be initialized by Keras on creation, while the new Dense
# layer will remain uninitialized
input_shape = (224,224,3)
inputs = tf.keras.layers.Input(shape=input_shape)
x = tf.keras.layers.Conv2D(1, 3)(inputs)
x = tf.keras.layers.BatchNormalization()(x)
logits = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
for l in model.layers:
l.trainable = True
sess = tf.keras.backend.get_session()
# all layers
l2_reg = compute_l2_reg_loss(model)
correct_l2_reg = (
tf.nn.l2_loss(model.layers[1].kernel)
+ tf.nn.l2_loss(1.0 - model.layers[2].gamma)
+ tf.nn.l2_loss(model.layers[3].kernel))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
# subset of layers
model.layers[1].trainable = False
l2_reg = compute_l2_reg_loss(model)
correct_l2_reg = (
tf.nn.l2_loss(1.0 - model.layers[2].gamma)
+ tf.nn.l2_loss(model.layers[3].kernel))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
# include frozen layers
model.layers[1].trainable = False
l2_reg = compute_l2_reg_loss(model, True)
correct_l2_reg = (
tf.nn.l2_loss(model.layers[1].kernel)
+ tf.nn.l2_loss(1.0 - model.layers[2].gamma)
+ tf.nn.l2_loss(model.layers[3].kernel))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
model.layers[1].trainable = True
# skip final layer
l2_reg = compute_l2_reg_loss(model, reg_final=False)
correct_l2_reg = (
tf.nn.l2_loss(model.layers[1].kernel)
+ tf.nn.l2_loss(1.0 - model.layers[2].gamma))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
# include biases
l2_reg = compute_l2_reg_loss(model, reg_biases=True)
correct_l2_reg = (
tf.nn.l2_loss(model.layers[1].kernel) + tf.nn.l2_loss(model.layers[1].bias)
+ tf.nn.l2_loss(1.0 - model.layers[2].gamma) + tf.nn.l2_loss(model.layers[2].beta)
+ tf.nn.l2_loss(model.layers[3].kernel) + tf.nn.l2_loss(model.layers[3].bias))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
def test_create_model():
input_shape = (64, 64, 3)
def test(model_name, base_layers):
x = tf.placeholder(tf.float32, [None, *input_shape])
model, model_base = create_model(model_name, input_shape, x)
assert model.input_shape == (None, *input_shape)
assert model.input == x
if base_layers == 0:
assert model_base is None
else:
assert len(model_base.layers) == base_layers
assert model.layers[:len(model_base.layers)] == model_base.layers
assert model.output_shape == (None, 1)
# logreg
reset()
sess = tf.keras.backend.get_session()
test("logreg", 0)
# vgg
reset()
sess = tf.keras.backend.get_session()
test("vgg", 19)
# vgg19
reset()
sess = tf.keras.backend.get_session()
test("vgg19", 22)
# resnet
reset()
sess = tf.keras.backend.get_session()
test("resnet", 174)
# resnet custom
reset()
sess = tf.keras.backend.get_session()
test("resnet_custom", 0)
def test_compute_data_loss():
reset()
sess = tf.keras.backend.get_session()
shape = (32, 1)
logits = np.random.rand(*shape).astype(np.float32)
labels = np.random.binomial(1, 0.5, shape).astype(np.float32)
assert logits.shape == labels.shape == shape
loss = compute_data_loss(labels, logits)
loss_np = sess.run(loss)
assert loss_np.shape == ()
assert type(loss_np) == np.float32
# utils
def test_resettable_metric():
reset()
x = tf.placeholder(tf.int32, [None, 1])
x1 = np.array([1,0,0,0]).reshape(4,1)
x2 = np.array([0,0,0,0]).reshape(4,1)
with tf.name_scope("something"): # testing nested name/variable scopes
mean_op, update_op, reset_op = create_resettable_metric(tf.metrics.mean, 'mean_loss', values=x)
sess = tf.keras.backend.get_session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
_, out_up = sess.run([mean_op, update_op], feed_dict={x: x1})
assert np.allclose([out_up], [1/4])
assert np.allclose([sess.run(mean_op)], [1/4])
assert np.allclose([sess.run(mean_op)], [1/4])
_, out_up = sess.run([mean_op, update_op], feed_dict={x: x1})
assert np.allclose([out_up], [2/8])
assert np.allclose([sess.run(mean_op)], [2/8])
assert np.allclose([sess.run(mean_op)], [2/8])
_, out_up = sess.run([mean_op, update_op], feed_dict={x: x2})
assert np.allclose([out_up], [2/12])
assert np.allclose([sess.run(mean_op)], [2/12])
assert np.allclose([sess.run(mean_op)], [2/12])
sess.run(reset_op) # make sure this works!
_, out_up = sess.run([mean_op, update_op], feed_dict={x: x2})
assert out_up == 0
assert sess.run(mean_op) == 0
_, out_up = sess.run([mean_op, update_op], feed_dict={x: x1})
assert np.allclose([out_up], [1/8])
assert np.allclose([sess.run(mean_op)], [1/8])
assert np.allclose([sess.run(mean_op)], [1/8])
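# A possible implementation sketch for `create_resettable_metric` (inferred from the behavior
# exercised above; an assumption, not necessarily the project's exact code): build the tf.metrics
# op inside a dedicated variable scope and expose an op that re-initializes that scope's local
# variables.
#
#   def create_resettable_metric_sketch(metric_fn, scope_name, **metric_kwargs):
#     with tf.variable_scope(scope_name) as scope:
#       metric_op, update_op = metric_fn(**metric_kwargs)
#       local_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope=scope.name)
#       reset_op = tf.variables_initializer(local_vars)
#     return metric_op, update_op, reset_op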
def test_initialize_variables():
# NOTE: keep the `tf.keras.backend.get_session()` call up here to ensure that the
# `initialize_variables` function is working properly.
#tf.reset_default_graph()
##tf.keras.backend.manual_variable_initialization(True)
#tf.keras.backend.clear_session() # this is needed if we want to create sessions at the beginning
reset()
sess = tf.keras.backend.get_session()
# create model with a mix of pretrained and new weights
# NOTE: the pretrained layers will be initialized by Keras on creation, while the new Dense
# layer will remain uninitialized
input_shape = (224,224,3)
inputs = tf.keras.layers.Input(shape=input_shape)
model_base = tf.keras.applications.VGG16(
include_top=False, input_shape=input_shape, input_tensor=inputs)
x = model_base.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
logits = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
# check that pre-trained model is initialized
# NOTE: This occurs because using pretrained weights ends up calling
# `tf.keras.backend.batch_set_value`, which creates assignment ops and calls
# `tf.keras.backend.get_session()` to get the session and then run the assignment ops. The
# `tf.keras.backend.get_session()` call initializes the model variables to random values and sets
# the `_keras_initialized` attribute to True for each variable. Then the assignment ops run and
# actually set the variables to the pretrained values. Without pretrained weights, the
# `tf.keras.backend.get_session()` function is not called upon model creation, and thus these
# variables will remain uninitialized. Furthermore, if we set
# `tf.keras.backend.manual_variable_initialization(True)`, the pretrained weights will be loaded,
# but there will be no indication that those variables were already initialized, and thus we will
# end up reinitializing them to random values. This is all a byproduct of using
# Keras + TensorFlow in a hybrid setup, and we should look into making this less brittle.
for v in model_base.weights:
assert hasattr(v, '_keras_initialized') and v._keras_initialized # check for initialization
assert sess.run(tf.is_variable_initialized(v)) # check for initialization
# the new dense layer is not initialized yet
#with pytest.raises(AssertionError):
assert len(model.layers[-1].weights) == 2
for v in model.layers[-1].weights:
assert not getattr(v, '_keras_initialized', False)
assert not sess.run(tf.is_variable_initialized(v))
# initialize variables, including marking them with the `_keras_initialized` attribute
initialize_variables(sess)
# check that everything is initialized and marked with the `_keras_initialized` attribute
# NOTE: this is important for a hybrid Keras & TensorFlow setup where Keras is being used for the
# model creation part, and raw TensorFlow is being used for the rest. if variables are not
# initialized *and* marked with the special Keras attribute, then certain Keras functions will end
# up accidentally reinitializing variables when they use `tf.keras.backend.get_session()`
# internally. In a pure Keras setup, this would not happen since the model would be initialized
# at the proper times. In a Keras & TensorFlow hybrid setup, this can cause issues. By
# encapsulating this nonsense in a function, we can avoid these problems.
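  # A minimal sketch of what such an `initialize_variables(sess)` helper needs to do in this
  # hybrid setup (an assumption for illustration, not necessarily the project's exact code):
  # initialize only the variables TF reports as uninitialized, then mark every global variable
  # with `_keras_initialized` so Keras will not re-run initializers via get_session().
  #
  #   def initialize_variables_sketch(sess):
  #     global_vars = tf.global_variables()
  #     is_init = sess.run([tf.is_variable_initialized(v) for v in global_vars])
  #     uninit_vars = [v for v, flag in zip(global_vars, is_init) if not flag]
  #     sess.run(tf.variables_initializer(uninit_vars))
  #     for v in global_vars:
  #       v._keras_initialized = True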
for v in tf.global_variables():
assert hasattr(v, '_keras_initialized') and v._keras_initialized # check for initialization
assert sess.run(tf.is_variable_initialized(v)) # check for initialization
def test_random_seed():
# NOTE: goal here is to prove that random seeds actually work across sessions
# NOTE: after the move from `keras` to `tf.keras`, only the tf seed matters, rather than the
# numpy one
reset()
input_shape = (64,64,3)
seed = 42
# seed
import tensorflow as tf
tf.set_random_seed(seed)
layer1 = tf.keras.layers.Dense(32)
layer1.build(input_shape)
w1 = layer1.get_weights()[0]
reset()
# seed after tf import
import tensorflow as tf
tf.set_random_seed(seed)
layer2 = tf.keras.layers.Dense(32)
layer2.build(input_shape)
w2 = layer2.get_weights()[0]
# compare
assert np.allclose(w1, w2)
def test_num_parallel_calls():
import pytest
reset()
def test(threads):
np.random.seed(42)
tf.set_random_seed(42)
images = np.random.rand(100, 64, 64, 3).astype(np.float32)
def get_data():
dataset = tf.data.Dataset.from_tensor_slices(images) # some initial dataset
#dataset = dataset.map(lambda x: x * 2, num_parallel_calls=threads) # this works fine always
#dataset = dataset.map(lambda x: x * tf.random_normal([64, 64, 3], seed=42),
# num_parallel_calls=threads)
dataset = dataset.map(lambda image: tf.image.random_hue(image, 0.04, seed=42),
num_parallel_calls=threads)
dataset = dataset.batch(32)
x = dataset.make_one_shot_iterator().get_next()
return x
# execution 1
x = get_data()
with tf.Session() as sess:
x_batch1a = sess.run(x)
x_batch1b = sess.run(x)
# clear out everything
tf.reset_default_graph()
# execution 2
x = get_data()
with tf.Session() as sess:
x_batch2a = sess.run(x)
x_batch2b = sess.run(x)
# results should be equivalent across executions
assert np.allclose(x_batch1a, x_batch2a)
assert np.allclose(x_batch1b, x_batch2b)
assert not np.allclose(x_batch1a, x_batch1b)
assert not np.allclose(x_batch2a, x_batch2b)
test(1) # works with 1 thread!
# TODO: eventually, this should not throw an exception:
# https://github.com/tensorflow/tensorflow/issues/13932
with pytest.raises(AssertionError):
test(15) # fails with >1 threads!
def test_image_random_op_seeds():
# this test shows that there is an issue with the `tf.data.Dataset.map` function in which
# graph-level seeds appear to not be propagated within the mapped functions
reset()
np.random.seed(42)
image = np.random.rand(64, 64, 3).astype(np.float32)
tf.set_random_seed(42)
image_aug = tf.image.random_hue(image, 0.04)
with tf.Session() as sess:
image_aug_value1 = sess.run(image_aug)
with tf.Session() as sess:
image_aug_value2 = sess.run(image_aug)
assert np.allclose(image_aug_value1, image_aug_value2)
def test_dataset_reinit_iter_augment_seeds():
import pytest
reset()
np.random.seed(42)
tf.set_random_seed(42)
images = np.random.rand(100, 64, 64, 3).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices(images) # some initial dataset
dataset = dataset.map(lambda image: tf.image.random_hue(image, 0.04, seed=42))
dataset = dataset.batch(32)
iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
init_op = iterator.make_initializer(dataset)
x = iterator.get_next()
sess = tf.Session()
# initialize
sess.run(init_op)
# grab two batches
x_batch1a = sess.run(x)
x_batch1b = sess.run(x)
# reinitialize
sess.run(init_op)
# grab two batches
x_batch2a = sess.run(x)
x_batch2b = sess.run(x)
assert not np.allclose(x_batch1a, x_batch1b)
assert not np.allclose(x_batch2a, x_batch2b)
# ouch! these two are broken -- it turns out that a reinitializable iterator for a Dataset will
# cause any ops with random seeds to be reset, and thus each epoch will be evaluated exactly the
# same. the desired behavior would be to seed the ops once at the very beginning, so that an
# entire training run can be deterministic, but not with the exact same random augmentation during
# each epoch
with pytest.raises(AssertionError):
assert not np.allclose(x_batch1a, x_batch2a)
with pytest.raises(AssertionError):
assert not np.allclose(x_batch1b, x_batch2b)
def test_normalize_dtype():
import pytest
reset()
sess = tf.keras.backend.get_session()
input_shape = (64,64,3)
model = tf.keras.applications.VGG16(include_top=False, input_shape=input_shape)
# check on incorrect float type promotion within the normalize function
def normalize(image):
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
image = image[..., ::-1] # rbg -> bgr
image = image * 255 # float32 in [0, 255]
image = image - means # mean centering using imagenet means
return image
def normalize_incorrect(image):
image = image[..., ::-1] # rbg -> bgr
image = image * 255 # float32 in [0, 255]
image = image - [103.939, 116.779, 123.68] # ouch! this will yield a float64!
return image
x = np.random.randint(0, 255, size=(1, *input_shape), dtype=np.uint8)
x_div_255 = (x / 255).astype(np.float32)
x_div_255_incorrect = x / 255 # float64
x_norm1a = normalize(x_div_255)
x_norm1b = normalize(x_div_255_incorrect)
x_norm2a = normalize_incorrect(x_div_255)
x_norm2b = normalize_incorrect(x_div_255_incorrect)
# tensorflow
x_tf = tf.placeholder(tf.float32, [1, *input_shape])
x_norm_tf1 = normalize(x_tf)
x_norm_tf2 = normalize_incorrect(x_tf)
x_normtf1, x_normtf2 = sess.run([x_norm_tf1, x_norm_tf2], feed_dict={x_tf: x_div_255})
assert x_norm1a.shape == x_norm1b.shape == x_norm2a.shape == x_norm2b.shape == \
x_normtf1.shape == x_normtf2.shape
assert x_norm1a.dtype == x_normtf1.dtype == x_normtf2.dtype == np.float32 # this is what we want
with pytest.raises(AssertionError):
assert x_norm1b.dtype == np.float32 # ouch!
with pytest.raises(AssertionError):
assert x_norm2a.dtype == np.float32 # ouch!
with pytest.raises(AssertionError):
assert x_norm2b.dtype == np.float32 # ouch!
  assert np.allclose(x_norm1a, x_norm1b)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on May, 2020
@author: <NAME> <<EMAIL>>
"""
from abc import ABC
from typing import Union, Iterable
import numpy as np
from scipy import sparse
from sknetwork.linkpred.first_order_core import common_neighbors_node_core, jaccard_node_core, salton_node_core, \
sorensen_node_core, hub_promoted_node_core, hub_depressed_node_core, adamic_adar_node_core, \
resource_allocation_node_core, \
common_neighbors_edges_core, jaccard_edges_core, salton_edges_core, sorensen_edges_core, hub_promoted_edges_core, \
hub_depressed_edges_core, adamic_adar_edges_core, resource_allocation_edges_core
from sknetwork.linkpred.base import BaseLinkPred
from sknetwork.utils.check import check_format
class FirstOrder(BaseLinkPred, ABC):
"""Base class for first order algorithms."""
def __init__(self):
super(FirstOrder, self).__init__()
self.indptr_ = None
self.indices_ = None
def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray]):
"""Fit algorithm to the data.
Parameters
----------
adjacency :
Adjacency matrix of the graph
Returns
-------
self : :class:`FirstOrder`
"""
adjacency = check_format(adjacency)
adjacency.sort_indices()
self.indptr_ = adjacency.indptr.astype(np.int32)
self.indices_ = adjacency.indices.astype(np.int32)
return self
def _predict_node(self, source: int):
"""Prediction for a single node."""
n = self.indptr_.shape[0] - 1
return self._predict_base(source, np.arange(n))
class CommonNeighbors(FirstOrder):
"""Link prediction by common neighbors:
:math:`s(i, j) = |\\Gamma_i \\cap \\Gamma_j|`.
Attributes
----------
indptr_ : np.ndarray
Pointer index for neighbors.
indices_ : np.ndarray
Concatenation of neighbors.
Examples
--------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> cn = CommonNeighbors()
>>> similarities = cn.fit_predict(adjacency, 0)
>>> similarities
array([2, 1, 1, 1, 1])
>>> similarities = cn.predict([0, 1])
>>> similarities
array([[2, 1, 1, 1, 1],
[1, 3, 0, 2, 1]])
>>> similarities = cn.predict((0, 1))
>>> similarities
1
>>> similarities = cn.predict([(0, 1), (1, 2)])
>>> similarities
array([1, 0])
"""
def __init__(self):
super(CommonNeighbors, self).__init__()
def _predict_base(self, source: int, targets: Iterable):
"""Prediction for a single node."""
return np.asarray(common_neighbors_node_core(self.indptr_, self.indices_, np.int32(source),
np.array(targets, dtype=np.int32))).astype(int)
def _predict_edges(self, edges: np.ndarray):
"""Prediction for multiple edges."""
return np.asarray(common_neighbors_edges_core(self.indptr_, self.indices_, edges.astype(np.int32))).astype(int)
class JaccardIndex(FirstOrder):
"""Link prediction by Jaccard Index:
:math:`s(i, j) = \\dfrac{|\\Gamma_i \\cap \\Gamma_j|}{|\\Gamma_i \\cup \\Gamma_j|}`.
Attributes
----------
indptr_ : np.ndarray
Pointer index for neighbors.
indices_ : np.ndarray
Concatenation of neighbors.
Examples
--------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> jaccard = JaccardIndex()
>>> similarities = jaccard.fit_predict(adjacency, 0)
>>> similarities.round(2)
array([1. , 0.25, 0.33, 0.33, 0.25])
>>> similarities = jaccard.predict([0, 1])
>>> similarities.round(2)
array([[1. , 0.25, 0.33, 0.33, 0.25],
[0.25, 1. , 0. , 0.67, 0.2 ]])
>>> similarities = jaccard.predict((0, 1))
>>> similarities.round(2)
0.25
>>> similarities = jaccard.predict([(0, 1), (1, 2)])
>>> similarities.round(2)
array([0.25, 0. ])
References
----------
    Jaccard, P. (1901). Étude comparative de la distribution florale dans une portion des Alpes et du Jura.
Bulletin de la Société Vaudoise des Sciences Naturelles, 37, 547-579.
"""
def __init__(self):
super(JaccardIndex, self).__init__()
def _predict_base(self, source: int, targets: Iterable):
"""Prediction for a single node."""
return np.asarray(jaccard_node_core(self.indptr_, self.indices_, np.int32(source), np.array(targets, dtype=np.int32)))
def _predict_edges(self, edges: np.ndarray):
"""Prediction for multiple edges."""
return np.asarray(jaccard_edges_core(self.indptr_, self.indices_, edges.astype(np.int32)))
class SaltonIndex(FirstOrder):
"""Link prediction by Salton Index:
:math:`s(i, j) = \\dfrac{|\\Gamma_i \\cap \\Gamma_j|}{\\sqrt{|\\Gamma_i|.|\\Gamma_j|}}`.
Attributes
----------
indptr_ : np.ndarray
Pointer index for neighbors.
indices_ : np.ndarray
Concatenation of neighbors.
Examples
--------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> salton = SaltonIndex()
>>> similarities = salton.fit_predict(adjacency, 0)
>>> similarities.round(2)
array([1. , 0.41, 0.5 , 0.5 , 0.41])
>>> similarities = salton.predict([0, 1])
>>> similarities.round(2)
array([[1. , 0.41, 0.5 , 0.5 , 0.41],
[0.41, 1. , 0. , 0.82, 0.33]])
>>> similarities = salton.predict((0, 1))
>>> similarities.round(2)
0.41
>>> similarities = salton.predict([(0, 1), (1, 2)])
>>> similarities.round(2)
array([0.41, 0. ])
References
----------
    Martínez, V., Berzal, F., & Cubero, J. C. (2016).
`A survey of link prediction in complex networks.
<https://dl.acm.org/doi/pdf/10.1145/3012704>`_
ACM computing surveys (CSUR), 49(4), 1-33.
"""
def __init__(self):
super(SaltonIndex, self).__init__()
def _predict_base(self, source: int, targets: Iterable):
"""Prediction for a single node."""
return np.asarray(salton_node_core(self.indptr_, self.indices_, np.int32(source), np.array(targets, dtype=np.int32)))
def _predict_edges(self, edges: np.ndarray):
"""Prediction for multiple edges."""
return np.asarray(salton_edges_core(self.indptr_, self.indices_, edges.astype(np.int32)))
class SorensenIndex(FirstOrder):
"""Link prediction by Salton Index:
:math:`s(i, j) = \\dfrac{2|\\Gamma_i \\cap \\Gamma_j|}{|\\Gamma_i|+|\\Gamma_j|}`.
Attributes
----------
indptr_ : np.ndarray
Pointer index for neighbors.
indices_ : np.ndarray
Concatenation of neighbors.
Examples
--------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> sorensen = SorensenIndex()
>>> similarities = sorensen.fit_predict(adjacency, 0)
>>> similarities.round(2)
array([1. , 0.4, 0.5, 0.5, 0.4])
>>> similarities = sorensen.predict([0, 1])
>>> similarities.round(2)
array([[1. , 0.4 , 0.5 , 0.5 , 0.4 ],
[0.4 , 1. , 0. , 0.8 , 0.33]])
>>> similarities = sorensen.predict((0, 1))
>>> similarities.round(2)
0.4
>>> similarities = sorensen.predict([(0, 1), (1, 2)])
>>> similarities.round(2)
array([0.4, 0. ])
References
----------
    * Sørensen, T. (1948). A method of establishing groups of equal amplitude in plant sociology
based on similarity of species content and its application to analyses of the vegetation on Danish commons.
I kommission hos E. Munksgaard.
    * Martínez, V., Berzal, F., & Cubero, J. C. (2016).
`A survey of link prediction in complex networks.
<https://dl.acm.org/doi/pdf/10.1145/3012704>`_
ACM computing surveys (CSUR), 49(4), 1-33.
"""
def __init__(self):
super(SorensenIndex, self).__init__()
def _predict_base(self, source: int, targets: Iterable):
"""Prediction for a single node."""
return np.asarray(sorensen_node_core(self.indptr_, self.indices_, np.int32(source), np.array(targets, dtype=np.int32)))
def _predict_edges(self, edges: np.ndarray):
"""Prediction for multiple edges."""
return np.asarray(sorensen_edges_core(self.indptr_, self.indices_, edges.astype(np.int32)))
class HubPromotedIndex(FirstOrder):
"""Link prediction by Hub Promoted Index:
:math:`s(i, j) = \\dfrac{2|\\Gamma_i \\cap \\Gamma_j|}{min(|\\Gamma_i|,|\\Gamma_j|)}`.
Attributes
----------
indptr_ : np.ndarray
Pointer index for neighbors.
indices_ : np.ndarray
Concatenation of neighbors.
Examples
--------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> hpi = HubPromotedIndex()
>>> similarities = hpi.fit_predict(adjacency, 0)
>>> similarities.round(2)
array([1. , 0.5, 0.5, 0.5, 0.5])
>>> similarities = hpi.predict([0, 1])
>>> similarities.round(2)
array([[1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[0.5 , 1. , 0. , 1. , 0.33]])
>>> similarities = hpi.predict((0, 1))
>>> similarities.round(2)
0.5
>>> similarities = hpi.predict([(0, 1), (1, 2)])
>>> similarities.round(2)
array([0.5, 0. ])
References
----------
    Ravasz, E., Somera, A. L., Mongru, D. A., Oltvai, Z. N., & Barabási, A.-L. (2002).
`Hierarchical organization of modularity in metabolic networks.
<https://arxiv.org/pdf/cond-mat/0209244.pdf>`_
science, 297(5586), 1551-1555.
"""
def __init__(self):
super(HubPromotedIndex, self).__init__()
def _predict_base(self, source: int, targets: Iterable):
"""Prediction for a single node."""
return np.asarray(hub_promoted_node_core(self.indptr_, self.indices_, np.int32(source),
np.array(targets, dtype=np.int32)))
def _predict_edges(self, edges: np.ndarray):
"""Prediction for multiple edges."""
return np.asarray(hub_promoted_edges_core(self.indptr_, self.indices_, edges.astype(np.int32)))
class HubDepressedIndex(FirstOrder):
"""Link prediction by Hub Depressed Index:
:math:`s(i, j) = \\dfrac{2|\\Gamma_i \\cap \\Gamma_j|}{max(|\\Gamma_i|,|\\Gamma_j|)}`.
Attributes
----------
indptr_ : np.ndarray
Pointer index for neighbors.
indices_ : np.ndarray
Concatenation of neighbors.
Examples
--------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> hdi = HubDepressedIndex()
>>> similarities = hdi.fit_predict(adjacency, 0)
>>> similarities.round(2)
array([1. , 0.33, 0.5 , 0.5 , 0.33])
>>> similarities = hdi.predict([0, 1])
>>> similarities.round(2)
array([[1. , 0.33, 0.5 , 0.5 , 0.33],
[0.33, 1. , 0. , 0.67, 0.33]])
>>> similarities = hdi.predict((0, 1))
>>> similarities.round(2)
0.33
>>> similarities = hdi.predict([(0, 1), (1, 2)])
>>> similarities.round(2)
array([0.33, 0. ])
References
----------
    Ravasz, E., Somera, A. L., Mongru, D. A., Oltvai, Z. N., & Barabási, A.-L. (2002).
`Hierarchical organization of modularity in metabolic networks.
<https://arxiv.org/pdf/cond-mat/0209244.pdf>`_
science, 297(5586), 1551-1555.
"""
def __init__(self):
super(HubDepressedIndex, self).__init__()
def _predict_base(self, source: int, targets: Iterable):
"""Prediction for a single node."""
return np.asarray(hub_depressed_node_core(self.indptr_, self.indices_, np.int32(source),
np.array(targets, dtype=np.int32)))
def _predict_edges(self, edges: np.ndarray):
"""Prediction for multiple edges."""
return np.asarray(hub_depressed_edges_core(self.indptr_, self.indices_, edges.astype(np.int32)))
class AdamicAdar(FirstOrder):
"""Link prediction by Adamic-Adar index:
:math:`s(i, j) = \\underset{z \\in \\Gamma_i \\cap \\Gamma_j}{\\sum} \\dfrac{1}{\\log |\\Gamma_z|}`.
Attributes
----------
indptr_ : np.ndarray
Pointer index for neighbors.
indices_ : np.ndarray
Concatenation of neighbors.
Examples
--------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> aa = AdamicAdar()
>>> similarities = aa.fit_predict(adjacency, 0)
>>> similarities.round(2)
array([1.82, 0.91, 0.91, 0.91, 0.91])
>>> similarities = aa.predict([0, 1])
>>> similarities.round(2)
array([[1.82, 0.91, 0.91, 0.91, 0.91],
[0.91, 3.8 , 0. , 2.35, 1.44]])
>>> similarities = aa.predict((0, 1))
>>> similarities.round(2)
0.91
>>> similarities = aa.predict([(0, 1), (1, 2)])
>>> similarities.round(2)
array([0.91, 0. ])
References
----------
    Adamic, L. A., & Adar, E. (2003). `Friends and neighbors on the web.
<https://www.sciencedirect.com/science/article/pii/S0378873303000091>`_
Social networks, 25(3), 211-230.
"""
def __init__(self):
super(AdamicAdar, self).__init__()
def _predict_base(self, source: int, targets: Iterable):
"""Prediction for a single node."""
return np.asarray(adamic_adar_node_core(self.indptr_, self.indices_, np.int32(source),
                                                np.array(targets, dtype=np.int32)))
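    def _predict_edges(self, edges: np.ndarray):
        """Prediction for multiple edges."""
        # NOTE: the source is truncated at this point; this method is reconstructed by analogy
        # with the sibling classes above, using the already-imported adamic_adar_edges_core.
        return np.asarray(adamic_adar_edges_core(self.indptr_, self.indices_, edges.astype(np.int32)))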
def test_add():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_iadd():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_subtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_isubtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_divide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_idivide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_multiply():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
    assert np.array_equal(result, reference)
import numba
import numpy as np
import argparse
import time
run_parallel = numba.config.NUMBA_NUM_THREADS > 1
@numba.njit(parallel=run_parallel, fastmath=True)
def logistic_regression(Y, X, w, iterations):
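    # Descriptive note: each iteration is one full-batch gradient-descent step (unit step size)
    # on the logistic loss L(w) = sum_i log(1 + exp(-y_i * x_i.w)); the gradient is
    # sum_i (sigmoid(y_i * x_i.w) - 1) * y_i * x_i, which is exactly the np.dot expression below.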
for i in range(iterations):
w -= np.dot(((1.0 / (1.0 + np.exp(-Y * np.dot(X, w))) - 1.0) * Y), X)
return w
def main():
parser = argparse.ArgumentParser(description='Logistic Regression.')
parser.add_argument('--dimension', dest='dimension', type=int, default=10)
parser.add_argument('--points', dest='points', type=int, default=20000000)
parser.add_argument('--iterations', dest='iterations',
type=int, default=30)
args = parser.parse_args()
np.random.seed(0)
D = 3
N = 4
iterations = 10
points = np.ones((N, D))
labels = np.ones(N)
w = 2.0 * np.ones(D) - 1.3
t1 = time.time()
w = logistic_regression(labels, points, w, iterations)
compiletime = time.time() - t1
print("SELFPRIMED ", compiletime)
print("checksum ", w)
D = args.dimension
N = args.points
iterations = args.iterations
print("D=", D, " N=", N, " iterations=", iterations)
    points = np.random.random((N, D))
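    # The source is truncated here; a plausible completion of main(), mirroring the warm-up
    # block above (assumed for illustration, not verbatim from the original file):
    labels = np.random.random(N)
    w = 2.0 * np.ones(D) - 1.3
    t1 = time.time()
    w = logistic_regression(labels, points, w, iterations)
    selftimed = time.time() - t1
    print("SELFTIMED ", selftimed)
    print("checksum: ", np.sum(w))
if __name__ == '__main__':
    main()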
import copy
import sys
import time
import traceback
import pickle as pickle
import ctypes
import numpy as np
import scipy.interpolate
import xml.etree.ElementTree as xml
from sco_py.expr import *
import core.util_classes.common_constants as const
if const.USE_OPENRAVE:
pass
else:
import pybullet as P
from opentamp.src.policy_hooks.sample_list import SampleList
import opentamp
from opentamp.envs import MJCEnv
import core.util_classes.items as items
from core.util_classes.namo_grip_predicates import dsafe, NEAR_TOL, dmove, HLGraspFailed, HLTransferFailed, HLPlaceFailed, HANDLE_OFFSET
from core.util_classes.openrave_body import OpenRAVEBody
from core.util_classes.viewer import OpenRAVEViewer
from opentamp.src.policy_hooks.agent import Agent
from opentamp.src.policy_hooks.sample import Sample
from opentamp.src.policy_hooks.utils.policy_solver_utils import *
import policy_hooks.utils.policy_solver_utils as utils
from opentamp.src.policy_hooks.utils.tamp_eval_funcs import *
# from opentamp.src.policy_hooks.namo.sorting_prob_4 import *
from opentamp.src.policy_hooks.namo.namo_agent import NAMOSortingAgent
from opentamp.src.policy_hooks.namo.grip_agent import NAMOGripAgent
LOCAL_NEAR_TOL = 0.5
MAX_SAMPLELISTS = 1000
MAX_TASK_PATHS = 100
GRIP_TOL = 0.
MIN_STEP = 1e-2
LIDAR_DIST = 2.
# LIDAR_DIST = 1.5
DSAFE = 5e-1
MAX_STEP = max(1.5*dmove, 1)
LOCAL_FRAME = True
NAMO_XML = os.getcwd() + '/opentamp' + '/robot_info/lidar_namo.xml'
class optimal_pol:
def __init__(self, dU, action_inds, state_inds, opt_traj):
self.dU = dU
self.action_inds = action_inds
self.state_inds = state_inds
self.opt_traj = opt_traj
def act(self, X, O, t, noise):
u = np.zeros(self.dU)
if t < len(self.opt_traj) - 1:
for param, attr in self.action_inds:
if attr == 'gripper':
u[self.action_inds[param, attr]] = self.opt_traj[t, self.state_inds[param, attr]]
elif attr == 'pose':
x, y = self.opt_traj[t+1, self.state_inds['pr2', 'pose']]
curx, cury = X[self.state_inds['pr2', 'pose']]
theta = -self.opt_traj[t, self.state_inds['pr2', 'theta']][0]
if LOCAL_FRAME:
relpos = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]).dot([x-curx, y-cury])
else:
relpos = [x-curx, y-cury]
u[self.action_inds['pr2', 'pose']] = relpos
elif attr == 'vel':
vel = self.opt_traj[t+1, self.state_inds['pr2', 'vel']] # np.linalg.norm(self.opt_traj[t+1, inds]-X[inds])
vel = np.linalg.norm(self.opt_traj[t+1, self.state_inds['pr2', 'pose']] - X[self.state_inds['pr2', 'pose']])
if self.opt_traj[t+1, self.state_inds['pr2', 'vel']] < 0:
vel *= -1
u[self.action_inds[param, attr]] = vel
elif attr == 'theta':
u[self.action_inds[param, attr]] = self.opt_traj[t+1, self.state_inds[param, attr]] - X[self.state_inds[param, attr]]
else:
u[self.action_inds[param, attr]] = self.opt_traj[t+1, self.state_inds[param, attr]] - X[self.state_inds[param, attr]]
else:
u[self.action_inds['pr2', 'gripper']] = self.opt_traj[-1, self.state_inds['pr2', 'gripper']]
if np.any(np.isnan(u)):
u[np.isnan(u)] = 0.
return u
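# Added sketch: with LOCAL_FRAME enabled, act() rotates the world-frame offset
# [x - curx, y - cury] into the robot's local frame via the 2-D rotation R(-theta).
# The helper below is illustrative only (assumed toy values, not part of the pipeline).
def _example_local_frame_rotation():
    theta = np.pi / 2
    rot = np.array([[np.cos(-theta), -np.sin(-theta)], [np.sin(-theta), np.cos(-theta)]])
    return rot.dot([1.0, 0.0])  # -> approximately [0., -1.]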
class NAMODoorAgent(NAMOGripAgent):
def __init__(self, hyperparams):
super(NAMOSortingAgent, self).__init__(hyperparams)
self.optimal_pol_cls = optimal_pol
for plan in list(self.plans.values()):
for t in range(plan.horizon):
plan.params['obs0'].pose[:,t] = plan.params['obs0'].pose[:,0]
self.check_col = hyperparams['master_config'].get('check_col', True)
self.robot_height = 1
self.use_mjc = hyperparams.get('use_mjc', False)
self.vel_rat = 0.05
wall_dims = OpenRAVEBody.get_wall_dims('closet')
config = {
'obs_include': ['can{0}'.format(i) for i in range(hyperparams['num_objs'])],
'include_files': [NAMO_XML],
'include_items': [],
'view': False,
'sim_freq': 25,
'timestep': 0.002,
'image_dimensions': (hyperparams['image_width'], hyperparams['image_height']),
'step_mult': 5e0,
'act_jnts': ['robot_x', 'robot_y', 'robot_theta', 'right_finger_joint', 'left_finger_joint']
}
self.main_camera_id = 0
colors = [[0.9, 0, 0, 1], [0, 0.9, 0, 1], [0, 0, 0.9, 1], [0.7, 0.7, 0.1, 1], [1., 0.1, 0.8, 1], [0.5, 0.95, 0.5, 1], [0.75, 0.4, 0, 1], [0.25, 0.25, 0.5, 1], [0.5, 0, 0.25, 1], [0, 0.5, 0.75, 1], [0, 0, 0.5, 1]]
items = config['include_items']
prim_options = self.prob.get_prim_choices(self.task_list)
for name in prim_options[OBJ_ENUM]:
if name =='pr2': continue
cur_color = colors.pop(0)
items.append({'name': name, 'type': 'cylinder', 'is_fixed': False, 'pos': (0, 0, 0.5), 'dimensions': (0.3, 0.2), 'rgba': tuple(cur_color), 'mass': 10.})
items.append({'name': '{0}_end_target'.format(name), 'type': 'box', 'is_fixed': True, 'pos': (0, 0, 1.5), 'dimensions': (NEAR_TOL, NEAR_TOL, 0.05), 'rgba': tuple(cur_color), 'mass': 1.})
for i in range(len(wall_dims)):
dim, next_trans = wall_dims[i]
next_trans[0,3] -= 3.5
next_dim = dim # [dim[1], dim[0], dim[2]]
pos = next_trans[:3,3] # [next_trans[1,3], next_trans[0,3], next_trans[2,3]]
items.append({'name': 'wall{0}'.format(i), 'type': 'box', 'is_fixed': True, 'pos': pos, 'dimensions': next_dim, 'rgba': (0.2, 0.2, 0.2, 1)})
items.append({'name': 'door', 'type': 'door_2d', 'handle_offset': HANDLE_OFFSET, 'handle_dims': (0.325, 0.4), 'door_dims': (0.75, 0.1, 0.4), 'hinge_pos': (-1., 3., 0.5), 'is_fixed': True})
no = self._hyperparams['num_objs']
nt = self._hyperparams['num_targs']
config['load_render'] = hyperparams['master_config'].get('load_render', False)
config['xmlid'] = '{0}_{1}_{2}_{3}'.format(self.process_id, self.rank, no, nt)
self.mjc_env = MJCEnv.load_config(config)
self.targ_labels = {i: np.array(self.prob.END_TARGETS[i]) for i in range(len(self.prob.END_TARGETS))}
self.targ_labels.update({i: self.targets[0]['aux_target_{0}'.format(i-no)] for i in range(no, no+self.prob.n_aux)})
def get_state(self):
x = np.zeros(self.dX)
for pname, attr in self.state_inds:
if attr == 'pose':
if pname.find('door') >= 0:
val = self.mjc_env.get_item_pos('door_base')
x[self.state_inds[pname, attr]] = val[:2]
else:
val = self.mjc_env.get_item_pos(pname)
x[self.state_inds[pname, attr]] = val[:2]
elif attr == 'rotation':
val = self.mjc_env.get_item_rot(pname)
x[self.state_inds[pname, attr]] = val
elif attr == 'gripper':
vals = self.mjc_env.get_joints(['left_finger_joint','right_finger_joint'])
val1 = vals['left_finger_joint']
val2 = vals['right_finger_joint']
val = (val1 + val2) / 2.
x[self.state_inds[pname, attr]] = 0.1 if val > 0 else -0.1
elif attr == 'theta':
if pname.find('door') >= 0:
val = self.mjc_env.get_joints(['door_hinge'])
x[self.state_inds[pname, 'theta']] = val['door_hinge']
else:
val = self.mjc_env.get_joints(['robot_theta'])
x[self.state_inds[pname, 'theta']] = val['robot_theta']
elif attr == 'vel':
val = self.mjc_env.get_user_data('vel', 0.)
x[self.state_inds[pname, 'vel']] = val
assert not np.any(np.isnan(x))
return x
def fill_sample(self, cond, sample, mp_state, t, task, fill_obs=False, targets=None):
if task is None:
task = list(self.plans.keys())[0]
mp_state = mp_state.copy()
onehot_task = tuple([val for val in task if np.isscalar(val)])
plan = self.plans[onehot_task]
ee_pose = mp_state[self.state_inds['pr2', 'pose']]
if targets is None:
targets = self.target_vecs[cond].copy()
sample.set(EE_ENUM, ee_pose, t)
theta = mp_state[self.state_inds['pr2', 'theta']][0]
sample.set(THETA_ENUM, mp_state[self.state_inds['pr2', 'theta']], t)
dirvec = np.array([-np.sin(theta), np.cos(theta)])
sample.set(THETA_VEC_ENUM, dirvec, t)
sample.set(VEL_ENUM, mp_state[self.state_inds['pr2', 'vel']], t)
doortheta = mp_state[self.state_inds['door', 'theta']][0]
handle_pos = mp_state[self.state_inds['door', 'pose']] + [1.5*np.sin(doortheta), 1.5*np.cos(doortheta)]
sample.set(DOOR_ENUM, handle_pos, t)
sample.set(DOOR_THETA_ENUM, mp_state[self.state_inds['door', 'theta']], t)
sample.set(STATE_ENUM, mp_state, t)
sample.set(GRIPPER_ENUM, mp_state[self.state_inds['pr2', 'gripper']], t)
if self.hist_len > 0:
sample.set(TRAJ_HIST_ENUM, self._prev_U.flatten(), t)
x_delta = self._x_delta[1:] - self._x_delta[:1]
sample.set(STATE_DELTA_ENUM, x_delta.flatten(), t)
sample.set(STATE_HIST_ENUM, self._x_delta.flatten(), t)
if self.task_hist_len > 0:
sample.set(TASK_HIST_ENUM, self._prev_task.flatten(), t)
prim_choices = self.prob.get_prim_choices(self.task_list)
task_ind = task[0]
task_name = self.task_list[task_ind]
task_vec = np.zeros((len(self.task_list)), dtype=np.float32)
task_vec[task[0]] = 1.
sample.task_ind = task[0]
sample.set(TASK_ENUM, task_vec, t)
#post_cost = self.cost_f(sample.get_X(t=t), task, cond, active_ts=(sample.T-1, sample.T-1), targets=targets)
#done = np.ones(1) if post_cost == 0 else np.zeros(1)
#sample.set(DONE_ENUM, done, t)
sample.set(DONE_ENUM, np.zeros(1), t)
sample.set(TASK_DONE_ENUM, np.array([1, 0]), t)
grasp = np.array([0, -0.601])
onehottask = tuple([val for val in task if np.isscalar(val)])
sample.set(FACTOREDTASK_ENUM, np.array(onehottask), t)
theta = mp_state[self.state_inds['pr2', 'theta']][0]
if LOCAL_FRAME:
rot = np.array([[np.cos(-theta), -np.sin(-theta)],
[np.sin(-theta), np.cos(-theta)]])
else:
rot = np.eye(2)
if OBJ_ENUM in prim_choices:
obj_vec = np.zeros((len(prim_choices[OBJ_ENUM])), dtype='float32')
obj_vec[task[1]] = 1.
if task_name.find('door') >= 0:
obj_vec[:] = 1. / len(obj_vec)
sample.obj_ind = task[1]
obj_ind = task[1]
obj_name = list(prim_choices[OBJ_ENUM])[obj_ind]
sample.set(OBJ_ENUM, obj_vec, t)
obj_pose = mp_state[self.state_inds[obj_name, 'pose']] - mp_state[self.state_inds['pr2', 'pose']]
base_pos = obj_pose
obj_pose = rot.dot(obj_pose)
sample.set(OBJ_POSE_ENUM, obj_pose.copy(), t)
sample.obj = task[1]
if self.task_list[task[0]].find('move') >= 0:
sample.set(END_POSE_ENUM, obj_pose, t)
sample.set(REL_POSE_ENUM, base_pos, t)
sample.set(ABS_POSE_ENUM, mp_state[self.state_inds[obj_name, 'pose']], t)
if TARG_ENUM in prim_choices:
targ_vec = np.zeros((len(prim_choices[TARG_ENUM])), dtype='float32')
targ_vec[task[2]] = 1.
if self.task_list[task[0]].find('door') >= 0 or self.task_list[task[0]].find('move') >= 0:
targ_vec[:] = 1. / len(targ_vec)
sample.targ_ind = task[2]
targ_ind = task[2]
targ_name = list(prim_choices[TARG_ENUM])[targ_ind]
sample.set(TARG_ENUM, targ_vec, t)
targ_pose = targets[self.target_inds[targ_name, 'value']] - mp_state[self.state_inds['pr2', 'pose']]
targ_off_pose = targets[self.target_inds[targ_name, 'value']] - mp_state[self.state_inds[obj_name, 'pose']]
base_pos = targ_pose
targ_pose = rot.dot(targ_pose)
targ_off_pose = rot.dot(targ_off_pose)
sample.set(TARG_POSE_ENUM, targ_pose.copy(), t)
sample.targ = task[2]
if self.task_list[task[0]].find('put') >= 0 or self.task_list[task[0]].find('leave') >= 0:
sample.set(END_POSE_ENUM, targ_pose, t)
sample.set(REL_POSE_ENUM, base_pos, t)
sample.set(ABS_POSE_ENUM, targets[self.target_inds[targ_name, 'value']], t)
if self.task_list[task[0]].find('put') >= 0 or self.task_list[task[0]].find('leave') >= 0:
targ_pose = np.array([0., 5.5])
sample.set(END_POSE_ENUM, rot.dot(targ_pose-ee_pose), t)
sample.set(REL_POSE_ENUM, targ_pose, t)
sample.set(ABS_POSE_ENUM, targ_pose, t)
if task_name.find('close_door') >= 0 or task_name.find('open_door') >= 0:
theta = mp_state[self.state_inds['door', 'theta']][0] - 0.32
handle_pos = mp_state[self.state_inds['door', 'pose']] + [1.58*np.sin(theta), 1.58*np.cos(theta)]
targ_pose = handle_pos - mp_state[self.state_inds['pr2', 'pose']]
sample.set(END_POSE_ENUM, rot.dot(targ_pose), t)
sample.set(REL_POSE_ENUM, base_pos, t)
sample.set(ABS_POSE_ENUM, handle_pos[:2], t)
sample.set(TRUE_POSE_ENUM, sample.get(REL_POSE_ENUM, t=t), t)
if ABS_POSE_ENUM in prim_choices:
ind = list(prim_choices.keys()).index(ABS_POSE_ENUM)
if ind < len(task) and not np.isscalar(task[ind]):
sample.set(ABS_POSE_ENUM, task[ind], t)
sample.set(END_POSE_ENUM, rot.dot(task[ind] - mp_state[self.state_inds['pr2', 'pose']]), t)
if REL_POSE_ENUM in prim_choices:
ind = list(prim_choices.keys()).index(REL_POSE_ENUM)
if ind < len(task) and not np.isscalar(task[ind]):
sample.set(REL_POSE_ENUM, task[ind], t)
sample.set(END_POSE_ENUM, rot.dot(task[ind]), t)
if END_POSE_ENUM in prim_choices:
ind = list(prim_choices.keys()).index(END_POSE_ENUM)
if ind < len(task) and type(task[ind]) is not int:
sample.set(END_POSE_ENUM, task[ind], t)
sample.task = task
sample.condition = cond
sample.task_name = self.task_list[task[0]]
sample.set(TARGETS_ENUM, targets.copy(), t)
sample.set(GOAL_ENUM, np.concatenate([targets[self.target_inds['{0}_end_target'.format(o), 'value']] for o in prim_choices[OBJ_ENUM]]), t)
if ONEHOT_GOAL_ENUM in self._hyperparams['sensor_dims']:
sample.set(ONEHOT_GOAL_ENUM, self.onehot_encode_goal(sample.get(GOAL_ENUM, t)), t)
sample.targets = targets.copy()
for i, obj in enumerate(prim_choices[OBJ_ENUM]):
sample.set(OBJ_ENUMS[i], mp_state[self.state_inds[obj, 'pose']], t)
targ = targets[self.target_inds['{0}_end_target'.format(obj), 'value']]
sample.set(OBJ_DELTA_ENUMS[i], mp_state[self.state_inds[obj, 'pose']]-ee_pose, t)
sample.set(TARG_ENUMS[i], targ, t)
sample.set(TARG_DELTA_ENUMS[i], targ-mp_state[self.state_inds[obj, 'pose']], t)
if INGRASP_ENUM in self._hyperparams['sensor_dims']:
vec = np.zeros(len(prim_choices[OBJ_ENUM]))
for i, o in enumerate(prim_choices[OBJ_ENUM]):
                if np.all(np.abs(mp_state[self.state_inds[o, 'pose']] - mp_state[self.state_inds['pr2', 'pose']] - grasp) < NEAR_TOL):  # NEAR_TOL threshold assumed; line completed from truncated source
                    vec[i] = 1.
            sample.set(INGRASP_ENUM, vec, t)
import tensorflow as tf
import numpy as np
import time
import scipy.sparse as sp
from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve, auc
from preprocessing import construct_feed_dict
from outputs import viz_train_val_data, viz_roc_pr_curve, max_gmean_thresh
def train_test_model(adj_norm, adj_label, features, adj_orig, FLAGS, crossval_edges, placeholders, opt, model, model_str, model_timestamp, adj, test_edges, test_edges_false):
acc_cv, ap_cv, roc_cv, f_cv, acc_init_cv, ap_init_cv, roc_init_cv, f_init_cv = ([] for i in range(8))
feed_dict = None
adj_pred = None
iterations = 1 + FLAGS.crossvalidation * (len(adj) - 1)
for cv_set in range(iterations):
print("\nCV run " + str(cv_set+1) + " of " + str(iterations) + "...")
# Construct feed dictionary
feed_dict = construct_feed_dict(adj_norm[cv_set], adj_label[cv_set], features, placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Train model
acc_last, ap_last, roc_last, f_last, acc_init, ap_init, roc_init, f_init, opt_thresh, adj_pred = train_model(adj_orig, FLAGS, [x[cv_set] for x in crossval_edges],
placeholders, opt, model, feed_dict, model_str, model_timestamp)
for x,l in zip([acc_last, ap_last, roc_last, f_last, acc_init, ap_init, roc_init, f_init], [acc_cv, ap_cv, roc_cv, f_cv, acc_init_cv, ap_init_cv, roc_init_cv, f_init_cv]):
l.append(x)
if FLAGS.crossvalidation:
cv_str = str(iterations) + " fold CV "
else:
cv_str = "Validation "
print("\n" + cv_str + "ROC AUC score: " + str(np.round(np.mean(roc_cv), 2)) + " with SD: " + str(np.round(np.std(roc_cv),2)))
print(cv_str + "Average Precision: " + str(np.round(np.mean(ap_cv), 2)) + " with SD: " + str(np.round(np.std(ap_cv),2)))
print(cv_str + "Accuracy: " + str(np.round(np.mean(acc_cv), 2)) + " with SD: " + str(np.round(np.std(acc_cv),2)))
print(cv_str + "F1: " + str(np.round(np.mean(f_cv), 2)) + " with SD: " + str(np.round(np.std(f_cv),2)))
#print('\nTest ROC score: ' + str(np.round(test_roc,2)))
#print('Test AP score: ' + str(np.round(test_ap,2)))
#print('Test accuracy (threshold= ' + str(opt_thresh) + "): " + str(np.round(test_acc,2)))
#print('\nRandom Control ROC score: ' + str(np.round(random_roc,2)))
#print('Random Control AP score: ' + str(np.round(random_ap,2)))
#print('Random Control accuracy: ' + str(np.round(random_acc,2)))
#print('\nAverage Init ROC score: ' + str(np.round(np.mean(roc_init_cv),2)))
#print('Average Init AP score: ' + str(np.round(np.mean(ap_init_cv),2)))
#print('Average Init accuracy: ' + str(np.round(np.mean(acc_init_cv),2)) + '\n')
return np.mean(acc_cv), np.mean(ap_cv), np.mean(roc_cv), np.mean(f_cv), adj_pred
def train_model(adj_orig, FLAGS, edges, placeholders, opt, model, feed_dict, model_str, model_timestamp):
# Initialize session
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
loss_train, kl_train, acc_train, ap_train, roc_train, f_train, loss_val, acc_val, ap_val, roc_val, f_val = ([] for i in range(11))
hist_scores = [loss_train, kl_train, acc_train, ap_train, roc_train, f_train, loss_val, acc_val, ap_val, roc_val, f_val]
#initial metrics
train_edges, train_edges_false, val_edges, val_edges_false = edges
adj_pred = predict_adj(feed_dict, sess, model, model_timestamp, placeholders)
_, _, train_loss, train_acc, train_ap, train_roc, _, train_f, opt_thresh = get_scores(adj_pred, adj_orig, train_edges, train_edges_false, model_timestamp)
_, _, val_loss, val_acc, val_ap, val_roc, _, val_f, _ = get_scores(adj_pred, adj_orig, val_edges, val_edges_false, model_timestamp, thresh=opt_thresh)
train_kl = None
scores = [train_loss, train_kl, train_acc, train_ap, train_roc, train_f, val_loss, val_acc, val_ap, val_roc, val_f]
for x, l in zip(scores, hist_scores):
l.append(x)
for epoch in range(FLAGS.epochs):
t = time.time()
# Run single weight update
if model_str == 'gcn_vae':
outs = sess.run([opt.opt_op, opt.cost, opt.accuracy, opt.kl], feed_dict=feed_dict)
else:
outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)
# Compute metrics
adj_pred = predict_adj(feed_dict, sess, model, model_timestamp, placeholders)
ctrl_cost = outs[1]
ctrl_accuracy = outs[2]
if model_str == 'gcn_vae':
train_kl = outs[3]
else:
train_kl = 0
_, total_train_acc, train_loss, train_acc, train_ap, train_roc, train_rp, train_f, opt_thresh = get_scores(adj_pred, adj_orig, train_edges, train_edges_false, model_timestamp)
_, _, val_loss, val_acc, val_ap, val_roc, val_rp, val_f, _ = get_scores(adj_pred, adj_orig, val_edges, val_edges_false, model_timestamp, thresh=opt_thresh)
scores = [train_loss, train_kl, train_acc, train_ap, train_roc, train_f, val_loss, val_acc, val_ap, val_roc, val_f]
for x, l in zip(scores, hist_scores):
l.append(x)
print("Epoch:", '%04d' % (epoch + 1),
#"time=", "{:.5f}".format(time.time() - t),
"train_loss=", "{:.5f}".format(train_loss),
#"train_loss_control=", "{:.5f}".format(ctrl_cost),
#"recon loss=", "{:.5f}".format(train_loss-train_kl), "kl_loss=", "{:.5f}".format(train_kl),
"val_loss=", "{:.5f}".format(val_loss),
#"train_acc_control=", "{:.5f}".format(ctrl_accuracy),
#"total_train_acc=", "{:.5f}".format(total_train_acc),
"train_acc=", "{:.5f}".format(train_acc),
#"train_ap=", "{:.5f}".format(train_ap), "train_roc=", "{:.5f}".format(train_roc),
"val_acc=", "{:.5f}".format(val_acc),
"val_ap=", "{:.5f}".format(val_ap),
"val_rp=", "{:.5f}".format(val_rp),
"val_roc=", "{:.5f}".format(val_roc),
"val_f=", "{:.5f}".format(val_f))
if epoch > FLAGS.early_stopping and loss_val[-1] > np.mean(loss_val[-(FLAGS.early_stopping+1):-1]):
print("\nEarly stopping...")
break
# Plot training & validation metrics
viz_train_val_data(hist_scores, model_str, model_timestamp)
#Get final predicted adj matrix
adj_pred = sigmoid(predict_adj(feed_dict, sess, model, model_timestamp, placeholders))
#Resulting ROC curve
#viz_roc = True
#get_scores(adj_pred, adj_orig, test_edges, test_edges_false, model_timestamp, viz_roc=viz_roc, thresh=opt_thresh)
#best_epoch = roc_val.index(max(roc_val))
#print("Best Epoch (ROC): " + str(best_epoch))
##added to save val edges and val edges false to get scores of the correct val edges
print("Optimization Finished!")
sess.close()
return acc_val[-1], ap_val[-1], roc_val[-1], f_val[-1], acc_val[0], ap_val[0], roc_val[0], f_val[0], opt_thresh, adj_pred
def predict_adj(feed_dict, sess, model, model_timestamp, placeholders, emb=None):
if emb is None:
feed_dict.update({placeholders['dropout']: 0})
emb = sess.run(model.z_mean, feed_dict=feed_dict)
adj_rec = np.dot(emb, emb.T)
return adj_rec
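# Added note: the reconstruction above is the standard (V)GAE inner-product decoder,
# A_hat = sigmoid(Z Z^T); only Z Z^T is returned here and the sigmoid is applied
# downstream (e.g. in get_scores / train_model).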
def get_scores(adj_rec, adj_orig, edges_pos, edges_neg, model_timestamp, viz_roc=False, random=False, thresh=None):
if random:
adj_rec = random_adj(adj_rec, (adj_orig.sum()-adj_rec.sum())/2, mode="random_normal")
preds = []
pos = []
for e in edges_pos:
preds.append(sigmoid(adj_rec[e[0], e[1]]))
pos.append(adj_orig[e[0], e[1]])
preds_neg = []
neg = []
for e in edges_neg:
preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))
neg.append(adj_orig[e[0], e[1]])
preds_all = np.hstack([preds, preds_neg])
labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])
roc_score = roc_auc_score(labels_all, preds_all)
ap_score = average_precision_score(labels_all, preds_all)
precision, recall, _ = precision_recall_curve(labels_all, preds_all)
rp_auc = auc(recall, precision)
f_score = 2 * (np.mean(precision) * np.mean(recall)) / (np.mean(precision) + np.mean(recall))
if viz_roc:
viz_roc_pr_curve(preds_all, labels_all, model_timestamp)
if thresh is None:
fpr, tpr, thresholds = roc_curve(labels_all, preds_all)
thresh, _, _, _ = max_gmean_thresh(fpr, tpr, thresholds)
#Total accuracy and loss
adj_curr = adj_from_edges(edges_pos, adj_orig.shape)
adj_curr = adj_curr.reshape(1, -1)
adj_rec = adj_rec.reshape(1, -1)
pos_weight = float(adj_orig.shape[0] * adj_orig.shape[0] - (edges_pos.shape[0] * 2)) / (edges_pos.shape[0] * 2)
norm = adj_orig.shape[0] * adj_orig.shape[0] / float((adj_orig.shape[0] * adj_orig.shape[0] - (edges_pos.shape[0] * 2)) * 2)
cost_total = norm * np.mean(weighted_cross_entropy_with_logits(adj_curr, adj_rec, pos_weight))
correct_prediction = (sigmoid(adj_rec) > thresh) == adj_curr
accuracy_total = np.mean(correct_prediction)
#Subset accuracy and loss
test_mask = adj_from_edges(np.vstack([edges_pos, edges_neg]), adj_orig.shape, diag=0)
test_mask = test_mask.reshape(1, -1)
accuracy = np.mean(correct_prediction[test_mask==1])
cost = np.mean(weighted_cross_entropy_with_logits(adj_curr[test_mask==1], adj_rec[test_mask==1], 1))
return cost_total, accuracy_total, cost, accuracy, ap_score, roc_score, rp_auc, f_score, thresh
def weighted_cross_entropy_with_logits(label, pred, pos_weight):
return ((1 - label) * pred + (1 + (pos_weight - 1) * label) * (np.log(1 + np.exp(-abs(pred))) + np.maximum(-pred, 0)))
def sigmoid(x):
return 1 / (1 + | np.exp(-x) | numpy.exp |
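# Added sketch: weighted_cross_entropy_with_logits above follows the numerically stable form
#   (1 - z)*x + (1 + (q - 1)*z) * (log(1 + exp(-|x|)) + max(-x, 0)),
# as used by tf.nn.weighted_cross_entropy_with_logits. The helper below is an illustrative
# sanity check with assumed toy values (it is not called by the training pipeline).
def _sanity_check_weighted_ce():
    z, x, q = np.array([1., 0.]), np.array([2., -3.]), 5.0
    naive = -q * z * np.log(sigmoid(x)) - (1 - z) * np.log(1 - sigmoid(x))
    assert np.allclose(weighted_cross_entropy_with_logits(z, x, q), naive)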
#
# Let's use this python script to:
# - perform transit "Quick fit"
# - Search for dips in a lightcurve which match the one detected
# - Search for secondaries
# - Perform quick/dirty EB modelling
#
import numpy as np
import pandas as pd
import pickle
import os
import glob
from copy import deepcopy
from datetime import datetime
from scipy import optimize
import exoplanet as xo
import scipy.interpolate as interp
import scipy.optimize as optim
import matplotlib.pyplot as plt
import matplotlib
from astropy.coordinates import SkyCoord
from astropy import units as u
import seaborn as sns
import logging
logging.getLogger('matplotlib.font_manager').disabled = True
MonoData_tablepath = os.path.join('/'.join(os.path.dirname( __file__ ).split('/')[:-1]),'data','tables')
if os.environ.get('MONOTOOLSPATH') is None:
MonoData_savepath = os.path.join('/'.join(os.path.dirname( __file__ ).split('/')[:-1]),'data')
else:
MonoData_savepath = os.environ.get('MONOTOOLSPATH')
if not os.path.isdir(MonoData_savepath):
os.mkdir(MonoData_savepath)
os.environ["CXXFLAGS"]="-fbracket-depth=512" if not "CXXFLAGS" in os.environ else "-fbracket-depth=512,"+os.environ["CXXFLAGS"]
os.environ["CFLAGS"] = "-fbracket-depth=512" if not "CFLAGS" in os.environ else "-fbracket-depth=512,"+os.environ["CFLAGS"]
#creating new hidden directory for theano compilations:
theano_dir=MonoData_savepath+'/.theano_dir_'+str(np.random.randint(8))
theano_pars={'device':'cpu','floatX':'float64',
'base_compiledir':theano_dir,"gcc.cxxflags":"-fbracket-depth=1024"}
'''if MonoData_savepath=="/Users/hosborn/python/MonoToolsData" or MonoData_savepath=="/Volumes/LUVOIR/MonoToolsData":
theano_pars['cxx']='/usr/local/Cellar/gcc/9.3.0_1/bin/g++-9'
if MonoData_savepath=="/Users/hosborn/python/MonoToolsData" or MonoData_savepath=="/Volumes/LUVOIR/MonoToolsData":
theano_pars['cxx']='cxx=/Library/Developer/CommandLineTools/usr/bin/g++'
'''
if os.environ.get('THEANO_FLAGS') is None:
os.environ["THEANO_FLAGS"]=''
for key in theano_pars:
if key not in os.environ["THEANO_FLAGS"]:
os.environ["THEANO_FLAGS"] = os.environ["THEANO_FLAGS"]+','+key+"="+theano_pars[key]
import theano.tensor as tt
import pymc3 as pm
import theano
theano.config.print_test_value = True
theano.config.exception_verbosity='high'
from . import tools
from . import fit
def transit(pars,x):
log_per,b,t0,log_r_pl,u1,u2=pars
# Compute a limb-darkened light curve using starry
light_curve = (
xo.LimbDarkLightCurve([np.clip(u1,0,1),np.clip(u2,0,1)]).get_light_curve(orbit=xo.orbits.KeplerianOrbit(period=np.power(10,log_per), b=b, t0=t0),
r=np.power(10,log_r_pl), t=x).eval()
)
return light_curve.ravel()
def least_sq(pars,x,y,yerr):
model=transit(pars,x)
chisq=-1*np.sum((y-model)**2*yerr**-2)
return chisq
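# Added sketch: `least_sq` returns the *negative* chi-square, so a scipy-style minimiser should
# be pointed at its negation. The helper below is illustrative only; the starting parameters
# (ordered log_per, b, t0, log_r_pl, u1, u2, as in `transit`) are assumed placeholders.
def _example_transit_fit(x, y, yerr):
    pars0 = [np.log10(20.), 0.3, 0.0, np.log10(0.05), 0.3, 0.2]
    return optimize.minimize(lambda p: -least_sq(p, x, y, yerr), pars0, method="Nelder-Mead")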
def QuickMonoFit(lc,it0,dur,Rs=None,Ms=None,Teff=None,useL2=False,fit_poly=True,force_tdur=False,
polyorder=2, ndurs=4.2, how='mono', init_period=None,fluxindex='flux_flat',mask=None, **kwargs):
# Performs simple planet fit to monotransit dip given the detection data.
#Initial depth estimate:
dur=0.3 if dur/dur!=1.0 else dur #Fixing duration if it's broken/nan.
winsize=np.clip(dur*ndurs,0.75,3.5)
if mask is None and ((fluxindex=='flux_flat')|(fluxindex=='flux')):
mask=lc['mask']
elif mask is None:
mask=np.isfinite(lc[fluxindex])
timeindex='bin_time' if 'bin_' in fluxindex else 'time'
fluxerrindex='bin_flux_err' if 'bin_' in fluxindex else 'flux_err'
if how=='periodic':
assert init_period is not None
xinit=(lc[timeindex]-it0-init_period*0.5)%init_period-init_period*0.5
nearby=(abs(xinit)<winsize)
else:
xinit=lc[timeindex]-it0
nearby=abs(xinit)<winsize
assert np.sum(nearby)>0
cad = np.nanmedian(np.diff(lc[timeindex])) if 'bin_' in fluxindex else float(int(max(set(list(lc['cadence'][nearby])), key=list(lc['cadence'][nearby]).count)[1:]))/1440
if fluxindex=='flux_flat' and 'flux_flat' not in lc:
#Reflattening and masking transit:
lc=tools.lcFlatten(lc, winsize=9*dur, stepsize=0.1*dur, transit_mask=abs(xinit)>dur*0.5)
if how=='periodic':
assert np.sum(nearby&mask)>0
#print(lc[timeindex][nearby])
x = xinit[nearby&mask]+it0 #re-aligning this fake "mono" with the t0 provided
y = lc[fluxindex][nearby&mask][np.argsort(x)]
y-=np.nanmedian(y)
yerr=lc[fluxerrindex][nearby&mask][np.argsort(x)]
x=np.sort(x).astype(np.float64)
oot_flux=np.nanmedian(y[(abs(x-it0)>0.65*dur)])
int_flux=np.nanmedian(y[(abs(x-it0)<0.35*dur)])
#print(it0,dur,oot_flux,int_flux,abs(oot_flux-int_flux)/lc['flux_unit'],x,y,yerr)
fit_poly=False
init_poly=None
else:
#Mono case:
x=lc[timeindex][nearby&mask].astype(np.float64)
yerr=lc[fluxerrindex][nearby&mask]
if not fit_poly or np.sum(abs(x-it0)<0.6)==0.0:
y=lc[fluxindex][nearby&mask]
y-=np.nanmedian(y)
oot_flux=np.nanmedian(y[(abs(x-it0)>0.65*dur)])
int_flux=np.nanmedian(y[(abs(x-it0)<0.35*dur)])
fit_poly=False
init_poly=None
else:
y=lc[fluxindex][nearby&mask]
y-=np.nanmedian(y)
#print(np.sum(abs(x-it0)<0.6),it0,np.min(x),np.max(x))
init_poly=np.polyfit(x[abs(x-it0)>0.7*dur]-it0,y[abs(x-it0)>0.7*dur],polyorder)
oot_flux=np.nanmedian((y-np.polyval(init_poly,x-it0))[abs(x-it0)>0.65*dur])
int_flux=np.nanmedian((y-np.polyval(init_poly,x-it0))[abs(x-it0)<0.35*dur])
dep=abs(oot_flux-int_flux)*lc['flux_unit']
#print(dep,dur,it0,x,y,init_poly)
with pm.Model() as model:
# Parameters for the stellar properties
if fit_poly:
trend = pm.Normal("trend", mu=0, sd=10.0 ** -np.arange(polyorder+1)[::-1], shape=polyorder+1,testval=init_poly)
flux_trend = pm.Deterministic("flux_trend", tt.dot(np.vander(x - it0, polyorder+1), trend))
#trend = pm.Uniform("trend", upper=np.tile(1,polyorder+1), shape=polyorder+1,
# lower=np.tile(-1,polyorder+1), testval=init_poly)
#trend = pm.Normal("trend", mu=np.zeros(polyorder+1), sd=5*(10.0 ** -np.arange(polyorder+1)[::-1]),
# shape=polyorder+1, testval=np.zeros(polyorder+1))
#trend = pm.Uniform("trend", upper=np.tile(10,polyorder+1),lower=np.tile(-10,polyorder+1),
# shape=polyorder+1, testval=np.zeros(polyorder+1))
else:
mean = pm.Normal("mean", mu=0.0, sd=3*np.nanstd(y))
flux_trend = mean
r_star = Rs if Rs is not None and not np.isnan(Rs) else 1.0
m_star = Ms if Ms is not None and not np.isnan(Ms) else 1.0
Ts = Teff if Teff is not None and not np.isnan(Teff) else 5500.0
u_star = tools.getLDs(Ts)[0]
#xo.distributions.QuadLimbDark("u_star")
if how=='periodic' and init_period is not None:
log_per = pm.Normal("log_per", mu=np.log(init_period),sd=0.4)
else:
rhostar=m_star/r_star**3
init_per = abs(18226*rhostar*((2*np.sqrt((1+dep**0.5)**2-0.41**2))/dur)**-3)
# Orbital parameters for the planets
log_per = pm.Uniform("log_per", lower=np.log(dur*5),upper=np.log(3000),
testval=np.clip(np.log(init_per),np.log(dur*6),np.log(3000))
)
tcen = pm.Bound(pm.Normal, lower=it0-0.7*dur, upper=it0+0.7*dur)("tcen",
mu=it0,sd=0.25*dur,testval=it0)
b = pm.Uniform("b",upper=1.0,lower=0.0,testval=0.2)
log_ror = pm.Uniform("log_ror",lower=-6,upper=-0.5,testval=np.clip(0.5*np.log(dep),-6,-0.5))
ror = pm.Deterministic("ror", tt.exp(log_ror))
#ror, b = xo.distributions.get_joint_radius_impact(min_radius=0.0075, max_radius=0.25,
# testval_r=np.sqrt(dep), testval_b=0.41)
#logror = pm.Deterministic("logror",tt.log(ror))
#pm.Potential("ror_prior", -logror) #Prior towards larger logror
r_pl = pm.Deterministic("r_pl", ror*r_star*109.1)
period = pm.Deterministic("period", tt.exp(log_per))
# Orbit model
orbit = xo.orbits.KeplerianOrbit(r_star=r_star,m_star=m_star,
period=period,t0=tcen,b=b)
vx, vy, vz = orbit.get_relative_velocity(tcen)
vrel=pm.Deterministic("vrel",tt.sqrt(vx**2 + vy**2)/r_star)
#tdur=pm.Deterministic("tdur",(2*tt.sqrt(1-b**2))/vrel)
#correcting for grazing transits by multiplying b by 1-rp/rs
tdur=pm.Deterministic("tdur",(2*tt.sqrt((1+ror)**2-b**2))/vrel)
if force_tdur:
#Adding a potential to force our transit towards the observed transit duration:
pm.Potential("tdur_prior", -0.05*len(x)*abs(tt.log(tdur/dur)))
# The 2nd light (not third light as companion light is not modelled)
# This quantity is in delta-mag
if useL2:
deltamag_contam = pm.Uniform("deltamag_contam", lower=-20.0, upper=20.0)
third_light = pm.Deterministic("third_light", tt.power(2.511,-1*deltamag_contam)) #Factor to multiply normalised lightcurve by
else:
third_light = 0.0
# Compute the model light curve using starry
light_curves = (
xo.LimbDarkLightCurve(u_star).get_light_curve(
orbit=orbit, r=r_pl/109.1, t=x, texp=cad))*(1+third_light)/lc['flux_unit']
transit_light_curve = pm.math.sum(light_curves, axis=-1)
light_curve = pm.Deterministic("light_curve", transit_light_curve + flux_trend)
pm.Normal("obs", mu=light_curve, sd=yerr, observed=y)
#print(model.check_test_point())
if fit_poly:
map_soln = xo.optimize(start=model.test_point,vars=[trend],verbose=False)
map_soln = xo.optimize(start=map_soln,vars=[trend,log_ror,log_per,tcen],verbose=False)
# Fit for the maximum a posteriori parameters
else:
map_soln = xo.optimize(start=model.test_point,vars=[mean],verbose=False)
map_soln = xo.optimize(start=map_soln,vars=[mean,log_ror,log_per,tcen],verbose=True)
'''
map_soln = xo.optimize(start=map_soln,vars=[b, log_ror, log_per, tcen],verbose=False)
if useL2:
map_soln = xo.optimize(start=map_soln,vars=[log_ror, b, deltamag_contam],verbose=False)
map_soln = xo.optimize(start=map_soln,verbose=False)
if fit_poly:
map_soln = xo.optimize(start=map_soln,vars=[trend],verbose=False)
map_soln = xo.optimize(start=map_soln,verbose=False)
map_soln = xo.optimize(start=map_soln,verbose=False)
'''
map_soln, func = xo.optimize(start=map_soln,verbose=False,return_info=True)
'''
interpy=xo.LimbDarkLightCurve(map_soln['u_star']).get_light_curve(
r=float(map_soln['r_pl']),
orbit=xo.orbits.KeplerianOrbit(
r_star=float(map_soln['r_star']),
m_star=float(map_soln['m_star']),
period=float(map_soln['period']),
t0=float(map_soln['t0']),
b=float(map_soln['b'])),
t=interpt
)/(1+map_soln['third_light'])
'''
#print(func)
interpt=np.linspace(map_soln['tcen']-winsize,map_soln['tcen']+winsize,600)
if 'third_light' not in map_soln:
map_soln['third_light']=np.array(0.0)
transit_zoom = (xo.LimbDarkLightCurve(u_star).get_light_curve(
orbit=xo.orbits.KeplerianOrbit(r_star=r_star,m_star=m_star,
period=map_soln['period'],t0=map_soln['tcen'],b=map_soln['b']),
r=map_soln['r_pl']/109.1, t=interpt, texp=cad
)*(1+map_soln['third_light'])
).eval().ravel()/lc['flux_unit']
#Reconstructing best-fit model into a dict:
best_fit={'log_lik_mono':-1*func['fun'],'model_success':str(func['success'])}
for col in map_soln:
if 'interval__' not in col:
if map_soln[col].size==1:
best_fit[col]=float(map_soln[col])
else:
best_fit[col]=map_soln[col].astype(float)
#print({bf:type(best_fit[bf]) for bf in best_fit})
#print(best_fit["vrel"],best_fit["b"],map_soln["tdur"],best_fit["tcen"])
if np.isnan(map_soln["tdur"]):
best_fit['tdur']=dur
#Adding depth:
best_fit['transit_zoom']=transit_zoom
best_fit['monofit_x']=x
best_fit['monofit_ymodel']=map_soln['light_curve']
best_fit['monofit_y']=y
best_fit['monofit_yerr']=yerr
best_fit['depth']=np.max(transit_zoom)-np.min(transit_zoom)
#err = std / sqrt(n_pts in transit)
best_fit['depth_err']=np.nanstd(y[abs(x-best_fit['tcen'])<0.475*best_fit["tdur"]])/\
np.sqrt(np.sum(abs(x-best_fit['tcen'])<0.475*best_fit["tdur"]))
best_fit['snr']=best_fit['depth']/(best_fit['depth_err'])
'''
print("time stuff:", best_fit['tcen'],interpt[0],interpt[-1],
"\nfit stuff:",best_fit['r_pl'],best_fit['b'], best_fit['logror'],best_fit['tdur'],(1+best_fit['third_light'])/lc['flux_unit'],
"\ndepth stuff:",best_fit['depth'],best_fit['depth_err'],lc['flux_unit'],np.min(transit_zoom),np.min(map_soln['light_curve']))'''
#Calculating std in typical bin with width of the transit duration, to compute SNR_red
if how=='mono' or init_period is None:
oot_mask=mask*(abs(lc[timeindex]-best_fit['tcen'])>0.5)*(abs(lc[timeindex]-best_fit['tcen'])<25)
binlc=tools.bin_lc_segment(np.column_stack((lc[timeindex][oot_mask],lc[fluxindex.replace('_flat','')][oot_mask],
lc[fluxerrindex][oot_mask])),best_fit['tdur'])
best_fit['cdpp'] = np.nanmedian(abs(np.diff(binlc[:,1])))#np.nanstd(binlc[:,1])
best_fit['Ntrans']=1
else:
phase=(abs(lc['time']-best_fit['tcen']+0.5*best_fit['tdur'])%init_period)
if len(mask)==len(phase):
oot_mask=mask*(phase>best_fit['tdur'])
else:
oot_mask=lc['mask']*(phase>best_fit['tdur'])
binlc=tools.bin_lc_segment(np.column_stack((lc['time'][oot_mask],lc['flux'][oot_mask],
lc['flux_err'][oot_mask])),best_fit['tdur'])
#Counting in-transit cadences:
durobs=0
for cad in np.unique(lc['cadence']):
if len(mask)==len(phase):
durobs+=np.sum(phase[mask&(lc['cadence']==cad)]<best_fit['tdur'])*float(int(cad[1:]))/1440
else:
durobs+=np.sum(phase[lc['mask']&(lc['cadence']==cad)]<best_fit['tdur'])*float(int(cad[1:]))/1440
best_fit['Ntrans']=durobs/best_fit['tdur']
best_fit['cdpp'] = np.nanmedian(abs(np.diff(binlc[:,1])))#np.nanstd(binlc[:,1])
best_fit['snr_r']=best_fit['depth']/(best_fit['cdpp']/np.sqrt(best_fit['Ntrans']))
best_fit['interpmodel']=interp.interp1d(np.hstack((-10000,interpt-best_fit['tcen'],10000)),
np.hstack((0.0,transit_zoom,0.0)))
if how=='periodic':
for col in ['period','vrel']:
par = best_fit.pop(col) #removing things which may spoil the periodic detection info (e.g. derived period)
best_fit[col+'_mono']=par
assert 'period' not in best_fit
best_fit['period']=init_period
return best_fit
def MonoTransitSearch(lc,ID,mission, Rs=None,Ms=None,Teff=None,
mono_SNR_thresh=6.5,mono_BIC_thresh=-6,n_durs=5,poly_order=3,
n_oversamp=20,binsize=15/1440.0,custom_mask=None,
transit_zoom=3.5,use_flat=False,use_binned=True,use_poly=True,
plot=False, plot_loc=None ,n_max_monos=8, use_stellar_dens=True, **kwargs):
    #Searches LC for monotransits - durations are taken from a fixed grid (not fitted); only the depth (and an optional local trend) is minimized at each trial epoch
'''
lc
ID
mono_SNR_thresh=6.5 - threshold in sigma
mono_BIC_thresh=-10 - threshold in BIC to be used for a significant monotransit
n_durs=5
poly_order=3 - polynomial order to use for a "no transit" fit
n_oversamp=10 - oversampling factor wrt to durations from which to match to lightcurve
binsize=1/96. - size of bins in days
transit_zoom=3.5 - size (in transit durations) to cut around transit when minimizing transit model
use_flat=True - flattens lightcurve before monotransit search
use_binned=True
use_poly=True - fits transits (and sin) with a polynomial (order = poly_order-1) to account for variability
Rs=None
Ms=None
Teff=None
plot=False
plot_loc=None
use_stellar_dens=True - use stellar density to produce transit templates to search.
'''
#Computing a fine x-range to search:
search_xrange=[]
Rs=1.0 if (Rs is None or not use_stellar_dens or np.isnan(Rs)) else float(Rs)
Ms=1.0 if (Ms is None or not use_stellar_dens or np.isnan(Ms)) else float(Ms)
Teff=5800.0 if Teff is None else float(Teff)
mincad=np.min([float(cad[1:])/1440 for cad in np.unique(lc['cadence'])])
interpmodels, tdurs = get_interpmodels(Rs,Ms,Teff,lc['time'],lc['flux_unit'],
n_durs=n_durs,texp=mincad, mission=mission)
#print("Checking input model matches. flux:",np.nanmedian(uselc[:,0]),"std",np.nanstd(uselc[:,1]),"transit model:",
# interpmodels[0](10.0),"depth:",interpmodels[0](0.0))
search_xranges=[]
mask = lc['mask'] if custom_mask is None else custom_mask
#Removing gaps bigger than 2d (with no data)
for n in range(n_durs):
search_xranges_n=[]
if np.max(np.diff(lc['time'][mask]))<tdurs[n]:
lc_regions=[lc['time'][mask]]
else:
lc_regions = np.array_split(lc['time'][mask],1+np.where(np.diff(lc['time'][mask])>tdurs[n])[0])
for arr in lc_regions:
search_xranges_n+=[np.arange(arr[0]+0.33*tdurs[n],arr[-1]-0.33*tdurs[n],tdurs[n]/n_oversamp)]
search_xranges+=[np.hstack(search_xranges_n)]
print(str(ID)+" - Searching "+str(np.sum([len(xr) for xr in search_xranges]))+" positions with "+str(n_durs)+" durations:",','.join(list(np.round(tdurs,3).astype(str))))
#Looping through search and computing chi-sq at each position:
outparams=pd.DataFrame()
def trans_model_neglnlik(params,x,y,sigma2,init_log_dep,interpmodel):
#Returns chi-squared for transit model, plus linear background flux trend
# pars = depth, duration, poly1, poly2
model=np.exp(params[0]-init_log_dep)*interpmodel(x)
return 0.5 * np.sum((y - model)**2 / sigma2 + np.log(sigma2))
def sin_model_neglnlik(params,x,y,sigma2,tcen,dur):
#Returns chi-squared for transit model, plus linear background flux trend
# pars = depth, duration, poly1, poly2
newt=x/(2.6*dur)*2*np.pi
amp=np.exp(-1*np.power(newt, 2.) / (2 * np.power(np.pi, 2.)))
model=params[0]*(amp*np.sin(newt-np.pi*0.5)-0.1)
return 0.5 * np.sum((y - model)**2 / sigma2 + np.log(sigma2))
def trans_model_poly_neglnlik(params,x,y,sigma2,init_log_dep,interpmodel):
#Returns chi-squared for transit model, plus linear background flux trend
# pars = depth, gradient
model=x*params[1]+np.exp(params[0]-init_log_dep)*interpmodel(x)
return 0.5 * np.sum((y - model)**2 / sigma2 + np.log(sigma2))
def sin_model_poly_neglnlik(params,x,y,sigma2,tcen,dur):
#Returns chi-squared for transit model, plus linear background flux trend
# pars = log_depth, poly1, poly2
newt=x/(2*dur)*2*np.pi
amp=np.exp(-1*np.power(newt, 2.) / (2 * np.power(np.pi, 2.)))
model=x*params[1]+np.exp(params[0])*(amp*np.sin(newt-np.pi*0.5)-0.1)
return 0.5 * np.sum((y - model)**2 / sigma2 + np.log(sigma2))
#from progress.bar import IncrementalBar
#bar = IncrementalBar('Searching for monotransit', max = np.sum([len(xr) for xr in search_xranges]))
#For each duration we scan across the lightcurve and compare a transit model to others:
for n,search_xrange in enumerate(search_xranges):
tdur=tdurs[n%n_durs]
#flattening and binning as a function of the duration we are searching in order to avoid
if use_binned:
            #Having many points in a lightcurve makes flattening difficult (and fitting needlessly slow)
# So let's bin to a fraction of the tdur - say 9 in-transit points.
lc=tools.lcBin(lc,binsize=tdur/9, use_flat=False, extramask=custom_mask)
if use_flat and not use_poly:
lc=tools.lcFlatten(lc,winsize=tdur*13,stepsize=tdur*0.333,use_bin=True)
uselc=np.column_stack((lc['bin_time'],lc['bin_flux_flat'],lc['bin_flux_err']))
else:
uselc=np.column_stack((lc['bin_time'],lc['bin_flux'],lc['bin_flux_err']))
else:
if use_flat and not use_poly:
lc=tools.lcFlatten(lc,winsize=tdur*13,stepsize=tdur*0.333)
uselc=np.column_stack((lc['time'][mask],lc['flux_flat'][mask],lc['flux_err'][mask]))
else:
uselc=np.column_stack((lc['time'][mask],lc['flux'][mask],lc['flux_err'][mask]))
#Making depth vary from 0.1 to 1.0
init_dep_shifts=np.exp(np.random.normal(0.0,n_oversamp*0.01,len(search_xrange)))
randns=np.random.randint(2,size=(len(search_xrange),2))
cad=np.nanmedian(np.diff(uselc[:,0]))
p_transit = np.clip(3/(len(uselc[:,0])*cad),0.0,0.05)
        #Rough prior probability that a given trial epoch lies in transit, given the duration (used in the BIC prior below)
methods=['SLSQP','Nelder-Mead','Powell']
logmodeldep=np.log(abs(interpmodels[n](0.0)))
for n_mod,x2s in enumerate(search_xrange):
#bar.next()
#minimise single params - depth
round_tr=abs(uselc[:,0]-x2s)<(transit_zoom*tdur)
#Centering x array on epoch to search
x=uselc[round_tr,0]-x2s
in_tr=abs(x)<(0.45*tdur)
if len(x[in_tr])>0 and not np.isnan(x[in_tr]).all() and len(x[~in_tr])>0 and not np.isnan(x[~in_tr]).all():
y=uselc[round_tr,1]
oot_median=np.nanmedian(y[~in_tr])
y-=oot_median
yerr=uselc[round_tr,2]
sigma2=yerr**2
init_log_dep=np.log(np.clip(-1*(np.nanmedian(y[in_tr]))*init_dep_shifts[n_mod],0.00000001,1000))
init_noise=np.std(y)
poly_fit=np.polyfit(x,y,poly_order)
poly_neg_llik=0.5 * np.sum((y - np.polyval(poly_fit,x))**2 / sigma2 + np.log(sigma2))
if use_poly:
init_grad=np.polyfit(x[~in_tr],y[~in_tr],1)[0]
res_trans=optim.minimize(trans_model_poly_neglnlik,np.hstack((init_log_dep,init_grad)),
args=(x,y,sigma2,
logmodeldep,interpmodels[n%n_durs]),
method = methods[randns[n_mod,0]])
res_sin=optim.minimize(sin_model_poly_neglnlik,
np.hstack((init_log_dep,init_grad)),
args=(x,y,sigma2,x2s,tdur),
method = methods[randns[n_mod,1]])
else:
res_trans=optim.minimize(trans_model_neglnlik,(init_log_dep),
args=(x,y,sigma2,
logmodeldep,interpmodels[n%n_durs]),
method = methods[randns[n_mod,0]])
res_sin=optim.minimize(sin_model_neglnlik, (init_log_dep),
args=(x,y,sigma2,x2s,tdur),
method = methods[randns[n_mod,1]])
log_len=np.log(np.sum(round_tr))
#BIC = log(n_points)*n_params - 2*(log_likelihood + log_prior)
#BIC = log(n_points)*n_params + 2*(neg_log_likelihood-log_prior).
# Setting log prior of transit as sum of:
# - 0.5*tdur/len(x) - the rough probability that, given some point in time, it's in the centre of a transit
# - normal prior on log(duration) to be within 50% (0.5 in logspace)
# 'BIC_trans':log_len*len(res_trans.x)+2*(res_trans.fun-np.log(p_transit)),
# 'BIC_sin':log_len*len(res_sin.x)+2*(res_sin.fun-np.log(1-p_transit)),
# 'BIC_poly':log_len*len(poly_fit)+2*(poly_neg_llik-np.log(1-p_transit)),
n_params=np.array([len(res_trans.x),len(res_sin.x),len(poly_fit)])
outdic={'tcen':x2s,
'llk_trans':-1*res_trans.fun,
'llk_sin':-1*res_sin.fun,
'llk_poly':-1*poly_neg_llik,
'BIC_trans':log_len*n_params[0]+2*(res_trans.fun-np.log(p_transit)),
'BIC_sin':log_len*n_params[1]+2*(res_sin.fun-np.log(5*p_transit)),
'BIC_poly':log_len*n_params[2]+2*(poly_neg_llik-np.log(1-(6*p_transit))),
'init_dep':np.exp(init_log_dep),
'init_dur':tdur,
'trans_dep':np.exp(res_trans.x[0]),
'sin_dep':np.exp(res_sin.x[0]),
'n_mod':n,
'tran_success':int(res_trans.success),'sin_success':int(res_sin.success),
'all_success':int(res_trans.success&res_sin.success)}
outdic['trans_snr']=outdic['trans_dep']/(init_noise/np.sqrt(outdic['init_dur']/cad))
outdic.update({'poly_'+str(n):poly_fit[n] for n in range(poly_order+1)})
if use_poly:
outdic['trans_grad']=res_trans.x[1]
#outdic.update({'trans_poly_'+str(n):res_trans.x[1+n] for n in range(poly_order)})
outdic['sin_grad']=res_sin.x[1]
#outdic.update({'sin_poly_'+str(n):res_sin.x[1+n] for n in range(poly_order)})
outparams=outparams.append(pd.Series(outdic,name=len(outparams)))
#print(n,len(outparams))
#bar.finish()
outparams=outparams.sort_values('tcen')
#Transit model has to be better than the sin model AND the DeltaBIC w.r.t to the polynomial must be <-10.
# transit depth must be <0.0,
# the sum of DeltaBICs has to be < threshold (e.g. -10)
outparams['sin_llk_ratio']=outparams['llk_trans'].values-outparams['llk_sin'].values
outparams['poly_llk_ratio']=outparams['llk_trans'].values-outparams['llk_poly'].values
outparams['sin_DeltaBIC']=outparams['BIC_trans'].values-outparams['BIC_sin'].values
outparams['poly_DeltaBIC']=outparams['BIC_trans'].values-outparams['BIC_poly'].values
outparams['sum_DeltaBICs']=outparams['sin_DeltaBIC']+outparams['poly_DeltaBIC']
outparams['mean_DeltaBICs']=0.5*outparams['sum_DeltaBICs']
#if use_poly:
#In the case of co-fitting for polynomials, BICs fail, but log likelihoods still work.
#We will use 1.5 (4.5x) over sin and 3 (20x) over polynomial as our threshold:
# signfct=np.where((outparams['sin_llk_ratio']>1.5)&(outparams['poly_llk_ratio']>3)&(outparams['trans_dep']>0.0))[0]
#else:
# signfct=np.where((outparams['sin_llk_ratio']>1.0)&(outparams['poly_DeltaBIC']<mono_BIC_thresh)&(outparams['trans_dep']>0.0))[0]
signfct=(outparams['sin_llk_ratio']>1.5)&(outparams['poly_llk_ratio']>1.5)&(outparams['poly_DeltaBIC']<mono_BIC_thresh)&(outparams['trans_snr']>(mono_SNR_thresh-0.5))
n_sigs=np.sum(signfct)
if n_sigs>0:
best_ix=[]
nix=0
detns={}
while n_sigs>0 and nix<=n_max_monos:
#Getting the best detection:
signfct_df=outparams.loc[signfct]
#Placing the best detection info into our dict:
detn_row=signfct_df.iloc[np.argmin(signfct_df['poly_DeltaBIC'])]
detns[str(nix).zfill(2)]={}
detns[str(nix).zfill(2)]['llk_trans'] = detn_row['llk_trans']
detns[str(nix).zfill(2)]['llk_sin'] = detn_row['llk_sin']
detns[str(nix).zfill(2)]['llk_poly'] = detn_row['llk_poly']
detns[str(nix).zfill(2)]['BIC_trans'] = detn_row['BIC_trans']
detns[str(nix).zfill(2)]['BIC_sin'] = detn_row['BIC_sin']
detns[str(nix).zfill(2)]['BIC_poly'] = detn_row['BIC_poly']
detns[str(nix).zfill(2)]['sin_DeltaBIC']= detn_row['sin_DeltaBIC']
detns[str(nix).zfill(2)]['poly_DeltaBIC']=detn_row['poly_DeltaBIC']
detns[str(nix).zfill(2)]['tcen'] = detn_row['tcen']
detns[str(nix).zfill(2)]['period'] = np.nan
detns[str(nix).zfill(2)]['period_err'] = np.nan
detns[str(nix).zfill(2)]['DeltaBIC'] = detn_row['poly_DeltaBIC']
detns[str(nix).zfill(2)]['tdur'] = detn_row['init_dur']
detns[str(nix).zfill(2)]['depth'] = detn_row['trans_dep']
detns[str(nix).zfill(2)]['orbit_flag'] = 'mono'
detns[str(nix).zfill(2)]['snr'] = detn_row['trans_snr']
#Calculating minimum period:
detns[str(nix).zfill(2)]['P_min'] = calc_min_P(uselc[:,0],detn_row['tcen'],detn_row['init_dur'])
#Removing the regions around this detection from our array
#print(np.sum(abs(outparams['tcen']-detn_row['tcen'])<np.where(outparams['init_dur']<detn_row['init_dur'],
# 0.66*detn_row['init_dur'], 0.66*outparams['init_dur'])))
#print(np.sum(signfct[abs(outparams['tcen']-detn_row['tcen'])<np.where(outparams['init_dur']<detn_row['init_dur'],
# 0.66*detn_row['init_dur'], 0.66*outparams['init_dur'])]))
away_from_this_detn=abs(outparams['tcen']-detn_row['tcen'])>np.where(outparams['init_dur']<detn_row['init_dur'],
0.66*detn_row['init_dur'], 0.66*outparams['init_dur'])
signfct=signfct&away_from_this_detn
n_sigs=np.sum(signfct)
#print(n_sigs,detns[str(nix).zfill(2)]['poly_DeltaBIC'],np.sum(signfct[abs(outparams['tcen']-detn_row['tcen'])<np.where(outparams['init_dur']<detn_row['init_dur'],0.66*detn_row['init_dur'], 0.66*outparams['init_dur'])]))
nix+=1
#print(np.percentile(outparams['poly_DeltaBIC'],[5,16,50,84,95]))
else:
print("n_sigs == 0")
detns={}
if plot:
fig_loc= PlotMonoSearch(lc,ID,outparams,detns,interpmodels,tdurs,custom_mask=custom_mask,
use_flat=use_flat,use_binned=use_binned,use_poly=use_poly,plot_loc=plot_loc)
return detns, outparams, fig_loc
else:
return detns, outparams, None
def calc_min_P(time,tcen,tdur):
abs_times=abs(time-tcen)
abs_times=np.sort(abs_times)
whr=np.where(np.diff(abs_times)>tdur*0.75)[0]
if len(whr)>0:
return abs_times[whr[0]]
else:
return np.max(abs_times)
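# Added note: calc_min_P turns single-transit coverage into a lower bound on the period -
# it returns the distance from tcen to the first data gap wider than 0.75*tdur (or to the
# edge of coverage), since any shorter period would have produced a second visible transit.
# Illustrative check with assumed values: calc_min_P(np.arange(0, 10, 0.02), 4.0, 0.2) -> ~6.0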
def PlotMonoSearch(lc, ID, monosearchparams, mono_dic, interpmodels, tdurs,
use_flat=True,use_binned=True,use_poly=False,transit_zoom=2.5,plot_loc=None,custom_mask=None,**kwargs):
if plot_loc is None:
plot_loc = str(ID).zfill(11)+"_Monotransit_Search.pdf"
elif plot_loc[-1]=='/':
plot_loc = plot_loc+str(ID).zfill(11)+"_Monotransit_Search.pdf"
if use_flat and not use_poly:
lc=tools.lcFlatten(lc,winsize=np.median(tdurs)*7.5,stepsize=0.2*np.median(tdurs))
lc=tools.lcBin(lc, 30/1440, use_masked=True, use_flat=True, extramask = custom_mask)
flux_key='flux_flat'
else:
flux_key='flux'
lc=tools.lcBin(lc,30/1440,use_masked=True,use_flat=False)
mask = lc['mask'] if custom_mask is None else custom_mask
fig = plt.figure(figsize=(11.69,8.27))
import seaborn as sns
sns.set_palette(sns.set_palette("RdBu",14))
axes=[]
axes +=[fig.add_subplot(411)]
axes[0].set_title(str(ID).zfill(7)+" - Monotransit Search")
#rast=True if np.sum(lc['mask']>12000) else False
axes[0].plot(lc['time'][mask],lc[flux_key][mask],'.k',alpha=0.28,markersize=0.75, rasterized=True)
if use_flat:
axes[0].plot(lc['bin_time'],lc['bin_flux_flat'],'.k',alpha=0.7,markersize=1.75, rasterized=True)
else:
axes[0].plot(lc['bin_time'],lc['bin_flux'],'.k',alpha=0.7,markersize=1.75, rasterized=True)
axes[0].set_ylim(np.percentile(lc[flux_key][mask],(0.25,99.75)))
axes[0].set_ylabel("flux")
axes[0].set_xticks([])
axes[0].set_xticklabels([])
axes +=[fig.add_subplot(412)]
axes[1].plot([monosearchparams['tcen'].values[0],monosearchparams['tcen'].values[-1]],[-10,-10],'--k',alpha=0.25)
#plt.plot(monosearchparams_2['tcen'],monosearchparams_2['worstBIC'],'.',c='C5',alpha=0.4)
for n,dur in enumerate(np.unique(monosearchparams['init_dur'])):
if n==0:
axes[1].plot(monosearchparams.loc[monosearchparams['init_dur']==dur,'tcen'],
monosearchparams.loc[monosearchparams['init_dur']==dur,'poly_DeltaBIC'],
c=sns.color_palette()[n],alpha=0.6,label='Transit - polynomial', rasterized=True)
axes[1].plot(monosearchparams.loc[monosearchparams['init_dur']==dur,'tcen'],
monosearchparams.loc[monosearchparams['init_dur']==dur,'sin_DeltaBIC'],
c=sns.color_palette()[-1*(n+1)],alpha=0.6,label='Transit - wavelet', rasterized=True)
else:
axes[1].plot(monosearchparams.loc[monosearchparams['init_dur']==dur,'tcen'],
monosearchparams.loc[monosearchparams['init_dur']==dur,'poly_DeltaBIC'],
c=sns.color_palette()[n],alpha=0.6, rasterized=True)
axes[1].plot(monosearchparams.loc[monosearchparams['init_dur']==dur,'tcen'],
monosearchparams.loc[monosearchparams['init_dur']==dur,'sin_DeltaBIC'],
c=sns.color_palette()[-1*(n+1)],alpha=0.6, rasterized=True)
axes[1].legend(prop={'size': 5})
ix=(np.isfinite(monosearchparams['sum_DeltaBICs']))&(monosearchparams['sin_DeltaBIC']<1e8)&(monosearchparams['poly_DeltaBIC']<1e8)
min_bic=np.nanmin(monosearchparams.loc[ix,'poly_DeltaBIC'])
maxylim=np.nanmax([np.percentile(monosearchparams.loc[ix,'poly_DeltaBIC'],98),
np.percentile(monosearchparams.loc[ix,'sin_DeltaBIC'],98)])
axes[1].set_ylim(maxylim,min_bic)
axes[1].set_ylabel("Delta BIC")
axes[1].set_xlabel("Time [BJD-"+str(lc['jd_base'])+"]")
if len(mono_dic)>1:
trans_model_mins=[]
n_poly=int(np.sum([1 for n in range(10) if 'poly_'+str(n) in mono_dic[list(mono_dic.keys())[0]]]))
for nm,monopl in enumerate(mono_dic):
tdur=mono_dic[monopl]['tdur']
tcen=mono_dic[monopl]['tcen']
transit_zoom=2.25
axes[1].text(tcen,np.clip(np.min([mono_dic[monopl]['sin_DeltaBIC'],mono_dic[monopl]['poly_DeltaBIC']]),
min_bic,1e6),monopl)
axes +=[fig.add_subplot(4,len(mono_dic),nm+2*len(mono_dic)+1)]
axes[-1].set_xticks([])
axes[-1].set_xticklabels([])
axes[-1].text(tcen+0.1,np.clip(np.min([mono_dic[monopl]['sin_DeltaBIC'],mono_dic[monopl]['poly_DeltaBIC']]),
min_bic,1e6),
monopl)
for n,dur in enumerate(np.unique(monosearchparams['init_dur'])):
index=(monosearchparams['init_dur']==dur)&(abs(monosearchparams['tcen']-tcen)<transit_zoom*tdur)
axes[-1].plot(monosearchparams.loc[index,'tcen'],
monosearchparams.loc[index,'BIC_trans']-monosearchparams.loc[index,'BIC_poly'],
c=sns.color_palette()[n], alpha=0.6, rasterized=True)
axes[-1].plot(monosearchparams.loc[index,'tcen'],
monosearchparams.loc[index,'BIC_trans']-monosearchparams.loc[index,'BIC_sin'],
c=sns.color_palette()[-1*n], alpha=0.6, rasterized=True)
#plt.plot(monosearchparams['tcen'],monosearchparams['worstBIC'],',k')
axes[-1].plot([tcen,tcen],[-150,130],'--k',linewidth=1.5,alpha=0.25)
axes[-1].set_xlim(tcen-tdur*transit_zoom,tcen+tdur*transit_zoom)
axes[-1].set_ylim(maxylim,min_bic)
if nm==0:
axes[-1].set_ylabel("Delta BIC")
else:
axes[-1].set_yticks([])
axes[-1].set_yticklabels([])
mono_dets=monosearchparams.loc[monosearchparams['tcen']==mono_dic[monopl]['tcen']].iloc[0]
axes +=[fig.add_subplot(4,len(mono_dic),nm+3*len(mono_dic)+1)]
nmod=np.arange(len(tdurs))[np.argmin(abs(tdurs-tdur))]
round_tr=mask&(abs(lc['time']-tcen)<(transit_zoom*tdur))
x=(lc['time'][round_tr]-tcen)
y=lc[flux_key][round_tr]
y_offset=np.nanmedian(lc[flux_key][round_tr&(abs(lc['time']-tcen)>(0.7*tdur))]) if use_poly else 0
y-=y_offset
#Plotting polynomial:
axes[-1].plot(lc['time'][round_tr],np.polyval([mono_dets['poly_'+str(n)].values[0] for n in range(n_poly)],x),'--',
c=sns.color_palette()[-4],linewidth=2.0,alpha=0.6, rasterized=True)
#Plotting transit:
modeldep=abs(interpmodels[nmod](0.0))
if use_flat and not use_poly:
trans_model=(mono_dets['trans_dep']/modeldep)*interpmodels[nmod](x)
else:
trans_model=mono_dets['trans_grad']*x+(mono_dets['trans_dep']/modeldep)*interpmodels[nmod](x)
#Plotting sin wavelet:
newt=x/(2*tdur)*2*np.pi
amp=np.exp(-1*np.power(newt*1.25, 2.) / (2 * np.power(np.pi, 2.)))
if use_flat and not use_poly:
sin_model=mono_dets['sin_dep']*(amp*np.sin(newt-np.pi*0.5)-0.1)
else:
sin_model=mono_dets['sin_grad']*x+mono_dets['sin_dep']*(amp*np.sin(newt-np.pi*0.5)-0.1)
if nm==0:
axes[-1].set_ylabel("Flux")
else:
axes[-1].set_yticks([])
axes[-1].set_yticklabels([])
axes[-1].plot(lc['time'][round_tr],y,'.k',markersize=1.5,alpha=0.3, rasterized=True)
round_tr_bin=abs(lc['bin_time']-tcen)<(transit_zoom*tdur)
axes[-1].plot(lc['bin_time'][round_tr_bin],lc['bin_flux'][round_tr_bin]-y_offset,
'.k',alpha=0.7,markersize=2.5, rasterized=True)
axes[-1].plot(lc['time'][round_tr],trans_model,'-',
c=sns.color_palette()[0],linewidth=2.0,alpha=0.85, rasterized=True)
axes[-1].plot(lc['time'][round_tr],sin_model,'-.',
c=sns.color_palette()[-1],linewidth=2.0,alpha=0.6, rasterized=True)
axes[-1].set_xlim(tcen-tdur*transit_zoom,tcen+tdur*transit_zoom)
trans_model_mins+=[np.min(trans_model)]
#print(trans_model_mins)
trans_model_min=np.min(np.array(trans_model_mins))
for nm,monopl in enumerate(mono_dic):
axes[2+2*nm+1].set_ylim(trans_model_min*1.2,np.percentile(lc['bin_flux'],97.5))
fig.subplots_adjust(wspace=0, hspace=0.15)
#plt.tight_layout()
fig.savefig(plot_loc, dpi=400)
#plt.xlim(1414,1416)
return plot_loc
'''
#smearing this out by 0.3*tdur to avoid "micro peaks" in the array making multiple detections
def gaussian(x , s):
#Simple gaussian given position-adjusted x and sigma in order to convolve chisq spectrum
return 1./np.sqrt( 2. * np.pi * s**2 ) * np.exp( -x**2 / ( 2. * s**2 ) )
chisqs_conv=np.convolve(chisqs, np.fromiter( (gaussian(x, n_oversamp*0.5) for x in range(-1*n_oversamp, n_oversamp, 1 ) ), np.float ), mode='same' )
#Finding all points below some threshold:
rms_chisq=np.std(chisqs_conv[chisqs_conv>np.percentile(chisqs_conv,20)])
bool_in_trans=chisqs_conv<(-1*sigma_threshold*rms_chisq)
print("N_trans_points",np.sum(bool_in_trans))
ix_in_trans=[]
#Looping through each "region" where chisq is lower than threshold and finding minimum value:
starts = search_xrange[:-1][np.diff(bool_in_trans.astype(int))>0]
stops = search_xrange[1:][np.diff(bool_in_trans.astype(int))<0]
for ns in range(len(starts)):
ix_bool=(search_xrange>starts[ns])&(search_xrange<stops[ns])
ix_in_trans+=[search_xrange[ix_bool][np.argmin(chisqs[ix_bool])]]
if len(ix_in_trans)==0:
#No extra eclipse found
return None
elif len(ix_in_trans)==1:
#Found single eclipse-like feature.
return {'t0':search_xrange[ix_in_trans[0]],'chisq':chisqs[ix_in_trans[0]],
'dep':outparams[ix_in_trans[0],0],'dur':outparams[ix_in_trans[0],1]}
elif len(ix_in_trans)>1:
#Found multiple eclipse-like features?
peak=ix_in_trans[np.argmax(chisqs[ix_in_trans])]
return {'t0':search_xrange[peak],'chisq':chisqs[peak],
'dep':outparams[peak,0],'dur':outparams[peak,1]}
'''
def PeriodicPlanetSearch(lc, ID, planets, use_binned=False, use_flat=True, binsize=15/1440.0, n_search_loops=5,
rhostar=None, Ms=1.0, Rs=1.0, Teff=5800,
multi_FAP_thresh=0.00125, multi_SNR_thresh=7.0,
plot=False, plot_loc=None, mask_prev_planets=True, **kwargs):
#Searches an LC (ideally masked for the monotransiting planet) for other *periodic* planets.
from transitleastsquares import transitleastsquares
print("Using TLS on ID="+str(ID)+" to search for multi-transiting planets")
#Using bins if we have a tonne of points (eg >1 sector). If not, we can use unbinned data.
use_binned=True if len(lc['flux'])>10000 else use_binned
#Max period is half the total observed time NOT half the distance from t[0] to t[-1]
p_max=0.66*np.sum(np.diff(lc['time'])[np.diff(lc['time'])<0.4])
#np.clip(0.75*(np.nanmax(lc[prefix+'time'])-np.nanmin(lc[prefix+'time'])),10,80)
suffix='_flat' if use_flat else ''
if use_flat:
        #Setting the flattening window as a multiple of the maximum expected transit duration
rhostar=1.0 if rhostar==0.0 or rhostar is None else rhostar
durmax = (p_max/(3125*rhostar))**(1/3)
plmask_0 = np.tile(False,len(lc['time']))
if mask_prev_planets:
#Masking each of those transit events we detected during the MonoTransit search
for pl in planets:
plmask_0+=abs(lc['time']-planets[pl]['tcen'])<0.6*planets[pl]['tdur']
lc=tools.lcFlatten(lc,winsize=11*durmax,transit_mask=~plmask_0)
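        #transit_mask=~plmask_0 appears to exclude the already-detected transit regions from the
        #flattening fit, so those dips are neither distorted nor treated as systematics.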
print("after flattening:",np.sum(lc['mask']))
if use_binned:
lc=tools.lcBin(lc,binsize=binsize,use_flat=use_flat)
#print("binned",lc.keys,np.sum(lc['mask']))
suffix='_flat'
#else:
# print(use_binned, 'bin_flux' in lc)
prefix='bin_' if use_binned else ''
if plot:
sns.set_palette("viridis",10)
fig = plt.figure(figsize=(11.69,8.27))
        rast=True if np.sum(lc['mask'])>12000 else False
#plt.plot(lc['time'][lc['mask']],lc['flux_flat'][lc['mask']]*lc['flux_unit']+(1.0-np.nanmedian(lc['flux_flat'][lc['mask']])*lc['flux_unit']),',k')
#if prefix=='bin_':
# plt.plot(lc[prefix+'time'],lc[prefix+'flux'+suffix]*lc['flux_unit']+(1.0-np.nanmedian(lc[prefix+'flux'+suffix])*lc['flux_unit']),'.k')
plt.subplot(312)
plt.plot(lc['time'][lc['mask']],
lc['flux_flat'][lc['mask']]*lc['flux_unit']+(1.0-np.nanmedian(lc['flux_flat'][lc['mask']])*lc['flux_unit']),
'.k', markersize=0.5,rasterized=rast)
lc=tools.lcBin(lc, binsize=1/48, use_flat=True)
plt.plot(lc['bin_time'],
lc['bin_flux_flat']*lc['flux_unit']+(1.0-np.nanmedian(lc['bin_flux_flat'])*lc['flux_unit']),
'.k', markersize=4.0)
#Looping through, masking planets, until none are found.
#{'01':{'period':500,'period_err':100,'FAP':np.nan,'snr':np.nan,'tcen':tcen,'tdur':tdur,'rp_rs':np.nan}}
if prefix+'mask' in lc:
anommask=lc[prefix+'mask'][:]
else:
anommask=~np.isnan(lc[prefix+'flux'+suffix][:])
plmask=np.tile(False,len(anommask))
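    #Two masks are maintained from here on: anommask flags usable (unmasked, non-NaN) points,
    #while plmask flags points inside the transits of planets found in earlier loop iterations.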
t_zero=np.nanmin(lc['time'])
SNR_last_planet=100;init_n_pl=len(planets);n_pl=len(planets);results=[]
while SNR_last_planet>multi_SNR_thresh and n_pl<(n_search_loops+init_n_pl):
        if len(planets)>0:
planet_name=str(int(1+np.max([float(key) for key in planets]))).zfill(2)
else:
planet_name='00'
        #Making the model. Making sure lc is at 1.0 and in relative flux, not ppt/ppm:
'''
if np.sum(plmask)>0:
#Re-doing flattening with other transits now masked (these might be causing
lc=tools.lcFlatten(lc,winsize=11*durmax,use_binned=use_binned,transit_mask=~plmask)
'''
modx = lc[prefix+'time']-t_zero
mody = lc[prefix+'flux'+suffix] * lc['flux_unit']+(1.0-np.nanmedian(lc[prefix+'flux'+suffix][anommask])*lc['flux_unit'])
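        #mody is rescaled to relative flux centred on 1.0 (via lc['flux_unit'] and the median of the
        #points in anommask), which is the normalisation transitleastsquares expects.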
#print(n_pl,len(mody),len(anommask),np.sum(anommask),len(plmask),np.sum(plmask))
if np.sum(plmask)>0:
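            #Rather than dropping the in-transit points of already-found planets, they are overwritten
            #below with flux values drawn at random from the unmasked data, preserving the time sampling
            #while preventing TLS from re-detecting the same signal.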
mody[plmask] = mody[anommask][np.random.choice(np.sum(anommask),np.sum(plmask))][:]
#anommask *= tools.CutAnomDiff(mody)
#print(n_pl,"norm_mask:",np.sum(lc['mask']),"anoms:",np.sum(anommask),"pl mask",np.sum(plmask),"total len",len(anommask))
model = transitleastsquares(modx[anommask], mody[anommask])
results+=[model.power(period_min=1.1,period_max=p_max,duration_grid_step=1.0625,Rstar=Rs,Mstar=Ms,
use_threads=1,show_progress_bar=False, n_transits_min=3)]
#print(results[-1])
if 'FAP' in results[-1] and 'snr' in results[-1] and not np.isnan(results[-1]['snr']) and 'transit_times' in results[-1]:
            #Keeping only transit times whose per-transit SNR exceeds half the estimated per-transit SNR
            #(taken here as the square root of the number of transits with positive SNR)
snr_per_trans_est=np.sqrt(np.sum(results[-1].snr_per_transit>0))
trans=np.array(results[-1]['transit_times'])[results[-1].snr_per_transit>snr_per_trans_est/2]
else:
trans=[]
phase_nr_trans=(lc[prefix+'time'][~plmask&anommask]-results[-1]['T0']-0.5*results[-1]['period'])%results[-1]['period']-0.5*results[-1]['period']
if 'FAP' in results[-1] and 'snr' in results[-1] and np.sum(abs(phase_nr_trans)<0.5*np.clip(results[-1]['duration'],0.2,2))>3:
plparams = QuickMonoFit(deepcopy(lc),results[-1]['T0'],np.clip(results[-1]['duration'],0.15,3),
init_period=results[-1]['period'],how='periodic',ndurs=4.5,Teff=Teff,Rs=Rs,Ms=Ms,
fluxindex=prefix+'flux'+suffix,mask=~plmask&anommask, **kwargs)
            SNR=np.max([plparams['snr'],results[-1].snr])