| prompt (string, lengths 15 to 655k) | completion (string, lengths 3 to 32.4k) | api (string, lengths 8 to 52) |
|---|---|---|
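Each row of the table pairs a truncated Python snippet (`prompt`) with the expression that should come next (`completion`) and the fully qualified NumPy API it exercises (`api`). A minimal sketch of how such rows could be iterated with the Hugging Face `datasets` library is shown below; the dataset path is a hypothetical placeholder, not the real repository name.

```python
# Sketch only: the dataset path below is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/numpy-api-completions", split="train")  # hypothetical path
for row in ds.select(range(3)):
    print(row["api"])              # e.g. "numpy.abs"
    print(row["prompt"][-80:])     # tail of the truncated snippet
    print(row["completion"][:80])  # the expected next expression
```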
#!/usr/bin/env python
import numpy as np
import sys
def outliers_modified_z_score(ys):
threshold = 4.5
median_y = np.median(ys)
median_absolute_deviation_y = np.median([
|
np.abs(y - median_y)
|
numpy.abs
|
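For context, the first prompt cuts the modified z-score outlier detector off mid-expression. A complete version of that routine, sketched here with the standard Iglewicz-Hoaglin constant 0.6745 and the snippet's threshold of 4.5 (an illustration, not the original author's file), could look like:

```python
#!/usr/bin/env python
import numpy as np

def outliers_modified_z_score(ys, threshold=4.5):
    # Modified z-score: 0.6745 * (y - median) / MAD, flag |z| above the threshold.
    ys = np.asarray(ys, dtype=float)
    median_y = np.median(ys)
    mad_y = np.median(np.abs(ys - median_y))
    modified_z = 0.6745 * (ys - median_y) / mad_y
    return np.where(np.abs(modified_z) > threshold)

print(outliers_modified_z_score([1.0, 1.1, 0.9, 1.2, 50.0]))  # flags the 50.0
```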
import asyncio
import os
import wave
from unittest import TestCase
import av
import cv2
import numpy
from aiortc import AudioStreamTrack, VideoFrame, VideoStreamTrack
from aiortc.contrib.media import (MediaBlackhole, MediaPlayer, MediaRecorder,
video_frame_from_avframe,
video_frame_from_bgr, video_frame_from_gray,
video_frame_to_bgr)
from .utils import run
def create_audio(path, channels=1, sample_rate=8000, sample_width=2):
writer = wave.open(path, 'wb')
writer.setnchannels(channels)
writer.setframerate(sample_rate)
writer.setsampwidth(sample_width)
writer.writeframes(b'\x00' * sample_rate * sample_width * channels)
writer.close()
def create_video(path, width=640, height=480, fps=20, duration=1):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(path, fourcc, fps, (width, height))
frames = duration * fps
for i in range(frames):
s = i * 256 // frames
pixel = (s, 256 - s, (128 - 2 * s) % 256)
image = numpy.full((height, width, 3), pixel, numpy.uint8)
out.write(image)
out.release()
class MediaBlackholeTest(TestCase):
def test_audio(self):
recorder = MediaBlackhole()
recorder.addTrack(AudioStreamTrack())
recorder.start()
run(asyncio.sleep(2))
recorder.stop()
def test_audio_and_video(self):
recorder = MediaBlackhole()
recorder.addTrack(AudioStreamTrack())
recorder.start()
run(asyncio.sleep(2))
recorder.stop()
def test_video(self):
recorder = MediaBlackhole()
recorder.addTrack(VideoStreamTrack())
recorder.start()
run(asyncio.sleep(2))
recorder.stop()
class MediaPlayerTest(TestCase):
def setUp(self):
self.audio_path = os.path.join(os.path.dirname(__file__), 'test.wav')
create_audio(self.audio_path)
self.video_path = os.path.join(os.path.dirname(__file__), 'test.avi')
create_video(self.video_path)
def tearDown(self):
os.unlink(self.audio_path)
os.unlink(self.video_path)
def test_audio_file_8kHz(self):
player = MediaPlayer(path=self.audio_path)
# read all frames
player.start()
for i in range(49):
frame = run(player.audio.recv())
self.assertEqual(frame.channels, 1)
self.assertEqual(len(frame.data), 1920)
self.assertEqual(frame.sample_rate, 48000)
self.assertEqual(frame.sample_width, 2)
player.stop()
def test_audio_file_48kHz(self):
create_audio(self.audio_path, sample_rate=48000)
player = MediaPlayer(path=self.audio_path)
# read all frames
player.start()
for i in range(50):
frame = run(player.audio.recv())
self.assertEqual(frame.channels, 1)
self.assertEqual(len(frame.data), 1920)
self.assertEqual(frame.sample_rate, 48000)
self.assertEqual(frame.sample_width, 2)
player.stop()
def test_video_file(self):
player = MediaPlayer(path=self.video_path)
# read all frames
player.start()
for i in range(20):
frame = run(player.video.recv())
self.assertEqual(len(frame.data), 460800)
self.assertEqual(frame.width, 640)
self.assertEqual(frame.height, 480)
player.stop()
class MediaRecorderTest(TestCase):
def test_audio_mp3(self):
recorder = MediaRecorder(path='foo.mp3')
recorder.addTrack(AudioStreamTrack())
recorder.start()
run(asyncio.sleep(2))
recorder.stop()
def test_audio_wav(self):
recorder = MediaRecorder(path='foo.wav')
recorder.addTrack(AudioStreamTrack())
recorder.start()
run(asyncio.sleep(2))
recorder.stop()
def test_audio_and_video(self):
recorder = MediaRecorder(path='foo.mp4')
recorder.addTrack(AudioStreamTrack())
recorder.addTrack(VideoStreamTrack())
recorder.start()
run(asyncio.sleep(2))
recorder.stop()
def test_video(self):
recorder = MediaRecorder(path='foo.mp4')
recorder.addTrack(VideoStreamTrack())
recorder.start()
run(asyncio.sleep(2))
recorder.stop()
class VideoFrameTest(TestCase):
def test_video_frame_from_bgr(self):
image = numpy.full((480, 640, 3), (0, 0, 0), numpy.uint8)
frame = video_frame_from_bgr(image, timestamp=123)
self.assertEqual(len(frame.data), 460800)
self.assertEqual(frame.width, 640)
self.assertEqual(frame.height, 480)
self.assertEqual(frame.timestamp, 123)
def test_video_frame_from_gray(self):
image =
|
numpy.full((480, 640), 0, numpy.uint8)
|
numpy.full
|
from numpy.core import shape_base
import latte
import numpy as np
import pytest
from latte.functional.disentanglement import sap
class TestSAP:
def test_continuous_below_thresh(self):
z = np.zeros(shape=(16, 8))
z[:, 0] = np.arange(16, dtype=float)
a = np.arange(16, dtype=float)[:, None]
sap_score = sap.sap(z, a)
np.testing.assert_array_almost_equal(sap_score, [1.0])
def test_continuous_above_thresh(self):
z = np.zeros(shape=(16, 2))
z[:, 0] = np.arange(16, dtype=float)
z[:, 1] = [
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
]
a = np.arange(16, dtype=float)[:, None]
sap_score = sap.sap(z, a)
|
np.testing.assert_array_almost_equal(sap_score, [0.988235294])
|
numpy.testing.assert_array_almost_equal
|
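The asserted value in the second test is easy to reproduce if latte's SAP score for continuous attributes is taken as the gap between the best and second-best per-dimension R² (the usual SAP definition). Dimension 0 matches the attribute exactly (R² = 1), while the alternating 0/1 dimension explains only about 1.2% of its variance, so the gap is roughly 0.988235. A quick standalone check under that assumption:

```python
import numpy as np

a = np.arange(16, dtype=float)       # the attribute
z1 = np.tile([0.0, 1.0], 8)          # the alternating latent dimension
r2_second = np.corrcoef(z1, a)[0, 1] ** 2
print(r2_second)                     # ~0.011765
print(1.0 - r2_second)               # ~0.988235, the asserted SAP score
```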
'''
TFTest.py
This file contains unit tests for the TFANN module.
'''
import numpy as np
import tensorflow as tf
from TFANN import ANN, ANNC, ANNR, MLPB, MLPMC, RFSANNR
import traceback
import time
def RunTests(T):
f = True
for i, Ti in enumerate(T):
t1 = time.time()
try:
rv = Ti()
except Exception as e:
traceback.print_exc()
rv = False
t2 = time.time()
ANN.Reset()
f = rv and f
print('{:s} {:3d}:\t{:s}\t({:7.4f}s)'.format('Test ', i + 1, 'PASSED' if rv else 'FAILED', t2 - t1))
if f:
print('All tests passed!')
else:
print('Warning! At least one test failed!')
def T1():
'''
Tests basic functionality of ANNR
'''
A = np.random.rand(32, 4)
Y = np.random.rand(32, 1)
a = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16, name = 'mlpr1')
a.fit(A, Y)
S = a.score(A, Y)
if np.isnan(S):
return False
YH = a.predict(A)
if Y.shape != YH.shape:
return False
return True
def T2():
'''
Tests basic functionality of ANNC
'''
A = np.random.rand(32, 4)
Y = np.array((16 * [1]) + (16 * [0]))
a = ANNC([4], [('F', 4), ('AF', 'tanh'), ('F', 2)], maxIter = 16, name = 'mlpc2')
a.fit(A, Y)
S = a.score(A, Y)
if np.isnan(S):
return False
YH = a.predict(A)
if Y.shape != YH.shape:
return False
return True
def T3():
'''
Tests basic functionality of MLPB
'''
A = np.random.randint(0, 2, size = (32, 5))
Y = np.random.randint(0, 2, size = (32, 2))
a = MLPB([5], [('F', 4), ('AF', 'tanh'), ('F', 2), ('AF', 'tanh'), ('F', 2)], maxIter = 16, name = 'mlpb1')
a.fit(A, Y)
S = a.score(A, Y)
if np.isnan(S):
return False
YH = a.predict(A)
if Y.shape != YH.shape:
return False
return True
def T4():
'''
Tests basic functionality of RFSMLPB
'''
A = np.random.randint(0, 2, size = (32, 6))
Y = np.random.randint(0, 2, size = (32, 4, 6))
a = RFSMLPB([6, 6, 6, 6], maxIter = 12, name = 'rfsmlpb1')
a.fit(A, Y)
S = a.score(A, Y)
if np.isnan(S):
return False
YH = a.predict(A)
if Y.shape != YH.shape:
return False
return True
def T5():
'''
Tests basic functionality of MLPMC
'''
A = np.random.rand(33, 5)
Y = np.tile(['y', 'n', 'm'], 55).reshape(33, 5)
a = MLPMC([5], 5 * [[('F', 4), ('AF', 'tanh'), ('F', 3)]], maxIter = 12, name = 'mlpmc1')
a.fit(A, Y)
S = a.score(A, Y)
if np.isnan(S):
return False
YH = a.predict(A)
if Y.shape != YH.shape:
return False
return True
def T6():
'''
Tests basic functionality of CNNC
'''
A = np.random.rand(32, 9, 9, 3)
Y = np.array((16 * [1]) + (16 * [0]))
ws = [('C', [3, 3, 3, 4], [1, 1, 1, 1]), ('AF', 'relu'), ('P', [1, 4, 4, 1], [1, 2, 2, 1]), ('F', 16), ('AF', 'relu'), ('F', 2)]
a = ANNC([9, 9, 3], ws, maxIter = 12, name = "cnnc1")
a.fit(A, Y)
S = a.score(A, Y)
if np.isnan(S):
return False
YH = a.predict(A)
if Y.shape != YH.shape:
return False
return True
def T7():
'''
Tests basic functionality of CNNC
'''
A =
|
np.random.rand(32, 9, 9, 3)
|
numpy.random.rand
|
import numpy as np
from pyspark import RDD
from pyspark import SparkContext
from pyspark.mllib.linalg.distributed import DistributedMatrix
__all__ = ['ColBlockMatrix', 'makeColBlockMatrix']
def _max_l2_norm_col(block, p):
"""
Find the column with the largest L2 norm in this `block`
and return it as a (norm, column) tuple.
:param block: A tuple of the form (index, np.array) as
in the RDD elements of a ColBlockMatrix.
:param p: An optional sparsity pattern which determines which
rows of this block to consider in computing the column
L2 norms. Format must be valid for numpy advanced array
indexing. If not provided, all rows used.
"""
matrix = np.array(block[1]) # TODO: why is this not already an np.array?
if p is not None:
matrix = matrix[p,:]
norms = np.linalg.norm(matrix, axis=0)
max_index = np.argmax(norms)
return (norms[max_index], matrix[:,max_index])
def _compare_l2_norms(col1, col2):
"""
Compares two tuples as returned by _max_l2_norm_col
returning the one with the larger L2 norm.
"""
return max(col1, col2, key = lambda x: x[0])
def _subtract_outer_from_block(block, colsPerBlock, left, right):
"""
Subtract the appropriate block of the outer product of
`left` and `right` from this `block`
:param block: A tuple of the form (index, np.array) as
in the RDD elements of a ColBlockMatrix.
:param colsPerBlock: The number of columns that we expect each
block to have (may not be true of last block).
:param left: A column vector with dimension equal to the
number of rows of this block.
:param right: A column vector with dimension greater than or equal
to the number of columns of this block.
"""
matrix = np.array(block[1])
first_col = block[0] * colsPerBlock
stop_col = first_col + matrix.shape[1]
sub_right = right[first_col:stop_col]
new_matrix = matrix - np.outer(left, sub_right)
return (block[0], new_matrix)
def _broadcast_np_array(ary):
"""
Take a numpy.array and broadcast it to all workers. If the array's size in
memory is greater than or equal to 2 GB, first split the array into
manageable chunks and return a list of broadcast variables.
:param ary: The numpy.array to broadcast.
"""
gbs = ary.nbytes / 2**30
sc = SparkContext.getOrCreate()
if gbs >= 2: ## Spark can only serialize objects of size less than 2048 MB
nBlocks = np.floor(gbs / 2) + 1 ## add 1 block to ensure below limit
colsPerBlock = int(np.floor(ary.shape[1] / nBlocks))
splits = range(colsPerBlock, ary.shape[1], colsPerBlock)
blocks = np.split(ary, splits, axis=1)
blocks = list(zip(range(len(blocks)), blocks))
return [sc.broadcast(el) for el in blocks]
else:
return sc.broadcast(ary)
def _reassemble_broadcasted_np_array(broadcast):
"""
Extract (and put back together) an np.array that was broadcasted (and split
up) via _broadcast_np_array.
:param broadcast: A single pyspark.Broadcast or list of them containing the
numpy.array.
"""
if isinstance(broadcast, list):
blocks = [b.value for b in broadcast]
blocks = sorted(blocks, key = lambda x: x[0])
blocks = [x[1] for x in blocks]
return np.hstack(blocks)
else:
return broadcast.value
def _unpersist_broadcasted_np_array(broadcast):
"""
Unpersist a single pyspark.Broadcast variable or a list of them.
:param broadcast: A single pyspark.Broadcast or list of them.
"""
if isinstance(broadcast, list):
[b.unpersist() for b in broadcast]
else:
broadcast.unpersist()
return None
def makeColBlockMatrix(matrix, colsPerBlock, cache=True):
"""
Take an in-memory matrix and make a distributed ColBlockMatrix.
:param matrix: The matrix to distribute.
:param colsPerBlock: The number of columns that make up each column block.
"""
sc = SparkContext.getOrCreate()
matrix = np.array(matrix)
splits = range(colsPerBlock, matrix.shape[1], colsPerBlock)
blocks = np.split(matrix, splits, axis=1)
blocks = sc.parallelize(zip(range(len(blocks)), blocks), len(blocks))
if cache:
return ColBlockMatrix(blocks, colsPerBlock).cache()
else:
return ColBlockMatrix(blocks, colsPerBlock)
class ColBlockMatrix(DistributedMatrix):
"""
Represents a distributed block matrix where each block contains
all of the rows and a small number of the columns. Heavily
influenced by pyspark.mllib.linalg.distributed.BlockMatrix.
However, note that there is much less error checking and
validation here.
:param colBlocks: An RDD of column blocks (blockColIndex, sub-matrix)
that form this distributed matrix. The sub-matrices
are numpy.ndarrays and should all have the same number
of rows and colsPerBlock columns.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
"""
def __init__(self, colBlocks, colsPerBlock):
if not isinstance(colBlocks, RDD):
raise TypeError("blocks should be an RDD of sub-matrix column "
"blocks as (int, matrix) tuples, got %s" %
type(colBlocks))
self.__colBlocks = colBlocks
self.__colsPerBlock = colsPerBlock
@property
def colBlocks(self):
"""
The RDD of sub-matrix column blocks.
"""
return self.__colBlocks
@property
def colsPerBlock(self):
"""
Number of columns that make up each column block.
"""
return self.__colsPerBlock
def leftMultiply(self, matrix, broadcast=True):
"""
Multiplies each block of this ColumnBlockMatrix by a numpy.array
on the left. The number of columns of `matrix` must be the same as
the number of rows of this ColumnBlockMatrix.
:param matrix: The numpy.array
:param broadcast: Whether or not to broadcast the numpy.array.
"""
colBlocks = self.colBlocks
if broadcast:
b = _broadcast_np_array(matrix)
newColBlocks = colBlocks.map(lambda x: (x[0], np.matmul(
_reassemble_broadcasted_np_array(b), x[1]))
)
_unpersist_broadcasted_np_array(b)
else:
newColBlocks = colBlocks.map(lambda x: (x[0], np.matmul(matrix, x[1])))
return ColBlockMatrix(newColBlocks, self.colsPerBlock)
def toLocalMatrix(self):
"""
Collect the distributed matrix on the driver as a numpy.array.
"""
colBlocks = sorted(self.colBlocks.collect(), key = lambda x: x[0])
colBlocks = [x[1] for x in colBlocks]
return
|
np.hstack(colBlocks)
|
numpy.hstack
|
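Putting the helpers above together gives a round trip from a local matrix to a distributed left-multiplication and back. A usage sketch, assuming pyspark is available and the names defined in this snippet are in scope:

```python
# Usage sketch for the ColBlockMatrix helpers defined above; assumes the
# functions/classes from this snippet are importable and pyspark is installed.
import numpy as np

M = np.random.rand(4, 10)                                # local 4 x 10 matrix
cbm = makeColBlockMatrix(M, colsPerBlock=3, cache=False)  # blocks of width 3, 3, 3, 1
L = np.random.rand(4, 4)
product = cbm.leftMultiply(L).toLocalMatrix()             # should equal L @ M
np.testing.assert_allclose(product, L @ M)
```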
import numpy as np
import pandas as pd
from pandas import DataFrame
from keras.models import load_model
from sklearn.metrics.pairwise import euclidean_distances
from scipy.spatial import distance
import random
import csv
class Environment():
#'''Approximate Environment model in which the agent operates'''
def __init__(self, grid_file,iBeacon_loc,labeled_data, runtime=5., init_pose=None):
self.init_pose = init_pose
self.grid = np.load(grid_file)
self.runtime = runtime
self.b_loc = np.load(iBeacon_loc)
#Data with 13 BLE values and the encoded columns and rows
self.data = pd.read_csv(labeled_data)
self.dt = 1 / 10.0 # Timestep
self.lower_bounds = np.array([0,0])
self.upper_bounds = np.array([self.grid.shape[0]-1,self.grid.shape[1]-1])
#self.env_model = load_model('model_weights/weights.environment.h5')
self.reset()
def reset(self):
#'''reset or initialize all the environment variables'''
self.time = 0.0
self.pose = np.array([2, 14]) if self.init_pose is None else np.copy(self.init_pose)
self.cols = self.grid.shape[1]
self.rows = self.grid.shape[0]
self.distance = 0
self.BLE_vals = self.calc_dis_BLE (self.pose)#self.calc_BLE (self.pose)
self.done = False
self.last_pose = np.array([2, 14]) if self.init_pose is None else np.copy(self.init_pose)
def deep_inferred_BLE(self,position):
#'''prediction of 13 iBeacon values for a given position based on deep neural network model'''
ph1 = []
ph1.clear()
ph1.append(position[1])
ph1.append(position[0])
for j in range(0,self.b_loc.shape[0]):
ph1.append(3 * distance.euclidean(np.array([position[1],position[0]]), self.b_loc[j]))#Column first!
x = np.array(ph1).reshape(-1,15)
prediction = self.env_model.predict(x)
for i in range(0,prediction.shape[1]):
if (prediction[0,i] < 0.25):
prediction[0,i] = 0
else:
prediction[0,i] = 1 - ((x[0,i+2])/24)
if (prediction[0,i] < 0):
prediction[0,i] = 0
return np.array(prediction)
def inferred_BLE(self,position):
#'''prediction of 13 iBeacon values for a given position based on mathematical model'''
ph2 = []
ph2.clear()
for j in range(0,self.b_loc.shape[0]):
ph2.append(3 * distance.euclidean(np.array([position[1],position[0]]), self.b_loc[j]))#Column first!
array = np.array(ph2)
array = array - array[np.argmin(array)]
min_index = np.argmin(array)
min_val = array[min_index]
array[min_index] = 1000
s_min_index = np.argmin(array)
s_min_val = array[s_min_index]
array[s_min_index] = 1000
if s_min_val > 5.5:
s_min_val = 0
t_min_index = np.argmin(array)
t_min_val = array[t_min_index]
if t_min_val > 5.5:
t_min_val = 0
result = np.zeros((array.shape))
result[min_index] = (1.1/np.exp(min_val*0.1/1))-(0.03*5)
if s_min_val > 0:
result[s_min_index] = (1.1/np.exp(s_min_val*0.1/1))-(0.03*5)
if t_min_val > 0:
result[t_min_index] = (1.1/np.exp(t_min_val*0.1/1))-(0.03*5)
return result
def calc_BLE (self,position):
#'''assign 13 iBeacon values for a given position'''
search = self.data[(self.data['col']==position[1]) & (self.data['row']==position[0])]
search_arr = search.values
if search_arr.shape[0] > 0:
rn = random.randint(0,search_arr.shape[0]-1)
return search_arr[rn,0:13]
else:
return self.inferred_BLE(position)
#return self.deep_inferred_BLE(position)
def calc_dis_BLE (self,position):
#'''calculate distance between a given position and the 13 iBeacon locations '''
ph2 = []
ph2.clear()
for j in range(0,self.b_loc.shape[0]):
ph2.append(3 * distance.euclidean(np.array([position[1],position[0]]), self.b_loc[j]))#Column first!
return np.array(ph2)
def next_timestep(self, direction):
# '''
#if direction == 0: #move east
# position = np.array([self.pose[0],(self.pose[1]+1)])
#elif direction == 1: #move south-east
# position = np.array([self.pose[0]+1,self.pose[1]+1])
#elif direction == 2: #move south
# position = np.array([self.pose[0]+1,self.pose[1]])
#elif direction == 3: #move south-west
# position = np.array([self.pose[0]+1,self.pose[1]-1])
#elif direction == 4: #move west
# position = np.array([self.pose[0],self.pose[1]-1])
#elif direction == 5: #move north-west
# position = np.array([self.pose[0]-1,self.pose[1]-1])
#elif direction == 6: #move north
# position = np.array([self.pose[0]-1,self.pose[1]])
#elif direction == 7: #move north-east
# position = np.array([self.pose[0]-1,self.pose[1]+1])
#else:
# position = self.pose
#'''
#change the position based on a given action (direction)
if direction == 0: #move east
position = np.array([self.pose[0],(self.pose[1]+1)])
elif direction == 1: #move south
position = np.array([self.pose[0]+1,self.pose[1]])
elif direction == 2: #move west
position = np.array([self.pose[0],self.pose[1]-1])
elif direction == 3: #move north
position =
|
np.array([self.pose[0]-1,self.pose[1]])
|
numpy.array
|
import pandas
import numpy
import csv
import sys
import event
event_fname = sys.argv[1]
sort_fname = sys.argv[2]
exp_data =
|
numpy.genfromtxt(event_fname, delimiter=',')
|
numpy.genfromtxt
|
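The completion here is a plain `numpy.genfromtxt` read of the event CSV. Unlike `numpy.loadtxt`, `genfromtxt` tolerates missing fields and fills them with NaN, which a tiny self-contained example (unrelated to the event file above) shows:

```python
import io
import numpy

csv_text = "1,2,3\n4,,6\n"
data = numpy.genfromtxt(io.StringIO(csv_text), delimiter=',')
print(data)
# [[ 1.  2.  3.]
#  [ 4. nan  6.]]
```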
from typing import Optional, Any
import numpy
from pyscf import __config__
from pyscf import lib as pyscf_lib
from pyscf.pbc.lib.kpts_helper import gamma_point
from pyscf.pbc.df import fft as pyscf_fft
from pyscfad import lib
from pyscfad.lib import numpy as jnp
from pyscfad.pbc import tools
from pyscfad.pbc.gto import Cell
def get_pp(mydf, kpts=None, cell=None):
'''Get the periodic pseudopotential nuc-el AO matrix, with G=0 removed.
'''
from pyscf import gto
from pyscf.pbc.gto import pseudo
if cell is None:
cell = mydf.cell
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
mesh = mydf.mesh
SI = cell.get_SI()
Gv = cell.get_Gv(mesh)
vpplocG = pseudo.get_vlocG(cell, Gv)
vpplocG = -jnp.einsum('ij,ij->j', SI, vpplocG)
ngrids = len(vpplocG)
# vpploc evaluated in real-space
vpplocR = tools.ifft(vpplocG, mesh).real
vpp = [0] * len(kpts_lst)
for ao_ks_etc, p0, p1 in mydf.aoR_loop(mydf.grids, kpts_lst, cell=cell):
ao_ks = ao_ks_etc[0]
for k, ao in enumerate(ao_ks):
vpp[k] += jnp.dot(ao.T.conj()*vpplocR[p0:p1], ao)
ao = ao_ks = None
# vppnonloc evaluated in reciprocal space
fakemol = gto.Mole()
fakemol._atm = numpy.zeros((1,gto.ATM_SLOTS), dtype=numpy.int32)
fakemol._bas = numpy.zeros((1,gto.BAS_SLOTS), dtype=numpy.int32)
ptr = gto.PTR_ENV_START
fakemol._env = numpy.zeros(ptr+10)
fakemol._bas[0,gto.NPRIM_OF ] = 1
fakemol._bas[0,gto.NCTR_OF ] = 1
fakemol._bas[0,gto.PTR_EXP ] = ptr+3
fakemol._bas[0,gto.PTR_COEFF] = ptr+4
# buf for SPG_lmi upto l=0..3 and nl=3
buf = numpy.empty((48,ngrids), dtype=numpy.complex128)
def vppnl_by_k(kpt):
Gk = Gv + kpt
G_rad = pyscf_lib.norm(Gk, axis=1)
#aokG = ft_ao.ft_ao(cell, Gv, kpt=kpt) * (1/cell.vol)**.5
# use numerical fft for now
coords = mydf.grids.coords
aoR = cell.pbc_eval_gto('GTOval', coords, kpt=kpt)
assert numpy.prod(mesh) == len(coords) == ngrids
aokG = tools.fftk(aoR.T, mesh, numpy.exp(-1j*
|
numpy.dot(coords, kpt)
|
numpy.dot
|
"""
Testing for dataio.py in mmhelper module
"""
# FILE : test_io.py
# CREATED : 11/11/16 13:04:18
# AUTHOR : <NAME> <<EMAIL>>
# DESCRIPTION : Unittests for IO functions
#
import traceback
import unittest
import os
import tempfile
import numpy as np
import skimage.io as skio
import mmhelper.dataio as mio
class TestLoadData(unittest.TestCase):
"""
Class for testing the loading of data
"""
def setUp(self):
# Create a temporary file with test input data
# NB using high numbers to avoid annoying low contrast image
# warning from skimage.io
self.data = np.array([
[100, 200, 3],
[4, 5, 6],
[7, 8, 9],
[100, 200, 255],
], dtype='uint8')
self.filename = tempfile.mkstemp(".tif")[1]
skio.imsave(self.filename, self.data)
def test_load_data(self):
"""
Tests loading data
"""
np.testing.assert_array_equal(
mio.load_data(self.filename),
self.data)
def tearDown(self):
try:
os.remove(self.filename)
except BaseException:
print("WARNING: UNABLE TO REMOVE TEMPORARY TESTING FILE:")
print(self.filename)
print("DUE TO ERROR:")
traceback.print_exc()
print("MAKE SURE TO MANUALLY REMOVE THE FILE YOURSELF")
print("(OR LET YOUR SYSTEM DEAL WITH IT!)")
class TestSampleData(unittest.TestCase):
"""
Class for testing the loading of sample data
"""
def setUp(self):
self.default_shape = (200, 220)
self.dtype_wells = np.dtype(np.float64)
self.dtype_labs = np.dtype(np.uint16)
def test_load_sample_well_data(self):
"""
Tests the loading of the sample well data
"""
# The data itself is random, so let's just make sure
# we get arrays that look about right
wells, labs = mio.load_sample_well_data()
self.assertEqual(wells.shape, self.default_shape)
self.assertEqual(labs.shape, self.default_shape)
self.assertIs(wells.dtype, self.dtype_wells)
self.assertIs(labs.dtype, self.dtype_labs)
def tearDown(self):
pass
class TestFluoSplit(unittest.TestCase):
"""
Class for testing the splitting of fluorescence data
"""
def setUp(self):
self.default_shape = np.arange(24).reshape(2, 3, 4)
self.brightfield_image = np.arange(12).reshape(1, 3, 4)
self.fluo_image = np.arange(12, 24).reshape(1, 1, 3, 4)
def test(self):
"""
Tests the splitting of fluorescence data
"""
data, fluo_data = mio.split_fluorescence(
self.default_shape, num_fluo=1)
|
np.testing.assert_array_equal(data, self.brightfield_image)
|
numpy.testing.assert_array_equal
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 18:02:18 2021
@author: jonas
This file implements a class that learns to play the game snake with a neural network. The class inherits from the Snake class (that's where the game logic is implemented).
"""
# Import the snake game logic
from snake import Snake, FORWARD, LEFT, RIGHT
# Import Pathlib for reading, writing files
from pathlib import Path
# Import random to do random walks
import random as r
# Store training data as json
import json
# Progress bar
from tqdm import tqdm
# Used for math and neural network
import numpy as np
class NeuralNetwork(Snake):
"""
This class inherits the game logic from the Snake class and tries to learn to play the game snake with a neural network
"""
def __init__(self, *args, **kwargs):
"""
Initialise the class and call the initialisation of the snake game class.
Parameters
----------
*args :
Passed to Snake class.
**kwargs :
Passed to Snake class.
Returns
-------
Instance of the NeuralNetwork class.
"""
# Initialise parent
super().__init__(*args, **kwargs)
# Initialise array. Will hold all game states of one game before they're written to file.
self.game_state_history = []
#
# >>> PREPROCESS GAME STATES
#
def reduce_gameState_dimensions(self, gameState):
"""
This method takes the state of a game as input and uses some math shit to create an array of numbers between 0 and 1. Each number describes one property of the game state (like how far the walls are away in every direction, ...)
TODO: DOCSTRING
Parameters
----------
gameState : TYPE
DESCRIPTION.
Returns
-------
None.
"""
#
# TODO:
# - The facial recognition stuff to determine where the body of the snake is placed (left or right?, forward or backward?, close or far away?)
# - repairing Hamiltonian cycles
# - ...?
# DONE:
# - Relative distances (distance to walls, distance to body (can the body move out of the way in time?), distance to apple )
#
# Get some important stats about the game state
#
# Get the position of the snakes head
SNAKE_HEAD = np.array(gameState["snake_position"][0])
# Get the size of the board
BOARD_SIZE = np.array(gameState["BOARD_SIZE"])
# Get the direction the snake is currently facing
FORWARD_ABSOLUTE = SNAKE_HEAD - np.array(gameState["snake_position"][1])
# Rotate the direction the snake is currently facing by 90° clockwise
# This yields the vector facing to the right of the snake
RIGHT_ABSOLUTE = FORWARD_ABSOLUTE @ np.array([ [0, 1], [-1, 0] ])
# Get the transformation matrix to convert absolute normalised vectors into vectors relative to the snakes travelling direction
# This is done by writing the basis vectors of the new basis columnwise in the matrix.
ABSOLUTE_TO_RELATIVE_DIRECTION= np.array([ FORWARD_ABSOLUTE, RIGHT_ABSOLUTE ]).T
# Get the normalised position of the snake's head
# Normalise with the board size. This way the neural net knows where the walls are. If one component is either 0 or 1, it hit a wall.
snakeHeadX, snakeHeadY = SNAKE_HEAD / BOARD_SIZE
#
# >>> DISTANCE TO WALL OR BODY
#
# Get the distance to the nearest part of the snake body
# If the body is not in the way, use the distance to the wall
# Get the relative and normalised position of the snakes body and view them relative to the direction of travel
relativeSnakeBody = [ ( ( SNAKE_HEAD - np.array(body) ) / BOARD_SIZE ) @ ABSOLUTE_TO_RELATIVE_DIRECTION
for body in gameState["snake_position"] ]
# Make a list of the points on the wall the snake can run into, when walking along a straight line (relative positions).
relativeWallPosition = [ np.array([0, snakeHeadY]),
np.array([snakeHeadX, 0]),
np.array([1, snakeHeadY]),
np.array([snakeHeadX, 1]) ]
# Make a list of all things that the snake might run into (relative positions of the snake body and the walls)
relativeSnakeObstacles = relativeSnakeBody + relativeWallPosition
# Get the distance to the nearest obstacle in the directions FORWARD, RIGHT and LEFT
# Get all distances to the the obstacles in front of the snake
relativeDistanceObstacleForward = [ # Get euclidean distance (Because of relative directions the second component is 0.)
pos[0]
# Loop over all obstacles
for pos in relativeSnakeObstacles
# Use only obstacles that are in the FORWARD direction ([1,0]).
if ( np.linalg.norm(pos) == np.array([1,0]) ).all() ]
# Get the smallest distance in front of the snake
relativeDistanceObstacleForward = min(relativeDistanceObstacleForward)
# Get all distances to the the obstacles to the right of the snake
relativeDistanceObstacleRight = [ # Get euclidean distance (Because of relative directions the first component is 0.)
pos[1]
# Loop over all obstacles
for pos in relativeSnakeObstacles
# Use only obstacles that are in the RIGHT direction ([0,1]).
if ( np.linalg.norm(pos) == np.array([0,1]) ).all() ]
# Get the smallest distance to the right
relativeDistanceObstacleRight = min(relativeDistanceObstacleRight)
# Get all distances to the the obstacles to the right of the snake
relativeDistanceObstacleLeft = [ # Get euclidean distance (Because of relative directions the first component is 0 and the second is always negative.)
-pos[1]
# Loop over all obstacles
for pos in relativeSnakeObstacles
# Use only obstacles that are in the LEFT direction ([0,-1]).
if ( np.linalg.norm(pos) == np.array([0,-1]) ).all() ]
# Get the smallest distance to the left
relativeDistanceObstacleLeft = min(relativeDistanceObstacleLeft)
#
# <<< DISTANCE TO WALL OR BODY DONE
#
# Get the normalised difference vector from the snakes head to the apple. This way the snake knows where the apple is and how far away it is.
absoluteDirectionApple = ( np.array(gameState["apple_position"]) - SNAKE_HEAD ) / BOARD_SIZE
# Transform the absolute vector into a vector relative to the snakes travelling direction.
directionAppleX, directionAppleY = absoluteDirectionApple @ ABSOLUTE_TO_RELATIVE_DIRECTION
# Get the normalised distance from the snake's head to the apple
distanceApple = np.sqrt(directionAppleX**2 + directionAppleY**2)
# Return 1D numpy array to describe the state of the game (with reduced dimensions)
return np.array([ directionAppleX, directionAppleY, distanceApple,
relativeDistanceObstacleForward, relativeDistanceObstacleRight, relativeDistanceObstacleLeft
])
def evaluate_action(self, gameState):
"""
This function takes in the current gameState and the planned action (the action is part of the gameState dictionary) to create a score. This score determines how good the move was.
TODO: DOCSTRING
Parameters
----------
gameState : TYPE
DESCRIPTION.
Returns
-------
None.
"""
# This variable contains the value of this move. The function will change the variable, depending on how good the move is and return the value.
GAMESCORE = 0.1
# TODO: Predict the next gameState by executing the action.
# Introduce first metric of success. Just check if the snake is getting closer to the apple
#
# 1. Get current distance to apple
#
# Get the vector from the snakes head to the apple
directionToApple = np.array(gameState["apple_position"]) - np.array(gameState["snake_position"][0])
# Get the current distance between the snakes head and the apple
currentDistanceToApple = np.sqrt(sum(directionToApple**2))
#
# 2. Get future distance to apple
# Predict the distance from the snakes head to the apple after the action
#
# Get the direction the snake is currently facing
current_direction = np.array(gameState["snake_position"][0]) - np.array(gameState["snake_position"][1])
# CONVERT RELATIVE ACTION (LEFT; RIGHT; FORWARD) TO ABSOLUTE DIRECTIONS (VECTOR)
# Define a rotation angle based on the relative direction (LEFT=-90°, FORWARD=0°, RIGHT=+90°)
rotation_angle = gameState["next_action"]*np.pi/2
# Rotate the current absolute direction with a rotation matrix. Matrix will rotate the current_direction by rotation_angle radians.
absoluteAction = current_direction @ np.array([ [ int(np.cos(rotation_angle)), int(
|
np.sin(rotation_angle)
|
numpy.sin
|
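The completion finishes the 2x2 rotation matrix that converts a relative action (LEFT, FORWARD, RIGHT) into an absolute grid direction. Because the rotation angle is always a multiple of 90 degrees, the cosine and sine entries are exactly -1, 0 or 1, so casting them to int keeps the rotated direction on the integer grid. A small standalone sketch of that idea, using the same row-vector convention as the snippet:

```python
import numpy as np

def rotate_grid_direction(direction, quarter_turns):
    # Same row-vector convention as the snippet: direction @ rotation_matrix.
    angle = quarter_turns * np.pi / 2
    rot = np.array([[int(round(np.cos(angle))), int(round(np.sin(angle)))],
                    [-int(round(np.sin(angle))), int(round(np.cos(angle)))]])
    return np.asarray(direction) @ rot

d = np.array([0, 1])
print(rotate_grid_direction(d, 0))    # [0 1]  (FORWARD keeps the direction)
print(rotate_grid_direction(d, 1))    # [-1 0] (one quarter turn)
print(rotate_grid_direction(d, -1))   # [1 0]  (quarter turn the other way)
print(rotate_grid_direction(rotate_grid_direction(d, 1), -1))  # back to [0 1]
```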
"""
.. module:: sampler
:synopsis: Generic sampler
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <>
This module defines one key function, :func:`run`, that distributes the work to
the desired actual sampler (Metropolis Hastings, MultiNest, or PolyChord so far).
It also defines a series of helper functions that aim to be generically used by
all different sampler methods:
* :func:`get_covariance_matrix`
* :func:`read_args_from_chain`
* :func:`read_args_from_bestfit`
* :func:`accept_step`
* :func:`compute_lkl`
"""
from __future__ import print_function
import numpy as np
import sys
import warnings
import io_mp
from io_mp import dictitems,dictvalues,dictkeys
import os
import scipy.linalg as la
import scipy.optimize as op
def run(cosmo, data, command_line):
"""
Depending on the choice of sampler, dispatch the appropriate information
The :mod:`mcmc` module is used as previously, except the call to
:func:`mcmc.chain`, or :func:`MultiNest.run` is now within
this function, instead of from within :mod:`MontePython`.
In the long term, this function should contain any potential hybrid scheme.
"""
if command_line.method == 'MH':
import mcmc
mcmc.chain(cosmo, data, command_line)
data.out.close()
elif command_line.method == 'NS':
import MultiNest as mn
mn.run(cosmo, data, command_line)
elif command_line.method == 'PC':
import PolyChord as pc
pc.run(cosmo, data, command_line)
elif command_line.method == 'CH':
import cosmo_hammer as hammer
hammer.run(cosmo, data, command_line)
elif command_line.method == 'IS':
import importance_sampling as ims
ims.run(cosmo, data, command_line)
elif command_line.method == 'Der':
import add_derived as der
der.run(cosmo, data, command_line)
elif command_line.method == 'Fisher':
command_line.fisher = True
import mcmc
mcmc.chain(cosmo, data, command_line)
try:
data.out.close()
except AttributeError:
pass
else:
raise io_mp.ConfigurationError(
"Sampling method %s not understood" % command_line.method)
def read_args_from_chain(data, chain):
"""
Pick up the last accepted values from an input chain as a starting point
Function used only when the restart flag is set. It will simply read the
last line of an input chain, using the tail command from the extended
:class:`io_mp.File` class.
.. warning::
That method was not tested since the adding of derived parameters. The
method :func:`read_args_from_bestfit` is the preferred one.
.. warning::
This method works because of the particular presentation of the chain,
and the use of tabbings (not spaces). Please keep this in mind if you
are having difficulties
Parameters
----------
chain : str
Name of the input chain provided with the command line.
"""
chain_file = io_mp.File(chain, 'r')
parameter_names = data.get_mcmc_parameters(['varying'])
commented_line = 0
success = 0
# Test if last chain entry contains a step or a commented line
while not success:
if chain_file.tail(1)[0].decode(encoding='utf-8').split('\t')[0] == '#':
commented_line += 1
else:
success += 1
if commented_line == 1000:
raise io_mp.ConfigurationError('Error loading chains files. '
'Last 1000 entries of a chain are commented')
i = 1
for elem in parameter_names:
#data.mcmc_parameters[elem]['last_accepted'] = float(
# chain_file.tail(1)[0].split('\t')[i])
data.mcmc_parameters[elem]['last_accepted'] = float(
chain_file.tail(commented_line+1)[commented_line].decode(encoding='utf-8').split('\t')[i])
i += 1
def read_args_from_bestfit(data, bestfit):
"""
Deduce the starting point either from the input file, or from a best fit
file.
Parameters
----------
bestfit : str
Name of the bestfit file from the command line.
"""
parameter_names = data.get_mcmc_parameters(['varying'])
bestfit_file = open(bestfit, 'r')
for line in bestfit_file:
if line.find('#') != -1:
bestfit_names = line.strip('#').replace(' ', '').\
replace('\n', '').split(',')
bestfit_values = np.zeros(len(bestfit_names), 'float64')
else:
line = line.split()
for i in range(len(line)):
bestfit_values[i] = line[i]
print()
print('\nStarting point for rescaled parameters:')
for elem in parameter_names:
if elem in bestfit_names:
data.mcmc_parameters[elem]['last_accepted'] = \
bestfit_values[bestfit_names.index(elem)] / \
data.mcmc_parameters[elem]['scale']
print('from best-fit file : ', elem, ' = ')
print(bestfit_values[bestfit_names.index(elem)] / \
data.mcmc_parameters[elem]['scale'])
else:
data.mcmc_parameters[elem]['last_accepted'] = \
data.mcmc_parameters[elem]['initial'][0]
print('from input file : ', elem, ' = ')
print(data.mcmc_parameters[elem]['initial'][0])
def get_covariance_matrix(cosmo, data, command_line):
"""
Compute the covariance matrix, from an input file or from an existing
matrix.
Reordering of the names and scaling take place here, in a series of
potentially hard to read methods. For the sake of clarity, and to avoid
confusion, the code will, by default, print out a succession of 4
covariance matrices at the beginning of the run, if starting from an
existing one. This way, you can check that the parameters are set
properly.
.. note::
The set of parameters from the run need not be the exact same
set of parameters from the existing covariance matrix (not even the
ordering). Missing parameters from the existing covariance matrix will
use the sigma given as an input.
"""
# Setting numpy options in terms of precision (useful when writing to files
# or displaying a result, but does not affect the precision of the
# computation).
np.set_printoptions(precision=2, linewidth=150)
parameter_names = data.get_mcmc_parameters(['varying'])
# Define quiet setting if not previously defined
try:
command_line.quiet
except:
command_line.quiet = False
# if the user provides a .covmat file or if user asks to compute a fisher matrix
if command_line.cov is not None:
cov = open('{0}'.format(command_line.cov), 'r')
i = 0
for line in cov:
if line.find('#') != -1:
# Extract the names from the first line
covnames = line.strip('#').replace(' ', '').\
replace('\n', '').split(',')
# Initialize the matrices
matrix = np.zeros((len(covnames), len(covnames)), 'float64')
rot = np.zeros((len(covnames), len(covnames)))
else:
line = line.split()
for j in range(len(line)):
matrix[i][j] = np.array(line[j], 'float64')
i += 1
# First print out
if not command_line.silent and not command_line.quiet:
print('\nInput covariance matrix:')
print(covnames)
print(matrix)
# Deal with all the problematic cases.
# First, adjust the scales between stored parameters and the ones used
# in mcmc
scales = []
for elem in covnames:
if elem in parameter_names:
scales.append(data.mcmc_parameters[elem]['scale'])
else:
scales.append(1)
scales = np.diag(scales)
# Compute the inverse matrix, and assert that the computation was
# precise enough, by comparing the product to the identity matrix.
invscales = np.linalg.inv(scales)
np.testing.assert_array_almost_equal(
np.dot(scales, invscales), np.eye(np.shape(scales)[0]),
decimal=5)
# Apply the newly computed scales to the input matrix
matrix = np.dot(invscales.T, np.dot(matrix, invscales))
# Second print out, after having applied the scale factors
if not command_line.silent and not command_line.quiet:
print('\nFirst treatment (scaling)')
print(covnames)
print(matrix)
# Rotate matrix for the parameters to be well ordered, even if some
# names are missing or some are in extra.
# First, store the parameter names in temp_names that also appear in
# the covariance matrix, in the right ordering for the code (might be
different from the input matrix)
temp_names = [elem for elem in parameter_names if elem in covnames]
# If parameter_names contains less things than covnames, we will do a
# small trick. Create a second temporary array, temp_names_2, that will
# have the same dimension as covnames, and containing:
# - the elements of temp_names, in the order of parameter_names (h
# index)
# - an empty string '' for the remaining unused parameters
temp_names_2 = []
h = 0
not_in = [elem for elem in covnames if elem not in temp_names]
for k in range(len(covnames)):
if covnames[k] not in not_in:
temp_names_2.append(temp_names[h])
h += 1
else:
temp_names_2.append('')
# Create the rotation matrix, that will put the covariance matrix in
# the right order, and also assign zeros to the unused parameters from
# the input. These empty columns will be removed in the next step.
for k in range(len(covnames)):
for h in range(len(covnames)):
try:
if covnames[k] == temp_names_2[h]:
rot[h][k] = 1.
else:
rot[h][k] = 0.
except IndexError:
# The IndexError exception means that we are dealing with
# an unused parameter. By enforcing the corresponding
# rotation matrix element to 0, the resulting matrix will
# still have the same size as the original, but with zeros
# on the unused lines.
rot[h][k] = 0.
matrix = np.dot(rot, np.dot(matrix, np.transpose(rot)))
# Third print out
if not command_line.silent and not command_line.quiet:
print('\nSecond treatment (partial reordering and cleaning)')
print(temp_names_2)
print(matrix)
# Final step, creating a temporary matrix, filled with 1, that will
# eventually contain the result.
matrix_temp = np.ones((len(parameter_names),
len(parameter_names)), 'float64')
indices_final = np.zeros(len(parameter_names))
indices_initial = np.zeros(len(covnames))
# Remove names that are in parameter names but not in covnames, and
# set to zero the corresponding columns of the final result.
for k in range(len(parameter_names)):
if parameter_names[k] in covnames:
indices_final[k] = 1
for zeros in np.where(indices_final == 0)[0]:
matrix_temp[zeros, :] = 0
matrix_temp[:, zeros] = 0
# Remove names that are in covnames but not in param_names
for h in range(len(covnames)):
if covnames[h] in parameter_names:
indices_initial[h] = 1
# There, put a placeholder number (the largest representable float, so it
# cannot clash with a real entry) in the initial matrix, so that the
# next step only copies the interesting part of the input to the final
# matrix.
max_value = np.finfo(np.float64).max
for zeros in np.where(indices_initial == 0)[0]:
matrix[zeros, :] = [max_value for _ in range(
len(matrix[zeros, :]))]
matrix[:, zeros] = [max_value for _ in range(
len(matrix[:, zeros]))]
# Now put in the temporary matrix, where the 1s were, the interesting
# quantities from the input (the ones that are not equal to the placeholder).
matrix_temp[matrix_temp == 1] = matrix[matrix != max_value]
matrix = np.copy(matrix_temp)
# on all other lines, that contain 0, just use sigma^2
for zeros in np.where(indices_final == 0)[0]:
matrix[zeros, zeros] = np.array(
data.mcmc_parameters[parameter_names[zeros]]['initial'][3],
'float64')**2
# else, take sigmas^2.
else:
matrix = np.identity(len(parameter_names), 'float64')
for index, elem in enumerate(parameter_names):
matrix[index][index] = np.array(
data.mcmc_parameters[elem]['initial'][3], 'float64')**2
# Final print out, the actually used covariance matrix
if not command_line.silent and not command_line.quiet:
print('\nDeduced starting covariance matrix:\n')
print(parameter_names)
print(matrix)
#inverse, and diagonalization
eigv, eigV = np.linalg.eig(np.linalg.inv(matrix))
#if command_line.start_from_fisher:
# command_line.fisher = True
#if command_line.fisher:
# eigv, eigV, matrix = get_fisher_matrix(cosmo, data, command_line, matrix)
return eigv, eigV, matrix
def get_minimum(cosmo, data, command_line, covmat):
if not command_line.silent:
warnings.warn("Minimization implementation is being tested")
# Create the center dictionary, which will hold the center point
# information
center = {}
parameter_names = data.get_mcmc_parameters(['varying'])
if not command_line.bf:
for elem in parameter_names:
center[elem] = data.mcmc_parameters[elem]['initial'][0]
else:
read_args_from_bestfit(data, command_line.bf)
for elem in parameter_names:
center[elem] = data.mcmc_parameters[elem]['last_accepted']
stepsizes = np.zeros(len(parameter_names), 'float64')
parameters = np.zeros(len(parameter_names), 'float64')
bounds = np.zeros([len(parameter_names),2], 'float64')
cons = ()
for index, elem in enumerate(parameter_names):
parameters[index] = center[elem]
stepsizes[index] = 0.1*covmat[index,index]**0.5
if data.mcmc_parameters[elem]['initial'][1] == None:
bounds[index,0] = center[elem] - 1.*covmat[index,index]**0.5
else:
bounds[index,0] = data.mcmc_parameters[elem]['initial'][1]
if data.mcmc_parameters[elem]['initial'][2] == None:
bounds[index,1] = center[elem] + 1.*covmat[index,index]**0.5
else:
bounds[index,1] = data.mcmc_parameters[elem]['initial'][2]
cons += ({'type': 'ineq', 'fun': lambda x: x[index] - bounds[index,0]},
{'type': 'ineq', 'fun': lambda x: bounds[index,1] - x[index]},)
print('bounds on ',elem,' : ',bounds[index,0],bounds[index,1])
#FK: use list-comprehension so that the parameter values are distinguishable from step to step
print('parameters: ',[param for param in parameters])
print('stepsizes: ',stepsizes[0])
print('bounds: ',bounds)
#minimum, chi2 = op.fmin_cg(chi2_eff,
# Use unconstrained Polak & Ribiere conjugate gradient algorithm
# CosmoMC uses a constrained (Fletcher & Reeves) version of this
#xopt, fopt, func_calls, grad_calls, warnflags, allvecs = op.fmin_cg(chi2_eff,
# parameters,
# #fprime = gradient_chi2_eff,
# epsilon = stepsizes,
# args = (cosmo,data),#bounds),
# full_output = True,
# disp = True,
# retall = True)
# Use constrained Newton conjugate gradient algorithm
#x, nfeval, rc = op.fmin_tnc(chi2_eff,
# parameters,
# #fprime = gradient_chi2_eff,
# args = (cosmo,data),
# approx_grad = True,
# bounds = bounds,
# epsilon = stepsizes,
# disp = 5)
#result = op.minimize(chi2_eff,
# parameters,
# args = (cosmo,data),
# method='COBYLA',
# #method='SLSQP',
# constraints=cons,
# #bounds=bounds,
# tol=0.000001,
# options = {'disp': True,
# 'rhobeg': stepsizes})
# #'eps': stepsizes})
# For HST with 1 param the best is TNC with 'eps':stepsizes, bounds, tol, although bounds make it slower (but avoids hitting unphysical region)
# For forecasts or Planck lite SLSQP with tol=0.00001 works well, but does not work for full Planck TTTEEE highl
result = op.minimize(chi2_eff,
parameters,
args = (cosmo,data),
#method='trust-region-exact',
#method='BFGS',
#method='TNC',
#method='L-BFGS-B',
method='SLSQP',
#options={'eps':stepsizes},
#constraints=cons,
bounds=bounds,
tol=command_line.minimize_tol)
#options = {'disp': True})
#'initial_tr_radius': stepsizes,
#'max_tr_radius': stepsizes})
#result = op.differential_evolution(chi2_eff,
# bounds,
# args = (cosmo,data))
print('Final output of minimize')
for index,elem in enumerate(parameter_names):
print(elem, 'new:', result.x[index], ', old:', parameters[index])
#FK: return also min chi^2:
return result.x, result.fun
def chi2_eff(params, cosmo, data, bounds=False):
parameter_names = data.get_mcmc_parameters(['varying'])
for index, elem in enumerate(parameter_names):
#print(elem,params[index])
data.mcmc_parameters[elem]['current'] = params[index]
if not type(bounds) == type(False):
if (params[index] < bounds[index,0]) or (params[index] > bounds[index,1]):
chi2 = 1e30
print(elem+' exceeds bounds with value %f and bounds %f < x < %f' %(params[index],bounds[index,0],bounds[index,1]))
return chi2
# Update current parameters to the new parameters, only taking steps as requested
data.update_cosmo_arguments()
# Compute loglike value for the new parameters
chi2 = -2.*compute_lkl(cosmo, data)
#FK: use list-comprehension so that the parameter values are distinguishable from step to step
print('In minimization: ',chi2,' at ',[param for param in params])
return chi2
def gradient_chi2_eff(params, cosmo, data, bounds=False):
parameter_names = data.get_mcmc_parameters(['varying'])
for index, elem in enumerate(parameter_names):
data.mcmc_parameters[elem]['current'] = params[index]
# Update current parameters to the new parameters, only taking steps as requested
data.update_cosmo_arguments()
# Compute loglike value for the new parameters
chi2 = -2.*compute_lkl(cosmo, data)
# Initialise the gradient field
gradient = np.zeros(len(parameter_names), 'float64')
for index, elem in enumerate(parameter_names):
dx = 0.01*params[index]
#
data.mcmc_parameters[elem]['current'] += dx
data.update_cosmo_arguments()
chi2_plus = -2.*compute_lkl(cosmo, data)
#
data.mcmc_parameters[elem]['current'] -= 2.*dx
data.update_cosmo_arguments()
chi2_minus = -2.*compute_lkl(cosmo, data)
#
gradient[index] = (chi2_plus - chi2_minus)/2./dx
#
data.mcmc_parameters[elem]['current'] += dx
return gradient
def get_fisher_matrix(cosmo, data, command_line, inv_fisher_matrix, minimum=0):
# Fisher matrix method by <NAME>
# Contributions from <NAME>, <NAME>
# We will work out the fisher matrix for all the parameters and
# write it and its inverse to a file
# Pass input to data structure
data.fisher_step_it = int(command_line.fisher_step_it)
data.fisher_delta = command_line.fisher_delta
data.fisher_tol = command_line.fisher_tol
data.fisher_sym_lkl = command_line.fisher_sym_lkl
# Whether to use symmetric step. Symmetric step is recommended, as it makes the
# computation faster and in most cases is sufficient.
data.use_symmetric_step = True
if command_line.fisher_asymmetric:
data.use_symmetric_step = False
if not command_line.silent:
warnings.warn("Fisher implementation is being tested")
# Create the center dictionary, which will hold the center point information
center = {}
parameter_names = data.get_mcmc_parameters(['varying'])
if not type(minimum) == int:
for index, elem in enumerate(parameter_names):
center[elem] = minimum[index]
elif not command_line.bf:
for elem in parameter_names:
center[elem] = data.mcmc_parameters[elem]['initial'][0]
else:
read_args_from_bestfit(data, command_line.bf)
for elem in parameter_names:
center[elem] = data.mcmc_parameters[elem]['last_accepted']
scales = np.zeros((len(parameter_names)))
invscales = np.zeros((len(parameter_names)))
for index, elem in enumerate(parameter_names):
data.mcmc_parameters[elem]['current'] = center[elem]
scales[index] = data.mcmc_parameters[elem]['scale']
invscales[index] = 1./data.mcmc_parameters[elem]['scale']
# Load stepsize from input covmat or covmat generated from param file
# JL TODO: check this, and try another scheme to be sure that index and elem refer to the same params in the same order
# here the stepsizes are for the scaled parameter (e.g. 100*omega_b)
stepsize = np.zeros([len(parameter_names),3])
for index in range(len(parameter_names)):
stepsize[index,0] = -(inv_fisher_matrix[index][index])**0.5
stepsize[index,1] = (inv_fisher_matrix[index][index])**0.5
# Adjust stepsize in case step exceeds boundary
stepsize = adjust_fisher_bounds(data,command_line,center,stepsize)
fisher_iteration = 0
fisher_status = 0
while fisher_iteration < data.fisher_step_it and not fisher_status:
fisher_iteration += 1
# Compute the Fisher matrix and the gradient array at the center point.
print("Compute Fisher [iteration %d/%d] with following stepsizes for scaled parameters:" % (fisher_iteration,data.fisher_step_it))
for index in range(len(parameter_names)):
print("%s : diagonal element = %e" % (parameter_names[index],inv_fisher_matrix[index,index]))
# For rotating the step array, not implemented
step_matrix = np.identity(len(parameter_names), dtype='float64')
# Compute fisher matrix
fisher_matrix, gradient = compute_fisher(data, command_line, cosmo, center, stepsize, step_matrix)
# If we want to rotate back to the cosmological parameter basis
if not command_line.silent:
print("Fisher matrix computed [iteration %d/%d]" % (fisher_iteration,data.fisher_step_it))
# Compute inverse of the fisher matrix, catch LinAlgError exception
try:
inv_fisher_matrix =
|
np.linalg.inv(fisher_matrix)
|
numpy.linalg.inv
|
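Earlier in this sample, `get_covariance_matrix` rescales the input matrix with `np.dot(invscales.T, np.dot(matrix, invscales))` before reordering it. That is the usual covariance transformation rule: if the stored matrix describes scaled parameters S p with diagonal S, the matrix for the unscaled parameters is S^-1 C S^-1. A toy numerical check of that identity (not part of MontePython itself):

```python
import numpy as np

# Toy check of the scaling step used in get_covariance_matrix above.
scales = np.diag([100.0, 1.0])          # e.g. a parameter stored as 100*omega_b
invscales = np.linalg.inv(scales)

samples = np.random.rand(5000, 2)        # unscaled parameter samples
cov_scaled = np.cov(samples @ scales, rowvar=False)

cov_unscaled = np.dot(invscales.T, np.dot(cov_scaled, invscales))
np.testing.assert_allclose(cov_unscaled, np.cov(samples, rowvar=False))
```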
import unittest.mock as mock
import pytest
import numpy as np
from smqtk_descriptors.interfaces.image_descriptor_generator import ImageDescriptorGenerator
from xaitk_saliency import GenerateImageSimilarityBlackboxSaliency
from xaitk_saliency.exceptions import ShapeMismatchError, UnexpectedDimensionsError
def test_generate_checks_success() -> None:
"""
Test successful passage through the wrapper method.
"""
m_impl = mock.Mock(spec=GenerateImageSimilarityBlackboxSaliency)
# test images
test_ref_image = np.empty((50, 50))
test_query_image = np.empty((256, 256, 7))
# mock _generate result with matching height and width to query image
exp_res = np.ones((256, 256))
m_impl._generate.return_value = exp_res
# mock image descriptor generator
m_desc_generator = mock.Mock(spec=ImageDescriptorGenerator)
res = GenerateImageSimilarityBlackboxSaliency.generate(
m_impl,
test_ref_image,
test_query_image,
m_desc_generator
)
m_impl._generate.assert_called_once_with(
test_ref_image,
test_query_image,
m_desc_generator
)
assert np.array_equal(res, exp_res)
def test_generate_checks_image_shape() -> None:
"""
Test that the input images conform to our assumption.
"""
m_impl = mock.Mock(spec=GenerateImageSimilarityBlackboxSaliency)
m_desc_generator = mock.Mock(spec=ImageDescriptorGenerator)
# bad ref image
test_ref_image = np.empty((256,))
test_query_image =
|
np.empty((256, 400, 3))
|
numpy.empty
|
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Run test:
>> pytest -q python/test/test_table_properties.py
"""
import operator
import os
from pandas import DataFrame
from pycylon import CylonContext
from pycylon import Table
from pycylon.io import CSVReadOptions
from pycylon.io import read_csv
import pycylon as cn
import pyarrow as pa
import numpy as np
import pandas as pd
'''
Run test:
>> pytest -q python/test/test_table_properties.py
'''
'''
Test Cases for Comparison Operations
-------------------------------------
Comparison on DataFrame
------------------------
Case 1: Compare based on a column (each value in a column is checked against the comparison
value)
Case 2: Compare based on the whole table (all values in the table is checked against the
comparison value)
Comparison Operators
--------------------
1. == -> operator.__eq__
2. != -> operator.__ne__
3. < -> operator.__lt__
4. > -> operator.__gt__
5. <= -> operator.__le__
6. >= -> operator.__ge__
'''
def test_properties():
ctx: CylonContext = CylonContext(config=None, distributed=False)
table1_path = '/tmp/user_usage_tm_1.csv'
table2_path = '/tmp/user_usage_tm_2.csv'
assert os.path.exists(table1_path) and os.path.exists(table2_path)
csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30)
tb: Table = read_csv(ctx, table1_path, csv_read_options)
pdf = tb.to_pandas()
def generate_filter_and_result(op, column: str, input, comparison_value):
if column:
filter = op(input[column], comparison_value)
return filter, input[filter]
else:
filter = op(input, comparison_value)
return filter, input[filter]
def do_comparison_on_pdf_and_tb(tb_filter: Table, tb_result: Table, pdf_filter: DataFrame,
pdf_result: DataFrame, is_full_table):
if is_full_table:
assert tb_filter.to_pandas().values.tolist() == pdf_filter.values.tolist()
assert tb_result.to_pandas().fillna(0).values.tolist() == pdf_result.fillna(
0).values.tolist()
else:
assert tb_filter.to_pandas().values.flatten().tolist() == pdf_filter.values.tolist()
assert tb_result.to_pandas().values.tolist() == pdf_result.values.tolist()
ops = [operator.__eq__, operator.__ne__, operator.__lt__, operator.__gt__, operator.__le__,
operator.__ge__]
value = 519.12
columns = ['monthly_mb', None]
is_full_table_flags = [False, True]
for column, is_full_table in zip(columns, is_full_table_flags):
for op in ops:
tb_filter_all, tb_filter_all_result = generate_filter_and_result(op, column, tb, value)
pdf_filter_all, pdf_filter_all_result = generate_filter_and_result(op, column, pdf,
value)
do_comparison_on_pdf_and_tb(tb_filter=tb_filter_all, tb_result=tb_filter_all_result,
pdf_filter=pdf_filter_all, pdf_result=pdf_filter_all_result,
is_full_table=is_full_table)
def test_string_type_filters():
ctx: CylonContext = CylonContext()
tb: Table = Table.from_pydict(ctx, {"A": ['a', 'b', 'c', 'ab', 'a'],
"B": [1, 2, 3, 4, 5]})
pdf = tb.to_pandas()
def generate_filter_and_result(op, column: str, input, comparison_value):
if column:
filter = op(input[column], comparison_value)
return filter, input[filter]
else:
filter = op(input, comparison_value)
return filter, input[filter]
def do_comparison_on_pdf_and_tb(tb_filter: Table, tb_result: Table, pdf_filter: DataFrame,
pdf_result: DataFrame, is_full_table):
if is_full_table:
assert tb_filter.to_pandas().values.tolist() == pdf_filter.values.tolist()
assert tb_result.to_pandas().fillna(0).values.tolist() == pdf_result.fillna(
0).values.tolist()
else:
assert tb_filter.to_pandas().values.flatten().tolist() == pdf_filter.values.tolist()
assert tb_result.to_pandas().values.tolist() == pdf_result.values.tolist()
ops = [operator.__eq__, operator.__ne__, operator.__lt__, operator.__gt__, operator.__le__,
operator.__ge__]
value = "a"
columns = ["A"]
is_full_table_flags = [False]
for column, is_full_table in zip(columns, is_full_table_flags):
for op in ops:
tb_filter_all, tb_filter_all_result = generate_filter_and_result(op, column, tb, value)
pdf_filter_all, pdf_filter_all_result = generate_filter_and_result(op, column, pdf,
value)
do_comparison_on_pdf_and_tb(tb_filter=tb_filter_all, tb_result=tb_filter_all_result,
pdf_filter=pdf_filter_all, pdf_result=pdf_filter_all_result,
is_full_table=is_full_table)
def test_filter():
ctx: CylonContext = CylonContext(config=None, distributed=False)
table1_path = '/tmp/user_usage_tm_1.csv'
table2_path = '/tmp/user_usage_tm_2.csv'
assert os.path.exists(table1_path) and os.path.exists(table2_path)
csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30)
tb: Table = read_csv(ctx, table1_path, csv_read_options)
column_name = 'monthly_mb'
ops = [operator.__or__, operator.__and__]
or_limits = [600, 5000, 15000]
and_limits = [0, 5000, 1000]
comp_op_or = [operator.__gt__, operator.__le__, operator.__gt__]
comp_op_and = [operator.__gt__, operator.__le__, operator.__gt__]
limits = [or_limits, and_limits]
comp_ops = [comp_op_or, comp_op_and]
for op, limit, comp_op in zip(ops, limits, comp_ops):
print("Op ", op)
tb_cond_1 = comp_op[0](tb[column_name], limit[0])
tb_cond_2 = comp_op[1](tb[column_name], limit[1])
tb_cond_3 = comp_op[2](tb[column_name], limit[2])
res_1_op = op(tb_cond_1, tb_cond_2)
res_2_op = op(res_1_op, tb_cond_3)
res_1 = tb[res_1_op]
res_2 = tb[res_2_op]
column_pdf_1 = res_1[column_name].to_pandas()
column_pdf_2 = res_2[column_name].to_pandas()
column_1 = column_pdf_1[column_name]
for col in column_1:
assert op(comp_op[0](col, limit[0]), comp_op[1](col, limit[1]))
column_2 = column_pdf_2[column_name]
for col in column_2:
assert op(op(comp_op[0](col, limit[0]), comp_op[1](col, limit[1])),
comp_op[2](col, limit[2]))
def test_drop():
ctx: CylonContext = CylonContext(config=None, distributed=False)
table1_path = '/tmp/user_usage_tm_1.csv'
assert os.path.exists(table1_path)
csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30)
tb: Table = read_csv(ctx, table1_path, csv_read_options)
drop_column = 'outgoing_sms_per_month'
tb_new = tb.drop([drop_column])
assert drop_column not in tb_new.column_names
def test_fillna():
col_names = ['col1', 'col2']
data_list_numeric = [[1, 2, None, 4, 5], [6, 7, 8, 9, None]]
fill_value = 0
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb_numeric = Table.from_list(ctx, col_names, data_list_numeric)
cn_tb_numeric_fillna = cn_tb_numeric.fillna(fill_value)
data_list = list(cn_tb_numeric_fillna.to_pydict().values())
for col in data_list:
assert None not in col
assert fill_value in col
def test_where():
col_names = ['col1', 'col2']
data_list_numeric = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb = Table.from_list(ctx, col_names, data_list_numeric)
cn_tb_where = cn_tb.where(cn_tb > 3)
print(cn_tb_where)
cn_tb_where_with_other = cn_tb.where(cn_tb > 3, 100)
print(cn_tb_where_with_other)
print(cn_tb > 3)
def test_rename():
col_names = ['col1', 'col2', 'col3', 'col4']
data_list_numeric = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15],
[16, 17, 18, 19, 20]]
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb = Table.from_list(ctx, col_names, data_list_numeric)
index_values = [0, 1, 2, 3, 4]
cn_tb.set_index(index_values)
prev_col_names = cn_tb.column_names
# with dictionary
columns = {'col1': 'col-1', 'col3': 'col-3'}
cn_tb.rename(columns)
new_col_names = cn_tb.column_names
for key in columns:
value = columns[key]
assert prev_col_names.index(key) == new_col_names.index(value)
# with list
cn_tb_list = Table.from_list(ctx, col_names, data_list_numeric)
cn_tb_list.set_index(index_values)
prev_col_names = cn_tb_list.column_names
new_column_names = ['col-1', 'col-2', 'col-3', 'col-4']
cn_tb_list.rename(new_column_names)
assert cn_tb_list.column_names == new_column_names
def test_invert():
# Bool Invert Test
data_list = [[False, True, False, True, True], [False, True, False, True, True]]
pdf = DataFrame(data_list)
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb = Table.from_pandas(ctx, pdf)
invert_cn_tb = ~cn_tb
invert_pdf = ~pdf
assert invert_cn_tb.to_pandas().values.tolist() == invert_pdf.values.tolist()
def test_neg():
npr =
|
np.array([[1, 2, 3, 4, 5, -6, -7], [-1, -2, -3, -4, -5, 6, 7]])
|
numpy.array
|
"""
This part of the code is the DQN brain of the agent.
All decisions are made here.
Using Tensorflow to build the neural network.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.7.3
"""
import numpy as np
import pandas as pd
import tensorflow as tf
np.random.seed(1)
tf.random.set_seed(1)
# Deep Q Network off-policy
class DeepQNetwork:
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=300,
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
'''
n_actions: 4, number of available actions (up, down, left, right)
n_features: 2, number of state features (x, y)
'''
print('n_actions:', n_actions)
print('n_features:', n_features)
print('learning_rate:', learning_rate)
print('reward_decay:', reward_decay)
print('e_greedy:', e_greedy)
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
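# epsilon is the probability of taking the greedy action: with an increment
# schedule it starts at 0 so it can be annealed upward during learning,
# otherwise epsilon_max is used from the start.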
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
# total learning step
self.learn_step_counter = 0
# initialize zero memory [s, a, r, s_]
self.memory =
|
np.zeros((self.memory_size, n_features * 2 + 2))
|
numpy.zeros
|
"""
Some simple utilities for plotting our transform codes
"""
import bisect
import numpy as np
from scipy.stats import kurtosis
from skimage.measure import compare_ssim as ssim
from matplotlib import pyplot as plt
from matplotlib.image import NonUniformImage
import matplotlib.gridspec as gridspec
from matplotlib.ticker import FormatStrFormatter, StrMethodFormatter
tab10colors = plt.get_cmap('tab10').colors
blue_red = plt.get_cmap('RdBu_r')
def compute_pSNR(target, reconstruction, manual_sig_mag=None):
"""
Parameters
----------
target : ndarray
A target tensor
reconstruction : ndarray
The reconstruction of target (with error)
manual_sig_mag : float, optional
The minimum and maximum value for a family of signals from which target
is drawn. Sets a reference for how big the mean squared error actually
is. If not provided, estimated from the target signal itself.
"""
if manual_sig_mag is None:
signal_magnitude = np.max(target) - np.min(target)
else:
signal_magnitude = manual_sig_mag
MSE = np.mean(np.square(target - reconstruction))
if MSE != 0:
return 10. * np.log10((signal_magnitude**2)/MSE)
else:
return np.inf
def compute_ssim(target, reconstruction, manual_sig_mag=None):
"""
Note: may need to be updated for skimage > 0.15
Parameters
----------
target : ndarray
A target tensor
reconstruction : ndarray
The reconstruction of target (with error)
manual_sig_mag : float, optional
The minimum and maximum value for a family of signals from which target
is drawn. Sets a reference for how big the error actually
is. If not provided, estimated from the target signal itself.
"""
if manual_sig_mag is None:
signal_magnitude = np.max(target) - np.min(target)
else:
signal_magnitude = manual_sig_mag
# these are the settings that the scikit-image documentation indicates
# match the ones chosen in the original SSIM paper (Wang 2004, I believe).
return ssim(target, reconstruction, data_range=signal_magnitude,
gaussian_weights=True, sigma=1.5, use_sample_covariance=False)
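# Hedged usage sketch: the random arrays below are illustrative stand-ins for
# real image data (they are not part of the original module) and simply show
# how the two quality metrics above are called.
if __name__ == '__main__':
    _demo_target = np.random.rand(32, 32)
    _demo_recon = _demo_target + 0.01 * np.random.randn(32, 32)
    print('pSNR [dB]:', compute_pSNR(_demo_target, _demo_recon))
    print('SSIM:', compute_ssim(_demo_target, _demo_recon))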
def standardize_for_imshow(image):
"""
A luminance standardization for pyplot's imshow
This just allows me to specify a simple, transparent standard for what white
and black correspond to in pyplot's imshow method. Likely could be
accomplished by the colors.Normalize method, but I want to make this as
explicit as possible. If the image is nonnegative, we divide by the scalar
that makes the largest value 1.0. If the image is nonpositive, we
divide by the scalar that makes the smallest value -1.0, and then add 1, so
that this value is 0.0, pitch black. If the image has both positive and
negative values, we divide and shift so that 0.0 in the original image gets
mapped to 0.5 for imshow and the largest absolute value gets mapped to
either 0.0 or 1.0 depending on whether it was positive or negative.
Parameters
----------
image : ndarray
The image to be standardized, can be (h, w) or (h, w, c). All operations
are scalar operations applied to every color channel. Note that this may
change the hue of color images.
Returns
-------
standardized_image : ndarray
An RGB image in the range [0.0, 1.0], ready to be showed by imshow.
raw_val_mapping : tuple(float, float, float)
Indicates what raw values got mapped to 0.0, 0.5, and 1.0, respectively
"""
max_val = np.max(image)
min_val = np.min(image)
if max_val == min_val: # constant value
standardized_image = 0.5 * np.ones(image.shape)
if max_val > 0:
raw_val_mapping = [0.0, max_val, 2*max_val]
elif max_val < 0:
raw_val_mapping = [2*max_val, max_val, 0.0]
else:
raw_val_mapping = [-1.0, 0.0, 1.0]
else:
if min_val >= 0:
standardized_image = image / max_val
raw_val_mapping = [0.0, 0.5*max_val, max_val]
elif max_val <= 0:
standardized_image = (image / -min_val) + 1.0
raw_val_mapping = [min_val, 0.5*min_val, 0.0]
else:
# straddles 0.0. We want to map 0.0 to 0.5 in the displayed image
skew_toward_max = np.argmax([abs(min_val), abs(max_val)])
if skew_toward_max:
normalizer = (2 * max_val)
raw_val_mapping = [-max_val, 0.0, max_val]
else:
normalizer = (2 * np.abs(min_val))
raw_val_mapping = [min_val, 0.0, -min_val]
standardized_image = (image / normalizer) + 0.5
return standardized_image, raw_val_mapping
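# Hedged usage sketch: a small signed random image (not part of the original
# module) exercises the straddles-zero branch of standardize_for_imshow.
if __name__ == '__main__':
    _demo_img = np.random.randn(8, 8)  # typically has positive and negative values
    _demo_std, _demo_mapping = standardize_for_imshow(_demo_img)
    print(_demo_std.min(), _demo_std.max())  # stays within [0.0, 1.0]
    print(_demo_mapping)  # raw values that map to 0.0, 0.5 and 1.0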
def display_dictionary(dictionary, renormalize=False, reshaping=None,
groupings=None, label_inds=False, highlighting=None,
plot_title=""):
"""
Plot each of the dictionary elements side by side
Parameters
----------
dictionary : ndarray(float32, size=(s, n) OR (s, c, kh, kw))
If the size of dictionary is (s, n), this is a 'fully-connected'
dictionary where each basis element has the same dimensionality as the
image it is trying to represent. n is the size of the image and s the
number of basis functions. If the size of dictionary is (s, c, kh, kw),
this is a 'convolutional' dictionary where each basis element is
(potentially much) smaller than the image it is trying to represent. c
is the number of channels that in the input space, kh is the dictionary
kernel height, and kw is the dictionary kernel width.
renormalize : bool, optional
If True, display basis functions on their own color scale, using
standardize_for_imshow() to put values in the range [0, 1]. Will
accentuate the largest-magnitude values in the dictionary element.
Default False.
reshaping : tuple(int, int), optional
Should only be specified for a fully-connected dictionary (where
dictionary.ndim==2). The dimension of each patch before vectorization
to size n. We reshape the dictionary elements based on this. Default None
label_inds : bool, optional
Superimpose the index into the dictionary of each element in the displayed
grid--helps with quick lookup/selection of individual dictionary
elements. Default False.
highlighting : dictionary, optional
This is used to re-sort and color code the dictionary elements according
to scalar weights. Has two keys:
'weights' : ndarray(float, size=(s,))
The weights for each dictionary element
'color_range': tuple(float, float)
Values less than or equal to highlighting['color_range'][0] get mapped
to dark blue, and values greater than or equal to
highlighting['color_range'][1] get mapped to dark red.
'reorder' : bool
Use the highlighting weights to reorder the dictionary.
Default None.
plot_title : str, optional
The title of the plot. Default ""
Returns
-------
dictionary_figs : list
A list containing pyplot figures. Can be saved separately, or whatever
from the calling function
"""
if groupings is None:
t_ims, raw_val_mapping, lab_w_pix_coords = get_dictionary_tile_imgs(
dictionary, reshape_to_these_dims=reshaping, indv_renorm=renormalize,
highlights=highlighting)
else:
t_ims = get_dictionary_tile_imgs_arr_by_group(dictionary, groupings,
indv_renorm=renormalize, reshape_to_these_dims=reshaping,
highlights=highlighting)
fig_refs = []
for fig_idx in range(len(t_ims)):
fig = plt.figure(figsize=(10, 10))
ax = plt.axes([0.075, 0.075, 0.85, 0.85]) # [left, bottom, width, height]
fig.suptitle(plot_title + ', fig {} of {}'.format(
fig_idx+1, len(t_ims)), fontsize=20)
im_ref = ax.imshow(t_ims[fig_idx], interpolation='None')
if label_inds and groupings is None:
for lab_and_coord in lab_w_pix_coords[fig_idx]:
ax.text(lab_and_coord[2], lab_and_coord[1], lab_and_coord[0],
fontsize=6, verticalalignment='top',
horizontalalignment='left', color='w')
ax.axis('off')
if not renormalize and groupings is None:
# add a luminance colorbar. Because there isn't good rgb colorbar
# support in pyplot I hack this by adding another image subplot
cbar_ax = plt.axes([0.945, 0.4, 0.01, 0.2])
gradient = np.linspace(1.0, 0.0, 256)[:, None]
cbar_ax.imshow(gradient, cmap='gray')
cbar_ax.set_aspect('auto')
cbar_ax.yaxis.tick_right()
cbar_ax.xaxis.set_ticks([])
cbar_ax.yaxis.set_ticks([255, 128, 0])
cbar_ax.yaxis.set_ticklabels(['{:.2f}'.format(x)
for x in raw_val_mapping], fontsize=8)
fig_refs.append(fig)
return fig_refs
def get_dictionary_tile_imgs(dictionary, indv_renorm=False,
reshape_to_these_dims=None, highlights=None,
one_d_tile=False):
"""
Arranges a dictionary into a series of imgs that tile elements side by side
We do some simple rescaling to provide a standard interpretation of white and
black pixels in the image (and everything in-between).
Parameters
----------
dictionary : ndarray(float32, size=(s, n) OR (s, c, kh, kw))
See docstring of display_dictionary above.
indv_renorm : bool, optional
See docstring of display_dictionary above.
reshape_to_these_dims : tuple(int, int), optional
See docstring of display_dictionary above.
highlights : dictionary, optional
See docstring of display_dictionary above.
one_d_tile : bool, optional
Force the tile to just be side-by-side 1d. Otherwise we find the nearest
square-shaped tile. This option is useful for subgroup plotting, where
we might want to have small subgroups displayed 1d. Default False.
Returns
-------
tile_imgs : list(ndarray)
Each element is an image to be displayed by imshow
imshow_to_raw_mapping : tuple(float, float, float)
Returned by standardize_for_imshow(), this indicates which values in the
original dictionary got mapped to 0.0, 0.5, and 1.0, respectively, in
the displayed image.
label_with_pix_coords : list(list(tuple))
Indicates the pixel location in the array/image of the upper left hand
corner of each dictionary element. Outer list index specifies which of
the individual arrays/images (usually just one), inner list index
specifies index of the element within the image, and the tuple has
three components: (*index into full dictionary*, *vert_pos*, *horz_pos*)
"""
if indv_renorm:
imshow_to_raw_mapping = None # each dict element put on their own scale
else:
dictionary, imshow_to_raw_mapping = standardize_for_imshow(dictionary)
if highlights is not None:
if highlights['reorder']:
# reorder by weight
new_ordering = np.argsort(highlights['weights'])[::-1]
dictionary = dictionary[new_ordering]
highlights['weights'] = highlights['weights'][new_ordering]
weight_colors = (
(highlights['weights'] - highlights['color_range'][0]) /
(highlights['color_range'][1] - highlights['color_range'][0]))
if highlights['color_range'][0] >= 0 or highlights['color_range'][1] <= 0:
print('Warning: Red and Blue will not correspond',
'to positive and negative weights')
if one_d_tile:
max_de_per_img = 80
else:
max_de_per_img = 80*80 # max 80x80 {d}ictionary {e}lements per tile img
assert np.sqrt(max_de_per_img) % 1 == 0, 'please pick a square number'
num_de = dictionary.shape[0]
num_tile_imgs = int(np.ceil(num_de / max_de_per_img))
# this determines how many dictionary elements are arranged in a square
# grid within any given img
if num_tile_imgs > 1:
de_per_img = max_de_per_img
else:
if one_d_tile:
de_per_img = num_de
else:
squares = [x**2 for x in range(1, int(
|
np.sqrt(max_de_per_img)
|
numpy.sqrt
|
"""
Integration testing module for the tunable coupler element
and the line-specific signal generation chain.
"""
# System imports
import copy
import pickle
import pytest
import numpy as np
# Main C3 objects
from c3.c3objs import Quantity as Qty
from c3.parametermap import ParameterMap as PMap
from c3.experiment import Experiment as Exp
from c3.system.model import Model as Mdl
from c3.generator.generator import Generator as Gnr
# Building blocks
import c3.generator.devices as devices
import c3.signal.gates as gates
import c3.system.chip as chip
import c3.signal.pulse as pulse
# Libs and helpers
import c3.libraries.hamiltonians as hamiltonians
import c3.libraries.envelopes as envelopes
lindblad = False
dressed = True
q1_lvls = 3
q2_lvls = 3
tc_lvls = 3
freq_q1 = 6.189e9
freq_q2 = 5.089e9
freq_tc = 8.1e9
phi_0_tc = 10
fluxpoint = phi_0_tc * 0.23
d = 0.36
anhar_q1 = -286e6
anhar_q2 = -310e6
anhar_TC = -235e6
coupling_strength_q1tc = 142e6
coupling_strength_q2tc = 116e6
coupling_strength_q1q2 = 0 * 1e6
t1_q1 = 23e-6
t1_q2 = 70e-6
t1_tc = 15e-6
t2star_q1 = 27e-6
t2star_q2 = 50e-6
t2star_tc = 7e-6
init_temp = 0.06
v2hz = 1e9
t_final = 10e-9 # Time for single qubit gates
sim_res = 100e9
awg_res = 2.4e9
cphase_time = 100e-9 # Two qubit gate
flux_freq = 829 * 1e6
offset = 0 * 1e6
fluxamp = 0.1 * phi_0_tc
t_down = cphase_time - 5e-9
xy_angle = 0.3590456701578104
framechange_q1 = 0.725 * np.pi
framechange_q2 = 1.221 * np.pi
# ### MAKE MODEL
q1 = chip.Qubit(
name="Q1",
desc="Qubit 1",
freq=Qty(value=freq_q1, min_val=5.0e9, max_val=8.0e9, unit="Hz 2pi"),
anhar=Qty(value=anhar_q1, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
hilbert_dim=q1_lvls,
t1=Qty(value=t1_q1, min_val=5e-6, max_val=90e-6, unit="s"),
t2star=Qty(value=t2star_q1, min_val=10e-6, max_val=90e-6, unit="s"),
temp=Qty(value=init_temp, min_val=0.0, max_val=0.12, unit="K"),
)
q2 = chip.Qubit(
name="Q2",
desc="Qubit 2",
freq=Qty(value=freq_q2, min_val=5.0e9, max_val=8.0e9, unit="Hz 2pi"),
anhar=Qty(value=anhar_q2, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
hilbert_dim=q2_lvls,
t1=Qty(value=t1_q2, min_val=5e-6, max_val=90e-6, unit="s"),
t2star=Qty(value=t2star_q2, min_val=10e-6, max_val=90e-6, unit="s"),
temp=Qty(value=init_temp, min_val=0.0, max_val=0.12, unit="K"),
)
tc_at = chip.Transmon(
name="TC",
desc="Tunable Coupler",
freq=Qty(value=freq_tc, min_val=0.0e9, max_val=10.0e9, unit="Hz 2pi"),
phi=Qty(
value=fluxpoint, min_val=-5.0 * phi_0_tc, max_val=5.0 * phi_0_tc, unit="Wb"
),
phi_0=Qty(
value=phi_0_tc, min_val=phi_0_tc * 0.9, max_val=phi_0_tc * 1.1, unit="Wb"
),
d=Qty(value=d, min_val=d * 0.9, max_val=d * 1.1, unit=""),
hilbert_dim=tc_lvls,
anhar=Qty(value=anhar_TC, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
t1=Qty(value=t1_tc, min_val=1e-6, max_val=90e-6, unit="s"),
t2star=Qty(value=t2star_tc, min_val=1e-6, max_val=90e-6, unit="s"),
temp=Qty(value=init_temp, min_val=0.0, max_val=0.12, unit="K"),
)
q1tc = chip.Coupling(
name="Q1-TC",
desc="Coupling qubit 1 to tunable coupler",
connected=["Q1", "TC"],
strength=Qty(
value=coupling_strength_q1tc, min_val=0 * 1e4, max_val=200e6, unit="Hz 2pi"
),
hamiltonian_func=hamiltonians.int_XX,
)
q2tc = chip.Coupling(
name="Q2-TC",
desc="Coupling qubit 2 to t×unable coupler",
connected=["Q2", "TC"],
strength=Qty(
value=coupling_strength_q2tc, min_val=0 * 1e4, max_val=200e6, unit="Hz 2pi"
),
hamiltonian_func=hamiltonians.int_XX,
)
q1q2 = chip.Coupling(
name="Q1-Q2",
desc="Coupling qubit 1 to qubit 2",
connected=["Q1", "Q2"],
strength=Qty(
value=coupling_strength_q1q2, min_val=0 * 1e4, max_val=200e6, unit="Hz 2pi"
),
hamiltonian_func=hamiltonians.int_XX,
)
drive_q1 = chip.Drive(
name="Q1",
desc="Drive on Q1",
connected=["Q1"],
hamiltonian_func=hamiltonians.x_drive,
)
drive_q2 = chip.Drive(
name="Q2",
desc="Drive on Q2",
connected=["Q2"],
hamiltonian_func=hamiltonians.x_drive,
)
flux = chip.Drive(
name="TC",
desc="Flux drive/control on tunable couler",
connected=["TC"],
hamiltonian_func=hamiltonians.z_drive,
)
phys_components = [tc_at, q1, q2]
line_components = [flux, q1tc, q2tc, q1q2, drive_q1, drive_q2]
model = Mdl(phys_components, line_components, [])
model.set_lindbladian(lindblad)
model.set_dressed(dressed)
# ### MAKE GENERATOR
lo = devices.LO(name="lo", resolution=sim_res)
awg = devices.AWG(name="awg", resolution=awg_res)
dig_to_an = devices.DigitalToAnalog(name="dac", resolution=sim_res)
resp = devices.Response(
name="resp",
rise_time=Qty(value=0.3e-9, min_val=0.05e-9, max_val=0.6e-9, unit="s"),
resolution=sim_res,
)
mixer = devices.Mixer(name="mixer")
fluxbias = devices.FluxTuning(
name="fluxbias",
phi_0=Qty(
value=phi_0_tc, min_val=0.9 * phi_0_tc, max_val=1.1 * phi_0_tc, unit="Wb"
),
phi=Qty(
value=fluxpoint, min_val=-1.0 * phi_0_tc, max_val=1.0 * phi_0_tc, unit="Wb"
),
omega_0=Qty(
value=freq_tc, min_val=0.9 * freq_tc, max_val=1.1 * freq_tc, unit="Hz 2pi"
),
d=Qty(value=d, min_val=d * 0.9, max_val=d * 1.1, unit=""),
anhar=Qty(value=anhar_q1, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
)
v_to_hz = devices.VoltsToHertz(
name="v2hz",
V_to_Hz=Qty(value=v2hz, min_val=0.9 * v2hz, max_val=1.1 * v2hz, unit="Hz 2pi/V"),
)
device_dict = {
dev.name: dev for dev in [lo, awg, mixer, dig_to_an, resp, v_to_hz, fluxbias]
}
generator = Gnr(
devices=device_dict,
chains={
"TC": ["lo", "awg", "dac", "resp", "mixer", "fluxbias"],
"Q1": ["lo", "awg", "dac", "resp", "mixer", "v2hz"],
"Q2": ["lo", "awg", "dac", "resp", "mixer", "v2hz"],
},
)
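# Each chain lists, in order, the devices that synthesize the signal for one
# drive line: LO and AWG outputs go through the DAC, the response filter and the
# mixer, ending in the flux-bias transfer function on the coupler line and the
# volts-to-hertz conversion on the qubit drive lines.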
# ### MAKE GATESET
nodrive_env = pulse.Envelope(name="no_drive", params={}, shape=envelopes.no_drive)
carrier_parameters = {
"freq": Qty(value=freq_q1, min_val=0e9, max_val=10e9, unit="Hz 2pi"),
"framechange": Qty(value=0.0, min_val=-3 * np.pi, max_val=5 * np.pi, unit="rad"),
}
carr_q1 = pulse.Carrier(
name="carrier", desc="Frequency of the local oscillator", params=carrier_parameters
)
carr_q2 = copy.deepcopy(carr_q1)
carr_q2.params["freq"].set_value(freq_q2)
carr_tc = copy.deepcopy(carr_q1)
carr_tc.params["freq"].set_value(flux_freq)
flux_params = {
"amp": Qty(value=fluxamp, min_val=0.0, max_val=5, unit="V"),
"t_final": Qty(
value=cphase_time,
min_val=0.5 * cphase_time,
max_val=1.5 * cphase_time,
unit="s",
),
"t_up": Qty(
value=5 * 1e-9, min_val=0.0 * cphase_time, max_val=0.5 * cphase_time, unit="s"
),
"t_down": Qty(
value=t_down, min_val=0.5 * cphase_time, max_val=1.0 * cphase_time, unit="s"
),
"risefall": Qty(
value=5 * 1e-9, min_val=0.0 * cphase_time, max_val=1.0 * cphase_time, unit="s"
),
"freq_offset": Qty(
value=offset, min_val=-50 * 1e6, max_val=50 * 1e6, unit="Hz 2pi"
),
"xy_angle": Qty(
value=xy_angle, min_val=-0.5 * np.pi, max_val=2.5 * np.pi, unit="rad"
),
}
flux_env = pulse.Envelope(
name="flux",
desc="Flux bias for tunable coupler",
params=flux_params,
shape=envelopes.flattop,
)
CRZp = gates.Instruction(
name="Id:CRZp", t_start=0.0, t_end=cphase_time, channels=["Q1", "Q2", "TC"]
)
CRZp.add_component(flux_env, "TC")
CRZp.add_component(carr_tc, "TC")
CRZp.add_component(nodrive_env, "Q1")
CRZp.add_component(carr_q1, "Q1")
CRZp.comps["Q1"]["carrier"].params["framechange"].set_value(framechange_q1)
CRZp.add_component(nodrive_env, "Q2")
CRZp.add_component(carr_q2, "Q2")
CRZp.comps["Q2"]["carrier"].params["framechange"].set_value(framechange_q2)
# ### MAKE EXPERIMENT
parameter_map = PMap(instructions=[CRZp], model=model, generator=generator)
exp = Exp(pmap=parameter_map)
##### TESTING ######
with open("test/tunable_coupler_data.pickle", "rb") as filename:
data = pickle.load(filename)
@pytest.mark.integration
def test_coupler_frequency() -> None:
coupler_01 = np.abs(
np.abs(model.eigenframe[model.state_labels.index((0, 0, 0))])
- np.abs(model.eigenframe[model.state_labels.index((1, 0, 0))])
)
rel_diff = np.abs((coupler_01 - data["coupler_01"]) / data["coupler_01"])
assert rel_diff < 1e-12
@pytest.mark.integration
def test_coupler_anharmonicity() -> None:
coupler_12 = np.abs(
np.abs(model.eigenframe[model.state_labels.index((1, 0, 0))])
- np.abs(model.eigenframe[model.state_labels.index((2, 0, 0))])
)
rel_diff = np.abs((coupler_12 - data["coupler_12"]) / data["coupler_12"])
assert rel_diff < 1e-12
@pytest.mark.integration
def test_energy_levels() -> None:
model = parameter_map.model
parameter_map.set_parameters([0.0], [[["TC-phi"]]])
model.update_model()
labels = [
model.state_labels[indx]
for indx in np.argsort(np.abs(model.eigenframe) / 2 / np.pi / 1e9)
]
product_basis = []
dressed_basis = []
ordered_basis = []
transforms = []
steps = 101
min_ratio = -0.10
max_ratio = 0.7
flux_ratios = np.linspace(min_ratio, max_ratio, steps, endpoint=True)
for flux_ratio in flux_ratios:
flux_bias = flux_ratio * phi_0_tc
parameter_map.set_parameters(
[flux_bias, 0.0, 0.0, 0.0],
[
[["TC-phi"]],
[["Q1-TC-strength"]],
[["Q2-TC-strength"]],
[["Q1-Q2-strength"]],
],
)
model.update_model()
product_basis.append(
[
model.eigenframe[model.state_labels.index(label)] / 2 / np.pi / 1e9
for label in labels
]
)
parameter_map.set_parameters(
[coupling_strength_q1tc, coupling_strength_q2tc, coupling_strength_q1q2],
[[["Q1-TC-strength"]], [["Q2-TC-strength"]], [["Q1-Q2-strength"]]],
)
model.update_model()
ordered_basis.append(
[
model.eigenframe[model.state_labels.index(label)] / 2 / np.pi / 1e9
for label in labels
]
)
parameter_map.model.update_dressed(ordered=False)
dressed_basis.append(
[
model.eigenframe[model.state_labels.index(label)] / 2 / np.pi / 1e9
for label in model.state_labels
]
)
transforms.append(
np.array(
[
np.real(model.transform[model.state_labels.index(label)])
for label in labels
]
)
)
parameter_map.set_parameters([fluxpoint], [[["TC-phi"]]])
model.update_model()
dressed_basis = np.array(dressed_basis)
ordered_basis = np.array(ordered_basis)
product_basis = np.array(product_basis)
print((np.abs(product_basis - data["product_basis"]) < 1).all())
assert (np.abs(product_basis - data["product_basis"]) < 1).all()
assert (np.abs(ordered_basis - data["ordered_basis"]) < 1).all()
# Dressed basis might change at avoided crossings depending on how we
# decide to deal with it. At the moment, no state with the largest probability is chosen.
assert (np.abs(dressed_basis - data["dressed_basis"]) < 1).all()
@pytest.mark.slow
@pytest.mark.integration
def test_dynamics_CPHASE() -> None:
# Dynamics (closed system)
exp.set_opt_gates(["Id:CRZp"])
exp.get_gates()
dUs = []
for indx in range(len(exp.dUs["Id:CRZp"])):
if indx % 50 == 0:
dUs.append(exp.dUs["Id:CRZp"][indx].numpy())
dUs =
|
np.array(dUs)
|
numpy.array
|
import numpy as np
from scipy.optimize import minimize, LinearConstraint, fsolve
from scipy.optimize import NonlinearConstraint
import scipy
import time
SCIPY_VERSION = scipy.__version__
def array_func(func):
"""
Decorator to handle various array arguments
"""
def unwrap(*args, **kwargs):
self = args[0]
conc = args[1]
shape = kwargs.pop("shape", None)
if isinstance(conc, list):
conc = np.array(conc)
if isinstance(shape, list):
shape = np.array(shape)
if np.isscalar(conc) and shape is None:
return func(self, conc, **kwargs)
elif isinstance(conc, np.ndarray) and shape is None:
if len(conc.shape) != 1:
raise ValueError("Concentration has to be a 1D array!")
return [func(self, conc[i], **kwargs)
for i in range(conc.shape[0])]
elif np.isscalar(conc) and np.isscalar(shape):
return func(self, conc, shape=[shape, 0.0, 0.0], **kwargs)
elif np.isscalar(conc) and isinstance(shape, np.ndarray):
if len(shape.shape) == 1 and shape.shape[0] == 3:
return func(self, conc, shape=shape, **kwargs)
elif len(shape.shape) == 2 and shape.shape[1] == 3:
return [func(self, conc, shape[i, :], **kwargs)
for i in range(shape.shape[0])]
else:
raise ValueError("When shape is a Numpy array it has to be "
"either of length 3 or of length Nx3! "
"Got: {}".format(shape.shape))
elif isinstance(conc, np.ndarray) and isinstance(shape, np.ndarray):
if conc.shape[0] != shape.shape[0]:
raise ValueError("The number entries in the shape array has to"
" match the number of entries in the conc "
"array!")
if len(shape.shape) == 1:
return [func(self, conc[i], shape=[shape[i], 0.0, 0.0],
**kwargs) for i in range(conc.shape[0])]
elif shape.shape[1] == 3:
return [func(self, conc[i], shape=shape[i, :], **kwargs)
for i in range(conc.shape[0])]
else:
raise ValueError("Dimension of shape argument has to be either"
"Nx3 or 3")
else:
raise ValueError("Concentation and shape arguments has to be "
"floats or arrays!")
return unwrap
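# Hedged usage sketch: _ToyPoly is an illustrative class that is not part of the
# original module; it only shows how the array_func decorator lets a scalar
# implementation transparently accept lists/1D arrays of concentrations.
class _ToyPoly:
    @array_func
    def square(self, conc, shape=None):
        # scalar implementation; the decorator performs the array dispatch
        return conc ** 2

if __name__ == '__main__':
    _toy = _ToyPoly()
    print(_toy.square(2.0))          # scalar in, scalar out: 4.0
    print(_toy.square([1.0, 2.0]))   # list in, list of per-entry results out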
class TwoPhaseLandauPolynomialBase(object):
"""Class for fitting a Landau polynomial to free energy data
In general terms a two phase landau polynomial is:
1. A multidimensional object where there can be be up to
three auxillary fields, but only one free variable
At equillibrium, the auxillary fields are slaved by
the concentration variable
2. The "transition" where auxillary fields changed from one
value to another, is determined by a linear term of the
form A*(x - x_c), where x_c is the transition point of
the free variable.
From the developers side, the idea behind having a base class is that
it allows for different representations of the functional form of the
free variable, different fitting algorithms etc.
:param float c1: Center concentration for the first phase
:param float c2: Center concentration for the second phase
:param np.ndarray init_guess: Initial guess for the parameters
The polynomial fitting is of the form
A*(x - c1)^2 + B*(x-c2)*y^2 + C*y^4 + D*y^6
This array should therefore contain initial guess
for the four parameter A, B, C and D.
:param int conc_order1: Order of the polynomial in the first phase
:param int conc_order2: Order of the polynomial in the second phase
"""
def __init__(self, c1=0.0, c2=1.0, num_dir=3, init_guess=None,
conc_order1=2, conc_order2=2):
self.conc_coeff2 = np.zeros(conc_order2+1)
self.coeff_shape = np.zeros(5)
self.conc_order1 = conc_order1
self.conc_order2 = conc_order2
self.c1 = c1
self.c2 = c2
self.init_guess = init_guess
self.num_dir = num_dir
self.boundary_coeff = None
@array_func
def equil_shape_order(self, conc):
"""Calculate the equillibrium shape concentration.
The equillibrium shape order parameter is determined by finding the
minima of the free energy curve at a given concentration. In case of
multiple order parameters the value returned corresponds to a minima
where all other shape order parameters are zero.
:param float conc: Concentration
"""
C = self.coeff_shape[0]
D = self.coeff_shape[2]
if abs(D) < 1E-8:
n_eq = -0.5*self._eval_phase2(conc)/C
if n_eq < 0.0:
return 0.0
return n_eq
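# For D != 0 the stationarity condition d/d(n^2) [K n^2 + C n^4 + D n^6] = 0,
# with K = _eval_phase2(conc), is a quadratic in n^2 whose physical root is
# n^2 = -C/(3D) + sqrt((C/(3D))^2 - K/(3D)); a negative discriminant or a
# negative root means the disordered solution n = 0 is the minimum.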
delta = (C/(3.0*D))**2 - \
self._eval_phase2(conc)/(3.0*D)
if delta < 0.0:
return 0.0
n_eq = -C/(3.0*D) + np.sqrt(delta)
if n_eq < 0.0:
return 0.0
return np.sqrt(n_eq)
@array_func
def equil_shape_order_derivative(self, conc):
"""Calculate the partial derivative of the equillibrium
shape parameter with respect to the concentration.
NOTE: This return the derivative of the square of of the
order parameter with respect to the concentration.
"""
C = self.coeff_shape[0]
D = self.coeff_shape[2]
delta = (C/(3.0*D))**2 - \
self._eval_phase2(conc)/(3.0*D)
if delta < 0.0:
return 0.0
n_eq = self.equil_shape_order(conc)
if n_eq <= 0.0:
return 0.0
return -0.5*self._deriv_phase2(conc) / \
(3*np.sqrt(delta)*D*2*n_eq)
def _eval_phase2(self, conc):
"""Evaluate the polynomial in phase2.
:param float conc:
"""
return np.polyval(self.conc_coeff2, conc)
def _eval_phase1(self, conc):
"""Evaluate regressor in phase 1."""
raise NotImplementedError("Has to be implemented in child classes")
def _deriv_phase2(self, conc):
"""Evaluate the derivative in the second phase."""
p2der = np.polyder(self.conc_coeff2)
return np.polyval(p2der, conc)
def _deriv_phase1(self, conc):
raise NotImplementedError("Has to be implemented in child classes")
@array_func
def eval_at_equil(self, conc):
"""Evaluate the free energy at equillibrium order.
:param float conc: Concentration
"""
n_eq = self.equil_shape_order(conc)
return self._eval_phase1(conc) + \
self._eval_phase2(conc)*n_eq**2 + \
self.coeff_shape[0]*n_eq**4 + \
self.coeff_shape[2]*n_eq**6
@array_func
def evaluate(self, conc, shape=None):
"""
Evaluate the free energy polynomial
:param float conc: Concentration
:param list shape: List with the shape order parameters.
If None, the shape order parameters are set to their
equilibrium values.
"""
if shape is None:
return self.eval_at_equil(conc)
full_shape = np.zeros(3)
full_shape[:len(shape)] = shape
shape = full_shape
return self._eval_phase1(conc) + \
self._eval_phase2(conc)*np.sum(shape**2) + \
self.coeff_shape[0]*np.sum(shape**4) + \
self.coeff_shape[1]*(shape[0]**2 * shape[1]**2 +
shape[0]**2 * shape[2]**2 +
shape[1]**2 * shape[2]**2) + \
self.coeff_shape[2]*np.sum(shape**6) + \
self.coeff_shape[3]*(shape[0]**4 * (shape[1]**2 + shape[2]**2) +
shape[1]**4 * (shape[0]**2 + shape[2]**2) +
shape[2]**4 * (shape[0]**2 + shape[1]**2)) + \
self.coeff_shape[4]*np.prod(shape**2)
@array_func
def partial_derivative(self, conc, shape=None, var="conc", direction=0):
"""Return the partial derivative with respect to variable."""
allowed_var = ["conc", "shape"]
if var not in allowed_var:
raise ValueError("Variable has to be one of {}".format(allowed_var))
if shape is None:
shape = np.array([np.sqrt(self.equil_shape_order(conc))])
if isinstance(shape, list):
shape = np.array(shape)
try:
_ = shape[0]
except (TypeError, IndexError):
# Shape was a scalar, convert to array
shape = np.array([shape])
full_shape = np.zeros(3)
full_shape[:len(shape)] = shape
shape = full_shape
if var == "conc":
p2_der = np.polyder(self.conc_coeff2)
return self._deriv_phase1(conc) + self._deriv_phase2(conc)*np.sum(shape**2)
elif var == "shape":
d = direction
return 2*self._eval_phase2(conc)*shape[d] + \
4*self.coeff_shape[0]*shape[d]**3 + \
2*self.coeff_shape[1]*shape[d]*(shape[(d+1) % 3] + shape[(d+2) % 3]) + \
6*self.coeff_shape[2]*shape[d]**5 + \
4*self.coeff_shape[3]*shape[d]**3*(shape[(d+1) % 3]**2 + shape[(d+2) % 3]**2) + \
2*self.coeff_shape[3]*shape[d]*(shape[(d+1) % 3]**4 + shape[(d+2) % 3]**4) + \
2*self.coeff_shape[4]*shape[d]*shape[(d+1) % 3]**2 * shape[(d+2) % 3]**2
else:
raise ValueError("Unknown derivative type!")
def _get_slope_parameter(self, C, D, transition_conc):
return C**2/(3*D*(transition_conc - self.c2))
def fit(self, *args, **kwargs):
raise NotImplementedError("Has to be implemented in child classes")
def to_dict(self):
"""Store the required arguments that can be used to
construct poly terms for phase field calculations."""
from itertools import permutations
data = {}
data["terms"] = []
num_terms = len(self.conc_coeff2)
for power, c in enumerate(self.conc_coeff2.tolist()):
for active_shape in range(1, 4):
entry = {
"coeff": c,
"powers": [num_terms-power-1, 0, 0, 0]
}
entry["powers"][active_shape] = 2
data["terms"].append(entry)
power_templates = [
[4, 0, 0],
[2, 2, 0],
[6, 0, 0],
[4, 2, 0],
[2, 2, 2]
]
for i, p_template in enumerate(power_templates):
used_perms = set()
for perm in permutations(p_template):
if perm in used_perms:
continue
entry = {
"coeff": self.coeff_shape[i],
"powers": [0] + list(perm)
}
used_perms.add(perm)
data["terms"].append(entry)
return data
def save_poly_terms(self, fname="pypolyterm.json"):
import json
with open(fname, 'w') as outfile:
json.dump(self.to_dict(), outfile, indent=2)
print("Coefficient stored in {}".format(fname))
def _equil_shape_fixed_conc_and_shape_intermediates(self, conc, shape,
min_type):
"""Return helper quantities for the equillibrium shape."""
K = self._eval_phase2(conc)
K += self.coeff_shape[1]*shape**2
K += self.coeff_shape[3]*shape**4
Q = self.coeff_shape[0] + self.coeff_shape[3]*shape**2
if min_type == "mixed":
Q += 0.5*self.coeff_shape[1]
Q += 0.5*self.coeff_shape[4]*shape**2
D = 3.0*self.coeff_shape[2]
return K, Q, D
@array_func
def equil_shape_fixed_conc_and_shape(self, conc, shape=None,
min_type="pure"):
"""Return the equillibrium shape parameter.
:param float conc: Concentration
:param float shape: Shape parameter
:param str min_type: Type of minimum. If pure, the third
shape parameter is set to zero. If mixed, the two
free shape parameters are required to be the same.
"""
allowed_types = ["pure", "mixed"]
if min_type not in allowed_types:
raise ValueError("min_type has to be one of {}"
"".format(allowed_types))
if shape is None:
raise ValueError("Shape has to be passed!")
shape = shape[0]
K, Q, D = self._equil_shape_fixed_conc_and_shape_intermediates(
conc, shape, min_type)
delta = (Q/D)**2 - K/D
if delta < 0.0:
return 0.0
n_sq = -Q/D +
|
np.sqrt(delta)
|
numpy.sqrt
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************
# @Time : 2018/10/24 18:52
# @Author : <NAME> & <NAME>
# @Lab : nesa.zju.edu.cn
# @File : AttackEval.py
# **************************************
import argparse
import os
import random
import shutil
import sys
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import torch
from PIL import Image, ImageFilter
from skimage.measure import compare_ssim as SSIM
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from Attacks.AttackMethods.AttackUtils import predict
from RawModels.MNISTConv import MNISTConvNet
from RawModels.ResNet import resnet20_cifar
# help function for the Gaussian Blur transformation of images
def gaussian_blur_transform(AdvSample, radius, oriDataset):
if oriDataset.upper() == 'CIFAR10':
assert AdvSample.shape == (3, 32, 32)
sample = np.transpose(np.round(AdvSample * 255), (1, 2, 0))
image = Image.fromarray(np.uint8(sample))
gb_image = image.filter(ImageFilter.GaussianBlur(radius=radius))
gb_image = np.transpose(np.array(gb_image), (2, 0, 1)).astype('float32') / 255.0
return gb_image
if oriDataset.upper() == 'MNIST':
assert AdvSample.shape == (1, 28, 28)
sample = np.transpose(np.round(AdvSample * 255), (1, 2, 0))
# for MNIST, there is no RGB
sample = np.squeeze(sample, axis=2)
image = Image.fromarray(np.uint8(sample))
gb_image = image.filter(ImageFilter.GaussianBlur(radius=radius))
gb_image = np.expand_dims(np.array(gb_image).astype('float32'), axis=0) / 255.0
return gb_image
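# Hedged usage sketch: the random CIFAR10-shaped array is an illustrative
# stand-in for a real adversarial sample and is not part of the original module.
if __name__ == '__main__':
    _demo_adv = np.random.rand(3, 32, 32).astype('float32')
    _demo_blurred = gaussian_blur_transform(_demo_adv, radius=1.0, oriDataset='CIFAR10')
    print(_demo_blurred.shape)  # (3, 32, 32), values rescaled back into [0, 1]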
# help function for the image compression transformation of images
def image_compress_transform(IndexAdv, AdvSample, dir_name, quality, oriDataset):
if oriDataset.upper() == 'CIFAR10':
assert AdvSample.shape == (3, 32, 32)
sample = np.transpose(np.round(AdvSample * 255), (1, 2, 0))
image = Image.fromarray(np.uint8(sample))
saved_adv_image_path = os.path.join(dir_name, '{}th-adv-cifar.png'.format(IndexAdv))
image.save(saved_adv_image_path)
output_IC_path = os.path.join(dir_name, '{}th-IC-adv-cifar.jpg'.format(IndexAdv))
cmd = 'guetzli --quality {} {} {}'.format(quality, saved_adv_image_path, output_IC_path)
assert os.system(cmd) == 0, 'the guetzli tool must be installed first, see https://github.com/google/guetzli'
IC_image = Image.open(output_IC_path).convert('RGB')
IC_image = np.transpose(np.array(IC_image), (2, 0, 1)).astype('float32') / 255.0
return IC_image
if oriDataset.upper() == 'MNIST':
assert AdvSample.shape == (1, 28, 28)
sample = np.transpose(np.round(AdvSample * 255), (1, 2, 0))
sample = np.squeeze(sample, axis=2) # for MNIST, there is no RGB
image = Image.fromarray(np.uint8(sample), mode='L')
saved_adv_image_path = os.path.join(dir_name, '{}th-adv-mnist.png'.format(IndexAdv))
image.save(saved_adv_image_path)
output_IC_path = os.path.join(dir_name, '{}th-IC-adv-mnist.jpg'.format(IndexAdv))
cmd = 'guetzli --quality {} {} {}'.format(quality, saved_adv_image_path, output_IC_path)
assert os.system(cmd) == 0, 'the guetzli tool must be installed first, see https://github.com/google/guetzli'
IC_image = Image.open(output_IC_path).convert('L')
IC_image = np.expand_dims(np.array(IC_image).astype('float32'), axis=0) / 255.0
return IC_image
class AttackEvaluate:
def __init__(self, DataSet='MNIST', AttackName='FGSM', RawModelLocation='../RawModels/', CleanDataLocation='../CleanDatasets/',
AdvExamplesDir='../AdversarialExampleDatasets/', device=torch.device('cpu')):
self.device = device
# check and set the support data set
assert DataSet.upper() in ['MNIST', 'CIFAR10'], "The data set must be MNIST or CIFAR10"
self.dataset = DataSet.upper()
self.color_mode = 'RGB' if self.dataset == 'CIFAR10' else 'L'
# check and set the supported attack name
self.attack_name = AttackName.upper()
supported_un_targeted = ['FGSM', 'RFGSM', 'BIM', 'PGD', 'UMIFGSM', 'DEEPFOOL', 'UAP', 'OM']
supported_targeted = ['LLC', "RLLC", 'ILLC', 'JSMA', 'TMIFGSM', 'BLB', 'CW2', 'EAD']
assert self.attack_name in supported_un_targeted or self.attack_name in supported_targeted, \
"\nCurrently, our implementation support attacks of FGSM, RFGSM, BIM, UMIFGSM, DeepFool, LLC, RLLC, ILLC, TMIFGSM, JSMA, CW2,....\n"
# set the Target (UA or TA) according to the AttackName
if self.attack_name.upper() in supported_un_targeted:
self.Targeted = False
else:
self.Targeted = True
# load the raw model
raw_model_location = '{}{}/model/{}_raw.pt'.format(RawModelLocation, self.dataset, self.dataset)
if self.dataset == 'MNIST':
self.raw_model = MNISTConvNet().to(device)
self.raw_model.load(path=raw_model_location, device=device)
else:
self.raw_model = resnet20_cifar().to(device)
self.raw_model.load(path=raw_model_location, device=device)
# get the clean datasets / true_labels
self.nature_samples = np.load('{}{}/{}_inputs.npy'.format(CleanDataLocation, self.dataset, self.dataset))
self.labels_samples = np.load('{}{}/{}_labels.npy'.format(CleanDataLocation, self.dataset, self.dataset))
# get the targets labels
# prepare the targeted label (least likely label) for LLC RLLC and ILLC
if self.attack_name.upper() in ['LLC', 'RLLC', 'ILLC']:
self.targets_samples = np.load('{}{}/{}_llc.npy'.format(CleanDataLocation, self.dataset, self.dataset))
else:
self.targets_samples = np.load('{}{}/{}_targets.npy'.format(CleanDataLocation, self.dataset, self.dataset))
# get the adversarial examples
self.AdvExamplesDir = AdvExamplesDir + self.attack_name + '/' + self.dataset + '/'
if os.path.exists(self.AdvExamplesDir) is False:
print("the directory of {} is not existing, please check carefully".format(self.AdvExamplesDir))
self.adv_samples = np.load('{}{}_AdvExamples.npy'.format(self.AdvExamplesDir, self.attack_name))
# self.adv_labels = np.load('{}{}_AdvLabels.npy'.format(self.AdvExamplesDir, self.AttackName))
predictions = predict(model=self.raw_model, samples=self.adv_samples, device=self.device).detach().cpu().numpy()
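# convert the raw logits to probabilities; note this naive softmax can overflow
# for large logits (subtracting the per-sample max first would be the stable form)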
def soft_max(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
tmp_soft_max = []
for i in range(len(predictions)):
tmp_soft_max.append(soft_max(predictions[i]))
self.softmax_prediction = np.array(tmp_soft_max)
# help function
def successful(self, adv_softmax_preds, nature_true_preds, targeted_preds, target_flag):
"""
:param adv_softmax_preds: the softmax prediction for the adversarial example
:param nature_true_preds: for the un-targeted attack, it should be the true label for the nature example
:param targeted_preds: for the targeted attack, it should be the specified target label that was selected
:param target_flag: True if it is a targeted attack, False if it is an un-targeted attack
:return:
"""
if target_flag:
if np.argmax(adv_softmax_preds) == np.argmax(targeted_preds):
return True
else:
return False
else:
if
|
np.argmax(adv_softmax_preds)
|
numpy.argmax
|
# L3_drivingPatterns.py
# Team Number:
# Hardware TM:
# Software TM:
# Date:
# Code purpose:
# indicate d1 and d2 distances:
# Import Internal Programs
import L2_speed_control as sc
import L2_inverse_kinematics as inv
# Import External programs
import numpy as np
import time
def task2():
myVelocities =
|
np.array([1, 1])
|
numpy.array
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/640 # meters per pixel in x dimension
bot_left = (190,720)
top_left = (600,445)
top_right = (680, 445)
bot_right = (1120,720)
dest_bot_left = (300,720)
dest_top_left = (300,0)
dest_top_right = (1240-300,0)
dest_bot_right = (1240-300,720)
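# Source (bot_left .. bot_right) and destination (dest_*) corner points,
# presumably used with cv2.getPerspectiveTransform for the birds-eye lane warp.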
class Line():
def __init__(self):
# line detected in last iteration?
self.detected = False
# x value of the last n fits of the line
self.recent_xfitted = []
# average x values of the fitted line over the last n iterations
self.bestx = None
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
# radius of curvature of the line in some units
self.radius_of_curvature = None
# distance in meters of vehicle center from the line
self.line_base_pos = None
# difference in fit coefficients between last and new fits
self.diffs = np.array([0,0,0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
class userdata:
def __init__(self):
self.left_fit = []
self.right_fit = []
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
# Rescale back to 8 bit integer
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Create a copy and apply the threshold
binary_output =
|
np.zeros_like(scaled_sobel)
|
numpy.zeros_like
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for forked classes and functions from `tf.contrib.rnn`."""
import itertools
from absl.testing import parameterized
from magenta.contrib import rnn as contrib_rnn
import numpy as np
import tensorflow.compat.v1 as tf
rnn_cell = tf.nn.rnn_cell
tf.disable_eager_execution()
# pylint:disable=invalid-name
class RNNCellTest(tf.test.TestCase):
def testInputProjectionWrapper(self):
with self.cached_session() as sess:
with tf.variable_scope(
"root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 3])
cell = contrib_rnn.InputProjectionWrapper(
rnn_cell.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([tf.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testAttentionCellWrapperFailures(self):
with self.assertRaisesRegexp(
TypeError, contrib_rnn.ASSERT_LIKE_RNNCELL_ERROR_REGEXP):
contrib_rnn.AttentionCellWrapper(None, 0)
num_units = 8
for state_is_tuple in [False, True]:
with tf.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got 0"):
contrib_rnn.AttentionCellWrapper(
lstm_cell, 0, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got -1"):
contrib_rnn.AttentionCellWrapper(
lstm_cell, -1, state_is_tuple=state_is_tuple)
with tf.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(num_units, state_is_tuple=True)
with self.assertRaisesRegexp(
ValueError, "Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: *"):
contrib_rnn.AttentionCellWrapper(
lstm_cell, 4, state_is_tuple=False)
def testAttentionCellWrapperZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
for state_is_tuple in [False, True]:
with tf.Graph().as_default():
with self.cached_session() as sess:
with tf.variable_scope(
"state_is_tuple_" + str(state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = tf.zeros([batch_size, num_units], dtype=np.float32)
attn_state_zeros = tf.zeros(
[batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = tf.zeros(
[
batch_size,
num_units * 2 + attn_length * num_units + num_units
],
dtype=np.float32)
inputs = tf.zeros(
[batch_size, input_size], dtype=tf.float32)
output, state = cell(inputs, zero_state)
self.assertEqual(output.get_shape(), [batch_size, num_units])
if state_is_tuple:
self.assertEqual(len(state), 3)
self.assertEqual(len(state[0]), 2)
self.assertEqual(state[0][0].get_shape(), [batch_size, num_units])
self.assertEqual(state[0][1].get_shape(), [batch_size, num_units])
self.assertEqual(state[1].get_shape(), [batch_size, num_units])
self.assertEqual(state[2].get_shape(),
[batch_size, attn_length * num_units])
tensors = [output] + list(state)
else:
self.assertEqual(state.get_shape(), [
batch_size,
num_units * 2 + num_units + attn_length * num_units
])
tensors = [output, state]
zero_result = sum(
[tf.reduce_sum(tf.abs(x)) for x in tensors])
sess.run(tf.global_variables_initializer())
self.assertLess(sess.run(zero_result), 1e-6)
def testAttentionCellWrapperValues(self):
num_units = 8
attn_length = 16
batch_size = 3
for state_is_tuple in [False, True]:
with tf.Graph().as_default():
with self.cached_session() as sess:
with tf.variable_scope(
"state_is_tuple_" + str(state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = tf.constant(
0.1 * np.ones([batch_size, num_units], dtype=np.float32),
dtype=tf.float32)
attn_state_zeros = tf.constant(
0.1 * np.ones(
[batch_size, attn_length * num_units], dtype=np.float32),
dtype=tf.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = tf.constant(
0.1 * np.ones(
[
batch_size,
num_units * 2 + num_units + attn_length * num_units
],
dtype=np.float32),
dtype=tf.float32)
inputs = tf.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=tf.float32)
output, state = cell(inputs, zero_state)
if state_is_tuple:
concat_state = tf.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
else:
concat_state = state
sess.run(tf.global_variables_initializer())
output, state = sess.run([output, concat_state])
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertGreater(
float(np.linalg.norm((output[0, :] - output[i, :]))), 1e-6)
self.assertGreater(
float(np.linalg.norm((state[0, :] - state[i, :]))), 1e-6)
def _testAttentionCellWrapperCorrectResult(self):
num_units = 4
attn_length = 6
batch_size = 2
expected_output = np.array(
[[1.068372, 0.45496, -0.678277, 0.340538],
[1.018088, 0.378983, -0.572179, 0.268591]],
dtype=np.float32)
expected_state = np.array(
[[
0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962,
0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077,
0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536,
0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152,
0.51843399
], [
0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637,
0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857,
0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689,
0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457,
0.70582712
]],
dtype=np.float32)
seed = 12345
tf.set_random_seed(seed)
rnn_scope = None
for state_is_tuple in [False, True]:
with tf.Session() as sess:
with tf.variable_scope(
"state_is_tuple",
reuse=state_is_tuple,
initializer=tf.glorot_uniform_initializer()):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we restore the scope of the
# RNNCells after the first use below.
if rnn_scope is not None:
(cell._scope, lstm_cell._scope) = rnn_scope # pylint: disable=protected-access,unpacking-non-sequence
zeros1 = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 1)
zeros2 = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 2)
zeros3 = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 3)
attn_state_zeros = tf.random_uniform(
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
if not state_is_tuple:
zero_state = tf.concat([
zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
], 1)
inputs = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 5)
output, state = cell(inputs, zero_state)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we store the scope of the
# first RNNCell for reuse above.
if rnn_scope is None:
rnn_scope = (cell._scope, lstm_cell._scope) # pylint: disable=protected-access
if state_is_tuple:
state = tf.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(output), expected_output)
self.assertAllClose(sess.run(state), expected_state)
class StackBidirectionalRNNTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
super().setUp()
def _createStackBidirectionalDynamicRNN(self,
use_gpu,
use_shape,
use_state_tuple,
initial_states_fw=None,
initial_states_bw=None,
scope=None):
del use_gpu
del use_state_tuple
self.layers = [2, 3]
input_size = 5
batch_size = 2
max_length = 8
initializer = tf.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = tf.placeholder(tf.int64)
self.cells_fw = [
rnn_cell.LSTMCell( # pylint:disable=g-complex-comprehension
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
self.cells_bw = [
rnn_cell.LSTMCell( # pylint:disable=g-complex-comprehension
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
inputs = max_length * [
tf.placeholder(
tf.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
inputs_c = tf.stack(inputs)
inputs_c = tf.transpose(inputs_c, [1, 0, 2])
outputs, st_fw, st_bw = contrib_rnn.stack_bidirectional_dynamic_rnn(
self.cells_fw,
self.cells_bw,
inputs_c,
initial_states_fw=initial_states_fw,
initial_states_bw=initial_states_bw,
dtype=tf.float32,
sequence_length=sequence_length,
scope=scope)
# Outputs has shape (batch_size, max_length, 2* layer[-1].
output_shape = [None, max_length, 2 * self.layers[-1]]
if use_shape:
output_shape[0] = batch_size
self.assertAllEqual(outputs.get_shape().as_list(), output_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, st_fw, st_bw, sequence_length
def _testStackBidirectionalDynamicRNN(self, use_gpu, use_shape,
use_state_tuple):
with self.session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalDynamicRNN(use_gpu, use_shape,
use_state_tuple))
tf.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward states of the first layer have
# to be the same.
# For the next layers, since the input is a concat of forward and backward
# outputs of the previous layers, the symmetry is broken and the following
# states and outputs differ.
# We cannot access the intermediate values between layers but we can
# check that the forward and backward states of the first layer match.
self.assertAllClose(s_fw[0], s_bw[0])
out = np.swapaxes(out, 0, 1)
# If outputs were not concatenated between layers, the outputs of the forward
# and backward passes would be the same but symmetric.
# Check that this is not the case.
# Due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 4 <= depth < 6
# First sequence in batch is length=2
# Check that the time=0 forward output is not equal to time=1 backward.
self.assertNotEqual(out[0][0][0], out[1][0][3])
self.assertNotEqual(out[0][0][1], out[1][0][4])
self.assertNotEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is not equal to time=0 backward.
self.assertNotEqual(out[1][0][0], out[0][0][3])
self.assertNotEqual(out[1][0][1], out[0][0][4])
self.assertNotEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is not equal to time=2 backward.
self.assertNotEqual(out[0][1][0], out[2][1][3])
self.assertNotEqual(out[0][1][1], out[2][1][4])
self.assertNotEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is not equal to time=1 backward.
self.assertNotEqual(out[1][1][0], out[1][1][3])
self.assertNotEqual(out[1][1][1], out[1][1][4])
self.assertNotEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is not equal to time=0 backward.
self.assertNotEqual(out[2][1][0], out[0][1][3])
self.assertNotEqual(out[2][1][1], out[0][1][4])
self.assertNotEqual(out[2][1][2], out[0][1][5])
def _testStackBidirectionalDynamicRNNStates(self, use_gpu):
# Check that the states are correctly initialized.
# - Create a net and iterate for 3 steps. Keep the state (state_3).
# - Reset states, and iterate for 5 steps. Last state is state_5.
# - Reset the states to state_3 and iterate for 2 more steps;
# the last state will be state_5'.
# - Check that the state_5 and state_5' (forward and backward) are the
# same for the first layer (it does not apply for the second layer since
# it has forward-backward dependencies).
with self.session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
batch_size = 2
# Create states placeholders.
initial_states_fw = [
tf.placeholder(
tf.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
initial_states_bw = [
tf.placeholder(
tf.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
# Create the net
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalDynamicRNN(
use_gpu,
use_shape=True,
use_state_tuple=False,
initial_states_fw=initial_states_fw,
initial_states_bw=initial_states_bw))
tf.global_variables_initializer().run()
# Run 3 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]}
# Initialize to empty state.
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net and run 5 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net to state_3 and run 2 more steps.
feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
for i, _ in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = st_3_fw[i]
feed_dict[initial_states_bw[i]] = st_3_bw[i]
_, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Check that the 3+2-step and 5-step first-layer states match.
self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
self.assertAllEqual(st_5_bw[0], st_5p_bw[0])
def testBidirectionalRNN(self):
# Generate 2^3 option values
# from [True, True, True] to [False, False, False]
options = itertools.product([True, False], repeat=3)
for option in options:
self._testStackBidirectionalDynamicRNN(
use_gpu=option[0], use_shape=option[1], use_state_tuple=option[2])
# Check States.
self._testStackBidirectionalDynamicRNNStates(use_gpu=False)
self._testStackBidirectionalDynamicRNNStates(use_gpu=True)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
# REMARKS: factory(scope) is a function accepting a scope
# as an argument, such scope can be None, a string
# or a VariableScope instance.
with self.session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
# check that all the variable names start with the proper scope.
tf.global_variables_initializer()
all_vars = tf.global_variables()
prefix = prefix or "stack_bidirectional_rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("StackRNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testBidirectionalDynamicRNNScope(self):
def factory(scope):
return self._createStackBidirectionalDynamicRNN(
use_gpu=True, use_shape=True, use_state_tuple=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMBlockCellTest(tf.test.TestCase, parameterized.TestCase):
TEST_CASES = ({
"testcase_name": "Fp32",
"dtype": tf.float32,
"rtol": 1e-6,
"atol": 1e-6
}, {
"testcase_name": "Fp16",
"dtype": tf.float16,
"rtol": 8e-3,
"atol": 8e-4
})
def testNoneDimsWithDynamicRNN(self):
with self.session(use_gpu=True, graph=tf.Graph()) as sess:
batch_size = 4
num_steps = 5
input_dim = 6
cell_size = 7
cell = contrib_rnn.LSTMBlockCell(cell_size)
x = tf.placeholder(tf.float32, shape=(None, None, input_dim))
output, _ = tf.nn.dynamic_rnn(
cell, x, time_major=True, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_dim)
sess.run(output, feed)
def testLSTMBlockCell(self):
with self.session(use_gpu=True, graph=tf.Graph()) as sess:
with tf.variable_scope(
"root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[contrib_rnn.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 2]),
m1.name: 0.1 * np.ones([1, 2]),
m2.name: 0.1 * np.ones([1, 2]),
m3.name: 0.1 * np.ones([1, 2])
})
self.assertLen(res, 5)
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
# These numbers are from testBasicLSTMCell and only test c/h.
self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
def testCompatibleNames(self):
with self.session(use_gpu=True, graph=tf.Graph()):
cell = rnn_cell.LSTMCell(10)
pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
inputs = [tf.zeros([4, 5])] * 6
tf.nn.static_rnn(cell, inputs, dtype=tf.float32, scope="basic")
tf.nn.static_rnn(pcell, inputs, dtype=tf.float32, scope="peephole")
basic_names = {
v.name: v.get_shape()
for v in tf.trainable_variables()
}
with self.session(use_gpu=True, graph=tf.Graph()):
cell = contrib_rnn.LSTMBlockCell(10)
pcell = contrib_rnn.LSTMBlockCell(10, use_peephole=True)
inputs = [tf.zeros([4, 5])] * 6
tf.nn.static_rnn(cell, inputs, dtype=tf.float32, scope="basic")
tf.nn.static_rnn(pcell, inputs, dtype=tf.float32, scope="peephole")
block_names = {
v.name: v.get_shape()
for v in tf.trainable_variables()
}
self.assertEqual(basic_names, block_names)
def testLSTMBasicToBlockCell(self):
with self.session(use_gpu=True) as sess:
x = tf.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = tf.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with tf.variable_scope("basic", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with tf.variable_scope("block", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[contrib_rnn.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlockCellPeeping(self):
with self.session(use_gpu=True) as sess:
x = tf.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = tf.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with tf.variable_scope("basic", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[
rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)
for _ in range(2)
],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with tf.variable_scope("block", initializer=initializer):
m0 = tf.zeros([1, 2])
m1 = tf.zeros([1, 2])
m2 = tf.zeros([1, 2])
m3 = tf.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[contrib_rnn.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([tf.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
class LayerNormBasicLSTMCellTest(tf.test.TestCase):
# NOTE: all the values in the current test case have been calculated.
def testBasicLSTMCell(self):
with self.cached_session() as sess:
with tf.variable_scope(
"root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
c0 = tf.zeros([1, 2])
h0 = tf.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = tf.zeros([1, 2])
h1 = tf.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
single_cell = lambda: contrib_rnn.LayerNormBasicLSTMCell(2)
cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)])
g, out_m = cell(x, state)
sess.run([tf.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_state0_c = np.array([[-1.0, 1.0]])
expected_state0_h = np.array([[-0.38079708, 0.38079708]])
expected_state1_c =
|
np.array([[-1.0, 1.0]])
|
numpy.array
|
import unittest
from mock import MagicMock
import mock
import numpy as np
from bilby.core.likelihood import (
Likelihood,
GaussianLikelihood,
PoissonLikelihood,
StudentTLikelihood,
Analytical1DLikelihood,
ExponentialLikelihood,
AnalyticalMultidimensionalCovariantGaussian,
AnalyticalMultidimensionalBimodalCovariantGaussian,
JointLikelihood,
)
class TestLikelihoodBase(unittest.TestCase):
def setUp(self):
self.likelihood = Likelihood()
def tearDown(self):
del self.likelihood
def test_repr(self):
self.likelihood = Likelihood(parameters=["a", "b"])
expected = "Likelihood(parameters=['a', 'b'])"
self.assertEqual(expected, repr(self.likelihood))
def test_base_log_likelihood(self):
self.assertTrue(np.isnan(self.likelihood.log_likelihood()))
def test_base_noise_log_likelihood(self):
self.assertTrue(np.isnan(self.likelihood.noise_log_likelihood()))
def test_base_log_likelihood_ratio(self):
self.assertTrue(np.isnan(self.likelihood.log_likelihood_ratio()))
def test_meta_data_unset(self):
self.assertEqual(self.likelihood.meta_data, None)
def test_meta_data_set_fail(self):
with self.assertRaises(ValueError):
self.likelihood.meta_data = 10
def test_meta_data(self):
meta_data = dict(x=1, y=2)
self.likelihood.meta_data = meta_data
self.assertEqual(self.likelihood.meta_data, meta_data)
class TestAnalytical1DLikelihood(unittest.TestCase):
def setUp(self):
self.x = np.arange(start=0, stop=100, step=1)
self.y = np.arange(start=0, stop=100, step=1)
def test_func(x, parameter1, parameter2):
return parameter1 * x + parameter2
self.func = test_func
self.parameter1_value = 4
self.parameter2_value = 7
self.analytical_1d_likelihood = Analytical1DLikelihood(
x=self.x, y=self.y, func=self.func
)
self.analytical_1d_likelihood.parameters["parameter1"] = self.parameter1_value
self.analytical_1d_likelihood.parameters["parameter2"] = self.parameter2_value
def tearDown(self):
del self.x
del self.y
del self.func
del self.analytical_1d_likelihood
del self.parameter1_value
del self.parameter2_value
def test_init_x(self):
self.assertTrue(np.array_equal(self.x, self.analytical_1d_likelihood.x))
def test_set_x_to_array(self):
new_x = np.arange(start=0, stop=50, step=2)
self.analytical_1d_likelihood.x = new_x
self.assertTrue(np.array_equal(new_x, self.analytical_1d_likelihood.x))
def test_set_x_to_int(self):
new_x = 5
self.analytical_1d_likelihood.x = new_x
expected_x = np.array([new_x])
self.assertTrue(np.array_equal(expected_x, self.analytical_1d_likelihood.x))
def test_set_x_to_float(self):
new_x = 5.3
self.analytical_1d_likelihood.x = new_x
expected_x = np.array([new_x])
self.assertTrue(np.array_equal(expected_x, self.analytical_1d_likelihood.x))
def test_init_y(self):
self.assertTrue(np.array_equal(self.y, self.analytical_1d_likelihood.y))
def test_set_y_to_array(self):
new_y = np.arange(start=0, stop=50, step=2)
self.analytical_1d_likelihood.y = new_y
self.assertTrue(np.array_equal(new_y, self.analytical_1d_likelihood.y))
def test_set_y_to_int(self):
new_y = 5
self.analytical_1d_likelihood.y = new_y
expected_y = np.array([new_y])
self.assertTrue(np.array_equal(expected_y, self.analytical_1d_likelihood.y))
def test_set_y_to_float(self):
new_y = 5.3
self.analytical_1d_likelihood.y = new_y
expected_y = np.array([new_y])
self.assertTrue(np.array_equal(expected_y, self.analytical_1d_likelihood.y))
def test_init_func(self):
self.assertEqual(self.func, self.analytical_1d_likelihood.func)
def test_set_func(self):
def new_func(x):
return x
with self.assertRaises(AttributeError):
# noinspection PyPropertyAccess
self.analytical_1d_likelihood.func = new_func
def test_parameters(self):
expected_parameters = dict(
parameter1=self.parameter1_value, parameter2=self.parameter2_value
)
self.assertDictEqual(
expected_parameters, self.analytical_1d_likelihood.parameters
)
def test_n(self):
self.assertEqual(len(self.x), self.analytical_1d_likelihood.n)
def test_set_n(self):
with self.assertRaises(AttributeError):
# noinspection PyPropertyAccess
self.analytical_1d_likelihood.n = 2
def test_model_parameters(self):
sigma = 5
self.analytical_1d_likelihood.sigma = sigma
self.analytical_1d_likelihood.parameters["sigma"] = sigma
expected_model_parameters = dict(
parameter1=self.parameter1_value, parameter2=self.parameter2_value
)
self.assertDictEqual(
expected_model_parameters, self.analytical_1d_likelihood.model_parameters
)
def test_repr(self):
expected = "Analytical1DLikelihood(x={}, y={}, func={})".format(
self.x, self.y, self.func.__name__
)
self.assertEqual(expected, repr(self.analytical_1d_likelihood))
class TestGaussianLikelihood(unittest.TestCase):
def setUp(self):
self.N = 100
self.sigma = 0.1
self.x = np.linspace(0, 1, self.N)
self.y = 2 * self.x + 1 + np.random.normal(0, self.sigma, self.N)
def test_function(x, m, c):
return m * x + c
self.function = test_function
def tearDown(self):
del self.N
del self.sigma
del self.x
del self.y
del self.function
def test_known_sigma(self):
likelihood = GaussianLikelihood(self.x, self.y, self.function, self.sigma)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
likelihood.log_likelihood()
self.assertEqual(likelihood.sigma, self.sigma)
def test_known_array_sigma(self):
sigma_array = np.ones(self.N) * self.sigma
likelihood = GaussianLikelihood(self.x, self.y, self.function, sigma_array)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
likelihood.log_likelihood()
self.assertTrue(type(likelihood.sigma) == type(sigma_array))
self.assertTrue(all(likelihood.sigma == sigma_array))
def test_set_sigma_None(self):
likelihood = GaussianLikelihood(self.x, self.y, self.function, sigma=None)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
self.assertTrue(likelihood.sigma is None)
with self.assertRaises(TypeError):
likelihood.log_likelihood()
def test_sigma_float(self):
likelihood = GaussianLikelihood(self.x, self.y, self.function, sigma=None)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
likelihood.parameters["sigma"] = 1
likelihood.log_likelihood()
self.assertTrue(likelihood.sigma == 1)
def test_sigma_other(self):
likelihood = GaussianLikelihood(self.x, self.y, self.function, sigma=None)
with self.assertRaises(ValueError):
likelihood.sigma = "test"
def test_repr(self):
likelihood = GaussianLikelihood(self.x, self.y, self.function, sigma=self.sigma)
expected = "GaussianLikelihood(x={}, y={}, func={}, sigma={})".format(
self.x, self.y, self.function.__name__, self.sigma
)
self.assertEqual(expected, repr(likelihood))
class TestStudentTLikelihood(unittest.TestCase):
def setUp(self):
self.N = 100
self.nu = self.N - 2
self.sigma = 1
self.x = np.linspace(0, 1, self.N)
self.y = 2 * self.x + 1 + np.random.normal(0, self.sigma, self.N)
def test_function(x, m, c):
return m * x + c
self.function = test_function
def tearDown(self):
del self.N
del self.sigma
del self.x
del self.y
del self.function
def test_known_sigma(self):
likelihood = StudentTLikelihood(
self.x, self.y, self.function, self.nu, self.sigma
)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
likelihood.log_likelihood()
self.assertEqual(likelihood.sigma, self.sigma)
def test_set_nu_none(self):
likelihood = StudentTLikelihood(self.x, self.y, self.function, nu=None)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
self.assertTrue(likelihood.nu is None)
def test_log_likelihood_nu_none(self):
likelihood = StudentTLikelihood(self.x, self.y, self.function, nu=None)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
with self.assertRaises((ValueError, TypeError)):
# ValueError in Python2, TypeError in Python3
likelihood.log_likelihood()
def test_log_likelihood_nu_zero(self):
likelihood = StudentTLikelihood(self.x, self.y, self.function, nu=0)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
with self.assertRaises(ValueError):
likelihood.log_likelihood()
def test_log_likelihood_nu_negative(self):
likelihood = StudentTLikelihood(self.x, self.y, self.function, nu=-1)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
with self.assertRaises(ValueError):
likelihood.log_likelihood()
def test_setting_nu_positive_does_not_change_class_attribute(self):
likelihood = StudentTLikelihood(self.x, self.y, self.function, nu=None)
likelihood.parameters["m"] = 2
likelihood.parameters["c"] = 0
likelihood.parameters["nu"] = 98
self.assertTrue(likelihood.nu == 98)
def test_lam(self):
likelihood = StudentTLikelihood(self.x, self.y, self.function, nu=0, sigma=0.5)
self.assertAlmostEqual(4.0, likelihood.lam)
def test_repr(self):
nu = 0
sigma = 0.5
likelihood = StudentTLikelihood(
self.x, self.y, self.function, nu=nu, sigma=sigma
)
expected = "StudentTLikelihood(x={}, y={}, func={}, nu={}, sigma={})".format(
self.x, self.y, self.function.__name__, nu, sigma
)
self.assertEqual(expected, repr(likelihood))
class TestPoissonLikelihood(unittest.TestCase):
def setUp(self):
self.N = 100
self.mu = 5
self.x = np.linspace(0, 1, self.N)
self.y = np.random.poisson(self.mu, self.N)
self.yfloat = np.copy(self.y) * 1.0
self.yneg = np.copy(self.y)
self.yneg[0] = -1
def test_function(x, c):
return c
def test_function_array(x, c):
return np.ones(len(x)) * c
self.function = test_function
self.function_array = test_function_array
self.poisson_likelihood = PoissonLikelihood(self.x, self.y, self.function)
def tearDown(self):
del self.N
del self.mu
del self.x
del self.y
del self.yfloat
del self.yneg
del self.function
del self.function_array
del self.poisson_likelihood
def test_init_y_non_integer(self):
with self.assertRaises(ValueError):
PoissonLikelihood(self.x, self.yfloat, self.function)
def test_init__y_negative(self):
with self.assertRaises(ValueError):
PoissonLikelihood(self.x, self.yneg, self.function)
def test_neg_rate(self):
self.poisson_likelihood.parameters["c"] = -2
with self.assertRaises(ValueError):
self.poisson_likelihood.log_likelihood()
def test_neg_rate_array(self):
likelihood = PoissonLikelihood(self.x, self.y, self.function_array)
likelihood.parameters["c"] = -2
with self.assertRaises(ValueError):
likelihood.log_likelihood()
def test_init_y(self):
self.assertTrue(np.array_equal(self.y, self.poisson_likelihood.y))
def test_set_y_to_array(self):
new_y = np.arange(start=0, stop=50, step=2)
self.poisson_likelihood.y = new_y
self.assertTrue(np.array_equal(new_y, self.poisson_likelihood.y))
def test_set_y_to_positive_int(self):
new_y = 5
self.poisson_likelihood.y = new_y
expected_y = np.array([new_y])
self.assertTrue(
|
np.array_equal(expected_y, self.poisson_likelihood.y)
|
numpy.array_equal
|
import sys
import numpy as np
from itertools import combinations
from pyemto.utilities.utils import rotation_matrix
import spglib as spg
try:
from pymatgen import Lattice, Structure
from pymatgen.vis.structure_vtk import StructureVis
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.util.coord import get_angle
except ImportError:
# pymatgen has not been installed
raise ImportError('emto_input_generator requires pymatgen>=4.4.0 to be installed!')
import os
import pyemto
import pyemto.common.common as common
class EMTO:
"""This class can be used to create EMTO input files from
an arbitrary structure. What is needed as input:
- primitive lattice vectors,
- basis vectors,
- list of atomic species that occupy the basis sites.
"""
def __init__(self, folder=None, EMTOdir=None):
""" """
if folder is None:
self.folder = os.getcwd()
else:
self.folder = folder
if EMTOdir is None:
self.EMTOdir = '/home/EMTO'
else:
self.EMTOdir = EMTOdir
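# Map the ITA space-group number (1-230) to the Bravais-lattice (IBZ) code
# used by EMTO; sg2bl below gives the corresponding lattice name.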
self.sg2ibz = {1:14, 2:14, 3:12, 4:12, 5:13, 6:12, 7:12, 8:13, 9:13, 10:12,
11:12, 12:13, 13:12, 14:12, 15:13, 16:8, 17:8, 18:8, 19:8, 20:9,
21:9, 22:11, 23:10, 24:10, 25:8, 26:8, 27:8, 28:8, 29:8, 30:8,
31:8, 32:8, 33:8, 34:8, 35:9, 36:9, 37:9, 38:9, 39:9, 40:9,
41:9, 42:11, 43:11, 44:10, 45:10, 46:10, 47:8, 48:8, 49:8, 50:8,
51:8, 52:8, 53:8, 54:8, 55:8, 56:8, 57:8, 58:8, 59:8, 60:8,
61:8, 62:8, 63:9, 64:9, 65:9, 66:9, 67:9, 68:9, 69:11, 70:11,
71:10, 72:10, 73:10, 74:10, 75:5, 76:5, 77:5, 78:5, 79:6, 80:6,
81:5, 82:6, 83:5, 84:5, 85:5, 86:5, 87:6, 88:6, 89:5, 90:5,
91:5, 92:5, 93:5, 94:5, 95:5, 96:5, 97:6, 98:6, 99:5, 100:5,
101:5, 102:5, 103:5, 104:5, 105:5, 106:5, 107:6, 108:6, 109:6, 110:6,
111:5, 112:5, 113:5, 114:5, 115:5, 116:5, 117:5, 118:5, 119:6, 120:6,
121:6, 122:6, 123:5, 124:5, 125:5, 126:5, 127:5, 128:5, 129:5, 130:5,
131:5, 132:5, 133:5, 134:5, 135:5, 136:5, 137:5, 138:5, 139:6, 140:6,
141:6, 142:6, 143:4, 144:4, 145:4, 146:7, 147:4, 148:7, 149:4, 150:4,
151:4, 152:4, 153:4, 154:4, 155:7, 156:4, 157:4, 158:4, 159:4, 160:7,
161:7, 162:4, 163:4, 164:4, 165:4, 166:7, 167:7, 168:4, 169:4, 170:4,
171:4, 172:4, 173:4, 174:4, 175:4, 176:4, 177:4, 178:4, 179:4, 180:4,
181:4, 182:4, 183:4, 184:4, 185:4, 186:4, 187:4, 188:4, 189:4, 190:4,
191:4, 192:4, 193:4, 194:4, 195:1, 196:2, 197:3, 198:1, 199:3, 200:1,
201:1, 202:2, 203:2, 204:3, 205:1, 206:3, 207:1, 208:1, 209:2, 210:2,
211:3, 212:1, 213:1, 214:3, 215:1, 216:2, 217:3, 218:1, 219:2, 220:3,
221:1, 222:1, 223:1, 224:1, 225:2, 226:2, 227:2, 228:2, 229:3, 230:3}
self.sg2bl = {1:'simple triclinic', 2:'simple triclinic',
3:'simple monoclinic', 4:'simple monoclinic',
5:'base-centered monoclinic', 6:'simple monoclinic',
7:'simple monoclinic', 8:'base-centered monoclinic',
9:'base-centered monoclinic', 10:'simple monoclinic',
11:'simple monoclinic', 12:'base-centered monoclinic',
13:'simple monoclinic', 14:'simple monoclinic',
15:'base-centered monoclinic', 16:'simple orthorhombic',
17:'simple orthorhombic', 18:'simple orthorhombic',
19:'simple orthorhombic', 20:'base-centered orthorhombic',
21:'base-centered orthorhombic', 22:'face-centered orthorhombic',
23:'body-centered orthorhombic', 24:'body-centered orthorhombic',
25:'simple orthorhombic', 26:'simple orthorhombic',
27:'simple orthorhombic', 28:'simple orthorhombic',
29:'simple orthorhombic', 30:'simple orthorhombic',
31:'simple orthorhombic', 32:'simple orthorhombic',
33:'simple orthorhombic', 34:'simple orthorhombic',
35:'base-centered orthorhombic', 36:'base-centered orthorhombic',
37:'base-centered orthorhombic', 38:'base-centered orthorhombic',
39:'base-centered orthorhombic', 40:'base-centered orthorhombic',
41:'base-centered orthorhombic', 42:'face-centered orthorhombic',
43:'face-centered orthorhombic', 44:'body-centered orthorhombic',
45:'body-centered orthorhombic', 46:'body-centered orthorhombic',
47:'simple orthorhombic', 48:'simple orthorhombic',
49:'simple orthorhombic', 50:'simple orthorhombic',
51:'simple orthorhombic', 52:'simple orthorhombic',
53:'simple orthorhombic', 54:'simple orthorhombic',
55:'simple orthorhombic', 56:'simple orthorhombic',
57:'simple orthorhombic', 58:'simple orthorhombic',
59:'simple orthorhombic', 60:'simple orthorhombic',
61:'simple orthorhombic', 62:'simple orthorhombic',
63:'base-centered orthorhombic', 64:'base-centered orthorhombic',
65:'base-centered orthorhombic', 66:'base-centered orthorhombic',
67:'base-centered orthorhombic', 68:'base-centered orthorhombic',
69:'face-centered orthorhombic', 70:'face-centered orthorhombic',
71:'body-centered orthorhombic', 72:'body-centered orthorhombic',
73:'body-centered orthorhombic', 74:'body-centered orthorhombic',
75:'simple tetragonal', 76:'simple tetragonal',
77:'simple tetragonal', 78:'simple tetragonal',
79:'body-centered tetragonal', 80:'body-centered tetragonal',
81:'simple tetragonal', 82:'body-centered tetragonal',
83:'simple tetragonal', 84:'simple tetragonal',
85:'simple tetragonal', 86:'simple tetragonal',
87:'body-centered tetragonal', 88:'body-centered tetragonal',
89:'simple tetragonal', 90:'simple tetragonal',
91:'simple tetragonal', 92:'simple tetragonal',
93:'simple tetragonal', 94:'simple tetragonal',
95:'simple tetragonal', 96:'simple tetragonal',
97:'body-centered tetragonal', 98:'body-centered tetragonal',
99:'simple tetragonal', 100:'simple tetragonal',
101:'simple tetragonal', 102:'simple tetragonal',
103:'simple tetragonal', 104:'simple tetragonal',
105:'simple tetragonal', 106:'simple tetragonal',
107:'body-centered tetragonal', 108:'body-centered tetragonal',
109:'body-centered tetragonal', 110:'body-centered tetragonal',
111:'simple tetragonal', 112:'simple tetragonal',
113:'simple tetragonal', 114:'simple tetragonal',
115:'simple tetragonal', 116:'simple tetragonal',
117:'simple tetragonal', 118:'simple tetragonal',
119:'body-centered tetragonal', 120:'body-centered tetragonal',
121:'body-centered tetragonal', 122:'body-centered tetragonal',
123:'simple tetragonal', 124:'simple tetragonal',
125:'simple tetragonal', 126:'simple tetragonal',
127:'simple tetragonal', 128:'simple tetragonal',
129:'simple tetragonal', 130:'simple tetragonal',
131:'simple tetragonal', 132:'simple tetragonal',
133:'simple tetragonal', 134:'simple tetragonal',
135:'simple tetragonal', 136:'simple tetragonal',
137:'simple tetragonal', 138:'simple tetragonal',
139:'body-centered tetragonal', 140:'body-centered tetragonal',
141:'body-centered tetragonal', 142:'body-centered tetragonal',
143:'hexagonal', 144:'hexagonal',
145:'hexagonal', 146:'rhombohedral',
147:'hexagonal', 148:'rhombohedral',
149:'hexagonal', 150:'hexagonal',
151:'hexagonal', 152:'hexagonal',
153:'hexagonal', 154:'hexagonal',
155:'rhombohedral', 156:'hexagonal',
157:'hexagonal', 158:'hexagonal',
159:'hexagonal', 160:'rhombohedral',
161:'rhombohedral', 162:'hexagonal',
163:'hexagonal', 164:'hexagonal',
165:'hexagonal', 166:'rhombohedral',
167:'rhombohedral', 168:'hexagonal',
169:'hexagonal', 170:'hexagonal',
171:'hexagonal', 172:'hexagonal',
173:'hexagonal', 174:'hexagonal',
175:'hexagonal', 176:'hexagonal',
177:'hexagonal', 178:'hexagonal',
179:'hexagonal', 180:'hexagonal',
181:'hexagonal', 182:'hexagonal',
183:'hexagonal', 184:'hexagonal',
185:'hexagonal', 186:'hexagonal',
187:'hexagonal', 188:'hexagonal',
189:'hexagonal', 190:'hexagonal',
191:'hexagonal', 192:'hexagonal',
193:'hexagonal', 194:'hexagonal',
195:'simple cubic', 196:'face-centered cubic',
197:'body-centered cubic', 198:'simple cubic',
199:'body-centered cubic', 200:'simple cubic',
201:'simple cubic', 202:'face-centered cubic',
203:'face-centered cubic', 204:'body-centered cubic',
205:'simple cubic', 206:'body-centered cubic',
207:'simple cubic', 208:'simple cubic',
209:'face-centered cubic', 210:'face-centered cubic',
211:'body-centered cubic', 212:'simple cubic',
213:'simple cubic', 214:'body-centered cubic',
215:'simple cubic', 216:'face-centered cubic',
217:'body-centered cubic', 218:'simple cubic',
219:'face-centered cubic', 220:'body-centered cubic',
221:'simple cubic', 222:'simple cubic',
223:'simple cubic', 224:'simple cubic',
225:'face-centered cubic', 226:'face-centered cubic',
227:'face-centered cubic', 228:'face-centered cubic',
229:'body-centered cubic', 230:'body-centered cubic'}
# BMDL, KSTR, SHAPE, KGRN and KFCD class instances
self.input_system = pyemto.System(folder=self.folder, EMTOdir=self.EMTOdir)
#
self.fit_angle_tol = 5e-6
self.fit_norm_ratio_tol = 5e-6
return
def calc_ws_radius(self, struct):
bohr2angst = 0.52917721
vol_unit = struct.volume/struct.num_sites
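# Wigner-Seitz radius in Bohr: radius of a sphere whose volume equals the
# volume per atomic site.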
sws = (3*vol_unit/4.0/np.pi)**(1.0/3)/bohr2angst
return sws
def make_basis_array(self, struct):
"""Returns a 2D numpy array of the basis atom coordinates
in !!Cartesian!! coordinates.
"""
len_basis = struct.num_sites
emto_basis = np.zeros((len_basis, 3))
for i in range(len_basis):
emto_basis[i, :] = struct.sites[i].coords
return emto_basis
def make_sites_array(self, struct):
len_basis = struct.num_sites
emto_sites = []
for i in range(len_basis):
emto_sites.append(struct.sites[i].specie.number)
return emto_sites
def make_cpa_sites_array(self, struct):
len_basis = struct.num_sites
self.atoms_cpa = []
self.concs_cpa = []
self.splts_cpa = []
self.fixs_cpa = []
for i in range(len_basis):
atom_number = struct.sites[i].specie.number
for j in range(len(self.pmg_species)):
if atom_number == self.pmg_species[j]:
self.atoms_cpa.append(self.species[j])
self.concs_cpa.append(self.concs[j])
self.splts_cpa.append(self.splts[j])
self.fixs_cpa.append(self.fixs[j])
break
def get_equivalent_sites(self):
"""Find all the sites that have exactly the same species,
concentrations, and magnetic moments"""
splt_tol = 1e-6
conc_tol = 1e-6
species_sorted = []
splts_sorted = []
concs_sorted = []
for i in range(len(self.species)):
tmp1 = []
tmp2 = []
tmp3 = []
ind_sorted = np.argsort(self.species[i])
for ind in ind_sorted:
tmp1.append(self.species[i][ind])
tmp2.append(self.splts[i][ind])
tmp3.append(self.concs[i][ind])
species_sorted.append(tmp1)
splts_sorted.append(tmp2)
concs_sorted.append(tmp3)
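# 9999 acts as an "undetermined" sentinel; pairs are marked 1 (equivalent)
# or 0 (inequivalent) below.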
eqv_sites = np.zeros((len(species_sorted), len(species_sorted)), dtype=np.int) + 9999
for i in range(len(species_sorted)-1):
for j in range(i+1, len(species_sorted)):
eqv_sites[i,j] = 1
if len(species_sorted[i]) != len(species_sorted[j]):
# Sites i and j contain different numbers of atoms.
# For now, take them to be non-equivalent, although
# they could still be equivalent in the case that
# some element has been split into two or more parts
# concentration-wise (whole and the parts should have
# identical magnetic moments).
eqv_sites[i, j] = 0
else:
for a1, a2, splt1, splt2, conc1, conc2 in zip(species_sorted[i], species_sorted[j],
splts_sorted[i], splts_sorted[j], concs_sorted[i], concs_sorted[j]):
if a1 != a2 or np.abs(splt1 - splt2) > splt_tol or np.abs(conc1 - conc2) > conc_tol:
# Some pair of atoms (in the sorted lists) were not
# the same => sites i and j are not equivalent.
eqv_sites[i, j] = 0
break
output_sites = np.ones(len(species_sorted), dtype=np.int) * 9999
next_available = 1
for i in range(len(species_sorted)-1):
if output_sites[i] == 9999:
output_sites[i] = next_available
next_available += 1
for j in range(i+1, len(species_sorted)):
if eqv_sites[i, j] == 1:
output_sites[j] = output_sites[i]
if output_sites[-1] == 9999:
output_sites[-1] = next_available
return output_sites
def prepare_input_files(self, prims=None, basis=None, latpath=None,
coords_are_cartesian=False, latname=None,
species=None, find_primitive=True,
concs=None, splts=None, its=None, ws_wsts=None,
make_supercell=None, fixs=None,
**kwargs):
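# Build pymatgen/spglib structures from the user input, detect the space
# group and the standardized primitive cell, then derive the lattice
# description (IBZ type, lattice vectors, basis) needed for the EMTO inputs.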
if prims is None:
sys.exit('EMTO.prepare_input_files(): \'prims\' has to be given!')
if basis is None:
sys.exit('EMTO.prepare_input_files(): \'basis\' has to be given!')
if latpath is None:
self.latpath = os.getcwd()
else:
self.latpath = latpath
if latname is None:
self.latname = 'structure'
else:
self.latname = latname
self.prims = np.array(prims)
self.basis = np.array(basis)
self.len_basis = len(self.basis[:, 0])
if species is None:
sys.exit('EMTO.prepare_input_files(): \'species\' has to be given!')
else:
self.species = []
for i in range(len(species)):
if isinstance(species[i], list):
tmp = []
for j in range(len(species[i])):
tmp.append(species[i][j])
self.species.append(tmp)
else:
self.species.append([species[i]])
if splts is None:
# Assume a zero moments array
self.splts = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(0.0)
self.splts.append(tmp)
else:
self.splts.append([0.0])
else:
self.splts = []
for i in range(len(splts)):
if isinstance(splts[i], list):
tmp = []
for j in range(len(splts[i])):
tmp.append(splts[i][j])
self.splts.append(tmp)
else:
self.splts.append([splts[i]])
if fixs is None:
# Assume no fixed moments ('N' for every species)
self.fixs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append('N')
self.fixs.append(tmp)
else:
self.fixs.append(['N'])
else:
self.fixs = []
for i in range(len(fixs)):
if isinstance(fixs[i], list):
tmp = []
for j in range(len(fixs[i])):
tmp.append(fixs[i][j])
self.fixs.append(tmp)
else:
self.fixs.append([fixs[i]])
if concs is None:
# Assume equal concentrations for the species on each site
self.concs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(1.0/len(self.species[i]))
self.concs.append(tmp)
else:
self.concs.append([1.0])
else:
self.concs = []
for i in range(len(concs)):
if isinstance(concs[i], list):
tmp = []
tmp_sum = 0.0
for j in range(len(concs[i])):
tmp.append(concs[i][j])
tmp_sum += concs[i][j]
print(tmp_sum)
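# Heuristic: sums close to 1 are treated as fractional concentrations,
# larger sums as percentages.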
if tmp_sum < 1.1:
if np.abs(tmp_sum - 1.0) > 1.e-6:
sys.exit('Concentrations {0} for site {1} do not add up to 1.0!!!'.format(concs[i], i+1))
else:
if np.abs(tmp_sum - 100.0) > 1.e-3:
sys.exit('Concentrations {0} for site {1} do not add up to 100!!!'.format(concs[i], i+1))
self.concs.append(tmp)
else:
self.concs.append([concs[i]])
# Check that all species, concs, and splts arrays have the same dimensions
for a, b in combinations([self.basis, self.species, self.concs, self.splts, self.fixs], 2):
if len(a) != len(b):
print(a, 'len = ', len(a))
print(b, 'len = ', len(b))
sys.exit('The above input arrays have inconsistent lengths!!!')
for a, b in combinations([self.species, self.concs, self.splts, self.fixs], 2):
for sublist1, sublist2 in zip(a, b):
if len(sublist1) != len(sublist2):
print(sublist1, 'len = ', len(sublist1))
print(sublist2, 'len = ', len(sublist2))
sys.exit('The above input array elements have inconsistent lengths!!!')
self.find_primitive = find_primitive
if self.find_primitive:
self.pmg_species = self.get_equivalent_sites()
else:
self.pmg_species = np.linspace(1, len(self.species), len(self.species), dtype=np.int)
#
self.coords_are_cartesian = coords_are_cartesian
self.ibz = None
self.make_supercell = make_supercell
#
self.pmg_input_lattice = Lattice(self.prims)
self.pmg_input_struct = Structure(self.pmg_input_lattice, self.pmg_species, self.basis,
coords_are_cartesian=self.coords_are_cartesian)
#
if self.make_supercell is not None:
self.pmg_input_struct.make_supercell(self.make_supercell)
#
self.sws = self.calc_ws_radius(self.pmg_input_struct)
#
self.finder = SpacegroupAnalyzer(self.pmg_input_struct, symprec=0.0001, angle_tolerance=0.0001)
self.stm = StructureMatcher(ltol=0.001, stol=0.001, angle_tol=0.001, attempt_supercell=True)
#
print("Input structure information:")
print(self.pmg_input_struct)
print("Volume: ", self.pmg_input_struct.volume)
print("Lattice vectors:")
print(self.pmg_input_struct.lattice.matrix)
print("")
#
# spglib
spg_cell = (
self.pmg_input_lattice.matrix,
self.pmg_input_struct.frac_coords,
self.pmg_species
)
self.spg_space_group = spg.get_spacegroup(spg_cell)
self.spg_space_group_number = int(self.spg_space_group.split()[-1].lstrip('(').rstrip(')'))
self.spg_space_group_symbol = self.spg_space_group
self.spg_prim_lat, self.spg_prim_pos, self.spg_prim_species = spg.standardize_cell(spg_cell,
to_primitive=True)
self.prim_struct = Structure(Lattice(self.spg_prim_lat), self.spg_prim_species, self.spg_prim_pos)
self.spg_ibz = self.sg2ibz[self.spg_space_group_number]
self.ibz = self.spg_ibz
mesh = [kwargs['nkx'], kwargs['nky'], kwargs['nkz']]
#print()
#print('#'*60)
mapping, grid = spg.get_ir_reciprocal_mesh(mesh, spg_cell, is_time_reversal=True, is_shift=(0, 0, 0))
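# mapping[i] is the index of the irreducible k-point that grid point i
# reduces to; the multiplicity of each irreducible point becomes its weight.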
uniques, counts = np.unique(mapping, return_counts=True)
all_weights = []
kpoints = []
weights = []
for xx in mapping:
all_weights.append(counts[np.argwhere(uniques == xx).flatten()[0]])
for xx, yy in zip(uniques, counts):
kpoints.append(grid[np.argwhere(mapping == xx).flatten()[0]])
weights.append(yy)
#for xx, yy, zz in zip(mapping, grid, all_weights):
# print(xx, yy, zz)
#print()
#for kp, ww in zip(kpoints, weights):
# print(kp, ww)
#print()
#print('NKVEC = ', len(kpoints))
#print('#'*60)
#print()
#print(spg_prim_pos)
#print(spg_prim_species)
#
#print("Detected standard conventional structure:")
#print(self.conv_struct)
#print("Volume: ",self.conv_struct.volume)
#print("Lattice vectors:")
#print(self.conv_struct.lattice.matrix)
#print("")
print("Detected standardized structure:")
print(self.prim_struct)
print("Volume: ", self.prim_struct.volume)
print("Lattice vectors:")
print(self.prim_struct.lattice.matrix)
print("")
#
self.primaa = self.prim_struct.lattice.matrix[0, :]
self.primbb = self.prim_struct.lattice.matrix[1, :]
self.primcc = self.prim_struct.lattice.matrix[2, :]
self.output_basis = self.make_basis_array(self.prim_struct)
# Below we calculate the transformation that maps
# self.primaX to lattice vectors used by EMTO.
# This transform depends on the type of the Bravais lattice,
# so each case must be treated separately.
if self.spg_ibz == 1:
norm_tmp = np.linalg.norm(self.primaa)
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1, 0, 0])
self.emto_primb = np.array([0, 1, 0])
self.emto_primc = np.array([0, 0, 1])
self.emto_basis = self.output_basis
elif self.spg_ibz == 2:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, 0])
self.emto_primb = np.array([0, 0.5, 0.5])
self.emto_primc = np.array([0.5, 0, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 3:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, -0.5])
self.emto_primb = np.array([-0.5, 0.5, 0.5])
self.emto_primc = np.array([0.5, -0.5, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 4:
rot1 = rotation_matrix([0.0, 0.0, 1.0], 0./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([1., 0, 0])
self.emto_primb = np.array([-0.5, np.sqrt(3.)/2, 0])
self.emto_primc = np.array([0., 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 5:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1.0, 0.0, 0.0])
self.emto_primb = np.array([0.0, 1.0, 0.0])
self.emto_primc = np.array([0.0, 0.0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 6:
self.output_prima = self.primbb
self.output_primb = self.primcc
self.output_primc = self.primaa
# Apply transformation on the basis atoms
self.output_basis = self.output_basis
self.output_boa = 0.0
self.output_coa = 2*self.output_prima[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, -0.5, self.output_coa/2])
self.emto_primb = np.array([0.5, 0.5, -self.output_coa/2])
self.emto_primc = np.array([-0.5, 0.5, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 7:
alpha = self.prim_struct.lattice.alpha
kulma = np.arctan((self.primaa[0]+self.primbb[0]+self.primcc[0])/
(self.primaa[2]+self.primbb[2]+self.primcc[2]))
rot1 = rotation_matrix([0.0, -1.0, 0.0], kulma)
rot2 = np.array([[-np.sqrt(3.0)/2, -0.5, 0.0],
[0.5, -np.sqrt(3.0)/2, 0.0],
[0.0, 0.0, 1.0]])
self.output_prima = np.dot(rot2, np.dot(rot1, self.primaa))
self.output_primb = np.dot(rot2,
|
np.dot(rot1, self.primbb)
|
numpy.dot
|
import pygame
import quaternion
import numpy as np
from operator import itemgetter
class Node:
"""A node is an edge of the cuboid"""
def __init__(self, coords, color):
self.x = coords[0]
self.y = coords[1]
self.z = coords[2]
self.color = color
class Face:
"""A face of the cuboid is defined using the indices of four nodes"""
def __init__(self, nodeIdxs, color):
self.nodeIdxs = nodeIdxs
self.color = color
class Cuboid:
"""The cuboid"""
def __init__(self, quaternion):
self.nodes = []
self.faces = []
self.q = quaternion
def set_nodes(self, nodes):
self.nodes = nodes
def set_faces(self, faces):
self.faces = faces
def set_quaternion(self, q):
self.q = q
def rotate_quaternion(self, w, dt):
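# First-order integration of the quaternion kinematics dq/dt = 0.5*q*(0, w):
# q <- q + (dt/2) * q * (0, wx, wy, wz).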
self.q = dt/2 * self.q * np.quaternion(0, w[0], w[1], w[2]) + self.q
def rotate_point(self, point):
return quaternion.rotate_vectors(self.q, point)
def convert_to_computer_frame(self, point):
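# Re-express a vector in the display frame: (x, y, z) -> (-x, -z, -y).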
computerFrameChangeMatrix = np.array([[-1, 0, 0], [0, 0, -1], [0, -1, 0]])
return np.matmul(computerFrameChangeMatrix, point)
def get_euler_attitude(self):
def _rad2deg(rad):
return rad / np.pi * 180
m = quaternion.as_rotation_matrix(self.q)
test = -m[2, 0]
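# test is effectively sin(pitch); values near +/-1 indicate gimbal lock,
# where yaw and roll can no longer be separated.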
if test > 0.99999:
yaw = 0
pitch = np.pi / 2
roll = np.arctan2(m[0, 1], m[0, 2])
elif test < -0.99999:
yaw = 0
pitch = -np.pi / 2
roll =
|
np.arctan2(-m[0, 1], -m[0, 2])
|
numpy.arctan2
|
import tensorflow as tf
import numpy as np
from scipy import signal
from scipy.ndimage import gaussian_filter
from PIL import Image, ImageDraw
import random
import glob, os
import csv
from multiprocessing import Pool
import subprocess
import time
width = 512
height = 512
scale = 2
np.random.seed(os.getpid() + int(time.time()))
random.seed(os.getpid() + int(time.time()))
class BaseData:
def __init__(self):
self.load_idmap()
def load_idmap(self):
self.glyph_id = {}
self.glyphs = {}
self.glyph_type = {}
self.glyph_id[''] = 0
self.glyphs[0] = ''
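# A glyph's id is the integer value of its UTF-32LE encoding, so
# multi-codepoint strings also get a unique id.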
with open(os.path.join('data','codepoints.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
codehex = row[1]
if len(codehex) > 7:
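# Entries longer than 7 hex digits are interpreted as a sequence of
# 4-digit units and rebuilt from consecutive \uXXXX escapes via eval.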
code = eval('"' + ''.join(['\\u' + codehex[i*4:i*4+4] for i in range(len(codehex) // 4)]) + '"')
else:
code = chr(int(codehex, 16))
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
with open(os.path.join('data','id_map.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
code = bytes.fromhex(row[2]).decode()
if code in self.glyph_id:
k = self.glyph_id[code]
else:
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
k = i
self.glyph_type[k] = int(row[3])
self.id_count = len(self.glyph_id)
def sub_load(args):
exe = os.path.join('data','load_font','load_font.exe')
if not os.path.exists(exe):
exe = os.path.join('data','load_font','load_font')
proc = subprocess.Popen([
exe,
args[0],
'128',
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ret = {}
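# Protocol of the load_font helper (as used here): write one UTF-32LE code
# point, read a 32-byte horizontal-metrics record plus bitmap, then a
# 28-byte vertical-metrics record plus bitmap.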
for c in args[1]:
if len(c) == 1:
charbuf = c.encode("utf-32-le")
proc.stdin.write(charbuf[:4])
proc.stdin.flush()
result = proc.stdout.read(32)
code = result[:4]
rows = int.from_bytes(result[4:8], 'little')
width = int.from_bytes(result[8:12], 'little')
boundingWidth = int.from_bytes(result[12:16], 'little', signed=True)
boundingHeight = int.from_bytes(result[16:20], 'little', signed=True)
horiBearingX = int.from_bytes(result[20:24], 'little', signed=True)
horiBearingY = int.from_bytes(result[24:28], 'little', signed=True)
horiAdvance = int.from_bytes(result[28:32], 'little', signed=True)
if rows * width == 0:
continue
assert(charbuf == code)
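# The metrics are divided by 64, i.e. they appear to be FreeType-style
# 26.6 fixed-point values.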
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
horiBearingX = horiBearingX / 64
horiBearingY = horiBearingY / 64
horiAdvance = horiAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': horiAdvance,
'image': img,
}
}
result = proc.stdout.read(28)
rows = int.from_bytes(result[:4], 'little')
width = int.from_bytes(result[4:8], 'little')
boundingWidth = int.from_bytes(result[8:12], 'little', signed=True)
boundingHeight = int.from_bytes(result[12:16], 'little', signed=True)
vertBearingX = int.from_bytes(result[16:20], 'little', signed=True)
vertBearingY = int.from_bytes(result[20:24], 'little', signed=True)
vertAdvance = int.from_bytes(result[24:28], 'little', signed=True)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
vertBearingX = vertBearingX / 64
vertBearingY = vertBearingY / 64
vertAdvance = vertAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'vertBearingX': vertBearingX,
'vertBearingY': vertBearingY,
'vertAdvance': vertAdvance,
'image': img,
}
ret[(args[0],c)] = value
else:
pass
proc.stdin.close()
return ret
def sub_load_image(path):
dirnames = glob.glob(os.path.join(path, '*'))
ret = {}
for d in dirnames:
c_code = os.path.basename(d)
char = str(bytes.fromhex(c_code), 'utf-8')
count = 0
for f in glob.glob(os.path.join(d, '*.png')):
rawim = np.asarray(Image.open(f).convert('L'))
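# Find the tight bounding box of the glyph: rows/columns that contain any
# non-white pixel.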
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%count,char)] = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': 96.0,
'image': img,
},
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
}
count += 1
vert_imgs = glob.glob(os.path.join(d, 'vert', '*.png'))
if 0 < len(vert_imgs) <= count:
for i in range(count):
f = vert_imgs[i % len(vert_imgs)]
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%i,char)]['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
elif 0 < len(vert_imgs):
vcount = 0
for f in vert_imgs:
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%vcount,char)] = {
'horizontal': ret[('hand%06d'%(vcount % count),char)]['horizontal'],
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': vertBearingY,
'vertBearingY': vertBearingX,
'vertAdvance': 96.0,
'image': img,
}
}
vcount += 1
return ret
def gaussian_kernel(kernlen=7, xstd=1., ystd=1.):
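# 2D Gaussian kernel built as the outer product of two 1D Gaussian windows
# (the standard deviation may differ in x and y).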
gkern1dx = signal.gaussian(kernlen, std=xstd).reshape(kernlen, 1)
gkern1dy = signal.gaussian(kernlen, std=ystd).reshape(kernlen, 1)
gkern2d = np.outer(gkern1dy, gkern1dx)
return gkern2d
def apply_random_filter(images):
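# Randomly blur (p < 0.25), sharpen via unsharp masking (0.25 <= p < 0.5),
# or return the images unchanged.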
p = np.random.uniform(0., 1.)
if p < 0.25:
sigma = np.random.uniform(0., 1.75)
return gaussian_filter(images, sigma=sigma)
if p < 0.5:
sigma = np.random.uniform(0., 6.)
gauss = gaussian_filter(images, sigma=sigma)
gain = np.random.uniform(0., 5.)
return (1 + gain) * images - gain * gauss
return images
def is_Font_match(font, target):
if target.startswith('hand'):
return font.startswith('hand')
else:
return font == target
class FontData(BaseData):
def __init__(self):
super().__init__()
self.img_cache = {}
print('loading handwrite image')
self.img_cache.update(sub_load_image(os.path.join('data','handwritten')))
print('loading enfont')
enfont_files = sorted(glob.glob(os.path.join('data','enfont','*.ttf')) + glob.glob(os.path.join('data','enfont','*.otf')))
en_glyphs = [self.glyphs[key] for key in self.glyphs.keys() if self.glyph_type.get(key,-1) in [0,1,2,6]]
items = [(f, en_glyphs) for f in enfont_files]
total = len(enfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
print('loading jpfont')
jpfont_files = sorted(glob.glob(os.path.join('data','jpfont','*.ttf')) + glob.glob(os.path.join('data','jpfont','*.otf')))
items = [(f, list(self.glyphs.values())) for f in jpfont_files]
total = len(jpfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
for key in self.img_cache:
i = self.glyph_id[key[1]]
if i not in self.glyph_type:
self.glyph_type[i] = type_count_max
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
gtype_count = [0 for _ in range(type_count_max)]
type_count = [0 for _ in range(type_count_max)]
for key in self.img_cache:
t = self.glyph_type[self.glyph_id[key[1]]]
type_count[t] += 1
for k in self.glyph_type:
gtype_count[self.glyph_type[k]] += 1
self.image_keys = list(self.img_cache.keys())
self.test_keys = self.get_test_keys()
self.train_keys = self.get_train_keys()
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1, 1.0]
self.prob_map = [p/t for p,t in zip(self.prob_map, type_count)]
self.random_probs_train = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.random_probs_test = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_kanji = [0, 0, 0, 0, 0, 1.0, 0, 0, 1.0, 1.0, 0.5, 0]
self.prob_map_kanji = [p/t for p,t in zip(self.prob_map_kanji, type_count)]
self.kanji_probs_train = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.kanji_probs_test = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_num = [1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_num = [p/t for p,t in zip(self.prob_map_num, type_count)]
self.num_probs_train = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.num_probs_test = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_alpha = [0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_alpha = [p/t for p,t in zip(self.prob_map_alpha, type_count)]
self.alpha_probs_train = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.alpha_probs_test = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_hira = [0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_hira = [p/t for p,t in zip(self.prob_map_hira, type_count)]
self.hira_probs_train = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.hira_probs_test = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
self.train_keys_num = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.train_num_fonts = list(set([key[0] for key in self.train_keys_num]))
self.test_keys_num = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.test_num_fonts = list(set([key[0] for key in self.test_keys_num]))
self.train_keys_capital = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.train_capital_fonts = list(set([key[0] for key in self.train_keys_capital]))
self.test_keys_capital = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.test_capital_fonts = list(set([key[0] for key in self.test_keys_capital]))
self.train_keys_small = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.train_small_fonts = list(set([key[0] for key in self.train_keys_small]))
self.test_keys_small = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.test_small_fonts = list(set([key[0] for key in self.test_keys_small]))
self.train_keys_alpha = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.train_alpha_fonts = list(set([key[0] for key in self.train_keys_alpha]))
self.test_keys_alpha = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.test_alpha_fonts = list(set([key[0] for key in self.test_keys_alpha]))
self.train_keys_jp = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.test_keys_jp = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.train_jp_fonts = list(set([key[0] for key in self.train_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_jp_fonts])
self.train_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_jp_fonts]
self.test_jp_fonts = list(set([key[0] for key in self.test_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_jp_fonts])
self.test_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_jp_fonts]
self.train_keys_hira = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.test_keys_hira = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.train_hira_fonts = list(set([key[0] for key in self.train_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_hira_fonts])
self.train_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_hira_fonts]
self.test_hira_fonts = list(set([key[0] for key in self.test_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_hira_fonts])
self.test_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_hira_fonts]
self.train_keys_jpnum = [x for x in self.train_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.train_jp_fonts)]
self.test_keys_jpnum = [x for x in self.test_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.test_jp_fonts)]
self.train_jpnum_fonts = list(set([key[0] for key in self.train_keys_jpnum]))
self.train_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.train_jpnum_fonts]
self.test_jpnum_fonts = list(set([key[0] for key in self.test_keys_jpnum]))
self.test_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.test_jpnum_fonts]
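# Weights used when sampling glyphs for clustering: distinct glyph ids per type
# divided by cached images per type; types 7 and above are excluded.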
self.prob_map_clustering = [
gtype_count[0] / type_count[0],
gtype_count[1] / type_count[1],
gtype_count[2] / type_count[2],
gtype_count[3] / type_count[3],
gtype_count[4] / type_count[4],
gtype_count[5] / type_count[5],
gtype_count[6] / type_count[6],
0.,
0.,
0.,
0.,
0.
]
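# Random background photos used for compositing, plus the clamp values for the
# center-heatmap Gaussian: max_std caps the kernel std, min_ker is the minimum
# kernel half-size.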
self.random_background = glob.glob(os.path.join('data','background','*'))
self.max_std = 8.0
self.min_ker = 4
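# Fonts whose file name starts with 'Noto' are held out as the test split;
# every other font goes to the training split.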
def get_test_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
test_keys = [k for k in keys if fontname(k[0]).startswith('Noto')]
return test_keys
def get_train_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
train_keys = [k for k in keys if not fontname(k[0]).startswith('Noto')]
return train_keys
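# Pick a random background image, flatten any alpha channel onto white, then
# randomly scale, crop and flip it and jitter brightness and contrast;
# returns a float32 image clipped to [-1, 1].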
def load_background_images(self):
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency (http://stackoverflow.com/a/1963146)
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
alpha = im.convert('RGBA').getchannel('A')
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
# (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
im_file = random.choice(self.random_background)
im = Image.open(im_file)
im = remove_transparency(im).convert('RGB')
scale_min = max(width / im.width, height / im.height)
scale_max = max(scale_min + 0.5, 1.5)
s = np.random.uniform(scale_min, scale_max)
im = im.resize((int(im.width * s)+1, int(im.height * s)+1))
x1 = np.random.randint(0, im.width - width)
y1 = np.random.randint(0, im.height - height)
im_crop = im.crop((x1, y1, x1 + width, y1 + height))
img = np.asarray(im_crop).astype(np.float32)
img = img / 128. - 1.
if np.random.uniform() < 0.5:
img = img[::-1,:,:]
if np.random.uniform() < 0.5:
img = img[:,::-1,:]
brightness = np.random.uniform(-1.0, 1.0)
brightness = np.array([brightness,brightness,brightness])
img += brightness[None,None,:]
contrast = np.random.uniform(0.2, 1.8)
contrast = np.array([contrast,contrast,contrast])
img = img * contrast[None,None,:]
img = np.clip(img, -1.0, 1.0)
return img
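# Render vertical text in which short digit runs are laid out horizontally
# inside the vertical line (tate-chu-yoko). Alongside the rendered image it
# builds the per-pixel training labels: center heatmap, x/y sizes, x/y offsets,
# text-line and separator masks, and the glyph-id map.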
def tateyokotext_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] == 0 else 0. for key in keys]
selection2 = [key for key in random.choices(keys, k=max_count*2, weights=probs2)]
base_line = width - text_size // 2
line_space = int(text_size * random.uniform(1.05, 2.0))
line_start = 0
line_end = 0
isnum = -1
i = 0
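# isnum tracks the tate-chu-yoko state: -1 = normal vertical glyph,
# 0/1 = first/second digit of a horizontal digit pair, >1 = pair finished.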
for key in selection:
if isnum < 0 or isnum > 1:
if np.random.uniform() < 0.1:
isnum = 0
else:
isnum = -1
if isnum < 0:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
horiBearingX = 0
else:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
key = selection2[i]
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
vertBearingX = -text_size * 0.5
vertBearingY = 0
vertAdvance = text_size
if line_end + vertAdvance >= height:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size / 2 < 0:
break
line_start = 0
line_end = 0
if isnum >= 0:
t = (line_end + vertBearingY + text_size * 0.75 - horiBearingY) / height
else:
t = (line_end + vertBearingY) / height
if isnum > 0:
l = (base_line + horiBearingX) / width
else:
l = (base_line + vertBearingX + horiBearingX) / width
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
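# Paint a Gaussian peak at the glyph center into keymap, then fill the glyph's
# elliptical footprint in the size, offset and glyph-id maps
# (all label maps are at 1/scale resolution).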
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
if isnum > 0:
l = int(np.clip(base_line + horiBearingX, 0, width - w))
else:
l = int(np.clip(base_line + vertBearingX + horiBearingX, 0, width - w))
if isnum >= 0:
t = int(np.clip(line_end + vertBearingY + text_size * 0.75 - horiBearingY, 0, height - h))
else:
t = int(np.clip(line_end + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
if isnum != 0:
line_end += vertAdvance
if isnum >= 0:
isnum += 1
i += 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
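# Render horizontal text in up to two column blocks split at line_break;
# a line marker is drawn per text row, and a vertical separator is added
# between the blocks when both received more than one glyph.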
def yoko_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
base_line += line_space
if base_line + text_size >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
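# Render vertical text (top-to-bottom, lines advancing right-to-left) in up to
# two row blocks split at line_break, with a horizontal separator between the
# blocks when both received more than one glyph.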
def tate_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = width - line_space + text_size // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
base_line -= line_space
if base_line - text_size / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
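# Vertical text with furigana: main glyphs are drawn at text_size2 and, with
# some probability, half-size glyphs (types 3/4, presumably kana) are drawn
# along a second baseline to the right of each main line.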
def tatefurigana_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size2 * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = width - line_space + text_size2 // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
# Furigana (ruby text) processing
base_line2 = base_line + text_size2 // 2 + text_size // 2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
# Skip this spot (leave a gap in the furigana)
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
line_start2 += int(vertAdvance)
line_start2p = line_start2
continue
# Render a furigana glyph here
l = (base_line2 + vertBearingX) / width
t = (line_start2 + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line2 + vertBearingX, 0, width - w))
t = int(np.clip(line_start2 + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start2 += int(vertAdvance)
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size2 / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size2 // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size2 // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
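# Horizontal text with furigana: main glyphs at text_size2 with half-size
# glyphs (types 3/4, presumably kana) drawn along a second baseline just above
# each main line.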
def yokofurigana_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size2 * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
# Furigana (ruby text) processing
base_line2 = base_line - text_size2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
# Skip this spot (leave a gap in the furigana)
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line2 // scale),
(line_start2 // scale, base_line2 // scale)), fill=255, width=3)
line_start2 += int(horiAdvance)
line_start2p = line_start2
continue
# Render a furigana glyph here
l = (line_start2 + horiBearingX) / width
t = (base_line2 - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line2 - horiBearingY, 0, height - h))
left = int(np.clip(line_start2 + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start2 += int(horiAdvance)
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line2 // scale),
(line_start2 // scale, base_line2 // scale)), fill=255, width=3)
base_line += line_space
if base_line + text_size2 >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
def null_images(self):
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
labels = np.stack([keymap, xsizes, ysizes, offsetx, offsety, lines, seps], -1)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_random_line(self):
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = Image.fromarray(seps)
draw1 = ImageDraw.Draw(seps)
images = Image.fromarray(images)
draw2 = ImageDraw.Draw(images)
linew = int(np.clip(np.random.uniform() * 20, scale, 20))
x1 = np.random.normal() * width / 2 + width / 2
y1 = np.random.normal() * height / 2 + height / 2
x2 = np.random.normal() * width / 2 + width / 2
y2 = np.random.normal() * height / 2 + height / 2
draw1.line(((x1 // scale, y1 // scale), (x2 // scale, y2 // scale)), fill=255, width=linew//scale+1)
draw2.line(((x1, y1), (x2, y2)), fill=255, width=linew)
labels = np.stack([keymap, xsizes, ysizes, offsetx, offsety, lines, np.asarray(seps) / 255.], -1)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
images = np.asarray(images) / 255.
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_images_random(self, keys, probs):
max_count = 64
angle_max = 15.0
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
selection = [key for key in random.choices(keys, k=np.random.randint(2,max_count), weights=probs)]
i = 0
boxprev = np.zeros([0, 4])
if random.random() < 0.1:
margin = 20
line_c = random.randint(0,3)
lw = random.randint(2, 10)
if line_c == 0:
x = random.randrange(width // 2, width)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, :px] = 1
images[y:y+lw, :x] = 255
boxprev = np.concatenate([boxprev, [[0, (x + margin)/width, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 1:
x = random.randrange(0, width // 2)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, px:] = 1
images[y:y+lw, x:] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, 1, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 2:
y = random.randrange(height // 2, height)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[:py, px:px+lw//scale+1] = 1
images[:y, x:x+lw] = 255
boxprev =
|
np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, 0, (y + margin)/height]]])
|
numpy.concatenate
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 11:47:40 2021
Create GEMINI precipitation input from FAST data
@author: zettergm
"""
# imports
#import typing as T
import numpy as np
import xarray
import gemini3d.write as write
from gemini3d.config import datetime_range
import matplotlib.pyplot as plt
from fast import readfast,smoothfast
# global vars
pi=np.pi
filename="/Users/zettergm/Dropbox (Personal)/proposals/UNH_GDC/FASTdata/nightside.txt"
debug=True
def fast2GEMINI(cfg, xg):
# output dict.
pg={}
# read in the data
[invlat,eflux,chare]=readfast(filename)
# smooth data a bit prior to insertion into model
lsmooth=0
[efluxsmooth,charesmooth]=smoothfast(lsmooth,eflux,chare)
# basic grid info
gridmlat=90-xg["theta"]*180/pi
gridmlon=xg["phi"]*180/pi
mlatmin=np.min(gridmlat)
mlatmax=np.max(gridmlat)
mlonmin=np.min(gridmlon)
mlonmax=np.max(gridmlon)
# precipitation input grids
llon=128
llat=invlat.size
mlon=np.linspace(mlonmin,mlonmax,llon)
mlat=invlat
mlonctr=np.average(mlon)
mlatctr=np.average(mlat)
# fast data may need to be sorted along the latitude axis
isort=np.argsort(mlat)
mlat=mlat[isort]
efluxsmooth=efluxsmooth[isort]
charesmooth=charesmooth[isort]
# for convenience recenter grid on what the user has made
#dmlat=np.average(gridmlat)-mlatctr
dmlat=0
mlat=mlat+dmlat
# time grid for precipitation
time = datetime_range(cfg["time"][0], cfg["time"][0] + cfg["tdur"], cfg["dtprec"])
lt=len(time)
t=np.empty( (lt) )
for k in range(0,lt):
t[k]=time[k].timestamp()
meant=np.average(t)
# longitude shape
Q=np.empty( (lt,llon,llat) )
E0=np.empty( (lt,llon,llat) )
siglon=15
sigt=100
for k in range(0,lt):
tshape=np.exp(-(t[k]-meant)**2/2/sigt**2)
for ilon in range(0,llon):
lonshape=
|
np.exp(-(mlon[ilon]-mlonctr)**2/2/siglon**2)
|
numpy.exp
|
import os
import numpy as np
import scipy
import crcmod.predefined
from scipy import integrate
import matplotlib.pyplot as plt
from bitstring import ConstBitStream
def createRandomData(filepath, datasize):
np.random.seed()
file = open(filepath, "w")
data = np.random.randint(2, size=datasize).tolist()
# print(data)
for i in range(datasize):
file.write(str(data[i]))
def dec2arr(dec_value, length):
''' Receive a decimal value and transform to a BINARY numpy ARRAY with size of length '''
bin_str = bin(dec_value)[2:]
bin_array = np.zeros(length, dtype=int)  # np.int was removed from NumPy; use the builtin int
bin_array[length-len(bin_str):length] = np.array(list(bin_str), dtype=int)
return bin_array
def arr2dec(bin_arr):
''' Receive a BINARY numpy ARRAY and transform to a decimal value '''
dec = 0
factor = 1
for i in bin_arr[::-1]:
dec += i * factor
factor *= 2
return dec
def arr2str(arr):
''' Receive a BINARY numpy ARRAY and transform to a string with 0s and 1s '''
string = ''.join([str(x) for x in arr])
return string
def generateCRC(bin_array, mode='crc-8'):
''' CRC of a BINARY numpy ARRAY, computed over its hex (base-16) encoded string '''
crc_func = crcmod.predefined.Crc(mode)
hexData = hex(int(arr2str(bin_array), 2))[2:]
crc_func.update(hexData.encode())
crc_result = hex(crc_func.crcValue)
return crc_result
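# Illustrative round trip for the helpers above (added as a sketch; the concrete
# values are made up): encode a small decimal value with dec2arr, confirm arr2dec
# inverts it, and compute a CRC over the resulting bit array.
def _demo_bit_helpers():
    bits = dec2arr(11, 8)              # -> array([0, 0, 0, 0, 1, 0, 1, 1])
    assert arr2dec(bits) == 11         # arr2dec is the inverse of dec2arr
    assert arr2str(bits) == '00001011'
    print(generateCRC(bits, mode='crc-8'))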
def generateCarrierWave(fs, fc):
one_second = np.linspace(0, 1, fs)
carrier = np.sin(2 * np.pi * fc * one_second)
return carrier
def generatePreambleWave(fs, wave_len_per_bit):
one_second =
|
np.linspace(0, 1, fs)
|
numpy.linspace
|
"""
October 03, 2020
Author: <NAME>
"""
import tensorflow as tf
import numpy as np
import cv2
# MTCNN face detector
#from mtcnn.mtcnn import MTCNN
#detector = MTCNN()
# RetinaFace face detector
detector_model = tf.saved_model.load('./tf_retinaface_mbv2/')
def one_face(frame, bbs, pointss):
# process only the face whose bounding box center is closest to the frame center
offsets = [(bbs[:,0]+bbs[:,2])/2-frame.shape[1]/2,
(bbs[:,1]+bbs[:,3])/2-frame.shape[0]/2]
offset_dist = np.sum(np.abs(offsets),0)
index = np.argmin(offset_dist)
bb = bbs[index]
points = pointss[:,index]
return bb, points
def draw_landmarks(frame, bb, points):
# draw rectangle and landmarks on face
cv2.rectangle(frame,(int(bb[0]),int(bb[1])),(int(bb[2]),int(bb[3])),orange,2)
cv2.circle(frame, (int(points[0]), int(points[5])), 2, (255,0,0), 2)# eye
cv2.circle(frame, (int(points[1]), int(points[6])), 2, (255,0,0), 2)
cv2.circle(frame, (int(points[2]), int(points[7])), 2, (255,0,0), 2)# nose
cv2.circle(frame, (int(points[3]), int(points[8])), 2, (255,0,0), 2)# mouth
cv2.circle(frame, (int(points[4]), int(points[9])), 2, (255,0,0), 2)
w = int(bb[2])-int(bb[0])# width
h = int(bb[3])-int(bb[1])# height
w2h_ratio = w/h# ratio
eye2box_ratio = (points[0]-bb[0]) / (bb[2]-points[1])
cv2.putText(frame, "Width (pixels): {}".format(w), (10,30), font, font_size, red, 1)
cv2.putText(frame, "Height (pixels): {}".format(h), (10,40), font, font_size, red, 1)
if w2h_ratio < 0.7 or w2h_ratio > 0.9:
#cv2.putText(frame, "width/height: {0:.2f}".format(w2h_ratio), (10,40), font, font_size, blue, 1)
cv2.putText(frame, "Narrow Face", (10,60), font, font_size, red, 1)
if eye2box_ratio > 1.5 or eye2box_ratio < 0.88:
#cv2.putText(frame, "leye2lbox/reye2rbox: {0:.2f}".format((points[0]-bb[0]) / (bb[2]-points[1])), (10,70), font, font_size, red, 1)
cv2.putText(frame, "Acentric Face", (10,70), font, font_size, red, 1)
def find_smile(pts):
dx_eyes = pts[1] - pts[0]# between pupils
dx_mout = pts[4] - pts[3]# between mouth corners
smile_ratio = dx_mout/dx_eyes
return smile_ratio
def find_roll(pts):
return pts[6] - pts[5]
def find_yaw(pts):
le2n = pts[2] - pts[0]
re2n = pts[1] - pts[2]
return le2n - re2n
def find_pitch(pts):
eye_y = (pts[5] + pts[6]) / 2
mou_y = (pts[8] + pts[9]) / 2
e2n = eye_y - pts[7]
n2m = pts[7] - mou_y
return e2n/n2m
def find_pose(points):
X = points[0:5]
Y = points[5:10]
angle = np.arctan((Y[1]-Y[0])/(X[1]-X[0]))/np.pi*180
alpha = np.cos(np.deg2rad(angle))
beta = np.sin(np.deg2rad(angle))
# rotated points
Xr = np.zeros((5))
Yr = np.zeros((5))
for i in range(5):
Xr[i] = alpha*X[i]+beta*Y[i]+(1-alpha)*X[2]-beta*Y[2]
Yr[i] = -beta*X[i]+alpha*Y[i]+beta*X[2]+(1-alpha)*Y[2]
# average distance between eyes and mouth
dXtot = (Xr[1]-Xr[0]+Xr[4]-Xr[3])/2
dYtot = (Yr[3]-Yr[0]+Yr[4]-Yr[1])/2
# average distance between nose and eyes
dXnose = (Xr[1]-Xr[2]+Xr[4]-Xr[2])/2
dYnose = (Yr[3]-Yr[2]+Yr[4]-Yr[2])/2
# relative rotation 0% is frontal 100% is profile
Xfrontal = np.abs(np.clip(-90+90/0.5*dXnose/dXtot,-90,90))
Yfrontal = np.abs(np.clip(-90+90/0.5*dYnose/dYtot,-90,90))
return Xfrontal, Yfrontal# horizontal and vertical angles
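# Minimal sketch (hypothetical landmark values) of how the pose helpers above are
# called: the 10-element vector holds the five x coordinates (left eye, right eye,
# nose, mouth corners) followed by the matching five y coordinates.
def _demo_pose_helpers():
    pts = np.array([30.0, 70.0, 50.0, 35.0, 65.0,   # x: eyes, nose, mouth corners
                    40.0, 40.0, 55.0, 70.0, 70.0])  # y: eyes, nose, mouth corners
    print('roll:', find_roll(pts), 'yaw:', find_yaw(pts), 'pitch:', find_pitch(pts))
    print('frontal-ness (x, y):', find_pose(pts))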
def face_detector(image, image_shape_max=640, score_min=None, pixel_min=None, pixel_max=None, Ain_min=None):
'''
Performs face detection using retinaface method with speed boost and initial quality checks based on whole image size
Parameters
----------
image : uint8
image for face detection.
image_shape_max : int, optional
maximum size (in pixels) of the longer image side. The default is 640.
score_min : float, optional
minimum detection score (0 to 1). The default is None.
pixel_min : int, optional
minimum face size based on height of bounding box. The default is None.
pixel_max : int, optional
maximum face size based on height of bounding box. The default is None.
Ain_min : float, optional
minimum area of face in bounding box. The default is None.
Returns
-------
float array
landmarks.
float array
bounding boxes.
float array
detection scores.
float array
face area in bounding box.
'''
image_shape = image.shape[:2]
# perform image resize for faster detection
if image_shape_max:
scale_factor = max([1, max(image_shape)/image_shape_max])
else:
scale_factor = 1
if scale_factor > 1:
scaled_image = cv2.resize(image, (0, 0), fx=1/scale_factor, fy=1/scale_factor)
bbs_all, points_all = retinaface(scaled_image)
bbs_all[:,:4]*=scale_factor
points_all*=scale_factor
else:
bbs_all, points_all = retinaface(image)
bbs=bbs_all.copy()
points=points_all.copy()
# check detection score
if score_min:
mask=np.array(bbs[:,4]>score_min)
bbs=bbs[mask]
points=points[mask]
if len(bbs)==0:
return [],[],[],[]
# check pixel height
if pixel_min:
pixel=bbs[:,3]-bbs[:,1]
mask=np.array(pixel>pixel_min)
bbs=bbs[mask]
points=points[mask]
if len(bbs)==0:
return [],[],[],[]
if pixel_max:
pixel=bbs[:,3]-bbs[:,1]
mask=
|
np.array(pixel<pixel_max)
|
numpy.array
|
import numpy as np
import healpy as hp
import pymaster as nmt
import matplotlib.pyplot as plt
#This script describes the functionality of the flat-sky version of pymaster
#Dimensions:
#First, a flat-sky field is defined by four quantities:
# - Lx and Ly: the size of the patch in the x and y dimensions (in radians)
Lx=72.*np.pi/180; Ly=48.*np.pi/180;
# - Nx and Ny: the number of pixels in the x and y dimensions
Nx=602; Ny=410;
#Gaussian simulations:
#pymaster allows you to generate random realizations of both spherical and
#flat fields given a power spectrum. These are returned as 2D arrays with
#shape (Ny,Nx)
l,cl_tt,cl_ee,cl_bb,cl_te=np.loadtxt('cls.txt',unpack=True);
beam=np.exp(-(0.25*np.pi/180*l)**2);
cl_tt*=beam; cl_ee*=beam; cl_bb*=beam; cl_te*=beam;
mpt,mpq,mpu=nmt.synfast_flat(Nx,Ny,Lx,Ly,[cl_tt,cl_ee,cl_bb,cl_te],pol=True)
#You can have a look at the maps using matplotlib's imshow:
plt.figure(); plt.imshow(mpt,interpolation='nearest',origin='lower'); plt.colorbar()
plt.figure(); plt.imshow(mpq,interpolation='nearest',origin='lower'); plt.colorbar()
plt.figure(); plt.imshow(mpu,interpolation='nearest',origin='lower'); plt.colorbar()
plt.show()
#Masks:
#Let's now create a mask:
mask=np.ones_like(mpt).flatten()
xarr=np.ones(Ny)[:,None]*np.arange(Nx)[None,:]*Lx/Nx
yarr=np.ones(Nx)[None,:]*np.arange(Ny)[:,None]*Ly/Ny
#First we dig a couple of holes
def dig_hole(x,y,r) :
rad=(np.sqrt((xarr-x)**2+(yarr-y)**2)).flatten()
return
|
np.where(rad<r)
|
numpy.where
|
import numpy as np
from BDSpace.Coordinates.transforms import reduce_angle
from BDSpace.Figure import Figure
class SphericalShape(Figure):
def __init__(self, name='Spherical shape', coordinate_system=None,
r_inner=0.0, r_outer=1.0, phi=np.pi/2):
self.__r_inner = None
self.r_inner = max(min(r_inner, r_outer), 0.0)
self.__r_outer = None
self.r_outer = max(max(r_inner, r_outer), 0.0)
self.__phi = None
self.phi = phi
super(SphericalShape, self).__init__(name, coordinate_system=coordinate_system)
@property
def r_inner(self):
return self.__r_inner
@r_inner.setter
def r_inner(self, r_inner):
self.__r_inner = np.float64(r_inner)
@property
def r_outer(self):
return self.__r_outer
@r_outer.setter
def r_outer(self, r_outer):
self.__r_outer = np.float64(r_outer)
@property
def phi(self):
return self.__phi
@phi.setter
def phi(self, phi):
self.__phi = reduce_angle(np.float64(phi))
class SphericalWedge(SphericalShape):
def __init__(self, name='Spherical wedge', coordinate_system=None,
r_inner=0.0, r_outer=1.0, phi=np.pi/2, theta=np.array([0.0, np.pi/2])):
self.__theta = None
self.theta = theta
super(SphericalWedge, self).__init__(name, coordinate_system=coordinate_system,
r_inner=r_inner, r_outer=r_outer, phi=phi)
@property
def theta(self):
return self.__theta
@theta.setter
def theta(self, theta):
max_theta = reduce_angle(max(theta))
if max_theta > np.pi:
max_theta = 2 * np.pi - max_theta
min_theta = reduce_angle(min(theta))
if min_theta > np.pi:
min_theta = 2 * np.pi - min_theta
theta_range = [min_theta, max_theta]
self.__theta = np.array(theta_range, dtype=np.float64)
def inner_volume(self):
if self.phi == 2 * np.pi and (self.theta[1] - self.theta[0]) == np.pi:
return 4/3 * np.pi * self.r_inner**3
else:
return 0
def external_volume(self):
if self.phi == 2 * np.pi and (self.theta[1] - self.theta[0]) == np.pi:
return 4/3 * np.pi * self.r_outer**3
else:
v_inner = 1/3 * self.phi * self.r_inner**3 * (np.cos(self.theta[0]) - np.cos(self.theta[1]))
v_outer = 1/3 * self.phi * self.r_outer**3 * (np.cos(self.theta[0]) - np.cos(self.theta[1]))
return v_outer - v_inner
def inner_surface_area(self):
if self.phi == 2 * np.pi and (self.theta[1] - self.theta[0]) == np.pi:
return 4 * np.pi * self.r_inner**2
else:
return 0
def external_surface_area(self):
if self.phi == 2 * np.pi and (self.theta[1] - self.theta[0]) == np.pi:
return 4 * np.pi * self.r_outer**2
else:
s_inner = self.phi * self.r_inner**2 * (np.cos(self.theta[0]) - np.cos(self.theta[1]))
s_outer = self.phi * self.r_outer**2 * (np.cos(self.theta[0]) - np.cos(self.theta[1]))
if self.phi < 2 * np.pi:
s_sides = (self.theta[1] - self.theta[0]) * (self.r_outer**2 - self.r_inner**2)
else:
s_sides = 0
s_sides += (self.phi / 2) * (self.r_outer**2 - self.r_inner**2) * np.sin(self.theta[0])
s_sides += (self.phi / 2) * (self.r_outer**2 - self.r_inner**2) * np.sin(self.theta[1])
return s_inner + s_outer + s_sides
class SphericalCone(SphericalWedge):
def __init__(self, name='Spherical cone', coordinate_system=None,
r_inner=0.0, r_outer=1.0, theta=np.pi/4):
super(SphericalCone, self).__init__(name, coordinate_system=coordinate_system,
r_inner=r_inner, r_outer=r_outer, phi=2*np.pi, theta=np.array([0, theta]))
class Sphere(SphericalCone):
def __init__(self, name='Sphere', coordinate_system=None, r_inner=0.0, r_outer=1.0):
super(Sphere, self).__init__(name, coordinate_system=coordinate_system,
r_inner=r_inner, r_outer=r_outer, theta=np.pi)
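# Quick illustrative check (not part of BDSpace): for a solid wedge of unit radius
# the formula above reduces to V = phi/3 * (cos(theta_0) - cos(theta_1)). This
# assumes reduce_angle leaves angles already inside [0, 2*pi) unchanged.
def _demo_wedge_volume():
    wedge = SphericalWedge(r_inner=0.0, r_outer=1.0, phi=np.pi / 2,
                           theta=np.array([0.0, np.pi / 2]))
    expected = (np.pi / 2) / 3.0 * (np.cos(0.0) - np.cos(np.pi / 2))  # = pi/6
    assert np.isclose(wedge.external_volume(), expected)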
class SphericalSegmentWedge(SphericalShape):
def __init__(self, name='Spherical section', coordinate_system=None, r_inner=0.0, r_outer=1.0,
h1=0.0, h2=1.0, phi=np.pi/2):
super(SphericalSegmentWedge, self).__init__(name, coordinate_system=coordinate_system,
r_inner=r_inner, r_outer=r_outer, phi=phi)
self.__h1 = None
self.h1 = max(min(h1, h2), -self.r_outer)
self.__h2 = None
self.h2 = min(max(h1, h2), self.r_outer)
@property
def h1(self):
return self.__h1
@h1.setter
def h1(self, h1):
self.__h1 =
|
np.float64(h1)
|
numpy.float64
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
__all__ = [
'to_json',
'from_json',
'to_disk',
'from_disk',
]
import json.tool
import numpy as np
from matrixprofile import core
from matrixprofile.io.protobuf.protobuf_utils import (
to_mpf,
from_mpf
)
# Supported file extensions
SUPPORTED_EXTS = set([
'json',
'mpf',
])
# Supported file formats
SUPPORTED_FORMATS = set([
'json',
'mpf',
])
def JSONSerializer(obj):
"""
Default JSON serializer to write numpy arrays and other non-supported
data types.
Borrowed from:
https://stackoverflow.com/a/52604722
"""
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
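# Hedged usage sketch: JSONSerializer is meant to be passed as the ``default`` hook
# of json.dumps so that numpy arrays and scalars embedded in a profile serialize
# cleanly. The dictionary below is a made-up stand-in for a real profile.
def _demo_json_serializer():
    fake_profile = {'mp': np.arange(4, dtype='float64'), 'w': np.int64(3)}
    return json.dumps(fake_profile, default=JSONSerializer)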
def from_json(profile):
"""
Converts a JSON formatted string into a profile data structure.
Parameters
----------
profile : str
The profile as a JSON formatted string.
Returns
-------
profile : dict_like
A MatrixProfile or Pan-MatrixProfile data structure.
"""
dct = json.loads(profile)
# handle pmp and convert to appropriate types
if core.is_pmp_obj(dct):
dct['pmp'] = np.array(dct['pmp'], dtype='float64')
dct['pmpi'] = np.array(dct['pmpi'], dtype=int)
dct['data']['ts'] =
|
np.array(dct['data']['ts'], dtype='float64')
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Unittests for pointcloud
@author: simlk
"""
import os
# import sys
import unittest
# import time
import logging
import numpy as np
import tempfile
import json
# import ctypes
from thatsDEM2 import pointcloud, osr_utils
LOG = logging.getLogger(__name__)
class TestPointcloud(unittest.TestCase):
def test_pointcloud_constructor1(self):
LOG.info("Testing pointcloud constructor")
pc = pointcloud.Pointcloud(np.ones((2, 2)), np.ones(
2), some_attr=np.ones(2, dtype=np.uint8))
self.assertIn("some_attr", pc.attributes)
self.assertTrue(pc.some_attr.dtype == np.uint8)
def test_pointcloud_constructor_bad(self):
LOG.info("Testing pointcloud constructor -bad")
with self.assertRaises(AssertionError):
pc = pointcloud.Pointcloud(np.ones((2, 2)), np.ones(
2), some_attr=np.ones(4, dtype=np.uint8))
def test_pointcloud_empty_like(self):
LOG.info("Testing pointcloud empty_like factory function")
pc = pointcloud.Pointcloud(
np.ones((2, 2)), np.ones(2), some_attr=np.ones(2))
empty = pointcloud.empty_like(pc)
self.assertSetEqual(pc.attributes, empty.attributes)
self.assertEqual(empty.size, 0)
def test_extend_pointcloud1(self):
LOG.info("Testing pointcloud extension - bad")
pc1 = pointcloud.Pointcloud(
np.ones((2, 2)), np.ones(2), some_attr=np.ones(2))
pc2 = pointcloud.Pointcloud(np.ones((2, 2)), np.ones(2))
with self.assertRaises(ValueError):
pc1.extend(pc2)
def test_extend_pointcloud2(self):
LOG.info("Testing pointcloud extension - ok")
pc1 = pointcloud.Pointcloud(
np.ones((2, 2)), np.ones(2), some_attr=np.ones(2))
pc2 = pointcloud.Pointcloud(
np.ones((2, 2)), np.ones(2), some_attr=np.ones(2) * 3)
pc1.extend(pc2)
self.assertEqual(pc1.size, 4)
self.assertIn("some_attr", pc1.attributes)
def test_extend_pointcloud3(self):
LOG.info("Testing pointcloud extension - least common")
pc1 = pointcloud.Pointcloud(np.ones((2, 2)), np.ones(
2), some_attr=np.ones(2), some_other=np.ones(2))
pc2 = pointcloud.Pointcloud(
np.ones((2, 2)), np.ones(2), some_attr=np.ones(2) * 3)
pc1.extend(pc2, least_common=True)
self.assertSetEqual(pc1.attributes, {"some_attr"})
def test_thin_pointcloud(self):
LOG.info("Testing thin pointcloud")
pc = pointcloud.Pointcloud(np.ones((5, 2)), np.ones(
5), some_attr=np.ones(5), some_other=np.ones(5))
M = np.array([1, 0, 1, 1, 0]).astype(bool)
pc.thin(M)
self.assertEqual(pc.size, M.sum())
self.assertSetEqual(pc.attributes, {"some_attr", "some_other"})
def test_cut_pointcloud(self):
LOG.info("Testing cut poincloud")
pc = pointcloud.Pointcloud(np.ones((5, 2)), np.ones(5),
some_attr=np.ones(5), some_other=np.ones(5))
M = np.array([1, 0, 1, 1, 0]).astype(bool)
pc = pc.cut(M)
self.assertEqual(pc.size, M.sum())
self.assertSetEqual(pc.attributes, {"some_attr", "some_other"})
def test_lidar_pointcloud(self):
LOG.info("Testing lidar pointcloud")
pc = pointcloud.LidarPointcloud(np.ones((3, 2)), np.ones(3), c=[
2, 2, 3], some_attr=np.ones(3))
self.assertEqual(set(pc.get_classes()), set([2, 3]))
self.assertEqual(pc.cut_to_class(2).size, 2)
def test_lidar_pointcloud_chained_cut(self):
LOG.info("Testing lidar pointcloud chained cut")
# We want subclasses to return subclasses in cut
pc = pointcloud.LidarPointcloud(np.ones((3, 2)), np.ones(3), c=[
2, 2, 3], some_attr=np.ones(3))
pc2 = pc.cut_to_class(3).cut_to_box(-10, -10, 10, 10).cut_to_class(3)
self.assertEqual(pc2.size, 1)
def test_pointcloud_conversion(self):
LOG.info("Testing pointcloud conversion")
pc = pointcloud.Pointcloud(np.ones((5, 2)), np.ones(5),
c=np.ones(5, dtype=np.uint8))
lpc1 = pc.astype(pointcloud.LidarPointcloud)
lpc2 = lpc1.cut_to_class(1)
self.assertEqual(lpc1.size, lpc2.size)
def test_sort_pointcloud(self):
LOG.info("Test pointcloud sorting")
r = np.linspace(0, np.pi * 2, 100)
xy = np.column_stack((r * np.cos(r), r * np.sin(r))) * 5
c = np.arange(xy.shape[0])
pc = pointcloud.Pointcloud(xy, np.ones(xy.shape[0]), c=c)
pc.sort_spatially(1, keep_sorting=True)
self.assertTrue((c != pc.c).any())
pc.sort_back()
self.assertTrue((pc.c == c).all())
def test_pointcloud_might_overlap(self):
LOG.info("Test pointcloud sorting")
pc1 = pointcloud.Pointcloud.from_array(
|
np.ones((10, 10))
|
numpy.ones
|
import os
import numpy as np
import json
import re
class Categorizer(object):
def __init__(self):
self.obj = None
self.nan_filler = "Other"
def read_file_conditions(self, filename):
try:
# Get file location
file_path = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(os.path.dirname(file_path))
data_path = os.path.join(parent, "json_keys/" + filename)
except Exception as e:
print(e)
raise e
try:
# open file and retrieve object with json's information
with open(data_path, 'r') as file:
data = file.read()
self.obj = json.loads(data)
except Exception as e:
print(e)
raise e
def check_conditions(self, row):
result = self.nan_filler
is_condition_met = False
for label in self.obj:
for comparison in self.obj[label]:
if re.search(comparison, str(row)) is not None:
result = label
is_condition_met = True
break
if is_condition_met:
break
return result
def categorize(self, original_col, new_column_name, df):
func =
|
np.vectorize(self.check_conditions)
|
numpy.vectorize
|
import sampling
from plotting import plot_3well_potential, plot_3well_vector_field
import numpy as np
import os.path
# paths
my_path = os.path.abspath(os.path.dirname(__file__))
data_path = os.path.join(my_path, 'data')
figures_path = os.path.join(my_path, 'figures')
example_name = 'triplewell'
#triple well in 2D energy landscape V and gradient dV
factor = 0.25
V_param = lambda x, y, p: -1 * factor*(3*np.exp(-x**2-(y-(1./3))**2) \
- p*np.exp(-x**2-(y-(5./3))**2) - 5*np.exp(-(x-1)**2-y**2) \
- 5*np.exp(-(x+1)**2-y**2) + 0.2*(x**4) + 0.2*(y-1./3)**4)
dV_param_x = lambda x, y, p: -1 * factor*((-2*3*x)*np.exp(-x**2-(y-(1./3))**2) \
+ (p*2*x)*np.exp(-x**2-(y-(5./3))**2) \
+ (10*(x-1))*np.exp(-(x-1)**2-y**2) \
+ (10*(x+1))*np.exp(-(x+1)**2-y**2) + 0.8*(x**3))
dV_param_y = lambda x, y, p: -1 * factor*((-2*3*(y-1./3)) \
* np.exp(-x**2-(y-(1./3))**2) \
+ (p*2*(y-(5./3)))*np.exp(-x**2-(y-(5./3))**2) \
+ (10*y)*np.exp(-(x-1)**2-y**2) \
+ (10*y)*np.exp(-(x+1)**2-y**2) + 0.8*(y-1./3)**3)
V0 = lambda x, y: -1 * V_param(x, y, 3)
dV0 = lambda x, y: np.array([dV_param_x(x, y, 3), dV_param_y(x, y, 3)])
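# Illustrative sanity check (added for exposition, not part of the original script):
# the Gaussian bumps in V_param put the three wells near (-1, 0), (1, 0) and
# (0, 5/3), so V0 should be noticeably lower there than at the origin.
for _well in [(-1.0, 0.0), (1.0, 0.0), (0.0, 5.0 / 3.0)]:
    print('V0%s = %.3f' % (_well, V0(*_well)))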
# triple well in 2D gradient dV plus circular forcing
M = 6 # length of period
# forcing is a time-periodic rotational field: at time index m the gradient is
# perturbed by factor_forced * cos(2*pi*m/M) * (-y, x)
factor_forced = 1.4
dV_forced = lambda x, y, m: np.array([dV_param_x(x, y, 3), dV_param_y(x, y, 3)]) \
+ factor_forced*np.cos(m*2.*np.pi/M)* np.array([-y, x])
# plot potential and gradient
title = 'Triple well Potential'
subtitles=[
r'$V(x, y)$',
]
plot_3well_potential(
potential=V0,
title=title,
file_path=os.path.join(figures_path, example_name + '_' + 'potential.png'),
subtitles=subtitles,
)
title = 'Triple well Gradient and Force'
subtitles=[
r'$-\nabla V(x, y)$',
r'$-\nabla V(x, y) + F(0, x, y)$',
r'$-\nabla V(x, y) + F(3, x, y)$',
]
plot_3well_vector_field(
vector_field=dV0,
vector_field_forced=dV_forced,
title=title,
file_path=os.path.join(figures_path, example_name + '_' \
+ 'vector_field.png'),
subtitles=subtitles,
)
#count matrix (triple well, no extra forcing)
interval = np.array([[-2, 2], [-1.2, 2.2]]) #size of state space
# discretization of state space into dx cells for transition matrix
dx_power = 1
dx = 2. / (10**dx_power)
# box centers in x and y direction
x = np.arange(interval[0, 0], interval[0, 1] + dx, dx)
y = np.arange(interval[1, 0], interval[1, 1] + dx, dx)
xv, yv = np.meshgrid(x, y)
xdim = np.shape(xv)[0] # discrete dimension in x and y direction
ydim = np.shape(xv)[1]
dim_st = xdim * ydim # dimension of the statespace
xn = np.reshape(xv, (xdim*ydim, 1))
yn = np.reshape(yv, (xdim*ydim, 1))
grid = np.squeeze(np.array([xn, yn]))
# row stochastic transition matrix
T = sampling.transitionmatrix_2D(
force=dV0,
sigma=1.0, # lag time of transition matrix is lag*dt
dt=0.02, # dt for Euler Maruyama discretization
lag=15,
Nstep=10000, # number of seeds per box for count matrix
interval=interval,
x=x,
y=y,
dx=dx,
)
# row stochastic transition matrix
T_small_noise = sampling.transitionmatrix_2D(
force=dV0,
sigma=0.26,
dt=0.02,
lag=15,
Nstep=40000,
interval=interval,
x=x,
y=y,
dx=dx,
)
sigma=1.0
# transition matrix for triple well plus circular forcing
T_m = np.zeros((M, dim_st, dim_st))
for m in np.arange(M):
T_m[m, :, :] = sampling.transitionmatrix_2D(lambda x, y : \
dV_forced(x, y, m), sigma=1.0, dt=0.02, lag=15, Nstep=10000, interval=interval, x=x, y=y, dx=dx )
# defining A and B
# define by center and radius!
A_center = np.array([-1, 0])
B_center =
|
np.array([1, 0])
|
numpy.array
|
import math
import multiprocessing
import itertools
import glob
import sys
import time
import re
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits as pyfits
from scipy.optimize import fmin_powell
from scipy.interpolate import RectBivariateSpline
from . import kepio, kepmsg, kepkey, kepplot, kepfit, kepfunc
from .utils import PyKEArgumentHelpFormatter
__all__ = ['kepprfphot']
def kepprfphot(infile, prfdir, columns, rows, fluxes, border=0,
background=False, focus=False, ranges='0,0', xtol=1e-4,
ftol=1e-2, qualflags=False, outfile=None, plot=False, overwrite=False,
verbose=False, logfile='kepprfphot.log'):
"""
kepprfphot -- Fit a PSF model to time series observations within a Target
Pixel File
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing Kepler Target
Pixel data within the first data extension.
columns : str or list
A starting guess for the CCD column position(s) of the source(s) that
are to be fit. The model is unlikely to converge if the guess is too
far away from the correct location. A rule of thumb is to provide a
guess within 1 CCD pixel of the true position. If more than one source
is being modeled then the column positions of each are separated by a
comma. The same number of sources in the columns, rows and fluxes field
is a requirement of this task.
rows : str or list
A starting guess for the CCD row position(s) of the source(s) that are
to be fit. The model is unlikely to converge if the guess is too far
away from the correct location. A rule of thumb is to provide a guess
within 1 CCD pixel of the true position. If more than one source is
being modeled then the row positions of each are separated by a comma.
The same number of sources in the columns, rows and fluxes field is a
requirement of this task.
fluxes : str or list
A starting guess for the flux(es) of the source(s) that are to be fit.
Fit convergence is not particularly reliant on the accuracy of these
guesses, but the fit will converge faster the more accurate the guess.
If more than one source is being modeled then the flux guesses of
each are separated by a comma. The same number of sources in the
columns, rows and fluxes field is a requirement of this task.
prfdir : str
The full or relative directory path to a folder containing the Kepler
PSF calibration. Calibration files can be downloaded from the Kepler
focal plane characteristics page at the MAST here:
http://archive.stsci.edu/missions/kepler/fpc/prf/.
border : int
If a background is included in the fit then it is modeled as a
two-dimensional polynomial. This parameter is the polynomial order.
A zero-order polynomial is generally recommended.
background : bool
Whether to include a background component in the model. If ``True``
the background will be represented by a two-dimensional polynomial of
order border. This functionality is somewhat experimental, with one eye
upon potential background gradients across large masks or on those
detectors more prone to pattern noise. Generally it is recommended to
set background as ``False``.
focus : bool
Whether to include pixel scale and focus rotation with the fit
parameters of the model. This is also an experimental function. This
approach does not attempt to deal with inter- or intra-pixel
variations. The recommended use is currently to set focus as ``False``.
ranges : str
The user can choose specific time ranges of data on which to work. This
could, for example, avoid removing known stellar flares from a dataset.
Time ranges are supplied as comma-separated pairs of Barycentric Julian
Dates (BJDs). Multiple ranges are separated by a semi-colon.
An example containing two time ranges is::
'2455012.48517,2455014.50072;2455022.63487,2455025.08231'
If the user wants to correct the entire time series then providing
ranges = '0,0' will tell the task to operate on the whole time series.
xtol : float
The dimensionless, relative model parameter convergence criterion for
the fit algorithm.
ftol : float
The dimensionless, relative model residual convergence criterion for
the fit algorithm.
qualflags : bool
If qualflags is ``False``, archived observations flagged with any
quality issue will not be fit.
outfile : str
kepprfphot creates two types of output file containing fit results and
diagnostics. ``outfile.png`` contains a time series plot of fit
parameters, residuals and chi-squared. ``outfile.fits`` contains a
table of the same properties, consistent with Kepler archive light
curve files. The FITS column PSF_FLUX contains the flux time-series in
units of e-/s derived by integrating under the best-fit PRF model.
PSF_BKG provides the best-fit background (if calculated) averaged over
all mask pixels in units of e-/s/pixel. PSF_CENTR1 provides the
best-fit PSF centroid position in the CCD column direction, in CCD
pixel units. Similarly, PSF_CENTR2 provides the best-fit PSF centroid
position in the CCD row direction, in CCD pixel units. If calculated,
PSF_FOCUS1 and PSF_FOCUS2 provide scale factors in the column and row
dimensions by which the CCD pixel scale is adjusted to approximate
focus variation. PSF_ROTATION provides the angle by which the scaled
PSF model was rotated on the focal plane in order to yield a best fit.
The table column PSF_RESIDUAL provides the sum of all mask pixels
after the best-fit model has been subtracted from the data. PSF_CHI2
delivers the best-fit chi-squared statistic for each observation.
plot : bool
Plot fit results to the screen?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepprfphot kplr012557548-2012004120508_lpd-targ.fits.gz --columns 95
--rows 1020 --fluxes 1.0 --border 0 --prfdir ../kplr2011265_prf --xtol 1e-7 --ftol 1e-7
--plot --verbose
--------------------------------------------------------------
KEPPRFPHOT -- infile=kplr012557548-2012004120508_lpd-targ.fits.gz
columns=95 rows=1020 fluxes=1.0 border=0 background=False
focus=False prfdir=../kplr2011265_prf ranges=0,0 xtol=1e-07 ftol=1e-07
qualflags=False plot=True overwrite=True verbose=True logfile=kepprfphot.log
KEPPRFPHOT started at: Wed Jun 14 15:33:30 2017
KepID: 12557548
RA (J2000): 290.96622
Dec (J2000): 51.50472
KepMag: 15.692
SkyGroup: 4
Season: 1
Channel: 32
Module: 10
Output: 4
19% nrow = 740 t = 0.1 sec
.. image:: ../_static/images/api/kepprfphot.png
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPPRFPHOT -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' columns={}'.format(columns)
+ ' rows={}'.format(rows)
+ ' fluxes={}'.format(fluxes)
+ ' border={}'.format(border)
+ ' background={}'.format(background)
+ ' focus={}'.format(focus)
+ ' prfdir={}'.format(prfdir)
+ ' ranges={}'.format(ranges)
+ ' xtol={}'.format(xtol)
+ ' ftol={}'.format(ftol)
+ ' qualflags={}'.format(qualflags)
+ ' plot={}'.format(plot)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPPRFPHOT started at', logfile, verbose)
f = fluxes
x = columns
y = rows
nsrc = len(f)
if len(x) != nsrc or len(y) != nsrc:
errmsg = ("ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and "
"fluxes must have the same number of sources")
kepmsg.err(logfile, errmsg, verbose)
guess = list(f) + list(x) + list(y)
if background:
if border == 0:
guess.append(0.0)
else:
for i in range((border + 1) * 2):
guess.append(0.0)
if focus:
guess = guess + [1.0, 1.0, 0.0]
# overwrite output file
for i in range(nsrc):
outfilename = '{0}_{1}.fits'.format(outfile, i)
if overwrite:
kepio.overwrite(outfilename, logfile, verbose)
if kepio.fileexists(outfilename):
errmsg = 'ERROR -- KEPPRFPHOT: {} exists. Use --overwrite'.format(outfilename)
kepmsg.err(logfile, errmsg, verbose)
# open TPF FITS file
try:
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, barytime = \
kepio.readTPF(infile, 'TIME', logfile, verbose)
except:
message = 'ERROR -- KEPPRFPHOT: is %s a Target Pixel File? ' % infile
kepmsg.err(logfile,message,verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
kepio.readTPF(infile,'TIMECORR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, cadno = \
kepio.readTPF(infile,'CADENCENO',logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
kepio.readTPF(infile,'FLUX', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
kepio.readTPF(infile,'FLUX_ERR', logfile, verbose)
try:
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, poscorr1 = \
kepio.readTPF(infile, 'POS_CORR1', logfile, verbose)
except:
poscorr1 = np.zeros((len(barytime)), dtype='float32')
poscorr1[:] = np.nan
try:
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, poscorr2 = \
kepio.readTPF(infile, 'POS_CORR2', logfile, verbose)
except:
poscorr2 = np.zeros((len(barytime)), dtype='float32')
poscorr2[:] = np.nan
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, qual = \
kepio.readTPF(infile,'QUALITY',logfile,verbose)
struct = pyfits.open(infile)
tstart, tstop, bjdref, cadence = kepio.timekeys(struct, infile, logfile, verbose)
# input file keywords and mask map
cards0 = struct[0].header.cards
cards1 = struct[1].header.cards
cards2 = struct[2].header.cards
maskmap = np.copy(struct[2].data)
npix = np.size(np.nonzero(maskmap)[0])
# print target data
if verbose:
print('')
print(' KepID: {}'.format(kepid))
print(' RA (J2000): {}'.format(ra))
print('Dec (J2000): {}'.format(dec))
print(' KepMag: {}'.format(kepmag))
print(' SkyGroup: {}'.format(skygroup))
print(' Season: {}'.format(season))
print(' Channel: {}'.format(channel))
print(' Module: {}'.format(module))
print(' Output: {}'.format(output))
print('')
# read PRF file and interpolate
result = kepfunc.read_and_interpolate_prf(prfdir=prfdir, module=module,
output=output, column=column,
row=row, xdim=xdim, ydim=ydim,
verbose=verbose, logfile=logfile)
splineInterpolation = result[0]
DATx = result[1]
DATy = result[2]
PRFx = result[4]
PRFy = result[5]
# construct mesh for background model
bx = np.arange(1., float(xdim + 1))
by = np.arange(1., float(ydim + 1))
xx, yy = np.meshgrid(np.linspace(bx.min(), bx.max(), xdim),
np.linspace(by.min(), by.max(), ydim))
# Get time ranges for new photometry, flag good data
barytime += bjdref
tstart, tstop = kepio.timeranges(ranges, logfile, verbose)
incl = np.zeros((len(barytime)), dtype='int')
for rownum in range(len(barytime)):
for winnum in range(len(tstart)):
if (barytime[rownum] >= tstart[winnum]
and barytime[rownum] <= tstop[winnum]
and (qual[rownum] == 0 or qualflags)
and np.isfinite(barytime[rownum])
and np.isfinite(np.nansum(fluxpixels[rownum, :]))):
incl[rownum] = 1
if not np.in1d(1,incl):
message = ('ERROR -- KEPPRFPHOT: No legal data within the'
' range {}'.format(ranges))
kepmsg.err(logfile, message, verbose)
# filter out bad data
n = 0
nincl = (incl == 1).sum()
tim = np.zeros((nincl), 'float64')
tco = np.zeros((nincl), 'float32')
cad = np.zeros((nincl), 'float32')
flu = np.zeros((nincl, len(fluxpixels[0])), 'float32')
fer = np.zeros((nincl, len(fluxpixels[0])), 'float32')
pc1 = np.zeros((nincl), 'float32')
pc2 = np.zeros((nincl), 'float32')
qua = np.zeros((nincl), 'float32')
for rownum in range(len(barytime)):
if incl[rownum] == 1:
tim[n] = barytime[rownum]
tco[n] = tcorr[rownum]
cad[n] = cadno[rownum]
flu[n,:] = fluxpixels[rownum]
fer[n,:] = errpixels[rownum]
pc1[n] = poscorr1[rownum]
pc2[n] = poscorr2[rownum]
qua[n] = qual[rownum]
n += 1
barytime = tim * 1.0
tcorr = tco * 1.0
cadno = cad * 1.0
fluxpixels = flu * 1.0
errpixels = fer * 1.0
poscorr1 = pc1 * 1.0
poscorr2 = pc2 * 1.0
qual = qua * 1.0
# initialize plot arrays
t = np.array([], dtype='float64')
fl, dx, dy, bg, fx, fy, fa, rs, ch = [], [], [], [], [], [], [], [], []
for i in range(nsrc):
fl.append(np.array([], dtype='float32'))
dx.append(np.array([], dtype='float32'))
dy.append(np.array([], dtype='float32'))
# Preparing fit data message
progress = np.arange(nincl)
if verbose:
txt = 'Preparing...'
sys.stdout.write(txt)
sys.stdout.flush()
# single processor version
oldtime = 0.0
for rownum in range(np.min([80, len(barytime)])):
try:
if barytime[rownum] - oldtime > 0.5:
ftol = 1.0e-10; xtol = 1.0e-10
except:
pass
args = (fluxpixels[rownum, :], errpixels[rownum, :], DATx, DATy, nsrc,
border, xx, yy, PRFx, PRFy, splineInterpolation, guess, ftol,
xtol, focus, background, rownum, 80, float(x[i]),
float(y[i]), False)
guess = PRFfits(args)
ftol = ftol
xtol = xtol
oldtime = barytime[rownum]
# Fit the time series: multi-processing
anslist = []
cad1 = 0
cad2 = 50
for i in range(int(nincl/50) + 1):
try:
fluxp = fluxpixels[cad1:cad2, :]
errp = errpixels[cad1:cad2, :]
progress = np.arange(cad1, cad2)
except:
fluxp = fluxpixels[cad1:nincl, :]
errp = errpixels[cad1:nincl, :]
progress = np.arange(cad1, nincl)
try:
args = itertools.izip(fluxp, errp, itertools.repeat(DATx),
itertools.repeat(DATy),
itertools.repeat(nsrc),
itertools.repeat(border),
itertools.repeat(xx),
itertools.repeat(yy),
itertools.repeat(PRFx),
itertools.repeat(PRFy),
itertools.repeat(splineInterpolation),
itertools.repeat(guess),
itertools.repeat(ftol),
itertools.repeat(xtol),
itertools.repeat(focus),
itertools.repeat(background), progress,
itertools.repeat(np.arange(cad1,nincl)[-1]),
itertools.repeat(float(x[0])),
itertools.repeat(float(y[0])),
itertools.repeat(True))
p = multiprocessing.Pool()
model = [0.0]
model = p.imap(PRFfits, args, chunksize=1)
p.close()
p.join()
cad1 += 50; cad2 += 50
ans = np.array([np.array(item) for item in zip(*model)])
try:
anslist = np.concatenate((anslist, ans.transpose()), axis=0)
except:
anslist = ans.transpose()
guess = anslist[-1]
ans = anslist.transpose()
except:
pass
# single processor version
oldtime = 0.0; ans = []
for rownum in range(nincl):
proctime = time.time()
try:
if barytime[rownum] - oldtime > 0.5:
ftol = 1.0e-10; xtol = 1.0e-10
except:
pass
args = (fluxpixels[rownum, :], errpixels[rownum, :], DATx, DATy, nsrc,
border, xx, yy, PRFx, PRFy, splineInterpolation, guess, ftol,
xtol, focus, background, rownum, nincl, float(x[0]),
float(y[0]), True)
guess = PRFfits(args)
ans.append(guess)
ftol = ftol; xtol = xtol; oldtime = barytime[rownum]
ans = np.array(ans).transpose()
# unpack the best fit parameters
flux, OBJx, OBJy = [], [], []
na = np.shape(ans)[1]
for i in range(nsrc):
flux.append(ans[i, :])
OBJx.append(ans[nsrc + i, :])
OBJy.append(ans[nsrc * 2 + i, :])
try:
bterms = border + 1
if bterms == 1:
b = ans[nsrc * 3, :]
else:
b = np.array([])
bkg = []
for i in range(na):
bcoeff = np.array([ans[nsrc * 3:nsrc * 3 + bterms, i],
ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2, i]])
bkg.append(kepfunc.polyval2d(xx, yy, bcoeff))
b = np.append(b, np.nanmean(bkg[-1].reshape(bkg[-1].size)))
except:
b = np.zeros(na)
if focus:
wx = ans[-3, :]
wy = ans[-2, :]
angle = ans[-1, :]
else:
wx = np.ones(na)
wy = np.ones(na)
angle = np.zeros(na)
# construct model PRF in detector coordinates
residual, chi2 = [], []
for i in range(na):
f =
|
np.empty(nsrc)
|
numpy.empty
|
"""
par.py
G.Rice 6/20/2012
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
V0.4.4 20170829
This module includes a number of different classes and methods for working with
Kongsberg all files, each of which are intended to serve at least one of three
purposes. These are
1) Provide access to the Kongsberg records for viewing or data extraction.
2) Provide simplified access to a combination of Kongsberg data.
3) Display information from Kongsberg records or data.
The primary classes in this module to be accessed directly are
allRead - used to get data records or blocks of records.
useall - inherits from allRead, but performs higher level jobs.
resolve_file_depths - get new xyz data from the range / angle data.
Each of these classes is described in more detail with their own docstrings.
Some standalone methods that use the above classes for data access are also
included at the end of this module. The methods of interest are
build_BSCorr
plot_extinction
noise_from_passive_wc
and are also described in more detail in their docstrings.
"""
import numpy as np
from numpy import sin, cos, pi
from numpy import fft
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import pyproj
from mpl_toolkits.basemap import Basemap
import datetime as dtm
import sys, os, copy
import pickle
import re
from glob import glob
try:
import tables as tbl
have_tables = True
except ImportError:
have_tables = False
try:
import svp
have_svp_module = True
except ImportError:
have_svp_module = False
# suppress numpy rank warnings
import warnings
warnings.simplefilter('ignore', np.RankWarning)
plt.ion()
class allRead:
"""
This is the primary class for working with Kongsberg data all files and
providing access to the data records. The concept behind this class is
that the class is a file, and it lets to move around to different records
in the file. The class can contain a map of where all the records are in
the file, a record or a dictionary of records belonging to a ping, and
dictionary of navigation and attitude data, or other types of data such as
runtime parameters.
The current record can be found in a class variable called 'packet'. This
contains the "header" information that exists for all records, such as
time, record type, record size. The data for the record is contained in a
data record type specific subpacket type inside of the variable packet
called 'subpack'. Each of these are their own classes with their own
variables and methods for working with their own data.
allRead methods of interest:
getrecord
getwatercolumn
display
getnav
plot_navarray
getruntime
getsscast
getping
It is worth noting here that the getrecord method calls the mapfile
method if a file map does not already exist. The file map is also an
allRead class variable called 'map'. The map class has a number of methods
of its own, most notably the method 'printmap' which displays the records
available in the file and what percentage of the file they consume. The labels
for these records (record number) are listed in this map and can be used as
a reference when working from the commandline.
"""
def __init__(self, infilename, verbose = False, byteswap = False):
"""Make a instance of the allRead class."""
self.infilename = infilename
self.byteswap = byteswap
self.infile = open(infilename, 'rb')
self.mapped = False
self.packet_read = False
self.eof = False
self.error = False
self.infile.seek(0,2)
self.filelen = self.infile.tell()
self.infile.seek(0)
def close(self, clean = False):
"""
Close the file from which the data is being read.
"""
self.infile.close()
if clean:
mapfilename = self.infilename + '.par'
navfilename = self.infilename + '.nav'
try:
os.remove(mapfilename)
os.remove(navfilename)
except:
pass
def __enter__(self):
"""
Start function for with statement
Now this will work:
with allRead(file):
....
The file will be closed automatically
"""
return self
def __exit__(self,*args):
"""
Exit function for with statement
"""
self.close()
def read(self):
"""
Reads the header.
"""
if self.infile.tell() == self.filelen:
self.eof = True
if not self.eof:
if self.byteswap:
packetsize = 4 + np.fromfile(self.infile, dtype=np.uint32, count=1)[0].newbyteorder()
else:
packetsize = 4 + np.fromfile(self.infile, dtype=np.uint32, count=1)[0]
self.infile.seek(-4, 1)
if self.filelen >= self.infile.tell() + packetsize:
self.packet = Datagram(self.infile.read(packetsize), self.byteswap)
self.packet_read = True
if not self.packet.valid:
self.error = True
print("Record without proper STX or ETX found.")
else:
self.eof = True
self.error = True
print("Broken packet found at", self.infile.tell())
print("Final packet size", packetsize)
def get(self):
"""
Decodes the data section of the datagram if a packet has been read but
not decoded. If executed the packet_read flag is set to False.
"""
if self.packet_read and not self.packet.decoded:
try:
self.packet.decode()
except NotImplementedError as err:
print(err)
self.packet_read = False
def mapfile(self, print_map = False, show_progress = False):
"""
Maps the datagrams in the file.
"""
progress = 0
if not self.mapped:
self.map = mappack()
self.reset()
if show_progress:
print('Mapping file; ', end=' ')
while not self.eof:
loc = self.infile.tell()
self.read()
dtype = self.packet.header[2]
dsize = self.packet.header[0]
time = self.packet.gettime()
if dtype == 107:
try:
self.get()
pingcounter = self.packet.subpack.header['PingCounter']
self.map.add(str(dtype), loc, time, dsize, pingcounter)
except:
print("Water column record at " + str(loc) + " skipped.")
else:
self.map.add(str(dtype), loc, time, dsize)
current = 100 * loc / self.filelen
if current - progress >= 1:
progress = current
if show_progress:
sys.stdout.write(
'\b\b\b\b\b\b\b\b\b\b%(percent)02d percent' %{'percent':progress})
self.reset()
# make map into an array and sort by the time stamp
self.map.finalize()
# set the number of watercolumn packets into the map object
if '107' in self.map.packdir:
pinglist = list(set(self.map.packdir['107'][:,3]))
self.map.numwc = len(pinglist)
if self.error:
pass
else:
if show_progress:
print('\b\b\b\b\b\b\b\b\b\b\b\b finished mapping file.')
if print_map:
self.map.printmap()
self.mapped = True
else:
pass
def quickmap(self, print_map = False, record_types=[], chunkmb = 20):
"""
quickmap(print_map**, record_types**, chunkmb)
Search the file for records and map their positions for easy access.
If only specific records are desired, they can be supplied as a list,
causing only these records to exist in the file map. If specific
record types are not supplied then all (supported) record types are
searched for and indexed.
This method is a faster way to map a file than mapfile but should
produce the same results.
At this time the supported em systems include the em3002, em2040(c),
em710, em712, em302, em304, em122, and em124. The em model number is
used as part of the search for records, and therefore the particular
system needs to be supported within this module.
*args
-------
None
**kwargs
-------
print_map = False. If set to True the map.printmap method is called to
display the records available in the map.
record_types = []. If a list of record id integers are provided, only
these records will be indexed and added to the file map. The
record ids are the integer representations of the records listed in
the Kongsberg EM data definition, or if the record is not listed
as a integer, it is the integer representation of the hexidecimal
id.
chunkmb = 20. This is the number of megabytes to read into memory for
record searching.
All hail the B.
"""
# set up the em model ids
ems = [3002, 2040, 2045, 710, 712, 302, 304, 122, 124]
ems_hex = ['\\xba\\x0b', '\\xf8\\x07', '\\xfd\\x07', '\\xc6\\x02', '\\xc8\\x02',
'\\x2e\\x01', '\x30\\x01', '\\x7a\\x00', '\\x7c\\x00']
em_set = False
# define what to look for in the header once a record is found.
hdr_dtype = np.dtype([('Bytes','I'),('Start','B'),('Type','B'),
('Model','H'),('Date','I'),('Time','I'),
('PingCount','H')])
hdr_size = hdr_dtype.itemsize
# set up the mapping object.
self.map = mappack()
# initialize the data reading process
self.reset() # start at beginning of file
chunksize = chunkmb * 1024 * 1024
data = self.infile.read(chunksize)
next_data = self.infile.read(chunksize)
fileoffset = 0
search_block_pos = 0
# Create a regular expression that wants a \x02 followed by a record type specified by the user or in the master map.dtypes key list
compiled_expr = self._build_quickmap_regex(record_types)
# Look for a match as long as there is enough data left in the file
# Read the file til the end or we are close enough there can't be any real data remaining
while len(data) > hdr_size:
while 1:
m = compiled_expr.search(data, search_block_pos)
if m: # If a record header was potentially found, then let's try to read and store the position down below
# - but there is a chance this is a false positive since we are just looking for a two byte combo that could randomly occur
search_block_pos = m.start()
# print "not found at expected loc."
# print "re found block type ", ord(data[search_block_pos + 1]), "at", search_block_pos
else: # no potential record block was found so set the flag that we need more data
search_block_pos = -1
# print "not found at expected location, no more \x02's in chunk either"
# this is where the record would be relative to the pattern
str_block_pos = search_block_pos - 4
# if we found something, do stuff with it if not too close to the end of the chunk
if search_block_pos >= 0 and search_block_pos < len(data) - hdr_size:
possible_hdr = data[str_block_pos:str_block_pos + hdr_size]
# parse the data chunk
hdr = np.frombuffer(possible_hdr, dtype=hdr_dtype)[0]
if hdr[0] > chunksize or hdr[0] < hdr_size: # unrealistic block sizes
search_block_pos += 1
continue # the next record is not a legit header so I doubt this was a real record we found
if em_set or hdr['Type'] in ems:
str_file_pos = fileoffset + str_block_pos
# convert the time to something simple to store for now
tmp_time = hdr['Date'] + hdr['Time'] / 864000
if hdr['Type'] == 107:
self.map.add(str(hdr['Type']), str_file_pos, tmp_time, hdr[0], hdr[-1])
else:
self.map.add(str(hdr['Type']), str_file_pos, tmp_time, hdr[0])
if not em_set and hdr['Model'] in ems:
em_set = True
ems_idx = ems.index(hdr['Model'])
ems_str = ems_hex[ems_idx]
compiled_expr = self._build_quickmap_regex(record_types, ems_str)
search_block_pos += hdr[0]
# Now check if the search block is too close to the end and we need more data to finish reading the header
# OR if the expected position of the next search block is in the next data chunk
if search_block_pos > len(data) - hdr_size:
if search_block_pos < len(data): # the header is close to the end of the chunk, so store it and read more so we can process it
carry_over = data[search_block_pos:]
search_block_pos = 0
else: # next record block is in the next chunk of file, set the search position to where that data is supposed to be
carry_over = ""
search_block_pos = search_block_pos - len(data)
break
# if we didn't find anything, move to the next file chunk
if search_block_pos < 0:
search_block_pos = 0
carry_over = ""
break
# read the next chunk of data
fileoffset = self.infile.tell() - len(carry_over)
data = carry_over + next_data
next_data = self.infile.read(chunksize)
self.map.finalize()
def _build_quickmap_regex(self, record_types, model=None):
"""
Build and return a compiled regular expression for finding records in
the file, based on the provided record ids and the sonar model if
available. The pattern is a \x02 (STX) followed by the desired record
types specified by the user or in the master map.dtypes key list.
Only the compiled regular expression is returned.
"""
all_id_types = list(self.map.dtypes.keys())
if not record_types:
recordid = all_id_types
else:
recordid = record_types
# Create a regular expression that wants a \x02 followed by a record type specified by the user or in the master map.dtypes key list
rec_ids_exp = r"[\x" + r"\x".join(["%02x" % v for v in recordid]) + "]"
search_exp = r"\x02" + rec_ids_exp
# if we know the em system in this file, add it to the pattern
if model is not None:
search_exp = search_exp + model
compiled_expr = re.compile(search_exp)
return compiled_expr
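# Hedged usage sketch (illustrative only; `reader` and the record ids are assumptions):
#   expr = reader._build_quickmap_regex([68, 88])
#   -> pattern r"\x02[\x44\x58]": an STX byte followed by datagram id 0x44 or 0x58;
#   appending a model string additionally pins the two little-endian model bytes.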
def loadfilemap(self, mapfilename = '', verbose = True):
"""
Loads the packdir if the map object packdir has been saved previously.
"""
if mapfilename == '':
mapfilename = self.infilename + '.par'
try:
self.map = mappack()
self.map.load(mapfilename)
self.mapped = True
if verbose:
print('Loaded file map ' + mapfilename)
except IOError:
print(mapfilename + ' map file not found.')
def savefilemap(self, verbose = True):
"""
Saves the mappack packdir dictionary for faster operations on a file in
the future. The file is saved under the same name as the loaded file
but with a 'par' extension.
"""
if self.mapped:
mapfilename = self.infilename + '.par'
self.map.save(mapfilename)
if verbose:
print('file map saved to ' + mapfilename)
else:
print('no map to save.')
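# Hedged workflow sketch (assumes `reader` is an instance of this class opened on a data file):
#   reader.mapfile()        # scan the file and build the packdir index
#   reader.savefilemap()    # write <infilename>.par so later runs can skip the scan
#   reader.loadfilemap()    # restore the saved index instead of re-mapping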
def getrecord(self, recordtype, recordnum):
"""
Gets the record number of the described record type. The subpacket
object is returned for easier access to the desired data.
"""
self.eof = False
if not self.mapped:
self.mapfile(show_progress = True)
if str(recordtype) in self.map.packdir:
loc = int(self.map.packdir[str(recordtype)][recordnum][0])
# deal with moving within large files
if loc > 2147483646:
loc -= 2000000000
self.infile.seek(2000000000)
while loc > 2147483646:
loc -= 2000000000
self.infile.seek(2000000000,1)
self.infile.seek(loc,1)
else:
self.infile.seek(loc)
self.read()
self.get()
return self.packet.subpack
else:
print("record " + str(recordtype) + " not available.")
return None
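# Hedged usage sketch (record type and index are assumptions):
#   sub = reader.getrecord(80, 0)   # seek to the first type-80 (position) record,
#                                   # decode it, and return its subpacket object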
def findpacket(self, recordtype, verbose = False):
"""
Find the next record of the requested type.
"""
self.read()
while not self.eof:
if verbose:
print(self.packet.dtype)
if recordtype == self.packet.dtype:
break
else:
self.read()
self.get()
def getwatercolumn(self,recordnum):
"""
This method is designed to get a watercolumn packet by the ping number
where ping 0 is the first in the file. Separate records are
reassembled for the whole ping and stored as the current subpack class
as if it were a single record.
"""
# dt is for looking for packets with different time stamps.
if not self.mapped:
self.mapfile()
pinglist = list(set(self.map.packdir['107'][:,3]))
pinglist.sort()
if recordnum >= len(pinglist):
print(str(len(pinglist)) + ' water column records available.')
return None
else:
pingnum = pinglist[recordnum]
inx = np.nonzero(self.map.packdir['107'][:,3] == pingnum)[0]
ping = self.getrecord(107,inx[0])
numbeams = ping.header['Total#Beams']
recordsremaining = list(range(ping.header['#OfDatagrams']))
recordsremaining.pop(ping.header['Datagram#']-1)
totalsamples, subbeams = ping.ampdata.shape
rx = np.zeros(numbeams, dtype = Data107.nrx_dtype)
# Initialize array to NANs. Source:http://stackoverflow.com/a/1704853/1982894
ampdata = np.empty((totalsamples, numbeams), dtype = np.float32)
ampdata.fill(np.NAN)
rx[:subbeams] = ping.rx
ampdata[:,:subbeams] = ping.ampdata
beamcount = subbeams
if len(inx) > 1:
for n in inx[1:]:
ping = self.getrecord(107, n)
recordnumber = recordsremaining.index(ping.header['Datagram#']-1)
recordsremaining.pop(recordnumber)
numsamples, subbeams = ping.ampdata.shape
if numsamples > totalsamples:
temp = np.empty((numsamples - totalsamples, numbeams), dtype = np.float32)
temp.fill(np.NAN)
ampdata = np.append(ampdata, temp, axis = 0)
totalsamples = numsamples
rx[beamcount:beamcount+subbeams] = ping.rx
ampdata[:numsamples,beamcount:beamcount+subbeams] = ping.ampdata
beamcount += subbeams
if len(recordsremaining) > 0:
print("Warning: Not all WC records have the same time stamp!")
sortidx = np.argsort(rx['BeamPointingAngle'])
self.packet.subpack.rx = rx[sortidx]
self.packet.subpack.ampdata = ampdata[:,sortidx]
self.packet.subpack.header[2] = 1
self.packet.subpack.header[3] = 1
self.packet.subpack.header[6] = numbeams
return self.packet.subpack
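# Hedged usage sketch (ping index is an assumption):
#   wc = reader.getwatercolumn(0)   # reassemble every type-107 datagram of the first ping
#   wc.ampdata                      # samples x beams array, NaN-padded, sorted by beam angle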
def display(self):
"""
Prints the current record header and record type header to the command
window. If the record type header display method also contains a plot
function, a plot will also be displayed.
"""
if self.packet_read:
self.packet.display()
elif 'packet' in self.__dict__:
self.packet.display()
if self.packet.decoded:
self.packet.subpack.display()
else:
print('No record currently read.')
def reset(self):
"""
Puts the file pointer to the start and the eof to False.
"""
self.infile.seek(0)
self.packet_read = False
self.eof = False
if 'packet' in self.__dict__:
del self.packet
def getnav(self, tstamps, postype = 80, att_type = 65, degrees = True):
"""
For each provided time stamp (single value or array), an array of
navigation data is returned. The returned array set consists of time,
x (deg), y (deg), roll (deg), pitch (deg), heave (meters), and
heading (deg). Time stamps are to be POSIX time stamps, and are
assumed to be in UTC. Set the 'degrees' keyword to False to have the
returned attitude information in radians.
"""
# make incoming tstamp shape more flexible
tstamps = np.asarray(tstamps)
ndim = tstamps.shape
if len(ndim) == 0:
tstamps = np.array([tstamps])
elif len(ndim) == 2:
tstamps = tstamps[0]
numpts = len(tstamps)
# make an array of all the needed data
if 'navarray' not in self.__dict__:
self._build_navarray()
# get the data to be used and reformat it for easy use
go = True
if str(postype) in self.navarray:
if postype == 80:
pos = self.navarray[str(postype)]
elif postype == 'GGK':
temp = self.navarray['GGK']
len_pos = len(temp)
pos = np.zeros((len_pos, 4))
for m,n in enumerate(temp.dtype.names[:-1]):
pos[:,m] = temp[n].astype('float')
pos[:,1:3] = pos[:,2:0:-1] # swap lat and lon
else:
go = False
if str(att_type) in self.navarray:
if att_type == 65:
att = self.navarray[str(att_type)]
elif att_type == 110:
att = self.navarray[str(att_type)][:5]
else:
go = False
# find bounding times for getting all needed nav data
if go:
mintime = max(att[0,0], pos[0,0])
maxtime = min(att[-1,0], pos[-1,0])
navpts = np.zeros((numpts,8))
# look for time stamps in the time range
idx_range = np.nonzero((tstamps <= maxtime) & (tstamps >= mintime))[0]
if len(idx_range) > 0:
# for time stamps in the time range, find that nav and att
for i in idx_range:
ts = tstamps[i]
if ts > pos[0,0] and ts < pos[-1,0]:
prev = np.nonzero(pos[:,0] <= ts)[0][-1]
temp = self._interp_points(tstamps[i], pos[prev,:], pos[prev + 1,:])
if len(temp) > 3:
navpts[i,[0,1,2,7]] = temp
else:
navpts[i,:3] = temp
navpts[i,7] = np.nan
else:
navpts[i,[0,1,2,7]] = np.nan
if ts > att[0,0] and ts < att[-1,0]:
prev =
|
np.nonzero(att[:,0] <= tstamps[i])
|
numpy.nonzero
|
# -*- coding: utf-8 -*-
# @Author: pranit
# @Date: 2018-04-20 09:59:48
# @Last Modified by: pranit
# @Last Modified time: 2018-05-17 02:16:39
from time import time
import ast
import pickle
import numpy as np
import pandas as pd
import multiprocessing as mp
from preprocessor import NltkPreprocessor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, confusion_matrix, classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from confusion_matrix import cn_matrix
class SentimentAnalyzer:
def __init__(self):
self.clf = [
('MNB', MultinomialNB(alpha = 1.0, fit_prior = False)),
('LR', LogisticRegression(C = 5.0, penalty = 'l2', solver = 'liblinear', max_iter = 100, dual = True)),
('SVM', LinearSVC(C = 0.55, penalty = 'l2', max_iter = 1000, dual = True)),
('RF', RandomForestClassifier(n_jobs = -1, n_estimators = 100, min_samples_split = 40, max_depth = 90, min_samples_leaf = 3))
]
self.clf_names = ['Multinomial NB', 'Logistic Regression', 'Linear SVC', 'Random Forest']
def getInitialData(self, data_file, do_pickle):
print('Fetching initial data...')
t = time()
i = 0
df = {}
with open(data_file, 'r') as file_handler:
for review in file_handler.readlines():
df[i] = ast.literal_eval(review)
i += 1
reviews_df = pd.DataFrame.from_dict(df, orient = 'index')
if do_pickle:
reviews_df.to_pickle('pickled/product_reviews.pickle')
print('Fetching data completed!')
print('Fetching time: ', round(time()-t, 3), 's\n')
def preprocessData(self, reviews_df, do_pickle):
print('Preprocessing data...')
t = time()
reviews_df.drop(columns = ['reviewSummary'], inplace = True)
reviews_df['reviewRating'] = reviews_df.reviewRating.astype('int')
reviews_df = reviews_df[reviews_df.reviewRating != 3] # Ignoring 3-star reviews -> neutral
reviews_df = reviews_df.assign(sentiment = np.where(reviews_df['reviewRating'] >= 4, 1, 0)) # 1 -> Positive, 0 -> Negative
nltk_preprocessor = NltkPreprocessor()
with mp.Pool() as pool:
reviews_df = reviews_df.assign(cleaned = pool.map(nltk_preprocessor.tokenize, reviews_df['reviewText'])) # Parallel processing
if do_pickle:
reviews_df.to_pickle('pickled/product_reviews_preprocessed.pickle')
print('Preprocessing data completed!')
print('Preprocessing time: ', round(time()-t, 3), 's\n')
def trainTestSplit(self, reviews_df_preprocessed):
print('Splitting data using Train-Test split...')
t = time()
X = reviews_df_preprocessed.iloc[:, -1].values
y = reviews_df_preprocessed.iloc[:, -2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42, shuffle = True)
print('Splitting data completed!')
print('Splitting time: ', round(time()-t, 3), 's\n')
return X_train, X_test, y_train, y_test
def kFoldSplit(self, reviews_df_preprocessed):
print('Splitting data using K-Fold Cross Validation...')
t = time()
X = reviews_df_preprocessed.iloc[:, -1].values
y = reviews_df_preprocessed.iloc[:, -2].values
kf = KFold(n_splits = 5, random_state = 42, shuffle = True)
train_test_indices = kf.split(X, y)
print('Splitting data completed!')
print('Splitting time: ', round(time()-t, 3), 's\n')
return train_test_indices, X, y
def trainData(self, X_train, y_train, classifier, num_features = 2522):
pipeline = []
model = []
steps = [
('vect', TfidfVectorizer(ngram_range = (1,2), use_idf = True, sublinear_tf = True, lowercase = False, stop_words = None, preprocessor = None)),
('select_best', SelectKBest(score_func = chi2, k = num_features))
]
for name, clf in classifier:
steps.append(('clf', clf))
pl = Pipeline(steps)
pipeline.append(pl)
print('Training data... Classifier ' + str(name))
t = time()
model.append((name, pl.fit(X_train, y_train)))
print('Training data completed!')
print('Training time: ', round(time()-t, 3), 's\n')
steps.pop()
return pipeline, model
def predictData(self, X_test, model):
prediction = []
for name, m in model:
print('Predicting Test data... Classifier ' + str(name))
t = time()
prediction.append((name, m.predict(X_test)))
print('Prediction completed!')
print('Prediction time: ', round(time()-t, 3), 's\n')
return prediction
def evaluate(self, y_test, prediction):
clf_accuracy = []
clf_precision = []
clf_recall = []
clf_f1 = []
clf_roc_auc = []
clf_cm = []
clf_cr = []
for name, pred in prediction:
print('Evaluating results... Classifier ' + str(name))
t = time()
clf_accuracy.append(accuracy_score(y_test, pred))
clf_precision.append(precision_score(y_test, pred))
clf_recall.append(recall_score(y_test, pred))
clf_f1.append(f1_score(y_test, pred))
clf_roc_auc.append(roc_auc_score(y_test, pred))
clf_cm.append(confusion_matrix(y_test, pred))
clf_cr.append(classification_report(y_test, pred, target_names = ['negative', 'positive'], digits = 6))
print('Results evaluated!')
print('Evaluation time: ', round(time()-t, 3), 's\n')
return clf_accuracy, clf_precision, clf_recall, clf_f1, clf_roc_auc, clf_cm, clf_cr
def holdoutStrategy(self, reviews_df_preprocessed, do_pickle, do_train_data):
print('\nHoldout Strategy...\n')
if do_train_data:
X_train, X_test, y_train, y_test = self.trainTestSplit(reviews_df_preprocessed)
pipeline, model = self.trainData(X_train, y_train, self.clf)
if do_pickle:
with open('pickled/features_train.pickle', 'wb') as features_train:
pickle.dump(X_train, features_train)
with open('pickled/features_test.pickle', 'wb') as features_test:
pickle.dump(X_test, features_test)
with open('pickled/labels_train.pickle', 'wb') as labels_train:
pickle.dump(y_train, labels_train)
with open('pickled/labels_test.pickle', 'wb') as labels_test:
pickle.dump(y_test, labels_test)
with open('pickled/pipeline_holdout.pickle', 'wb') as pipeline_holdout:
pickle.dump(pipeline, pipeline_holdout)
with open('pickled/model_holdout.pickle', 'wb') as model_holdout:
pickle.dump(model, model_holdout)
with open('pickled/features_train.pickle', 'rb') as features_train:
X_train = pickle.load(features_train)
with open('pickled/features_test.pickle', 'rb') as features_test:
X_test = pickle.load(features_test)
with open('pickled/labels_train.pickle', 'rb') as labels_train:
y_train = pickle.load(labels_train)
with open('pickled/labels_test.pickle', 'rb') as labels_test:
y_test = pickle.load(labels_test)
with open('pickled/pipeline_holdout.pickle', 'rb') as pipeline_holdout:
pipeline = pickle.load(pipeline_holdout)
with open('pickled/model_holdout.pickle', 'rb') as model_holdout:
model = pickle.load(model_holdout)
prediction = self.predictData(X_test, model)
clf_accuracy, clf_precision, clf_recall, clf_f1, clf_roc_auc, clf_cm, clf_cr = self.evaluate(y_test, prediction)
if do_pickle:
with open('pickled/metrics_cm_holdout.pickle', 'wb') as metrics_cm:
pickle.dump(clf_cm, metrics_cm)
with open('pickled/metrics_cr_holdout.pickle', 'wb') as metrics_cr:
pickle.dump(clf_cr, metrics_cr)
metrics_list = {
'Classifier': self.clf_names,
'Accuracy': clf_accuracy,
'Precision': clf_precision,
'Recall': clf_recall,
'F1-score': clf_f1,
'ROC AUC': clf_roc_auc
}
metrics_df = pd.DataFrame.from_dict(metrics_list)
for i in range(0, len(self.clf)):
if i == 0:
print('======================================================\n')
print('Evaluation metrics of Classifier ' + self.clf_names[i] + ':')
print('Confusion Matrix: \n{}\n'.format(clf_cm[i]))
print('Classification Report: \n{}'.format(clf_cr[i]))
(cn_matrix()).show(clf_cm[i])
print('======================================================\n')
print('Comparison of different metrics for the various Classifiers used:\n')
print(metrics_df)
if do_pickle:
with open('pickled/metrics_dataframe.pickle', 'wb') as df:
pickle.dump(metrics_df, df)
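# Hedged driver sketch (paths and flags are assumptions, not part of this class):
#   analyzer = SentimentAnalyzer()
#   reviews = pd.read_pickle('pickled/product_reviews_preprocessed.pickle')
#   analyzer.holdoutStrategy(reviews, do_pickle=True, do_train_data=True)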
def crossValidationStrategy(self, reviews_df_preprocessed, do_pickle):
print('\nK-Fold Cross Validation Strategy...\n')
train_test_indices, X, y = self.kFoldSplit(reviews_df_preprocessed)
accuracy = []
precision = []
recall = []
f1 = []
roc_auc = []
cm = []
for i in range(0, len(self.clf)):
accuracy.append([])
precision.append([])
recall.append([])
f1.append([])
roc_auc.append([])
cm.append(np.zeros((2,2), dtype = 'int32'))
for train_idx, test_idx in train_test_indices:
X_train, y_train = X[train_idx], y[train_idx]
X_test, y_test = X[test_idx], y[test_idx]
_, model = self.trainData(X_train, y_train, self.clf)
prediction = self.predictData(X_test, model)
clf_accuracy, clf_precision, clf_recall, clf_f1, clf_roc_auc, clf_cm, _ = self.evaluate(y_test, prediction)
for j in range(0, len(self.clf)):
accuracy[j].append(clf_accuracy[j])
precision[j].append(clf_precision[j])
recall[j].append(clf_recall[j])
f1[j].append(clf_f1[j])
roc_auc[j].append(clf_roc_auc[j])
cm[j] += clf_cm[j]
acc = []
prec = []
rec = []
f1_score = []
auc = []
for i in range(0, len(self.clf)):
if i == 0:
print('======================================================\n')
print('Evaluation metrics of Classifier ' + self.clf_names[i] + ':')
print('Accuracy: {}'.format(np.mean(accuracy[i])))
print('Precision: {}'.format(np.mean(precision[i])))
print('Recall: {}'.format(np.mean(recall[i])))
print('F1-score: {}'.format(np.mean(f1[i])))
print('ROC AUC: {}'.format(np.mean(roc_auc[i])))
print('Confusion Matrix: \n{}\n'.format(cm[i]))
print('======================================================\n')
acc.append(
|
np.mean(accuracy[i])
|
numpy.mean
|
import rosbag
import numpy as np
import matplotlib.pyplot as plt
import sys
def get_data(bag_filename):
time_command = []
time_pose = []
time_euler = []
x_command = []
y_command = []
z_command = []
roll = []
pitch = []
yaw = []
x_pose = []
y_pose = []
z_pose = []
bag = rosbag.Bag(bag_filename)
topics = set([
"/command/pose",
"/ground_truth_to_tf/euler",
"/ground_truth_to_tf/pose"
])
for topic, msg, t in bag.read_messages(topics=topics):
if topic == '/command/pose':
time_command.append(msg.header.stamp.to_sec())
x_command.append(msg.pose.position.x)
y_command.append(msg.pose.position.y)
z_command.append(msg.pose.position.z)
if topic == '/ground_truth_to_tf/euler':
time_euler.append(msg.header.stamp.to_sec())
roll.append(msg.vector.x)
pitch.append(msg.vector.y)
yaw.append(msg.vector.z)
if topic == '/ground_truth_to_tf/pose':
time_pose.append(msg.header.stamp.to_sec())
x_pose.append(msg.pose.position.x)
y_pose.append(msg.pose.position.y)
z_pose.append(msg.pose.position.z)
bag.close()
return time_command, time_pose, time_euler, x_command, y_command, \
z_command, roll, pitch, yaw, x_pose, y_pose, z_pose
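# Hedged usage sketch (the bag path is an assumption):
#   (t_cmd, t_pose, t_eul, xc, yc, zc,
#    roll, pitch, yaw, xp, yp, zp) = get_data('step_response.bag')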
if __name__ == '__main__':
ref_x = 1.0
ref_y = 0.0
ref_z = 1.0
bag_filename = sys.argv[1]
time_command, time_pose, time_euler, x_command, y_command, \
z_command, roll, pitch, yaw, x_pose, y_pose, z_pose = get_data(bag_filename)
time_command = np.asarray(time_command)
x_command = np.asarray(x_command)
y_command = np.asarray(y_command)
z_command = np.asarray(z_command)
time_euler = np.asarray(time_euler)
roll = np.asarray(roll)
pitch = np.asarray(pitch)
yaw = np.asarray(yaw)
time_pose = np.asarray(time_pose)
x_pose = np.asarray(x_pose)
y_pose = np.asarray(y_pose)
z_pose = np.asarray(z_pose)
step_index = np.where(x_command > (ref_x / 2.0))[0][0]
step_time = time_command[step_index]
time_command = time_command - step_time
time_pose = time_pose - step_time
time_euler = time_euler - step_time
start_index = np.where(time_pose > 0)[0][0]
end_index = np.where(time_pose > 8)[0][0]
SAE = np.sum(np.abs(x_pose[start_index:end_index] - ref_x)) + \
np.sum(np.abs(y_pose[start_index:end_index] - ref_y)) + \
np.sum(
|
np.abs(z_pose[start_index:end_index] - ref_z)
|
numpy.abs
|
__copyright__ = "Copyright © 2018-2021 <NAME>"
__license__ = "SPDX-License-Identifier: MIT"
import io
import numpy as np
from numpy import all, min, max
from vtk import (
vtkXMLRectilinearGridReader,
vtkXMLImageDataReader,
vtkXMLStructuredGridReader,
vtkXMLUnstructuredGridReader,
)
from vtk.util.numpy_support import vtk_to_numpy
from conftest import get_vtk_data
from uvw import (
ImageData,
RectilinearGrid,
StructuredGrid,
UnstructuredGrid,
ParaViewData,
DataArray,
)
from uvw.unstructured import CellType
def test_rectilinear_grid(field_data,
compression_fixture,
format_fixture,
ordering_fixture):
coords, r, e_r, field, order = field_data
dim = r.ndim
f = io.StringIO()
compress = compression_fixture.param
format = format_fixture.param
rect = RectilinearGrid(f, coords, compression=compress, byte_order=order)
rect.addPointData(
DataArray(r, range(dim), 'point', ordering_fixture.param),
vtk_format=format
).addCellData(
DataArray(e_r, range(dim), 'cell', ordering_fixture.param),
vtk_format=format
).addFieldData(
DataArray(field, [0], 'field', ordering_fixture.param),
vtk_format=format
)
rect.write()
reader = vtkXMLRectilinearGridReader()
# Testing the xml pretty print output as well
pretty_sstream = io.StringIO(str(rect.writer))
for ss in [f, pretty_sstream]:
vtk_r, vtk_e_r, vtk_f = get_vtk_data(reader, ss)
vtk_r = vtk_r.reshape(r.shape, order='F')
vtk_e_r = vtk_e_r.reshape(e_r.shape, order='F') \
.transpose(ordering_fixture.transp(dim))
assert all(vtk_r == r)
assert all(vtk_e_r == e_r)
assert all(vtk_f == field)
def test_image_data(field_data,
compression_fixture,
format_fixture,
ordering_fixture):
coords, r, e_r, field, order = field_data
dim = r.ndim
f = io.StringIO()
compress = compression_fixture.param
format = format_fixture.param
with ImageData(
f,
[(min(x), max(x)) for x in coords],
[x.size for x in coords],
compression=compress,
byte_order=order) as fh:
fh.addPointData(
DataArray(r, range(dim), 'point', ordering_fixture.param),
vtk_format=format
).addCellData(
DataArray(e_r, range(dim), 'cell', ordering_fixture.param),
vtk_format=format
).addFieldData(
DataArray(field, [0], 'field', ordering_fixture.param),
vtk_format=format
)
reader = vtkXMLImageDataReader()
vtk_r, vtk_e_r, vtk_f = get_vtk_data(reader, f)
vtk_r = vtk_r.reshape(r.shape, order='F')
vtk_e_r = vtk_e_r.reshape(e_r.shape, order='F') \
.transpose(ordering_fixture.transp(dim))
assert all(vtk_r == r)
assert all(vtk_e_r == e_r)
assert
|
all(vtk_f == field)
|
numpy.all
|
from __future__ import division
import tensorflow as tf
import numpy as np
import random
import os
class ConvexHullData(object):
def __init__(self, input_size, output_size):
self.input_size = input_size
self.output_size = output_size
self.input_data = tf.placeholder(tf.float32, [None, None, input_size], name='input_data') # [batch, seq_len, size]
self.target_output = tf.placeholder(tf.int32, [None, None], name='target')
self.sequence_length = tf.placeholder(tf.int32, [None], name='sequence_length')
self.loss_mask = tf.placeholder(tf.float32, [None, None])
self.original_loss_mask = tf.placeholder(tf.float32, [None, None])
self.scale_factor = tf.placeholder(tf.float32, (), name="scale_factor")
self.processed_target_data = tf.one_hot(self.target_output, self.output_size, dtype=tf.float32)
self.processed_loss_mask = tf.expand_dims(self.loss_mask, axis=-1)
self.eps = tf.constant(np.finfo(np.float32).eps, dtype=tf.float32)
def show_task_loss(self, logit):
original_loss = tf.reduce_sum(
self.original_loss_mask * tf.nn.softmax_cross_entropy_with_logits(labels=self.processed_target_data,
logits=logit)
) / (tf.reduce_sum(self.original_loss_mask) + self.eps)
scale_loss = self.scale_factor * original_loss
return original_loss, scale_loss
def show_self_supervised_loss(self, logit):
loss = tf.reduce_sum(
self.processed_loss_mask * tf.pow(self.input_data - logit, 2)
) / (tf.reduce_sum(tf.cast(tf.not_equal(self.loss_mask, 0.0), dtype=tf.float32) + self.eps))
return loss
def task_loss(self, logit):
loss = tf.reduce_mean(
self.original_loss_mask * tf.nn.softmax_cross_entropy_with_logits(labels=self.processed_target_data,
logits=logit))
return self.scale_factor * loss
def self_supervised_loss(self, logit):
loss = tf.reduce_mean(self.processed_loss_mask * tf.pow(self.input_data - logit, 2))
return loss
class ConvexHull(object):
def __init__(self, batch_size, p_re, max_n=20, test_n=5, curriculum=False, mode="train"):
assert mode in ['train', 'test']
self.batch_size = batch_size
self.p_re = p_re
self.max_n = max_n
self.cur_index = -1
self.mode = mode
data_path = 'Convexhull_data'
if mode == "train":
train_dir = os.path.join(data_path, 'all_lengths_data_shuffled.txt')
self.train_samples = self.read_file(train_dir, same_len=True)
if curriculum:
self.cur_index = 0
self.train_samples.sort(key=lambda x: len(x["inputs"]))
if mode == "test":
assert test_n in [5, 10]
test_dir = os.path.join(data_path, 'convex_hull_'+str(test_n)+'_test.txt')
self.test_samples = self.read_file(test_dir)
self.in_dim = max_n + 2 + 1 # (N_one_hot; node value; end signal)
self.end_token = 0 # max_n + 1
def read_file(self, filepath, same_len=False):
all_data_blen = []
if same_len:
all_data_blen = {}
with open(filepath) as fp:
for line in fp:
xs = []
ys = []
all_items = line.strip().split()
after_output = False
i = 0
while i < len(all_items):
if not after_output:
if all_items[i] == "output":
after_output = True
else:
xs.append([all_items[i], all_items[i+1]])
i += 1
else:
ys.append(all_items[i])
i += 1
if len(xs) <= self.max_n:
if same_len:
if len(xs) not in all_data_blen:
all_data_blen[len(xs)] = []
all_data_blen[len(xs)].append({"inputs": np.array(xs, dtype=np.float32), "outputs": np.array(ys, dtype=np.int32)})
else:
all_data_blen.append({"inputs": np.array(xs, dtype=np.float32), "outputs": np.array(ys, dtype=np.int32)})
return all_data_blen
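# Hedged format note (as parsed above, not a spec): each line is
#   "x1 y1 x2 y2 ... output i1 i2 ..."
# i.e. point coordinates, the literal token 'output', then the hull indices.
#   samples = hull.read_file(test_dir)                           # `hull` and `test_dir` are assumptions
#   samples_by_len = hull.read_file(train_dir, same_len=True)    # dict keyed by point count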
def get_train_sample_wlen(self):
if self.cur_index < 0:
chosen_key = random.choice(list(self.train_samples.keys()))
samples = np.random.choice(self.train_samples[chosen_key], self.batch_size)
data = self.prepare_sample_batch(samples)
return data
else:
find = self.cur_index
tind = self.cur_index+self.batch_size
if tind > len(self.train_samples):
tind = len(self.train_samples)
find = tind-self.batch_size
self.cur_index = 0
else:
self.cur_index += self.batch_size
samples = self.train_samples[find:tind]
data = self.prepare_sample_batch(samples)
return data
def get_test_sample_wlen(self):
if self.cur_index < 0 or self.cur_index >= len(self.test_samples):
self.cur_index = 0
samples = self.test_samples[self.cur_index:self.cur_index+self.batch_size]
self.cur_index += self.batch_size
data = self.prepare_sample_batch(samples, random_mode=False)
return data
def prepare_sample_batch(self, samples, random_mode=True):
batch_size = len(samples)
input_seq_len = np.array([len(s['inputs']) for s in samples], dtype=np.int32)
output_seq_len = np.array([len(s['outputs']) for s in samples], dtype=np.int32) + 1 # output end signal
seq_len = input_seq_len + output_seq_len + 1 # input end signal
max_seq_len = np.max(seq_len)
input_vecs = np.zeros((batch_size, max_seq_len, self.in_dim), dtype=np.float32)
output_vecs = np.zeros((batch_size, max_seq_len), dtype=np.int32)
original_mask = np.zeros((batch_size, max_seq_len), dtype=np.float32)
weight_mask = np.random.choice(2, np.size(original_mask), p=[1-self.p_re, self.p_re]).astype(np.float32)
weight_mask = weight_mask.reshape(batch_size, -1)
for i, (s, i_s, o_s) in enumerate(zip(samples, input_seq_len, output_seq_len)):
inputs = s['inputs']
outputs = s['outputs']
input_label = list(range(1, len(inputs)+1))
label_mapping = list(range(1, len(inputs)+1))
if random_mode:
random.shuffle(label_mapping)
label_mapping = dict(zip(list(range(1, len(inputs)+1)), label_mapping))
else:
label_mapping = dict(zip(label_mapping, label_mapping))
input_label = [label_mapping[label] for label in input_label]
input_label.append(0) # end signal
outputs = [label_mapping[label] for label in outputs]
outputs.append(self.end_token)
input_vecs[i, :i_s, :2] = inputs
input_label = self.one_hot(input_label, self.max_n+1)
input_vecs[i, :i_s+1, 2:] = input_label
output_vecs[i, i_s+1:i_s+1+o_s] = outputs
original_mask[i, i_s + 1:i_s + 1 + o_s] = 1.0
weight_mask[i, i_s:] = 0.0
if self.p_re:
scale_factor = np.sum(weight_mask) / np.sum(original_mask)
if scale_factor < 1.0:
scale_factor = 1.0
else:
weight_mask =
|
np.zeros_like(original_mask, dtype=np.float32)
|
numpy.zeros_like
|
# -*- coding: utf-8 -*-
from __future__ import division
from . import BPT
import numpy as np
import polarTransform
import peakutils.peak
import matplotlib.pyplot as plt
from astropy import wcs
from astropy.io import fits
from astropy.table import Table
import koi
#plt.rc('font', size=20)
plt.rc('text', usetex=True)
def manga_SBP_single(ell_fix, redshift, pixel_scale, zeropoint, ax=None, offset=0.0,
x_min=1.0, x_max=40.0, alpha=1, physical_unit=False, show_dots=False,
vertical_line=False, vertical_pos=100, linecolor='firebrick', linestyle='-', label='SBP'):
"""Display the 1-D profiles."""
if ax is None:
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.0, right=1.0,
bottom=0.0, top=1.0,
wspace=0.00, hspace=0.00)
ax1 = fig.add_axes([0.08, 0.07, 0.85, 0.88])
ax1.tick_params(direction='in')
else:
ax1 = ax
ax1.tick_params(direction='in')
# Calculate physical size at this redshift
import slug
phys_size = slug.phys_size(redshift,is_print=False)
# 1-D profile
if physical_unit is True:
x = ell_fix['sma']*pixel_scale*phys_size
y = -2.5*np.log10((ell_fix['intens'] + offset)/(pixel_scale)**2)+zeropoint
y_upper = -2.5*np.log10((ell_fix['intens'] + offset + ell_fix['int_err'])/(pixel_scale)**2)+zeropoint
y_lower = -2.5*np.log10((ell_fix['intens'] + offset - ell_fix['int_err'])/(pixel_scale)**2)+zeropoint
upper_yerr = y_lower-y
lower_yerr = y-y_upper
asymmetric_error = [lower_yerr, upper_yerr]
xlabel = r'$R/\mathrm{kpc}$'
ylabel = r'$\mu\,[\mathrm{mag/arcsec^2}]$'
else:
x = ell_fix['sma']*pixel_scale
y = -2.5*np.log10((ell_fix['intens'] + offset)/(pixel_scale)**2)+zeropoint
y_upper = -2.5*np.log10((ell_fix['intens'] + offset + ell_fix['int_err'])/(pixel_scale)**2)+zeropoint
y_lower = -2.5*np.log10((ell_fix['intens'] + offset - ell_fix['int_err'])/(pixel_scale)**2)+zeropoint
upper_yerr = y_lower-y
lower_yerr = y-y_upper
asymmetric_error = [lower_yerr, upper_yerr]
xlabel = r'$R/\mathrm{arcsec}$'
ylabel = r'$\mu\,[\mathrm{mag/arcsec^2}]$'
# ax1.grid(linestyle='--', alpha=0.4, linewidth=2)
if show_dots is True:
ax1.errorbar((x),
y,
yerr=asymmetric_error,
color='k', alpha=0.2, fmt='o',
capsize=4, capthick=1, elinewidth=1)
if label is not None:
ax1.plot(x, y, color=linecolor, linewidth=4, linestyle=linestyle,
label=r'$\mathrm{'+label+'}$', alpha=alpha)
else:
ax1.plot(x, y, color=linecolor, linewidth=4, linestyle=linestyle, alpha=alpha)
ax1.fill_between(x, y_upper, y_lower, color=linecolor, alpha=0.3*alpha)
ax1.axvline(x=vertical_pos, ymin=ax1.get_ylim()[0], ymax=ax1.get_ylim()[1],
color='gray', linestyle='--', linewidth=3)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(25)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(25)
ax1.set_xlim(x_min, x_max)
ax1.set_xlabel(xlabel, fontsize=30)
ax1.set_ylabel(ylabel, fontsize=30)
ax1.invert_yaxis()
if label is not None:
ax1.legend(fontsize=25, frameon=False, loc='upper right')
if physical_unit is True:
ax4 = ax1.twiny()
ax4.tick_params(direction='in')
lin_label = [1, 2, 5, 10, 30, 50, 100, 150, 300]
lin_pos = [i for i in lin_label]
ax4.set_xticks(lin_pos)
ax4.set_xlim(ax1.get_xlim())
ax4.set_xlabel(r'$\mathrm{kpc}$', fontsize=30)
ax4.xaxis.set_label_coords(1, 1.05)
ax4.set_xticklabels([r'$\mathrm{'+str(i)+'}$' for i in lin_label], fontsize=25)
for tick in ax4.xaxis.get_major_ticks():
tick.label.set_fontsize(25)
if vertical_line is True:
ax1.axvline(x=vertical_pos, ymin=0, ymax=1,
color='gray', linestyle='--', linewidth=3)
if ax is None:
return fig
return ax1
# Calculate physical size of a given redshift
def phys_size(redshift, is_print=True, h=0.7, Omegam=0.3, Omegal=0.7):
'''Calculate the corresponding physical size per arcsec of a given redshift.
Requirement:
-----------
cosmology: https://github.com/esheldon/cosmology
Parameters:
-----------
redshift: float
Returns:
-----------
physical_size: float, in 'kpc/arcsec'
'''
import cosmology
cosmos = cosmology.Cosmo(H0=100*h, omega_m=Omegam, flat=True, omega_l=Omegal, omega_k=None)
ang_distance = cosmos.Da(0.0, redshift)
physical_size = ang_distance/206265*1000 # kpc/arcsec
if is_print:
print ('At redshift', redshift, ', 1 arcsec =', physical_size, 'kpc')
return physical_size
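# Hedged usage sketch (the redshift value is an assumption):
#   scale = phys_size(0.03, is_print=False)   # kpc per arcsec at z = 0.03, default cosmology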
# Generate url of MaNGA Pipe3D datacube
def gen_url_manga(plate, mangaid):
'''Generate url of MaNGA Pipe3D datacube.
Parameters:
-----------
plate: int, such as 8077
mangaid: string, such as 'manga-8077-12705'
Return:
-------
url: string
'''
return [
'https://data.sdss.org/sas/dr14/manga/spectro/pipe3d/v2_1_2/2.1.2/'
+ str(plate)
+ '/' + mangaid + '.Pipe3D.cube.fits.gz'
]
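# Hedged usage sketch (plate and manga id are assumptions):
#   url = gen_url_manga(8077, 'manga-8077-12705')[0]   # note: the function returns a one-element list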
# Calculate the mean profile and its errors in a fan-shaped area (which I called 'pizza')
def eat_pizza(init, theta, polarimage, xinput, r_max, dx, phys_scale, pa, ba):
'''Calculate the mean profile and its errors in a fan-shaped area (which I called 'pizza').
Parameters:
-----------
init: float, angle of starting point of the pizza
theta: float, angle of the fan-shaped area (pizza)
polarimage: 2-D np.array, the polar-transformed image, usually returned by `polarTransform.convertToPolarImage`
xinput: 1-D np.array, usually ranges from 0 to r_max * phys_scale. The unit is `kpc`.
Return:
-------
: mean profile points corresponding to the `xinput`
: std value of the mean profile
'''
from scipy import interpolate
for i in range(init, init + theta):
phi = np.deg2rad(i)
x = (np.arange(0, r_max) * dx * phys_scale * (np.sqrt((np.cos(pa - phi))**2 + ((np.sin(pa - phi) / ba)**2))))
y = polarimage[i%360, :]
f = interpolate.interp1d(x, y, kind='cubic', fill_value='extrapolate')
if i==init:
ystack = f(xinput)
else:
ystack = np.vstack((ystack, f(xinput)))
return ystack.mean(axis=0), np.std(ystack, axis=0)
# Find nearest position
def find_nearest(array, value):
''' Find the nearest value in an array '''
idx = (np.abs(array-value)).argmin()
return idx
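# Hedged usage sketch:
#   find_nearest(np.array([0.0, 0.5, 1.0]), 0.7)   # -> 1, index of the closest element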
def show_ha_profile(obj, x_input, y_output, y_std, r_max):
fig, [ax1, ax2] = plt.subplots(1, 2 ,figsize=(16, 6))
plt.rc('font', size=20)
plt.rc('text', usetex=True)
cubeset = fits.open('/Users/jiaxuanli/Research/MaNGA/v2_1_2/' + obj['mangaid'].rstrip(' ') + '.Pipe3D.cube.fits.gz')
ha_im = cubeset[3].data[45] # H-alpha image
mask = np.isnan(cubeset[1].data[18])
dx = -cubeset[3].header['CD1_1']*3600
dy = cubeset[3].header['CD2_2']*3600
x_center = np.int(cubeset[3].header['CRPIX1']) - 1
y_center = np.int(cubeset[3].header['CRPIX2']) - 1
x_extent = (np.array([0., ha_im.shape[0]]) - (ha_im.shape[0] - x_center)) * dx * (-1)
y_extent = (np.array([0., ha_im.shape[1]]) - (ha_im.shape[1] - y_center)) * dy
extent = [x_extent[0], x_extent[1], y_extent[0], y_extent[1]] # arcsec
phys_scale = koi.phys_size(obj['redshift'], h=0.71, Omegam=0.27, Omegal=0.73)
cubeset.close()
## ax1
im1 = ax1.imshow(np.ma.array(ha_im, mask=mask), origin='lower', extent=extent, cmap='Spectral_r')
fig.colorbar(im1, label=r'$10^{-16}\ \mathrm{erg/s/cm}^2$', fraction=0.045, pad=0.04, ax=ax1)
tks = [5 * i for i in range(-int((round(extent[0]/5) - 1)), int(round(extent[0]/5)))]
ax1.set_yticks(tks)
ax1.set_xticks(tks)
ax1.text(0.5, 0.03, r'$\mathrm{H\alpha\ flux},\ \texttt{' + obj['mangaid'] + '}$',
bbox=dict(edgecolor='k', alpha=0.1),
horizontalalignment='center',
verticalalignment='bottom',
transform=ax1.transAxes)
ax1.set_xlabel(r'$\mathrm{arcsec}$')
ax1.set_ylabel(r'$\mathrm{arcsec}$')
ax1.tick_params(direction='in')
ax1.set_ylim(1.2*extent[1], -0.98*extent[1])
#plt.title(r'H$\alpha$ Flux of '+id)
#angscale = 360 / polarImage.shape[1] # degree/pix, for the polarImage
angscale = 1
## ax2
y_upper = y_output + y_std
y_lower = y_output - y_std
profile, = ax2.plot(x_input, y_output, linewidth=2, linestyle='-', c="firebrick", zorder=9)
ax2.fill_between(x_input, y_upper, y_lower, color='orangered', alpha=0.3)
peak_indices = peakutils.peak.indexes(y_output, thres=0, min_dist=0)
#scatter_high = ax2.scatter(x_input[peak_indices[:2]], y_output[peak_indices[:2]], zorder=10,
# s=200, marker=(5,1,0), facecolors='yellow', edgecolors='red')
if len(peak_indices)==0:
peaks_reliable = np.nan
else:
peaks = x_input[peak_indices]
reliable_mask = (abs(peaks - r_max * dx * phys_scale / 2) < r_max * dx * phys_scale / 3)
if sum(reliable_mask) > 0:
reliable_inx = np.argmax(y_output[peak_indices[reliable_mask]])
peaks_reliable = peaks[reliable_mask][reliable_inx]
#reliable_inx = np.where(x_input == peaks_reliable)[0][0]
scatter_high = ax2.scatter(peaks_reliable, np.max(y_output[peak_indices[reliable_mask]]), zorder=10,
s=200, marker=(5,1,0), facecolors='yellow', edgecolors='red')
print('peak reliable', peaks_reliable)
else: peaks_reliable = np.nan
trough_indices = peakutils.peak.indexes(1 - y_output, thres=0, min_dist=0)
scatter_low = ax2.scatter(x_input[trough_indices[0]], y_output[trough_indices[0]], zorder=10,
s=200, marker=(3, 1, 1), facecolors='lawngreen', edgecolors='blue')
#trough_set = np.dstack((x_input[trough_indices], y_output[trough_indices]))
troughs = x_input[trough_indices]
troughs_reliable = troughs[0]
print('trough reliable', troughs[0])
# PSF position
PSFposition = phys_scale * cubeset[3].header['RFWHM']
PSF = ax2.fill_between(np.array(x_input)[x_input < PSFposition],
y_lower[x_input < PSFposition],
0, facecolor='gray', alpha=0.15, interpolate=True, zorder=0)
# Legend
lines = ax2.get_lines()
#legend1= plt.legend([lines[i] for i in [0,1]], [r'Deprojected H$\alpha$ profile',r'Smoothed H$\alpha$ profile'],
# loc='upper left', frameon=False)
legend1= plt.legend([profile], [r'$\mathrm{H\alpha\ flux\ profile}$'],
loc='upper left', frameon=False, bbox_to_anchor=(0.1,0.99))
if sum(reliable_mask)!=0:
legend2= plt.legend([scatter_high, scatter_low, PSF],
map(lambda t: r'$\mathrm{' + t + '}$', ['Peak','Trough','PSF']),
loc='upper right',
frameon=False) # bbox_to_anchor=(0.9,0.75)
ax2.add_artist(legend2)
ax2.add_artist(legend1)
ax2.tick_params(direction='in')
plt.xlabel(r'$\mathrm{Radial\ distance\ (kpc)}$')
plt.ylabel(r'$\mathrm{H\alpha\ flux\ (}$'+r'$10^{-16}\ \mathrm{erg/s/cm}^2)$', fontsize=20)
plt.legend(frameon=False)
plt.ylim(0, 1.15*max(y_output))
plt.xlim(0, r_max * phys_scale * dx)
plt.subplots_adjust(wspace=0.4)
return fig, [ax1, ax2], peaks_reliable, troughs_reliable
def run_mcmc_for_radius(x, y, x_err, y_err, is_pivot=True):
import emcee
from scipy.stats import t
def model(theta, x):
return theta[0]*x+theta[1]
def lnLikelihood(theta, x, y, xerr, yerr):
a, b = theta
model_value = model(theta, x)
invy = 1/(yerr**2 + xerr**2) # +(model_value**2 * np.exp(2*lnf)))
invy = invy/(2*np.pi)
return -0.5*np.sum(invy*(model_value-y)**2)+0.5*np.sum(np.log(invy))
def lnPrior(theta):
a, b = theta
if -5<b<5:
return np.log(t.pdf(a, 1, 1)) + 0.0
return -np.inf
def lnProb(theta, x, y, xerr, yerr):
prior = lnPrior(theta)
if ~np.isfinite(prior):
return -np.inf
return prior + lnLikelihood(theta, x, y, xerr, yerr)
x_mask = ~np.isnan(x)
y_mask = ~np.isnan(y)
mask = np.logical_and(x_mask, y_mask)
x = x[mask]
y = y[mask]
x_err = x_err[mask]
y_err = y_err[mask]
x_pivot = np.median(x)
y_pivot = np.median(y)
print('x_pivot: ', x_pivot)
print('y_pivot: ', y_pivot)
if is_pivot:
x -= x_pivot
y -= y_pivot
# Least Square Method
A = np.vstack((np.ones_like(x), x)).T
C = np.diag(y_err * y_err)
cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
b_ls, a_ls = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
print('a_ls: ', a_ls)
print('b_ls: ', b_ls)
ndim, nwalkers = 2, 100
pos = emcee.utils.sample_ball([a_ls, b_ls], [1e-3 for i in range(ndim)], size=nwalkers)
import emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnProb, args=(x, y, x_err, y_err))
step = 1000
pos, prob, state = sampler.run_mcmc(pos, step)
samples = sampler.chain[:,-500:,:].reshape((-1,ndim))
samples = sampler.flatchain
#sampler.reset()
print("Mean acceptance fraction: {0:.3f}"
.format(np.mean(sampler.acceptance_fraction)))
return x, y, x_err, y_err, x_pivot, y_pivot, samples
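# Hedged post-processing sketch (assumes the fit above has already been run):
#   x, y, x_err, y_err, x_piv, y_piv, samples = run_mcmc_for_radius(x, y, x_err, y_err)
#   a_med, b_med = np.percentile(samples, 50, axis=0)   # median slope and intercept of the chain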
def AGN_diagnosis(fits, this_ew, y1, x1, y2, x2, suptitle, size,
consider_RG=True, show_fig=False, is_print=False):
if consider_RG:
RGmask = (this_ew <= 3) # EW[Ha]<3 is Retired Galaxy
nonRGmask = (this_ew > 3)
else:
RGmask = np.zeros(len(this_ew))
nonRGmask = RGmask + 1
tot_mask = np.ones(fits['mangaid'].shape, dtype=bool)
#SF1=np.logical_and(np.logical_and(y<0.61/(x-0.05)+1.30, y<0.61/(x-0.47)+1.19),SNmask)
SF1 = np.logical_and(
nonRGmask,
np.logical_and(
x1 < 0,
np.logical_and(y1 < 0.61 / (x1 - 0.05) + 1.30,
y1 < 0.61 / (x1 - 0.47) + 1.19)))
#Composite=np.logical_and(np.logical_and(y>0.61/(x-0.05)+1.30, y<0.61/(x-0.47)+1.19),SNmask)
Composite = np.logical_and(
nonRGmask,
np.logical_and(y1 > 0.61 / (x1 - 0.05) + 1.30,
y1 < 0.61 / (x1 - 0.47) + 1.19))
#AGN1=np.logical_xor(np.logical_xor(SF1,SNmask),Composite)
AGN1 = np.logical_and(
nonRGmask,
np.logical_xor(
np.logical_xor(
SF1, tot_mask),
Composite))
if is_print:
print("AGN1 number is %d" % sum(AGN1))
print("SF1 number is %d" % sum(SF1))
print("Composite number is %d" % sum(Composite))
print("Retired number is %d" % sum(RGmask))
print("Total number is %d" % sum(AGN1 + SF1 + Composite + RGmask))
print('')
SF2 = np.logical_and(
nonRGmask, np.logical_and(y2 < 0.72 / (x2 - 0.32) + 1.30, x2 <= 0.05))
LINER = np.logical_and.reduce([
nonRGmask,
np.logical_and(np.logical_xor(SF2, tot_mask), y2 <= 1.89 * x2 + 0.76),
np.logical_or(AGN1, Composite)])
AGN2 = np.logical_and.reduce([
nonRGmask,
np.logical_and(np.logical_xor(SF2, tot_mask), y2 > 1.89 * x2 + 0.76),
np.logical_or(AGN1, Composite)])
if is_print:
print("SF2 number is %d" % sum(SF2))
print("LINER number is %d" % sum(LINER))
print("Seyfert number is %d" % sum(AGN2))
print("Retired number is %d" % sum(RGmask))
print("Total number is %d" % sum(AGN2 + SF2 + LINER + RGmask))
print('')
if show_fig:
fig = plt.figure(figsize=(size[0], size[1]))
############## fig1 ###################
ax1 = plt.subplot2grid((1, 2), (0, 0))
labels = 'SF', 'Composite', 'AGN', 'Retired'
sizes = [sum(SF1), sum(Composite), sum(AGN1), sum(RGmask)]
explode = (0, 0, 0, 0)
colors = 'lawngreen', 'skyblue', 'salmon', 'gray'
plt.pie(
sizes,
explode=explode,
labels=labels,
autopct='%1.1f%%',
shadow=False,
startangle=90,
colors=colors)
plt.title('BPT')
############## fig2 ###################
ax2 = plt.subplot2grid((1, 2), (0, 1))
labels = 'SF', 'LINER', 'Seyfert', 'Retired'
sizes = [sum(SF2), sum(LINER), sum(AGN2), sum(RGmask)]
explode = (0, 0, 0, 0)
colors = 'lawngreen', 'orange', 'r', 'gray'
plt.pie(
sizes,
explode=explode,
labels=labels,
autopct='%1.1f%%',
shadow=False,
startangle=90,
colors=colors)
plt.title('VO87')
#plt.savefig('Fig1.eps', dpi=400)
plt.suptitle(suptitle)
plt.show()
return AGN1, SF1, Composite, SF2, AGN2, LINER, RGmask, nonRGmask
def paper_BPT_and_RG(ring_fits, rthis_ew, rthis_x, rthis_y, rthis_x2, rthis_y2,
all_fits, tthis_ew,tthis_x, tthis_y, tthis_x2, tthis_y2, savefile=None):
'''
Plot BPT diagram for the paper.
Parameters:
-----------
rthis_ew: np.array, EW(Ha) of ring sample.
rthis_x, rthis_y, rthis_x2, rthis_y2: np.array, correspond to NII/Ha, OIII/Hb, SII/Ha and OIII/Hb of the ring sample, respectively.
tthis_ew: np.array, EW(Ha) of total sample.
tthis_x, tthis_y, tthis_x2, tthis_y2: np.array, correspond to NII/Ha, OIII/Hb, SII/Ha and OIII/Hb of the total sample, respectively.
savefile: string, the path of saving your figure.
Return:
-------
BPT and VO87 diagrams.
'''
rAGN1, rSF1, rComposite, rSF2, rAGN2, rLINER, rRGmask, rnonRGmask = koi.AGN_diagnosis(
ring_fits, rthis_ew, rthis_y, rthis_x, rthis_y2, rthis_x2,
'Ha ring sample \n Using 2.6kpc aperture and its EW', [9, 4], show_fig=False, is_print=False)
tAGN1, tSF1, tComposite, tSF2, tAGN2, tLINER, tRGmask, tnonRGmask = koi.AGN_diagnosis(
all_fits, tthis_ew, tthis_y, tthis_x, tthis_y2, tthis_x2,
'Total sample \n using 2.6kpc aperture and its EW', [9, 4], show_fig=False, is_print=False)
fig = plt.figure(figsize=(26, 6.5))
plt.rcParams['font.size'] = 25.0
ax1 = plt.subplot2grid((1, 4), (0, 0))
rRGmask = (rthis_ew <= 3)
rnonRGmask = (rthis_ew > 3)
plt.scatter(
rthis_x[rRGmask],
rthis_y[rRGmask],
c='gray',
s=5,
marker='o',
label='RG',
alpha=0.5)
plt.scatter(
rthis_x[np.logical_and(rnonRGmask, rAGN2)],
rthis_y[np.logical_and(rnonRGmask, rAGN2)],
c='red',
s=15,
marker='o',
label='Seyfert',
alpha=0.5)
plt.scatter(
rthis_x[np.logical_and(rnonRGmask, rLINER)],
rthis_y[np.logical_and(rnonRGmask, rLINER)],
c='orange',
s=15,
marker='o',
label='LINER',
alpha=0.5)
plt.scatter(
rthis_x[np.logical_and(rnonRGmask, rComposite)],
rthis_y[np.logical_and(rnonRGmask, rComposite)],
c='blue',
s=15,
marker='o',
label='Composite',
alpha=0.5)
plt.scatter(
rthis_x[np.logical_and(rnonRGmask, rSF1)],
rthis_y[np.logical_and(rnonRGmask, rSF1)],
c='green',
s=15,
marker='o',
label='SF',
alpha=0.5)
x = np.linspace(-1.28, 0.04, 100)
y1 = 0.61 / (x - 0.05) + 1.30
plt.plot(x, y1, 'r--', alpha=0.5)
x = np.linspace(-2.5, 0.3, 100)
y2 = 0.61 / (x - 0.47) + 1.19
plt.plot(x, y2, 'b--', alpha=0.5)
x = np.linspace(-0.18, 0.9, 100)
#y3 = 1.05*x + 0.45
#plt.plot(x,y3,'g--',alpha=0.5)
plt.text(-0.8, -0.1, 'SF')
plt.text(0.2, 0.9, 'AGN')
plt.text(-0.25, -0.85, 'Composite')
plt.xlim(-1.3, 0.6)
plt.ylim(-1.2, 1.3)
xmin, xmax = ax1.get_xlim()
ymin, ymax = ax1.get_ylim()
plt.text(
0.05 * (xmax - xmin) + xmin,
ymin + (ymax - ymin) * 0.90,
'(a)',
fontsize=25)
plt.ylabel(r'$\log$([O III]/H$\beta$)')
plt.xlabel(r'$\log$([N II]/H$\alpha$)')
ax1.yaxis.set_ticks_position('both')
ax1.tick_params(direction='in')
#plt.title('RG='+ str(sum(RGmask))+' and Non-RG='+str(sum(nonRGmask)))
leg = plt.legend(markerscale=1.4, fontsize=20, framealpha=0.5, edgecolor='k')
for l in leg.legendHandles:
l.set_alpha(0.8)
plt.grid('off')
plt.xticks([-1,-0.5,0,0.5])
ax2 = plt.subplot2grid((1, 4), (0, 1))
RGmask = (rthis_ew <= 3)
nonRGmask = (rthis_ew > 3)
plt.scatter(
rthis_x2[np.logical_and(rnonRGmask, rAGN2)],
rthis_y2[np.logical_and(rnonRGmask, rAGN2)],
c='red',
s=15,
marker='o',
label='Seyfert',
alpha=0.5)
plt.scatter(
rthis_x2[np.logical_and(rnonRGmask, rLINER)],
rthis_y2[np.logical_and(rnonRGmask, rLINER)],
c='orange',
s=15,
marker='o',
label='LINER',
alpha=0.5)
plt.scatter(
rthis_x2[np.logical_and(rnonRGmask, rComposite)],
rthis_y2[np.logical_and(rnonRGmask, rComposite)],
c='blue',
s=15,
marker='o',
label='Composite',
alpha=0.5)
plt.scatter(
rthis_x2[np.logical_and(rnonRGmask, rSF1)],
rthis_y2[np.logical_and(rnonRGmask, rSF1)],
c='green',
s=15,
marker='o',
label='SF',
alpha=0.5)
plt.scatter(
rthis_x2[rRGmask],
rthis_y2[rRGmask],
c='gray',
s=5,
marker='o',
label='RG',
alpha=0.3)
x = np.linspace(-0.3, 0.5, 100)
y1 = 1.89 * x + 0.76
plt.plot(x, y1, 'g--', alpha=0.5)
x = np.linspace(-2.5, 0.1, 100)
y2 = 0.72 / (x - 0.32) + 1.30
y3 = 0.48 / (x - 0.10) + 1.30
plt.plot(x, y2, 'b--', alpha=0.5)
#plt.plot(x,y3,'r--',alpha=0.5)
plt.text(-1, -0.8, 'SF \& Composite')
plt.text(-0.8, 0.9, 'Seyfert')
plt.text(0.2, 0.7, 'LINER')
plt.xlim(-1.2, 0.6)
plt.ylim(-1.2, 1.3)
xmin, xmax = ax2.get_xlim()
ymin, ymax = ax2.get_ylim()
plt.text(
0.05 * (xmax - xmin) + xmin,
ymin + (ymax - ymin) * 0.90,
'(b)',
fontsize=25)
#plt.ylabel(r'$\log$ OIII/H$\beta$')
plt.xlabel(r'$\log$([S II]/H$\alpha$)')
ax2.yaxis.set_ticks_position('both')
ax2.tick_params(direction='in')
#plt.title('RG='+ str(sum(RGmask))+' and Non-RG='+str(sum(nonRGmask)))
#plt.legend(frameon=False, fontsize=12)
plt.grid('off')
plt.xticks([-1,-0.5,0,0.5])
ax3 = plt.subplot2grid((1, 4), (0, 2))
tRGmask = (tthis_ew <= 3)
tnonRGmask = (tthis_ew > 3)
plt.scatter(
tthis_x[tRGmask],
tthis_y[tRGmask],
c='gray',
s=5,
marker='o',
label='RG',
alpha=0.3)
plt.scatter(
tthis_x[np.logical_and(tnonRGmask, tComposite)],
tthis_y[np.logical_and(tnonRGmask, tComposite)],
c='blue',
s=15,
marker='o',
label='Composite',
alpha=0.5)
plt.scatter(
tthis_x[np.logical_and(tnonRGmask, tSF1)],
tthis_y[np.logical_and(tnonRGmask, tSF1)],
c='green',
s=15,
marker='o',
label='SF',
alpha=0.5)
plt.scatter(
tthis_x[np.logical_and(tnonRGmask, tLINER)],
tthis_y[np.logical_and(tnonRGmask, tLINER)],
c='orange',
s=15,
marker='o',
label='LINER',
alpha=0.5)
plt.scatter(
tthis_x[np.logical_and(tnonRGmask, tAGN2)],
tthis_y[np.logical_and(tnonRGmask, tAGN2)],
c='red',
s=15,
marker='o',
label='Seyfert',
alpha=0.5)
x = np.linspace(-1.28, 0.04, 100)
y1 = 0.61 / (x - 0.05) + 1.30
plt.plot(x, y1, 'r--', alpha=0.5)
x = np.linspace(-2.5, 0.3, 100)
y2 = 0.61 / (x - 0.47) + 1.19
plt.plot(x, y2, 'b--', alpha=0.5)
x = np.linspace(-0.18, 0.9, 100)
plt.text(-0.95, -0.65, 'SF')
plt.text(0.2, 0.9, 'AGN')
plt.text(-0.25, -0.85, 'Composite')
plt.xlim(-1.3, 0.6)
plt.ylim(-1.2, 1.3)
xmin, xmax = ax3.get_xlim()
ymin, ymax = ax3.get_ylim()
plt.text(
0.05 * (xmax - xmin) + xmin,
ymin + (ymax - ymin) * 0.90,
'(c)',
fontsize=25)
plt.xlabel(r'$\log$([N II]/H$\alpha$)')
ax3.yaxis.set_ticks_position('both')
ax3.tick_params(direction='in')
#plt.title('RG='+ str(sum(tRGmask))+' and Non-RG='+str(sum(tnonRGmask)))
#plt.legend(frameon=False, fontsize=12)
plt.grid('off')
plt.xticks([-1,-0.5,0,0.5])
ax4 = plt.subplot2grid((1, 4), (0, 3))
tRGmask = (tthis_ew <= 3)
tnonRGmask = (tthis_ew > 3)
plt.scatter(
tthis_x2[tRGmask],
tthis_y2[tRGmask],
c='gray',
s=5,
marker='o',
label='RG',
alpha=0.3)
plt.scatter(
tthis_x2[np.logical_and(tnonRGmask, tAGN2)],
tthis_y2[np.logical_and(tnonRGmask, tAGN2)],
c='red',
s=15,
marker='o',
label='Seyfert',
alpha=0.5)
plt.scatter(
tthis_x2[np.logical_and(tnonRGmask, tSF1)],
tthis_y2[np.logical_and(tnonRGmask, tSF1)],
c='green',
s=15,
marker='o',
label='SF',
alpha=0.5)
plt.scatter(
tthis_x2[np.logical_and(tnonRGmask, tLINER)],
tthis_y2[np.logical_and(tnonRGmask, tLINER)],
c='orange',
s=15,
marker='o',
label='LINER',
alpha=0.5)
plt.scatter(
tthis_x2[np.logical_and(tnonRGmask, tComposite)],
tthis_y2[np.logical_and(tnonRGmask, tComposite)],
c='blue',
s=15,
marker='o',
label='Composite',
alpha=0.5)
x = np.linspace(-0.3, 0.5, 100)
y1 = 1.89 * x + 0.76
plt.plot(x, y1, 'g--', alpha=0.5)
x = np.linspace(-2.5, 0.1, 100)
y2 = 0.72 / (x - 0.32) + 1.30  # recompute on this panel's x; otherwise the stale y2 from panel (c) is plotted
#y3 = 0.48 / (x - 0.10) + 1.30
plt.plot(x, y2, 'b--', alpha=0.5)
#plt.plot(x, y3,'r--',alpha=0.5)
plt.text(-1, -1.05, 'SF \& Composite')
plt.text(-0.8, 0.9, 'Seyfert')
plt.text(0.2, 0.7, 'LINER')
plt.xlim(-1.2, 0.6)
plt.ylim(-1.2, 1.3)
xmin, xmax = ax4.get_xlim()
ymin, ymax = ax4.get_ylim()
plt.text(
0.05 * (xmax - xmin) + xmin,
ymin + (ymax - ymin) * 0.90,
'(d)',
fontsize=25)
plt.xlabel(r'$\log$([S II]/H$\alpha$)')
ax4.yaxis.set_ticks_position('both')
ax4.tick_params(direction='in')
#ax4.yaxis.set_label_position("right")
#plt.title('RG='+ str(sum(tRGmask))+' and Non-RG='+str(sum(tnonRGmask)))
#plt.legend(fontsize=12,framealpha=0.5, edgecolor='k')
plt.grid('off')
plt.xticks([-1,-0.5,0,0.5])
ax2.get_shared_y_axes().join(ax1, ax2)
ax2.set_yticklabels([])
ax3.get_shared_y_axes().join(ax2, ax3)
ax3.set_yticklabels([])
ax4.get_shared_y_axes().join(ax3, ax4)
ax4.set_yticklabels([])
plt.subplots_adjust(wspace=0)
if savefile is not None:
plt.savefig(savefile, dpi=200, bbox_inches='tight')
plt.show()
#####################################################################
def plot_sample_distribution(
x_arr, y_arr, z_arr, method='count',
x_bins=25, y_bins=25, z_min=None, z_max=None,
contour=True, nticks=5, x_lim=[8.5, 12], y_lim=[-3.3, 1.5],
n_contour=6, scatter=True, colorbar=False, gaussian=1,
xlabel=r'$\log (M_{*}/M_{\odot})$',
ylabel=r'$\log (\rm{SFR}/M_{\odot}\rm{yr}^{-1})$',
title=None,
x_title=0.6, y_title=0.1, s_alpha=0.1, s_size=10):
"""Density plot."""
from astroML.stats import binned_statistic_2d
from scipy.ndimage.filters import gaussian_filter
ORG = plt.get_cmap('OrRd')
ORG_2 = plt.get_cmap('YlOrRd')
BLU = plt.get_cmap('PuBu')
BLK = plt.get_cmap('Greys')
PUR = plt.get_cmap('Purples')
GRN = plt.get_cmap('Greens')
plt.rcParams['figure.dpi'] = 100.0
plt.rc('text', usetex=True)
if x_lim is None:
x_lim = [np.nanmin(x_arr), np.nanmax(x_arr)]
if y_lim is None:
y_lim = [np.nanmin(y_arr), np.nanmax(y_arr)]
x_mask = ((x_arr >= x_lim[0]) & (x_arr <= x_lim[1]))
y_mask = ((y_arr >= y_lim[0]) & (y_arr <= y_lim[1]))
x_arr = x_arr[x_mask & y_mask]
y_arr = y_arr[x_mask & y_mask]
z_arr = z_arr[x_mask & y_mask]
z_stats, x_edges, y_edges = binned_statistic_2d(
x_arr, y_arr, z_arr, method, bins=(np.linspace(8, 12, x_bins), np.linspace(-3.0, 1.5, y_bins)))
if z_min is None:
z_min = np.nanmin(z_stats)
if z_max is None:
z_max = np.nanmax(z_stats)
fig = plt.figure(figsize=(9, 6))
fig.subplots_adjust(left=0.14, right=0.93,
bottom=0.12, top=0.99,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
#ax1.grid(linestyle='--', linewidth=2, alpha=0.5, zorder=0)
if contour:
CT = ax1.contour(x_edges[:-1], y_edges[:-1],
gaussian_filter(z_stats.T, gaussian),
n_contour, linewidths=1.5,
colors=[BLK(0.6), BLK(0.7)],
extend='neither')
#ax1.clabel(CT, inline=1, fontsize=15)
z_stats[z_stats==0] = np.nan
HM = ax1.imshow(z_stats.T, origin='lower',
extent=[x_edges[0], x_edges[-1],
y_edges[0], y_edges[-1]],
vmin=z_min, vmax=z_max,
aspect='auto', interpolation='none',
cmap=BLK)
if scatter:
sc = ax1.scatter(x_arr, y_arr, c=z_arr, cmap='Spectral', alpha=0.3, s=s_size,
label='__no_label__', zorder=1)
fig.colorbar(sc, ax=ax1, orientation='horizontal')  # Axes has no colorbar method; use the Figure's
ax1.set_xlabel(xlabel, size=25)
ax1.set_ylabel(ylabel, size=25)
ax1.set_yticks([-3, -2, -1, 0, 1])
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(22)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(22)
if colorbar:
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from matplotlib.ticker import MaxNLocator
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.2)
cbar_ticks = MaxNLocator(nticks).tick_values(z_min, z_max)
cbar = plt.colorbar(HM, cax=cax, ticks=cbar_ticks)
cbar.solids.set_edgecolor("face")
if title is not None:
ax1.text(x_title, y_title, title, size=30, transform=ax1.transAxes)
ax1.set_xlim(x_lim)
ax1.set_ylim(y_lim)
ax1.tick_params(direction='in')
return fig, z_stats, x_edges, y_edges
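# Hedged usage sketch (the mass/SFR/flag arrays are assumptions):
#   fig, stats, xe, ye = plot_sample_distribution(logmass, logsfr, flag,
#                                                 method='count', scatter=False, colorbar=True)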
#####################################################################
def new_moving_average(x_arr,
y_arr,
z_arr,
mask,
mass_interval_dict,
mass_name,
x_bins=3,
y_bins=8,
step=5,
x_lim=(9, 12),
y_lim=(-13, -9),
num_thres=2):
from astroML.stats import binned_statistic_2d
for num in range(0, step-1):
forward = ((y_lim[0] - y_lim[1]) / y_bins) / (step - 1) * num
z_stats1, x_edges, y_edges = binned_statistic_2d(
x_arr,
y_arr,
z_arr,
'count',
bins=(np.linspace(x_lim[0], x_lim[1], x_bins + 1),
np.linspace(y_lim[0] + forward, y_lim[1] + forward,
y_bins + 1)))
z_stats2, x_edges, y_edges = binned_statistic_2d(
x_arr[mask],
y_arr[mask],
z_arr[mask],
'count',
bins=(np.linspace(x_lim[0], x_lim[1], x_bins + 1),
np.linspace(y_lim[0] + forward, y_lim[1] + forward,
y_bins + 1)))
z_stats = z_stats2 / z_stats1
number_mask = z_stats1[mass_interval_dict[mass_name]] <= num_thres
if num == 0:
x_stacks = (y_edges[:-1] + y_edges[1:]) / 2
y_stacks = z_stats[mass_interval_dict[mass_name]]
y_stacks[number_mask] = np.nan
# Binomial Error #
r = 0.842
err_stacks = r * np.sqrt(z_stats[mass_interval_dict[mass_name]] *
(1 - z_stats[mass_interval_dict[mass_name]]) /
z_stats1[mass_interval_dict[mass_name]])
err_stacks[number_mask] = np.nan
else:
x_stacks = np.vstack([x_stacks, (y_edges[:-1] + y_edges[1:]) / 2])
y = z_stats[mass_interval_dict[mass_name]]
y[number_mask] = np.nan
y_stacks = np.vstack([y_stacks, y])
# Binomial Error #
err = r * np.sqrt(z_stats[mass_interval_dict[mass_name]] *
(1 - z_stats[mass_interval_dict[mass_name]]) /
z_stats1[mass_interval_dict[mass_name]])
err[number_mask] = np.nan
err_stacks = np.vstack([err_stacks, err])
return x_stacks, y_stacks, err_stacks
#####################################################################
#####################################################################
def RG_moving_average(x_arr,
y_arr,
z_arr,
mask,
x_bins=3,
y_bins=8,
step=5,
x_lim=(9, 12),
y_lim=(-13, -9),
num_thres=2):
from astroML.stats import binned_statistic_2d
for num in range(0, step-1):
forward = ((y_lim[0] - y_lim[1]) / y_bins) / (step - 1) * num
z_stats1, x_edges, y_edges = binned_statistic_2d(
x_arr,
y_arr,
z_arr,
'count',
bins=(np.linspace(x_lim[0], x_lim[1], x_bins + 1),
np.linspace(y_lim[0] + forward, y_lim[1] + forward,
y_bins + 1)))
z_stats2, x_edges, y_edges = binned_statistic_2d(
x_arr[mask],
y_arr[mask],
z_arr[mask],
'count',
bins=(np.linspace(x_lim[0], x_lim[1], x_bins + 1),
np.linspace(y_lim[0] + forward, y_lim[1] + forward,
y_bins + 1)))
z_stats = z_stats2.sum(axis=0) / z_stats1.sum(axis=0)
number_mask = z_stats1.sum(axis=0) <= num_thres
if num == 0:
x_stacks = (y_edges[:-1] + y_edges[1:]) / 2
y_stacks = z_stats
y_stacks[number_mask] = np.nan
# Binomial Error #
r = 0.842
err_stacks = r * np.sqrt(z_stats *
(1 - z_stats) /
z_stats1.sum(axis=0))
err_stacks[number_mask] = np.nan
else:
x_stacks = np.vstack([x_stacks, (y_edges[:-1] + y_edges[1:]) / 2])
y = z_stats
y[number_mask] = np.nan
y_stacks = np.vstack([y_stacks, y])
# Binomial Error #
err = r * np.sqrt(z_stats *
(1 - z_stats) /
z_stats1.sum(axis=0))
err[number_mask] = np.nan
err_stacks = np.vstack([err_stacks, err])
return x_stacks, y_stacks, err_stacks
#####################################################################
#####################################################################
def moving_average(line, SSFR_bin, density, step, number_limit, rxdata, rydata, txdata, tydata, rmask, tmask):
xset = []
yset = []
errset = []
for j in range(0, density):
forward = SSFR_bin / density * j
#bins_for_SSFR = np.linspace(-13 + forward, -9 + forward, step)
bins_for_SSFR = np.arange(-13 + forward, -9 + forward, SSFR_bin)
bins_for_mass =
|
np.linspace(8, 12, step)
|
numpy.linspace
|
# DeepImpute implementation in PyTorch
import numpy as np
import pandas as pd
import scipy.sparse
import scipy.io
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.stats import expon
import torch.optim as optim
from typing import Union
from scdeep.trainer import Trainer
from scdeep.dataset import GeneExpressionDataset
class SubModule(nn.Module):
def __init__(self, input_dim, output_dim=512):
super(SubModule, self).__init__()
self.dense_layer = nn.Linear(input_dim, 256)
self.dropout = nn.Dropout(0.2)
self.output_layer = nn.Linear(256, output_dim)
def forward(self, x):
x = F.relu(self.dense_layer(x))
x = self.dropout(x)
x = F.softplus(self.output_layer(x))
return x
class DeepImputeModel(nn.Module):
def __init__(self):
super(DeepImputeModel, self).__init__()
self.inp = []
self.module_list = nn.ModuleList([])
def initialize_modules(self, input_dims):
self.inp = input_dims
for i in self.inp:
self.module_list.append(SubModule(input_dim=i))
def forward(self, x):
# x = torch.split(x, self.inp, dim=1)
output = []
for i, mod in enumerate(self.module_list):
output.append(mod(x[i]))
output = torch.cat(output, dim=1)
return output
class DeepImputeTrainer(Trainer):
def __init__(
self,
model: DeepImputeModel,
gene_dataset: GeneExpressionDataset,
genes_to_impute=None,
min_vmr=0.5,
nn_lim="auto",
number_predictor=None,
n_top_correlated_genes=5,
subset_dim=512,
mode="random",
**kwargs
):
super().__init__(model, gene_dataset, **kwargs)
self.targets = []
self.predictors = []
self.min_vmr = min_vmr
self.subset_dim = subset_dim
if type(self.gene_dataset.data) == np.ndarray or type(self.gene_dataset.data) == scipy.sparse.csr_matrix:
self.gene_dataset.data = pd.DataFrame(self.gene_dataset.data, dtype='float32')
else:
self.gene_dataset.data = pd.DataFrame(self.gene_dataset.data.values, dtype='float32')
# (note: below operations are carried out on data assuming it is a pandas dataframe)
genes_vmr = (self.gene_dataset.data.var() / (1 + self.gene_dataset.data.mean())).sort_values(ascending=False)
genes_vmr = genes_vmr[genes_vmr > 0]
# In case 1, while filling genes, we repeat genes that have been previously selected
# but do not choose genes that have a VMR < 0.5
# In case 2, irrespective of VMR, we select genes from the already selected gene pool for filling genes
if genes_to_impute is None:
genes_to_impute = self.filter_genes(genes_vmr, min_vmr, nn_lim)
else:
number_genes = len(genes_to_impute)
if number_genes % self.subset_dim != 0:
number_fill = self.subset_dim - (number_genes % self.subset_dim)
fill_genes = genes_to_impute[:number_fill]
genes_to_impute = np.concatenate((genes_to_impute, fill_genes))
self.corr_matrix = self.correlation_matrix(self.gene_dataset.data, number_predictor)
# the next two functions save the INDICES of genes that are to form the predictors and targets
self.set_targets(self.gene_dataset.data.reindex(columns=genes_to_impute), mode)
self.set_predictors(self.corr_matrix, n_top_correlated_genes)
self.gene_dataset.data = np.log1p(self.gene_dataset.data.values).astype(np.float32)
# self.gene_dataset.data = torch.tensor(self.gene_dataset.data.values)
(train_set, test_set, val_set) = self.train_test_validation()
self.register_posterior(train_set, test_set, val_set)
def loss(self, y_pred, y_true):
l = []
for i, ytrue in enumerate(y_true):
a = ytrue - y_pred[i]
y = (ytrue * torch.mul(a, a))
l.append(y.mean())
return l
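# Note on the loss above (sketch of the arithmetic, not part of the original code):
# each sub-network minimises a weighted MSE, mean(y_true * (y_true - y_pred)**2),
# so highly expressed genes weigh more and zero entries contribute nothing.
# e.g. y_true = [0, 2], y_pred = [1, 1] -> terms [0*1, 2*1] -> mean = 1.0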
def on_training_begin(self):
self.model.initialize_modules([len(pred) for pred in self.predictors])
self.model = self.model.cuda() if self.use_cuda else self.model
for name, param in self.model.named_parameters():
print(name)
print(param.shape)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
def model_output(self, data_tensor):
data_tensors, indices = data_tensor
inp = [data_tensors[:, column] for column in self.predictors]
# inp = torch.split(data_tensors, [len(i) for i in self.predictors], dim=1)
output = self.model(inp)
output = torch.split(output, [len(t) for t in self.targets], dim=1)
target = [data_tensors[:, column] for column in self.targets]
return output, target
def on_training_loop(self, data_tensor):
output, target = self.model_output(data_tensor)
self.current_loss = loss = self.loss(output, target)
self.current_loss = torch.mean(torch.stack(loss))
self.optimizer.zero_grad()
for l in loss:
l.backward(retain_graph=True)
self.optimizer.step()
@torch.no_grad()
def on_validation(self, data_tensor, loss):
output, target = self.model_output(data_tensor)
loss.append(np.asarray([l.item() for l in self.loss(output, target)]).mean())
return loss
def correlation_matrix(self, data, number_predictor=None):
# use the coefficient of variation (CV) to rank genes and select the candidates that enter the gene-gene correlation matrix
# data = data.loc[:, ~data.columns.duplicated()]
cv = data.std() / data.mean()
cv[np.isinf(cv)] = 0
if number_predictor is None:
predictions = data.loc[:, cv > 0]
else:
predictions = data.loc[:, cv.sort_values(ascending=False).index[:number_predictor]]
# the strength of the linear relationship between two genes
# is measured by the absolute value of the Pearson correlation coefficient
corr_mat = pd.DataFrame(np.abs(np.corrcoef(predictions, rowvar=False)), index=predictions.columns,
columns=predictions.columns).fillna(0)
return corr_mat
def filter_genes(self, genes, min_vmr: Union[int, float], nn_lim):
if not str(nn_lim).isdigit():
nn_lim = (genes > min_vmr).sum()
number_subsets = int(np.ceil(nn_lim / self.subset_dim))
genes_to_impute = genes.index[:number_subsets * self.subset_dim]
rest = self.subset_dim - (len(genes_to_impute) % self.subset_dim)
if rest > 0 and rest != self.subset_dim:
fill_genes = np.random.choice(genes.index, rest)
genes_to_impute = np.concatenate((genes_to_impute, fill_genes))
# genes_to_impute contains the indices of genes that should be included for imputation
return genes_to_impute
def set_targets(self, data, mode): # mode = random / progressive
number_subsets = int(data.shape[1] / self.subset_dim)
if mode == 'progressive':
self.targets = data.columns.values.reshape((number_subsets, self.subset_dim))
else:
self.targets = np.random.choice(data.columns, size=(number_subsets, self.subset_dim), replace=False)
# self.targets = list(self.targets)
def set_predictors(self, covariance_matrix, n_top_correlated_genes):
self.predictors = []
for i, target in enumerate(self.targets):
predictor = covariance_matrix.loc[target].drop(np.intersect1d(target, covariance_matrix.columns), axis=1)
sorted_args = np.argsort(-predictor.values, axis=1)[:, :n_top_correlated_genes]
predictor = predictor.columns[sorted_args.flatten()]
self.predictors.append(np.unique(predictor))
print("Network {}: {} predictors, {} targets".format(i, len(np.unique(predictor)), len(target)))
def get_probs(self, vec, distr):
return {
"exp": expon.pdf(vec, 0, 20),
"uniform": np.tile([1. / len(vec)], len(vec)),
}.get(distr)
def mask_data(self, data_to_mask, test_size, distr="exp", dropout=0.01):
np.random.seed(self.seed)
permuted_indices = np.random.permutation(data_to_mask.shape[0])
test_set = int(np.ceil(data_to_mask.shape[0] * test_size))
test_indices =
|
np.array(permuted_indices[:test_set])
|
numpy.array
|
import bokeh.plotting as bkp
from bokeh.io import export_png
import pickle as pk
import numpy as np
import sys, os
#make it so we can import models/etc from parent folder
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
from plotting import *
import bokeh.layouts as bkl
from bokeh.models import Title
dnm=sys.argv[1]
if dnm=='adult':
dnmnm='ADULT'
elif dnm=='phish':
dnmnm='PHISHING'
else:
dnmnm='WEBSPAM'
fldr_figs=sys.argv[2]
fldr_res=sys.argv[3]
beta=float(sys.argv[4])
i0=float(sys.argv[5])
f_rate=int(float(sys.argv[6]))
graddiag=str(sys.argv[7])
structured=str(sys.argv[8])
plot_0=False # plot baseline for zero corruption
algs = [('BCORES', 'β-Cores', pal[0]), ('SVI', 'SparseVI', pal[4]), ('BPSVI', 'PSVI', pal[7]), ('RAND', 'Uniform', pal[3])] #
fldr_figs = 'figs'
if not os.path.exists(fldr_figs):
os.mkdir(fldr_figs)
figs=[]
print('Plotting ' + dnm)
if dnm=='adult':
fig = bkp.figure(y_axis_label='Predictive Accuracy', x_axis_label='# Iterations', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig, '72pt', False, False)
fig2 = bkp.figure(y_axis_label='Predictive Accuracy', x_axis_label='Coreset Size', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig2, '72pt', False, False)
fig3 = bkp.figure(y_axis_label='Negative Predictive Log-Likelihood', x_axis_label='# Iterations', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig3, '72pt', False, False)
fig4 = bkp.figure(y_axis_label='Predictive LogLik', x_axis_label='Coreset Size', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig4, '72pt', False, False)
figs.append([fig, fig2, fig3, fig4])
else:
fig = bkp.figure(y_axis_label='', x_axis_label='# Iterations', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig, '72pt', False, False)
fig2 = bkp.figure(y_axis_label='', x_axis_label='Coreset Size', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig2, '72pt', False, False)
fig3 = bkp.figure(y_axis_label='', x_axis_label='# Iterations', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig3, '72pt', False, False)
fig4 = bkp.figure(y_axis_label='', x_axis_label='Coreset Size', plot_height=1500, plot_width=2000, toolbar_location=None)
preprocess_plot(fig4, '72pt', False, False)
figs.append([fig, fig2, fig3, fig4])
M=101
#if dnm=='webspam': M=151
for alg in algs:
#if alg[0]=='BPSVI':
# trials = [fn for fn in os.listdir(fldr_res) if fn.startswith(dnm+'_'+alg[0]+'_frate_'+str(f_rate)+'_i0_0.1_beta_0.9_graddiag_'+str(graddiag)+'_'+str(structured))]
# print('trials : ', trials)
#else:
trials = [fn for fn in os.listdir(fldr_res) if fn.startswith(dnm+'_'+alg[0]+'_frate_'+str(f_rate)+'_i0_'+str((i0))+'_beta_'+str(beta)+'_graddiag_'+str(graddiag)+'_'+str(structured))]
if len(trials) == 0:
fig.line([], [], color=alg[2], legend_label=alg[1], line_width=10); fig.patch([], [], color=alg[2], legend_label=alg[1], alpha=0.3)
fig2.line([], [], color=alg[2], legend_label=alg[1], line_width=10); fig2.patch([], [], color=alg[2], legend_label=alg[1], alpha=0.3)
fig3.line([], [], color=alg[2], legend_label=alg[1], line_width=10); fig3.patch([], [], color=alg[2], legend_label=alg[1], alpha=0.3)
fig4.line([], [], color=alg[2], legend_label=alg[1], line_width=10); fig4.patch([], [], color=alg[2], legend_label=alg[1], alpha=0.3)
continue
accs = np.zeros((len(trials), M))
plls = np.zeros((len(trials), M))
cszs = np.zeros((len(trials), M))
for tridx, fn in enumerate(trials):
f = open(os.path.join(fldr_res,fn), 'rb')
res = pk.load(f) #(w, p, accs, pll)
f.close()
wts = res[0][:M]
pts = res[1][:M]
accs[tridx] = res[2][:M]
plls[tridx] = -res[3][:M]
cszs[tridx, :] = np.array([len(w) for w in wts][:M])
csz50 = np.percentile(cszs, 50, axis=0)
csz25 = np.percentile(cszs, 25, axis=0)
csz75 = np.percentile(cszs, 75, axis=0)
acc50 = np.percentile(accs, 50, axis=0)
acc25 = np.percentile(accs, 25, axis=0)
acc75 = np.percentile(accs, 75, axis=0)
pll50 = np.percentile(plls, 50, axis=0)
pll25 = np.percentile(plls, 25, axis=0)
pll75 = np.percentile(plls, 75, axis=0)
fig.line(
|
np.arange(acc50.shape[0])
|
numpy.arange
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
x =
|
np.arange(0, 2*np.pi, 0.1)
|
numpy.arange
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.base import clone
from sklearn.linear_model import LinearRegression, LassoCV
from sklearn.model_selection import GroupKFold
import scipy
class DynamicPanelDML:
def __init__(self, model_t=LassoCV(cv=3),
model_y=LassoCV(cv=3),
n_cfit_splits=3):
model_t_copy = clone(model_t, safe=False)
model_y_copy = clone(model_y, safe=False)
self._model_t_gen = lambda: clone(model_t_copy, safe=False)
self._model_y_gen = lambda: clone(model_y_copy, safe=False)
self._n_cfit_splits = n_cfit_splits
return
def fit_nuisances(self, Y, T, X, groups, n_periods):
''' Fits all the nuisance models and calculates all residuals for each period and information set
'''
resT = {}
resY = {}
for kappa in np.arange(n_periods):
resT[kappa] = {}
resY[kappa] = np.zeros(self._n_train_units)
for tau in np.arange(kappa, n_periods):
resT[kappa][tau] = np.zeros(
(self._n_train_units,) + T.shape[1:])
for train, test in GroupKFold(self._n_cfit_splits).split(X, Y, groups):
inds_train = train[np.arange(train.shape[0]) % n_periods == 0]
inds_test = test[np.arange(test.shape[0]) % n_periods == 0]
for kappa in np.arange(n_periods):
for tau in np.arange(kappa, n_periods):
resT[kappa][tau][inds_test // n_periods] = T[inds_test + tau]\
- self._model_t_gen().fit(X[inds_train + kappa],
T[inds_train + tau]).predict(X[inds_test + kappa])
resY[kappa][inds_test // n_periods] = Y[inds_test + n_periods - 1]\
- self._model_y_gen().fit(X[inds_train + kappa],
Y[inds_train + n_periods - 1]).predict(X[inds_test + kappa])
return resT, resY
def _fit_cov_matrix(self, resT, resY, models):
''' Calculates the covariance (n_periods*n_treatments) x (n_periods*n_treatments) matrix for all the parameters
'''
n_periods = len(models)
M = np.zeros((n_periods * self._n_treatments,
n_periods * self._n_treatments))
Sigma = np.zeros((n_periods * self._n_treatments,
n_periods * self._n_treatments))
for kappa in
|
np.arange(n_periods)
|
numpy.arange
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 15:54:48 2018
@author: work
"""
import numpy as np
import keras.backend as K
from keras.utils import plot_model
from scipy.signal import convolve2d
def delta(pred):
dpred = np.zeros_like(pred)
dpred[1:-1,1:-1,0] = pred[2:,2:,0]-pred[:-2,2:,0] + \
2*(pred[2:,1:-1,0]- pred[:-2,1:-1,0]) + \
pred[2:,:-2,0]-pred[:-2,:-2,0]
dpred[1:-1,1:-1,1] = pred[2:,2:,1]-pred[2:,:-2,1] + \
2*(pred[1:-1,2:,1]-pred[1:-1,:-2,1]) +\
pred[:-2,2:,1]-pred[:-2,:-2,1]
dpred[1:-1,1:-1,2]= (pred[2:,2:,2]-pred[:-2,:-2,2] +\
pred[1:-1,2:,2]-pred[:-2,1:-1,2]+\
pred[2:,1:-1,2]-pred[1:-1,:-2,2])*
|
np.sqrt(2)
|
numpy.sqrt
|
import numpy as np
import pytest
import shfl
from shfl.private import DataNode
from shfl.differential_privacy.dp_mechanism import RandomizedResponseBinary
from shfl.differential_privacy.dp_mechanism import RandomizedResponseCoins
from shfl.differential_privacy.dp_mechanism import LaplaceMechanism
from shfl.differential_privacy.dp_mechanism import ExponentialMechanism
from shfl.differential_privacy.dp_mechanism import GaussianMechanism
from shfl.differential_privacy.composition_dp import AdaptiveDifferentialPrivacy
from shfl.differential_privacy.probability_distribution import NormalDistribution
def test_get_epsilon_delta():
e_d = (1, 1)
data_access_definition = AdaptiveDifferentialPrivacy(epsilon_delta=e_d)
assert data_access_definition.epsilon_delta == e_d
def test_randomize_binary_mechanism_coins():
data_size = 100
array = np.ones(data_size)
federated_array = shfl.private.federated_operation.federate_array(array, data_size)
federated_array.configure_data_access(RandomizedResponseCoins())
result = federated_array.query()
differences = 0
for i in range(data_size):
if result[i] != array[i]:
differences = differences + 1
assert 0 < differences < data_size
assert np.mean(result) < 1
def test_randomized_response_coins_epsilon_delta():
randomized_response_coins = RandomizedResponseCoins()
assert randomized_response_coins.epsilon_delta is not None
def test_randomized_response_binary_epsilon_delta():
randomized_response_binary = RandomizedResponseBinary(f0=0.1, f1=0.9, epsilon=1)
assert randomized_response_binary.epsilon_delta is not None
def test_laplace_epsilon_delta():
laplace_mechanism = LaplaceMechanism(sensitivity=0.1, epsilon=1)
assert laplace_mechanism.epsilon_delta is not None
def test_exponential_epsilon_delta():
def u(x, r):
output = np.zeros(len(r))
for i in range(len(r)):
output[i] = r[i] * sum(np.greater_equal(x, r[i]))
return output
r =
|
np.arange(0, 3.5, 0.001)
|
numpy.arange
|
import os
import shutil
from pathlib import Path
import ffmpeg
import matplotlib.pyplot as plt
import numpy as np
import typer
from audiotsm import phasevocoder
from audiotsm.io.wav import WavReader, WavWriter
from pytube import YouTube
from scipy.io import wavfile
TEMP_FOLER = Path(__file__).with_name("TEMP")
def main(
input_file: str = typer.Option(
"", help="The video file you want modified"
),
url: str = typer.Option("", help="A youtube url to download and process"),
output_file: str = typer.Option(
"",
help=(
"The output file. (optional. if not included, it'll just "
"modify the input file name)"
),
),
silent_threshold: float = typer.Option(
0.03,
help=(
"The volume amount that frames' audio needs to surpass to be"
" consider 'sounded'. It ranges from 0 (silence) to 1 (max volume)"
),
),
sounded_speed: float = typer.Option(
1.00,
help=(
"The speed that sounded (spoken) frames "
"should be played at. Typically 1."
),
),
silent_speed: float = typer.Option(
5.00,
help=(
"The speed that silent frames should be "
"played at. 999999 for jumpcutting."
),
),
frame_margin: float = typer.Option(
1, help=("Number of silent frames adjacent to sounded frames.")
),
sample_rate: float = typer.Option(
44100, help="Sample rate of the input and output videos"
),
frame_rate: float = typer.Option(
30, help=("Frame rate of the input and output videos.")
),
frame_quality: int = typer.Option(3, help=("1=highest, 31=lowest")),
pick_tresh: bool = typer.Option(False),
):
"""
Modifies a video file to play at different speeds when there is sound vs.
silence.
"""
input_file: Path = Path(
downloadYoutubeFile(url) if url else input_file
).resolve()
output_file = Path(
output_file
if output_file
else input_file.with_name(
input_file.stem + "_ALTERED" + input_file.suffix
)
)
audio_fade_envelope_size = 400
temp_folder, temp_audio = create_TEMP_FOLER(input_file)
frame_rate = get_frame_rate(input_file, frame_rate)
assert frame_rate > 0, "must be greater than zero"
# extract audio
print(f"Extracting audio file:{temp_audio}")
ffmpeg.input(input_file.as_posix()).output(
temp_audio.as_posix(), ab="160k", ac=2, ar=str(int(sample_rate))
).run(capture_stdout=True, capture_stderr=True)
print()
in_sample_rate, audio_data = wavfile.read(temp_audio)
sample_rate = in_sample_rate if in_sample_rate else sample_rate
sample_count = audio_data.shape[0]
max_volume = abs(audio_data).max()
samples_per_frame = sample_rate / frame_rate
frame_count = int(np.ceil(sample_count / samples_per_frame))
if pick_tresh:
silent_threshold = pick_threshold(sample_rate, audio_data)
print(f"silent_threshold = {silent_threshold}")
chunk_has_loud_audio = flag_loud_audio_chunks(
frame_count,
samples_per_frame,
sample_count,
audio_data,
max_volume,
silent_threshold,
)
speedChangeList = compute_speed_change_list(
frame_count,
frame_margin,
chunk_has_loud_audio,
silent_speed,
sounded_speed,
)
ffmpeg.input(input_file.as_posix()).output(
(temp_folder / "frame%06d.jpg").as_posix(),
**{"qscale:v": frame_quality},
).run()
outputaudio_data = np.zeros((0, audio_data.shape[1]))
outputPointer = 0
lastExistingFrame = None
premask = np.arange(audio_fade_envelope_size) / audio_fade_envelope_size
mask = np.repeat(premask[:, np.newaxis], 2, axis=1)
for start_stop_cnt, speedChange in enumerate(speedChangeList):
startFrame = speedChange[0]
stopFrame = speedChange[1]
speed = speedChange[2]
print(
f" - SpeedChanges: {start_stop_cnt} of {len(speedChangeList)}",
f" NumFrames:{stopFrame-startFrame}",
)
audioChunk = audio_data[
int(startFrame * samples_per_frame) : int(
stopFrame * samples_per_frame
)
]
alteredaudio_data, length = change_audio_speed(
audioChunk, sample_rate, speed, temp_folder
)
endPointer = outputPointer + length
outputaudio_data = np.concatenate(
(outputaudio_data, alteredaudio_data / max_volume)
)
smooth_audio_transition_between_speeds(
outputaudio_data,
length,
mask,
audio_fade_envelope_size,
outputPointer,
endPointer,
)
copy_frames_output_based_on_speed(
outputPointer,
samples_per_frame,
endPointer,
startFrame,
speed,
lastExistingFrame,
temp_folder,
)
outputPointer = endPointer
wavfile.write(
(temp_folder / "audioNew.wav").as_posix(),
sample_rate,
outputaudio_data,
)
input_video = ffmpeg.input(
(temp_folder / "newFrame%06d.jpg").as_posix(), framerate=frame_rate
)
input_audio = ffmpeg.input((temp_folder / "audioNew.wav").as_posix())
ffmpeg.concat(input_video, input_audio, v=1, a=1).output(
output_file.as_posix(), strict="-2"
).overwrite_output().run()
delete_path(temp_folder)
def downloadYoutubeFile(url):
streams = YouTube(url).streams.order_by("resolution")
audio_codec = (set([s.audio_codec for s in streams]) - {None}).pop()
streams = streams.filter(audio_codec=audio_codec)
name = streams.last().download()
newname = name.replace(" ", "_")
os.rename(name, newname)
return newname
def create_TEMP_FOLER(input_file: Path):
temp_folder = TEMP_FOLER / input_file.stem
if temp_folder.exists():
shutil.rmtree(temp_folder)
temp_folder.mkdir(exist_ok=True, parents=True)
temp_audio = temp_folder / "audio.wav"
return temp_folder, temp_audio
def get_frame_rate(input_file: Path, frame_rate: float):
vid = ffmpeg.probe(input_file.as_posix())
streams = list(
filter(lambda s: s["codec_type"] == "video", vid["streams"])
)
if streams:
stream = streams[0]
frame_rate = (int(stream["nb_frames"]) + 1) / float(stream["duration"])
return frame_rate
def zoom_factory(ax, x, y, base_scale=2.0):
def zoom_fun(event):
cur_xlim = ax.get_xlim()
xdata = event.xdata
if event.button == "up":
scale_factor = 1 / base_scale
elif event.button == "down":
scale_factor = base_scale
else:
scale_factor = 1
print(event.button)
newlim = np.array(cur_xlim)
newlim = xdata + (newlim - xdata) * scale_factor
ax.set_xlim(newlim)
line = ax.lines[0]
newx = np.linspace(*newlim, 1000)
newy = np.interp(newx, x, y)
line.set_xdata(newx)
line.set_ydata(newy)
plt.draw()
ax.plot(x, y)
ax.set_ylim(y.min(), y.max())
fig = ax.get_figure()
fig.canvas.mpl_connect("scroll_event", zoom_fun)
return zoom_fun
def pick_threshold(sample_rate, audio_data):
def onclick(event):
print(
"%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f"
% (
"double" if event.dblclick else "single",
event.button,
event.x,
event.y,
event.xdata,
event.ydata,
)
)
tresh.append(event.ydata)
plt.close()
tresh = []
skiping = sample_rate // 500
data_x = np.arange(audio_data.shape[0])[::skiping] / sample_rate
audio_data = abs(audio_data.T[0])[::skiping]
audio_data = audio_data / audio_data.max()
fig, ax = plt.subplots()
scale = 2
zoom_factory(ax, data_x, audio_data, base_scale=scale)
fig.canvas.mpl_connect("button_press_event", onclick)
plt.show()
return tresh[0]
def flag_loud_audio_chunks(
frame_count,
samples_per_frame,
sample_count,
audio_data,
max_volume,
silent_threshold,
):
chunk_has_loud_audio = np.zeros((frame_count))
for i in range(frame_count):
start = int(i * samples_per_frame)
end = min(int((i + 1) * samples_per_frame), sample_count)
audiochunks = audio_data[start:end]
maxchunksVolume = float(abs(audiochunks).max()) / max_volume
if maxchunksVolume >= silent_threshold:
chunk_has_loud_audio[i] = 1
return chunk_has_loud_audio
def compute_speed_change_list(
frame_count,
frame_spreadage,
chunk_has_loud_audio,
silent_speed,
sounded_speed,
):
# FrameNumberStart, FrameNumberStop, speed
chunks = [[0, 0, 0]]
frameSpeed = np.zeros((frame_count))
new_speeds = [silent_speed, sounded_speed]
for i in range(frame_count):
start = int(max(0, i - frame_spreadage))
end = int(min(frame_count, i + 1 + frame_spreadage))
isLoud = int(
|
np.max(chunk_has_loud_audio[start:end])
|
numpy.max
|
################################################################################
# pyopt_uq_scaneagle2.py
# This file performs a robust design optimization on a Boeing ScanEagle aircraft
# with 6 random variables. The random variables are Ma, TSFC, W0, E, G, mrho.
# This file can be run in 3 ways
#
# 1. Reduced optimization:
# run as `python pyopt_uq_scaneagle_6rv.py reduced`
# This runs the optimization using reduced collocation. Currently the number
# of dominant directions is set to 2. It can be changed to anything between
# 1 and 6 by setting the keyword argument in the function call
# `getDominantDirections(..., max_eigenmodes=5)`, which in this example sets the
# number of dominant directions to 5.
#
# 2. debug:
# run as `python pyopt_uq_scaneagle_6rv.py debug`
# This only creates a UQScanEagleOpt object. It's useful for printing out values
#
# 3. Full collocation
# run as `python pyopt_uq_scaneagle_6rv.py full_collocation`
# This performs RDO using a full 6-dimensional tensor-product grid. It takes
# a long time to run this optimization (the last time I ran it on an i7-3820,
# it took 13 hours).
################################################################################
import sys
import time
# pyStatReduce specific imports
import numpy as np
import chaospy as cp
import copy
from pystatreduce.new_stochastic_collocation import StochasticCollocation2
from pystatreduce.quantity_of_interest import QuantityOfInterest
from pystatreduce.dimension_reduction import DimensionReduction
from pystatreduce.stochastic_arnoldi.arnoldi_sample import ArnoldiSampling
import pystatreduce.examples as examples
# pyoptsparse-specific imports
from scipy import sparse
import argparse
import pyoptsparse # from pyoptsparse import Optimization, OPT, SNOPT
# Import the OpenMDAO and OpenAeroStruct components
from openmdao.api import IndepVarComp, Problem, Group, NewtonSolver, \
ScipyIterativeSolver, LinearBlockGS, NonlinearBlockGS, \
DirectSolver, LinearBlockGS, PetscKSP, SqliteRecorder, ScipyOptimizeDriver
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
# Default values
mean_Ma = 0.071
mean_TSFC = 9.80665 * 8.6e-6
mean_W0 = 10.0
mean_E = 85.e9
mean_G = 25.e9
mean_mrho = 1600
class UQScanEagleOpt(object):
"""
This class is the conduit for linking pyStatReduce and OpenAeroStruct with
pyOptSparse.
"""
def __init__(self, uq_systemsize, all_rv=False):
self.rdo_factor = 2.0
# Total number of nodes to use in the spanwise (num_y) and
# chordwise (num_x) directions. Vary these to change the level of fidelity.
num_y = 21
num_x = 3
mesh_dict = {'num_y' : num_y,
'num_x' : num_x,
'wing_type' : 'rect',
'symmetry' : True,
'span_cos_spacing' : 0.5,
'span' : 3.11,
'root_chord' : 0.3,
}
rv_dict = {'Mach_number' : mean_Ma,
'CT' : mean_TSFC,
'W0' : mean_W0,
'E' : mean_E, # surface RV
'G' : mean_G, # surface RV
'mrho' : mean_mrho, # surface RV
}
dv_dict = {'n_twist_cp' : 3,
'n_thickness_cp' : 3,
'n_CM' : 3,
'n_thickness_intersects' : 10,
'n_constraints' : 1 + 10 + 1 + 3 + 3,
'ndv' : 3 + 3 + 2,
'mesh_dict' : mesh_dict,
'rv_dict' : rv_dict
}
mu = np.array([mean_Ma, mean_TSFC, mean_W0, mean_E, mean_G, mean_mrho])
std_dev = np.diag([0.005, 0.00607/3600, 0.2, 5.e9, 1.e9, 50])
self.jdist = cp.MvNormal(mu, std_dev)
self.QoI = examples.OASScanEagleWrapper(uq_systemsize, dv_dict)
# This setup is according to the one in the scaneagle paper
self.QoI.p['oas_scaneagle.wing.thickness_cp'] = 1.e-3 * np.array([5.5, 5.5, 5.5])
self.QoI.p['oas_scaneagle.wing.twist_cp'] = 2.5*np.ones(3)
self.QoI.p.final_setup()
# Compute the dominant directions
self.dominant_space = DimensionReduction(n_arnoldi_sample=uq_systemsize+1,
exact_Hessian=False,
sample_radius=1.e-2)
self.dominant_space.getDominantDirections(self.QoI, self.jdist, max_eigenmodes=2)
dfuelburn_dict = {'dv' : {'dQoI_func' : self.QoI.eval_ObjGradient_dv,
'output_dimensions' : dv_dict['ndv'],
}
}
dcon_dict = {'dv' : {'dQoI_func' : self.QoI.eval_ConGradient_dv,
'output_dimensions' : dv_dict['ndv']
}
}
dcon_failure_dict = {'dv' : {'dQoI_func' : self.QoI.eval_ConFailureGradient_dv,
'output_dimensions' : dv_dict['ndv'],
}
}
self.QoI_dict = {'fuelburn' : {'QoI_func' : self.QoI.eval_QoI,
'output_dimensions' : 1,
'deriv_dict' : dfuelburn_dict
},
'constraints' : {'QoI_func' : self.QoI.eval_AllConstraintQoI,
'output_dimensions' : dv_dict['n_constraints'],
'deriv_dict' : dcon_dict
},
'con_failure' : {'QoI_func' : self.QoI.eval_confailureQoI,
'output_dimensions' : 1,
'deriv_dict' : dcon_failure_dict
}
}
def objfunc_uq(xdict):
"""
Objective function supplied to pyOptSparse for RDO.
"""
rdo_factor = UQObj.rdo_factor
UQObj.QoI.p['oas_scaneagle.wing.twist_cp'] = xdict['twist_cp']
UQObj.QoI.p['oas_scaneagle.wing.thickness_cp'] = xdict['thickness_cp']
UQObj.QoI.p['oas_scaneagle.wing.sweep'] = xdict['sweep']
UQObj.QoI.p['oas_scaneagle.alpha'] = xdict['alpha']
funcs = {}
# Compute statistical metrics
sc_obj.evaluateQoIs(UQObj.jdist)
mu_j = sc_obj.mean(of=['fuelburn', 'constraints'])
var_j = sc_obj.variance(of=['fuelburn', 'con_failure'])
# The RDO objective: mean fuel burn + rdo_factor * (std dev of fuel burn)
funcs['obj'] = mu_j['fuelburn'] + rdo_factor * np.sqrt(var_j['fuelburn'])
# The RDO constraints: mean constraint values; the failure constraint additionally adds rdo_factor * (its std dev)
n_thickness_intersects = UQObj.QoI.p['oas_scaneagle.AS_point_0.wing_perf.thickness_intersects'].size
n_CM = UQObj.QoI.p['oas_scaneagle.AS_point_0.CM'].size
funcs['con_failure'] = mu_j['constraints'][0] + rdo_factor * np.sqrt(var_j['con_failure'][0,0])
funcs['con_thickness_intersects'] = mu_j['constraints'][1:n_thickness_intersects+1]
funcs['con_L_equals_W'] = mu_j['constraints'][n_thickness_intersects+1]
funcs['con_CM'] = mu_j['constraints'][n_thickness_intersects+2:n_thickness_intersects+2+n_CM]
funcs['con_twist_cp'] = mu_j['constraints'][n_thickness_intersects+2+n_CM:]
fail = False
return funcs, fail
def sens_uq(xdict, funcs):
"""
Sensitivity function provided to pyOptSparse for RDO.
"""
rdo_factor = UQObj.rdo_factor
UQObj.QoI.p['oas_scaneagle.wing.twist_cp'] = xdict['twist_cp']
UQObj.QoI.p['oas_scaneagle.wing.thickness_cp'] = xdict['thickness_cp']
UQObj.QoI.p['oas_scaneagle.wing.sweep'] = xdict['sweep']
UQObj.QoI.p['oas_scaneagle.alpha'] = xdict['alpha']
# Compute the statistical metrics
sc_obj.evaluateQoIs(UQObj.jdist, include_derivs=True)
dmu_js = sc_obj.dmean(of=['fuelburn', 'constraints'], wrt=['dv'])
dstd_dev_js = sc_obj.dStdDev(of=['fuelburn', 'con_failure'], wrt=['dv'])
# Get some of the intermediate variables
n_twist_cp = UQObj.QoI.input_dict['n_twist_cp']
n_cp = n_twist_cp + UQObj.QoI.input_dict['n_thickness_cp']
n_CM = UQObj.QoI.input_dict['n_CM']
n_thickness_intersects = UQObj.QoI.p['oas_scaneagle.AS_point_0.wing_perf.thickness_intersects'].size
# Populate the dictionary
funcsSens = {}
dmu_j = dmu_js['fuelburn']['dv']
dstd_dev_j = dstd_dev_js['fuelburn']['dv']
funcsSens['obj', 'twist_cp'] = dmu_j[0,0:n_twist_cp] + rdo_factor * dstd_dev_j[0,0:n_twist_cp]
funcsSens['obj', 'thickness_cp'] = dmu_j[0,n_twist_cp:n_cp] + rdo_factor * dstd_dev_j[0,n_twist_cp:n_cp]
funcsSens['obj', 'sweep'] = dmu_j[0,n_cp:n_cp+1] + rdo_factor * dstd_dev_j[0,n_cp:n_cp+1]
funcsSens['obj', 'alpha'] = dmu_j[0,n_cp+1:n_cp+2] + rdo_factor * dstd_dev_j[0,n_cp+1:n_cp+2]
dmu_con = dmu_js['constraints']['dv']
dstd_dev_con = dstd_dev_js['con_failure']['dv']
funcsSens['con_failure', 'twist_cp'] = dmu_con[0,0:n_twist_cp] + rdo_factor * dstd_dev_con[0,0:n_twist_cp]
funcsSens['con_failure', 'thickness_cp'] = dmu_con[0,n_twist_cp:n_cp] + rdo_factor * dstd_dev_con[0,n_twist_cp:n_cp]
funcsSens['con_failure', 'sweep'] = dmu_con[0,n_cp] + rdo_factor * dstd_dev_con[0,n_cp]
funcsSens['con_failure', 'alpha'] = dmu_con[0,n_cp+1] + rdo_factor * dstd_dev_con[0,n_cp+1]
funcsSens['con_thickness_intersects', 'twist_cp'] = dmu_con[1:n_thickness_intersects+1,0:n_twist_cp]
funcsSens['con_thickness_intersects', 'thickness_cp'] = dmu_con[1:n_thickness_intersects+1,n_twist_cp:n_cp]
funcsSens['con_thickness_intersects', 'sweep'] = dmu_con[1:n_thickness_intersects+1,n_cp:n_cp+1]
funcsSens['con_thickness_intersects', 'alpha'] = dmu_con[1:n_thickness_intersects+1,n_cp+1:]
funcsSens['con_L_equals_W', 'twist_cp'] = dmu_con[n_thickness_intersects+1,0:n_twist_cp]
funcsSens['con_L_equals_W', 'thickness_cp'] = dmu_con[n_thickness_intersects+1,n_twist_cp:n_cp]
funcsSens['con_L_equals_W', 'sweep'] = dmu_con[n_thickness_intersects+1,n_cp]
funcsSens['con_L_equals_W', 'alpha'] = dmu_con[n_thickness_intersects+1,n_cp+1]
idx = n_thickness_intersects + 2
funcsSens['con_CM', 'twist_cp'] = dmu_con[idx:idx+n_CM,0:n_twist_cp]
funcsSens['con_CM', 'thickness_cp'] = dmu_con[idx:idx+n_CM,n_twist_cp:n_cp]
funcsSens['con_CM', 'sweep'] = dmu_con[idx:idx+n_CM,n_cp:n_cp+1]
funcsSens['con_CM', 'alpha'] = dmu_con[idx:idx+n_CM,n_cp+1:]
idx = n_thickness_intersects + 2 + n_CM
funcsSens['con_twist_cp', 'twist_cp'] = dmu_con[idx:,0:n_twist_cp]
fail = False
return funcsSens, fail
if __name__ == "__main__":
# Set some of the initial values of the design variables
init_twist_cp = np.array([2.5, 2.5, 2.5])
init_thickness_cp = 1.e-3 * np.array([5.5, 5.5, 5.5]) # np.array([0.008, 0.008, 0.008])
init_sweep = 20.0
init_alpha = 5.
ndv = 3 + 3 + 1 + 1 # number of design variables
start_time = time.time()
if sys.argv[1] == "reduced":
start_time = time.time()
uq_systemsize = 6
UQObj = UQScanEagleOpt(uq_systemsize, all_rv=True)
xi = np.zeros(uq_systemsize)
# Get some information on the total number of constraints
n_thickness_intersects = UQObj.QoI.p['oas_scaneagle.AS_point_0.wing_perf.thickness_intersects'].size
n_CM = 3
n_constraints = 1 + n_thickness_intersects + 1 + n_CM + 3
# Create the stochastic collocation object based on the dominant directions
dominant_dir = UQObj.dominant_space.dominant_dir
sc_obj = StochasticCollocation2(UQObj.jdist, 3, 'MvNormal', UQObj.QoI_dict,
include_derivs=True , reduced_collocation=True,
dominant_dir=dominant_dir)
sc_obj.evaluateQoIs(UQObj.jdist, include_derivs=True)
# Set up optimization in pyOptSparse
optProb = pyoptsparse.Optimization('UQ_OASScanEagle', objfunc_uq)
n_twist_cp = UQObj.QoI.input_dict['n_twist_cp']
n_thickness_cp = UQObj.QoI.input_dict['n_thickness_cp']
optProb.addVarGroup('twist_cp', n_twist_cp, 'c', lower=-5., upper=10, value=init_twist_cp)
optProb.addVarGroup('thickness_cp', n_thickness_cp, 'c', lower=0.001, upper=0.01, scale=1.e3, value=init_thickness_cp)
optProb.addVar('sweep', lower=10., upper=30., value=init_sweep)
# optProb.addVar('alpha', lower=-10., upper=10.)
optProb.addVar('alpha', lower=0., upper=10., value=init_alpha)
# Constraints
optProb.addConGroup('con_failure', 1, upper=0.)
optProb.addConGroup('con_thickness_intersects', n_thickness_intersects,
upper=0., wrt=['thickness_cp'])
optProb.addConGroup('con_L_equals_W', 1, lower=0., upper=0.)
optProb.addConGroup('con_CM', n_CM, lower=-0.001, upper=0.001)
optProb.addConGroup('con_twist_cp', 3, lower=
|
np.array([-1e20, -1e20, 5.])
|
numpy.array
|
"""
============================================================
GUNDAM : A Toolkit For Fast Two-Point Correlation Functions
============================================================
@author: <NAME> <<EMAIL>>
"""
# Define imports ==========================================================
from __future__ import print_function
import numpy as np
import os, sys
from astropy.table import Table, Column
from logging import getLogger, INFO, FileHandler, StreamHandler
import time
from munch import Munch
from copy import deepcopy
import cflibfor as cff
from collections import OrderedDict
import matplotlib
import matplotlib.pyplot as plt
#==============================================================================
cps = 0 # Current plot style, as defined in plotcf(). Incremented each time
# that function is called.
# =============================================================================
def set_threads(t):
"""
Set the maximum number of threads to use. When ``t<=0`` (e.g. ``t=-1``), it
defaults to the value returned by multiprocessing.cpu_count()
.. rubric:: Parameters
t : integer
Number of threads desired
.. rubric:: Returns
t : integer
Number of threads to use
"""
from multiprocessing import cpu_count
maxt = cpu_count()
if t<=0:
t = maxt
else:
t = min(t,maxt)
return t
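# Usage sketch (not part of the original module):
#   set_threads(-1)  # -> multiprocessing.cpu_count()
#   set_threads(4)   # -> min(4, cpu_count())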
# =============================================================================
def comdis_worker(z, cosmo):
""" Worker function for comoving distancesm For more info check the documentation
of astropy.cosmology module
"""
return cosmo.comoving_distance(z).value
# =============================================================================
def comdis(z, par, nproc):
"""
Calculate comoving distances in parallel using multiprocessing and astropy
cosmology routines
.. rubric:: Parameters
z : array
Array of redshifts
par : Munch dictionary
Input parameters. Used to extract cosmology values
nproc : integer
Number of processors to use
.. rubric:: Returns
res : array
Array of comoving distances
"""
from astropy.cosmology import LambdaCDM
from multiprocessing import Pool
# Create cosmology object
cosmo = LambdaCDM(H0=par.h0, Om0=par.omegam, Ode0=par.omegal)
pool = Pool(processes=nproc) # create pool
zz = np.array_split(z,nproc) # split input array
#Apply comdis_worker() to each chunk of input
res = [pool.apply_async(comdis_worker, [chk,cosmo]) for chk in zz]
pool.close()
pool.join()
res = [chk.get() for chk in res]
res = np.concatenate(res)
return res
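# Usage sketch (not part of the original module; the parameter values are only
# illustrative): par must carry the cosmology fields used above, e.g.
#   par = Munch(h0=70.0, omegam=0.3, omegal=0.7)
#   dc = comdis(z, par, nproc=4)   # comoving distances of z, computed on 4 processes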
# =============================================================================
def cfindex(path='./'):
"""
List file_name + description of all **count** (.cnt) files in a given path.
Useful to quickly check a dir with dozens of correlation function runs
and therefore hundreds of files.
.. rubric:: Parameters
path : string
Path to list descriptions of .cnt files inside
"""
# Get files
cnt_files = [f for f in os.listdir(path) if f.endswith('.cnt')]
cnt_files.sort()
# Get descriptions
descr = [readcounts(path+f,silent=True).par.description for f in cnt_files]
# Print left aligned table
t = Table([cnt_files,descr],names=['File','Description'])
t.pprint(align='<',show_dtype=False,max_width=400)
# =============================================================================
def qprint(self):
"""
Prints a quick, nicely formatted version of Munch dictionaries, such as the
**counts** output dictionary or the **par** input dictionary used by Gundam
routines. Very useful to quickly explore what is stored inside.
This routine is added dynamically to the Munch class, so it can be accessed
as ``any_munch_obj.qprint()``
Note **counts** output dictionaries can also be explored using
:func:`gundam.cnttable` to display a more elaborated tabular view of
(exclusively) the counts calculated.
.. rubric:: Examples
.. code-block:: python
import gundam as gun
# Explore the many arrays and counts of a typical run
cred = gun.readcounts('redgalaxies.cnt')
cred.qprint()
# Check the parameters used to create the run
cred.par.qprint()
"""
def make_custom_sort(orders):
# Allows to sort a dict in a custom order. Useful to print nicely long
# dicts such as the par objects used by Gundam functions. Extracted from :
# http://stackoverflow.com/questions/12031482/custom-sorting-python-dictionary
orders = [{k: -i for (i, k) in enumerate(reversed(order), 1)} for order in orders]
def process(stuff):
if isinstance(stuff, dict):
l = [(k, process(v)) for (k, v) in list(stuff.items())]
keys = set(stuff)
for order in orders:
if keys.issuperset(order):
return OrderedDict(sorted(l, key=lambda x: order.get(x[0], 0)))
return OrderedDict(sorted(l))
if isinstance(stuff, list):
return [process(x) for x in stuff]
return stuff
return process
def eprint(v,idn):
# Print ellipsed version of 1D/2D/3D array
vs = v.squeeze()
ndim = vs.ndim
if ndim==1 : txt = eprint1d(vs)
if ndim==2 : txt = eprint2d(vs,idn)
if ndim==3 : txt = eprint3d(vs,idn)
return txt
def eprint1d(v):
# Print ellipsed version of 1D ndarray
ellip = ' ... '
txt = '[ ' + str(v[0]) + ellip + str(v[v.shape[0]//2]) + ellip + str(v[-1]) + ' ]'
return txt
def eprint2d(m,idn):
# Print ellipsed version of 2D ndarray
ellip = ' ... '
if m.shape[1]>=3:
txt1 = '[ ' + str(m[0,0]) + ellip + str(m[0,m.shape[1]//2]) + ellip + str(m[0,-1]) + ' ]'
txt2 = ' '*idn + '[' + ellip + ellip + ']'
txt3 = ' '*idn + '[ ' + str(m[-1,0]) + ellip + str(m[-1,m.shape[1]//2]) + ellip + str(m[-1,-1]) + ' ]'
else:
txt1 = '[ ' + str(m[0,:]) + ' ]'
txt2 = ' '*idn + '[' + ellip + ']'
txt3 = ' '*idn + '[ ' + str(m[-1,:]) + ' ]'
return '\n'.join([txt1,txt2,txt3])
def eprint3d(m,idn):
# Print ellipsed version of 3D ndarray
ellip = ' ... '
txt1 = '[[ ' + str(m[0,0,0]) + ellip + '///' + ellip + str(m[-1,-1,-1]) + ' ]]'
return '\n'.join([txt1])
spc = ' '
keys = self.keys()
# Find out if we have a counts or a par object to print accordingly
if 'par' in keys:
obj_counts = True
obj_pars = False
else :
obj_counts = False
obj_pars = True
# We have a counts object
if obj_counts:
lj = 12 # nr of characters for left justification of keys
if self.par.kind == 'rppiA':
msg = '(rp,pi) (auto) counts'
elif self.par.kind == 'rppiC':
msg = '(rp,pi) (cross) counts'
elif self.par.kind == 'pcf':
msg = 'Projected Correlation'
elif self.par.kind == 'pccf':
msg = 'Projected Cross-correlation'
elif self.par.kind == 'sA':
msg = '3D (auto) counts'
elif self.par.kind == 'sC':
msg = '3D (cross) counts'
elif self.par.kind == 'rcf':
msg = '3D Correlation'
elif self.par.kind == 'rccf':
msg = '3D Cross-correlation'
elif self.par.kind == 'thA':
msg = 'Angular (auto) counts'
elif self.par.kind == 'thC':
msg = 'Angular (cross) counts'
elif self.par.kind == 'acf':
msg = 'Angular Correlation'
elif self.par.kind == 'accf':
msg = 'Angular Cross-correlation'
headbar = '================= '+msg+' ================='
print(headbar)
if self.par.description :
print('Description'.ljust(lj) + '::', self.par.description)
k = 'npt'
if k in keys:
print(k.ljust(lj) + '::',self.npt)
k = 'npt1'
if k in keys:
print(k.ljust(lj) + '::',self.npt1)
k = 'npt2'
if k in keys:
print(k.ljust(lj) + '::',self.npt2)
allkeys = ['rpl','rpm','rpr','sl','sm','sr','thl','thm','thr',
'wrp','wrperr','xis','xiserr','wth','wtherr',
'dd','rr','dr','cd','cr','bdd','bcd','rppi','intpi',
'brppi','intpib']
for k in allkeys:
if k in keys: print(k.ljust(lj) + '::', eprint(self[k], lj+3))
k = 'log'
if k in keys:
print(k.ljust(lj) + '::', '"', self.log[0:self.log.find('\n')], '... "')
k = 'logfortran'
if k in keys:
print(k.ljust(lj) + '::', '"', self.logfortran[0:self.logfortran.find('\n')], '... "')
k = 'par'
if k in keys:
print(k.ljust(lj) + ':: {')
txt = self.par.toJSON(indent=lj+3, sort_keys=True)
leng = txt.find('\n',100) #just print first few keys
print(txt[2:leng], '\n'+spc*(lj+3)+'...', '\n'+spc*(lj+3)+'}')
# Print remaining keys, if any
sprintedkeys = ['npt','npt1','npt2','log','logfortran','par']
remkeys = set(keys) - set(allkeys) - set(sprintedkeys)
for k in remkeys:
v = self[k]
if type(v) is str : v = '"' + v + '"'
print(k.ljust(lj) + ':: ' + str(v))
# We have a par object
if obj_pars:
lj = 15 # nr of characters for left justification of keys
# This is the order chosen for printing so many parameters
allkeys = ['description','kind','estimator','file','file1','file2','outfn',
'h0','omegam','omegal','calcdist','autogrid','dens',
'mxh1','mxh2','mxh3',
'nsepp','dsepp','seppmin','logsepp',
'nsepv','dsepv',
'nseps','dseps','sepsmin','logseps',
'nsept','dsept','septmin','logsept',
'doboot','nbts','bseed','wfib',
'cra','cdec','cred','cwei','cdcom',
'cra1','cdec1','cred1','cwei1','cdcom1',
'cra2','cdec2','cred2','cwei2','cdcom2','custRAbound']
# Print keys above if present
for k in allkeys:
if k in keys:
v = self[k]
if type(v) is str : v = '"' + v + '"'
print(k.ljust(lj) + ':: ' + str(v))
# Print remaining keys, if any
remkeys = set(keys) - set(allkeys)
for k in remkeys:
v = self[k]
if type(v) is str : v = '"' + v + '"'
print(k.ljust(lj) + ':: ' + str(v))
## Another way of printing in custom order
#csort = ['description','kind','estimator','file','file1','outfn','h0',
# 'omegam','omegal','mxh1','mxh2','mxh3','nsepp','dsepp',
# 'seppmin','logsepp','nsepv','dsepv',
# 'doboot','nbts','bseed','wfib','cra','cdec','cred','cwei',
# 'cra1','cdec1','cred1','cwei1']
#
#cs = make_custom_sort([csort])
#pards = cs(pard)
#
#for k,v in zip(pards.keys(),pards.values()):
# if type(v) is str : v = '"' + v + '"'
# print(k.ljust(15) + ':: ' + str(v))
# =============================================================================
# Add qprint() method to Munch class. There must be better ways to do this
setattr(Munch,'qprint',qprint)
# =============================================================================
# =============================================================================
def allequal(v):
"""
Fast way to check if all elements of a 1D-array have the same value. Useful
to detect when all weights are set to 1.0, and hence to call faster
versions of the counting routines
.. rubric:: Parameters
v : array_like
Array to be checked
.. rubric:: Returns
res : bool
True if all elements have the same value
"""
res = True if ( len((v-v[0]).nonzero()[0]) == 0 ) else False
return res
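# Example (illustrative, not part of the original module):
#   allequal(np.ones(5))              # -> True
#   allequal(np.array([1., 1., 2.]))  # -> False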
# =============================================================================
def addpvcols(x, cfo, basecolname, **kwargs):
"""
Auxiliary function used by :func:`gundam.cnttable` to append columns to a
table populated with the fields of a **counts** output dictionary that store
pair counts, all with nice column names. Works with 1d counts or 2d counts
arrays (e.g. those from a pcf run when ``nsepv>1``).
.. rubric:: Parameters
x : astropy table
Table to add data
cfo : Munch dictionary
Counts dictionary with the count arrays, e.g. cfo.dd, cfo.rr, etc.
basecolname : string
The name of the field to add, e.g. `dd`, and also the prefix for the
column name, which if needed will be appended with `_001`, `_002`, etc.
for each radial bin
kwargs :
Any [key]=value pair to pass to the astropy Column constructor. Intended
to pass a format specification for the column, such as ``format='.4f'``
.. rubric:: Returns
None, it modifies the input table ``x``
"""
nsepv = cfo.par.nsepv
for i in range(nsepv):
if nsepv > 1:
colname = basecolname + '_' + format(i,'03')
else:
colname = basecolname
colvals = cfo[basecolname][:,i]
x.add_column(Column(name=colname,data=colvals,**kwargs))
# =============================================================================
def cnttable(cfi, fmt1=None, fmt2=None, write=False, browser=True):
"""
Shows a nicely formatted tabular view of the count data stored in a
**counts** output dictionary. The table is printed in `stdout` and optionally
displayed in the default web browser.
.. rubric:: Parameters
cfi : string or Munch dictionary
Filepath for the counts (.cnt) file, or the **counts** dictionary itself
fmt1 : string
Output format of numeric fields (bins, correlations and errors). Default='.4f'
fmt2 : string
Output format of numeric fields (counts). Default='.2f'
write : bool
If ``write=True``, save table to disk. Filename will be asked for. Default=False
browser : bool
If ``browser=True``, display HTML table in browser. Default=True
.. rubric:: Returns
tab : astropy table
Single table with all relevant counts as columns. Use ``print(tab)`` or
``tab.show_in_browser()``
.. rubric:: Examples
.. code-block:: python
# show info for a projected correlation from file on disk
cnttable('/proj/galred.cnt')
# show info a from variable in the session's memory
cnttable(galred)
"""
if type(cfi) == str:
cf = readcounts(cfi) # data comes from file
else:
cf = cfi # data comes from Munch object in memory
kind = cf.par.kind
x = Table() # create table
fo1 = '.4f' if fmt1 == None else fmt1 # format for bins, cfs and errors
fo2 = '.2f' if fmt2 == None else fmt2 # format for counts
if kind in ['pcf','pccf']:
x.add_column(Column(name='rpl',data=cf.rpl,format=fo1))
x.add_column(Column(name='rpm',data=cf.rpm,format=fo1))
x.add_column(Column(name='rpr',data=cf.rpr,format=fo1))
x.add_column(Column(name='wrp',data=cf.wrp,format=fo1))
x.add_column(Column(name='wrperr',data=cf.wrperr,format=fo1))
if kind=='pcf':
addpvcols(x,cf,'dd',format=fo2)
addpvcols(x,cf,'rr',format=fo2)
if 'dr' in cf:
addpvcols(x,cf,'dr',format=fo2)
if kind=='pccf':
addpvcols(x,cf,'cd',format=fo2)
addpvcols(x,cf,'cr',format=fo2)
if kind in ['rcf','rccf']:
x.add_column(Column(name='sl',data=cf.sl,format=fo1))
x.add_column(Column(name='sm',data=cf.sm,format=fo1))
x.add_column(Column(name='sr',data=cf.sr,format=fo1))
x.add_column(Column(name='xis',data=cf.xis,format=fo1))
x.add_column(Column(name='xiserr',data=cf.xiserr,format=fo1))
if kind=='rcf':
x.add_column(Column(name='dd',data=cf.dd,format=fo2))
x.add_column(Column(name='rr',data=cf.rr,format=fo2))
if 'dr' in cf:
x.add_column(Column(name='dr',data=cf.dr,format=fo2))
if kind=='rccf':
x.add_column(Column(name='cd',data=cf.cd,format=fo2))
x.add_column(Column(name='cr',data=cf.cr,format=fo2))
if kind in ['acf','accf']:
x.add_column(Column(name='thl',data=cf.thl,format=fo1))
x.add_column(Column(name='thm',data=cf.thm,format=fo1))
x.add_column(Column(name='thr',data=cf.thr,format=fo1))
x.add_column(Column(name='wth',data=cf.wth,format=fo1))
x.add_column(Column(name='wtherr',data=cf.wtherr,format=fo1))
if 'dr' in cf:
x.add_column(Column(name='dr',data=cf.dr,format=fo2))
if kind=='accf':
x.add_column(Column(name='cd',data=cf.cd,format=fo2))
x.add_column(Column(name='cr',data=cf.cr,format=fo2))
if kind in ['thA','thC']:
x.add_column(Column(name='thl',data=cf.thl,format=fo1))
x.add_column(Column(name='thm',data=cf.thm,format=fo1))
x.add_column(Column(name='thr',data=cf.thr,format=fo1))
if kind=='thA':
x.add_column(Column(name='dd',data=cf.dd,format=fo2))
if kind=='thC':
x.add_column(Column(name='dr',data=cf.dr,format=fo2))
if kind in ['sA','sC']:
x.add_column(Column(name='sl',data=cf.sl,format=fo1))
x.add_column(Column(name='sm',data=cf.sm,format=fo1))
x.add_column(Column(name='sr',data=cf.sr,format=fo1))
if kind=='sA':
x.add_column(Column(name='dd',data=cf.dd,format=fo2))
if kind=='sC':
x.add_column(Column(name='dr',data=cf.dr,format=fo2))
if kind in ['rppiA','rppiC']:
x.add_column(Column(name='rpl',data=cf.rpl,format=fo1))
x.add_column(Column(name='rpm',data=cf.rpm,format=fo1))
x.add_column(Column(name='rpr',data=cf.rpr,format=fo1))
if kind=='rppiA':
addpvcols(x,cf,'dd',format=fo2)
if kind=='rppiC':
addpvcols(x,cf,'dr',format=fo2)
if write:
fndefault = cf.par.outfn + '.table'
fn = input('Enter file ['+fndefault+']: ') or fndefault
x.write(fn, format='ascii.fixed_width',delimiter='')
if browser : x.show_in_browser() # show html table in the default web browser
return x
# =============================================================================
def makebins(nsep, sepmin, dsep, logsep):
"""
Create arrays of bins in which Gundam will count pairs and estimate
correlation functions, given the number of bins desired, the minimum bin value and the
chosen bin width.
Note units are not needed, but should be interpreted according to the input
parameters
.. rubric:: Parameters
nsep : integer
Number of bins
sepmin : float
Minimum bin location
dsep : float
Bin width (dex if ``logsep=1``)
logsep : bool
If ``True``, do log-space binning. If ``False``, do linear-space binning
.. rubric:: Returns
sep : float array
Bin locations used by Gundam (left-side + extra bin at right limit)
sepout : float array
Left, middle and right-side points of each bin. Useful to plot more easily
.. rubric:: Examples
.. code-block:: python
# Create 18 log bins of size=0.2 dex in redshift and projected space
seps,sepsout = makebins(18,0.01,0.2,1)
sepp,seppout = makebins(18,0.01,0.2,1)
# Create 25 bins of size 2 Mpc in radial space, from 0 to 50 Mpc
sepv = makebins(25,0.,2.,0)[0]
# Create instead 1 bin of size 50 Mpc, e.g. to work out the pcf integrated from 0 to 50 Mpc
sepv = makebins(1,0.,50.,0)[0]
"""
# Return the limits of bins to do correlation function counts given
# desired nr. of bins, minimum value and bin width
sep = np.empty(nsep+1,dtype=np.float64)
for i in range(0,nsep+1) :
if logsep :
sep[i] = sepmin*10**(i*dsep)
else:
sep[i] = sepmin + i*dsep
# For convenience, also return, leftpoint, midpoint and rightpoint of each bin
sepout = (sep[0:-1], (sep[0:-1] + sep[1:])/2, sep[1:])
return (sep,sepout)
# =============================================================================
def savecounts(cnt, altname=None):
"""
Save to disk the **counts** output dictionary returned by the main
counting routines.
The default file name is ``cnt.par.outfn`` + `.cnt`, which can be overridden
as ``altname`` + `.cnt`, if supplied
.. rubric:: Parameters
cnt : Munch dictionary
The **counts** object
altname : string. Default=None
If supplied, use an alternative file name instead of ``cnt.par.outfn``
"""
import pickle
savename = altname if altname is not None else cnt.par.outfn
with open(savename + '.cnt', 'wb') as f:
pickle.dump(cnt, f, pickle.HIGHEST_PROTOCOL)
msg = '> COUNTS object saved in : ' + savename + '.cnt'
try:
log = getLogger('cf')
log.info(msg)
except:
print(msg)
# =============================================================================
def readcounts(cfile, silent=False):
"""
Read from disk the **counts** dictionary generated by the main counting
routines.
.. rubric:: Parameters
cfile : string
Filepath for the counts (.cnt) file
silent : bool
If False, print a status message indicating the file was read. Default=False
.. rubric:: Returns
counts : Munch dictionary
The counts object
"""
import pickle
with open(cfile, 'rb') as f:
cnt = pickle.load(f)
if silent is False: print('Counts object read from:',cfile)
return cnt
# =============================================================================
def plotcf(x, y , yerr, fac=1., write=False, figfile=None, par=None, angunit='deg',
xlabel=None, ylabel=None, label=None, shift=0.0,
ploterrbar=True, fill=False, filtneg=False, **kwargs):
"""
Plot a correlation function from arrays of `x`, `y` and `yerr` values.
Both axes are set to log-space and axes labels are selected automatically
according to the type of correlation (i.e. given by ``par.kind``)
.. rubric:: Parameters
x,y,yerr : float arrays
x, y coordinates and corresponding errors of the correlation function.
If ``yerr=0`` or all elements of yerr are <=0 or ``ploterrbar=False``,
no errorbar is plotted
fac : float. Default=1.0
Multiplication factor for ``y`` and ``yerr``
write : bool. Default=False
Save the figure to disk (default format is pdf). See :ref:`Notes <notes-plotcf>`
to save in other graphic formats
figfile : string. Default=None
Specify an alternative file name for the figure. If specified, overrides the
default which is to take it from ``par.outfn``. Do not add extension.
par : dictionary of type Munch. Default=None
Used to pass ``outfn`` name to name saved figures when ``write=True``
angunit : string. Default='deg'
* 'arcsec' : set output axis in arcsec (``x`` values are unscaled)
* 'arcmin' : set output axis in arcmin (``x`` values are scaled as ``x``/60)
* 'deg' : set output axis in degrees (``x`` values are scaled as ``x``/3600)
xlabel, ylabel : string. Default=None
X-axis and Y-axis labels. If supplied, they override the default labels
deduced from ``par.kind``
label : string. Default=None
Label for the legend of the curve. If supplied, it will override the
default label which is the basename of ``par.outfn``. Note you have
to issue at least a `plt.legend()` to actually display the legend box
shift : float. Default=0.0
Fraction of bin size by which ``x`` values are shifted. Useful to slightly
separate overlapping curves
ploterrbar : bool. Default=True
If ``ploterrbar=True``, plot error bars according to ``yerr``
fill : bool. Default=False
If ``fill=True``, plot a filled semi-transparent error region instead of
the usual error bars
filtneg : bool. Default=False
If ``filtneg=True``, filter out points where (y-yerr)<0, i.e. those
with large errors in a log plot
kwargs : keyword list
Any extra [key]=value pairs are passed to the underlying
:func:`matplotlib.pyplot.plot()` routine, except for ``alpha`` which
is passed to :func:`pyplot.fill_between()`, ``capsize`` which is passed
to :func:`pyplot.errorbar()`, and ``figformat`` which is passed to
:func:`pyplot.savefig()`. Use this to customize colors, linestyles, markers, etc.
.. _notes-plotcf:
.. rubric:: Notes
* Successive calls cycle between 4 predefined styles (for color, marker,
linewidth, etc.) that can be overridden by passing the corresponding
[key]=value pairs in ``kwargs``
* The output graphic format can be changed by passing the ``figformat`` key in
``kwargs``, e.g. ``figformat='pdf'``. Any format supported by matplotlib
is valid.
.. rubric:: Examples
.. code-block:: python
import gundam as gun
c1 = gun.readcounts('redgalPCF.cnt')
c2 = gun.readcounts('redgalRCF.cnt')
plt.figure(1) # Plot w(rp)
gun.plotcf(c1.rpm,c1.wrp,c1.wrperr,par=c1.par)
plt.figure(2) # Plot xi(s)
gun.plotcf(c2.sm,c2.xis,c2.xiserr,par=c2.par,color='yellow')
"""
global cps
killsciform = matplotlib.ticker.ScalarFormatter(useOffset=False)
# In some cases, the CF, counts and errors should be multiplied by a factor of 2
y = fac*y
yerr = fac*yerr
    # Filter negative y-values, if requested. Useful to unclutter both ends
# of a cf plot, where errors are usually large
if filtneg:
pos = (y-yerr)>0.
x = x[pos]
y = y[pos]
yerr = yerr[pos]
# Shift x values by fraction of bin, if desired ---------
if shift>0.0:
bsiz = np.log10(x[1])-np.log10(x[0])
dx = bsiz*shift
x = 10**(np.log10(x) + dx)
# Get graphic format
figformat = 'pdf' if 'figformat' not in kwargs else kwargs.pop('figformat')
# Define styles and choose label -------------------------
cls = ['red','blue','green','black'] if 'color' not in kwargs else [kwargs.pop('color')]*4
mks = ['o','s','^','d'] if 'marker' not in kwargs else [kwargs.pop('marker')]*len(cls)
mkss = [4,4,4,4] if 'markersize' not in kwargs else [kwargs.pop('markersize')]*len(cls)
lin = ["-","-","-","-"] if 'linestyle' not in kwargs else [kwargs.pop('linestyle')]*len(cls)
lwd = [2,2,2,2] if 'linewidth' not in kwargs else [kwargs.pop('linewidth')]*len(cls)
# Get alpha for fills
alpha = 0.2 if 'alpha' not in kwargs else kwargs.pop('alpha')
# Get capsize for errorbars
capsize = 3 if 'capsize' not in kwargs else kwargs.pop('capsize')
# Chose label for curve
if par != None:
lab = os.path.basename(par.outfn)
else:
lab = 'data'
if label != None: lab = label
# Choose x-axis and y-axis labels
if par != None:
if par.kind in ['pcf','pccf','rppiA','rppiC']:
xtit = r'$r_p \ [h^{-1} Mpc]$' if xlabel == None else xlabel
ytit = r'$w(r_p)$' if ylabel == None else ylabel
if par.kind in ['rcf','rccf','sA','sC']:
xtit = r'$s \ [h^{-1} Mpc]$' if xlabel == None else xlabel
ytit = r'$\xi(s) [h^{-1} Mpc]$' if ylabel == None else ylabel
if par.kind in ['acf','accf','thA','thC']:
if angunit == 'arcsec':
xtit = r'$\theta \ [\prime\prime]$' if xlabel == None else xlabel
if angunit == 'arcmin':
xtit = r'$\theta \ [\prime]$' if xlabel == None else xlabel
x = x/60.
if angunit == 'deg':
xtit = r'$\theta \ [^{\circ}]$' if xlabel == None else xlabel
x = x/3600.
ytit = r'$w(\theta)$' if ylabel == None else ylabel
if par.kind in ['rppiA','rppiC','thA','thC','sA','sC']:
ytit = r'$counts$' if ylabel == None else ylabel
else:
xtit = 'x' if xlabel == None else xlabel
ytit = 'y' if ylabel == None else ylabel
# Plot curve --------------------------------------------
plt.plot(x, y, color=cls[cps], marker=mks[cps], markersize=mkss[cps],
linestyle=lin[cps], linewidth=lwd[cps], label=lab, **kwargs)
# Plot error bars ---------------------------------------
    if ( (np.shape(yerr) != ()) or ((np.asarray(yerr) > 0.).any()) ) and (ploterrbar):
if not(fill):
plt.errorbar(x, y, yerr=yerr, fmt='none', ecolor=cls[cps],
elinewidth=lwd[cps], capsize=capsize, capthick=lwd[cps], label=None)
else:
plt.fill_between(x, y-yerr, y+yerr, where=(y-yerr)>0.,alpha=alpha,color=cls[cps])
# The where>0 condition avoids points going negative in log scale
# Others -------------------------------------------
plt.xlabel(xtit) ; plt.ylabel(ytit)
ax = plt.gca()
ax.set_xscale('log') ; ax.set_yscale('log')
ax.xaxis.set_major_formatter(killsciform) #change labels from sci to plain
ax.yaxis.set_major_formatter(killsciform)
if (write):
if (par != None):
pat = figfile if figfile is not None else par.outfn
kind = '' if figfile is not None else par.kind
else:
pat = figfile
kind = ''
plt.savefig(pat+'.'+kind+'.'+figformat, format=figformat)
if cps < len(cls)-1 :
cps = cps + 1 # increment current plot style
else:
cps = 0
# Update legend if exists. Useful for adding curves to figures made
# with comparecf()
if ax.get_legend():
ax.legend(frameon=False, fontsize='small')
# =============================================================================
def cntplot(cnt, **kwargs):
"""
Plot a correlation function from a **counts** output dictionary (either read
from disk or passed directly). Both axes are set to log-space and axes labels
are selected automatically according to the type of correlation (i.e. given
by ``par.kind``)
This is a wrapper for :func:`gundam.plotcf`, so all of its parameters can
be specified too.
.. rubric:: Parameters
cnt : string or Munch dictionary
Filepath for the counts (.cnt) file, or the **counts** dictionary itself
kwargs : keyword list
Any extra [key]=value pairs are passed to the underlying
:func:`gundam.plotcf` routine
.. rubric:: Examples
.. code-block:: python
import gundam as gun
# Read a pcf run and plot the correlation function
cnt1 = gun.readcounts('/p01/redgalsP.cnt')
cntplot(cnt1)
# Plot the correlation function from a .cnt file
cntplot('/p01/redgalsA.cnt', label='angcf of redgals', fill=True)
"""
# If cnt is a filepath, load data from file
if type(cnt) == str: cnt = readcounts(cnt)
# Find out the kind of correlation
kind = cnt.par.kind
# Choose data keys and titles to plot
if kind in ['acf','accf']:
x = cnt.thm
y = cnt.wth
yerr = cnt.wtherr
elif kind in ['pcf','pccf']:
x = cnt.rpm
y = cnt.wrp
yerr = cnt.wrperr
elif kind in ['rcf','rccf']:
x = cnt.sm
y = cnt.xis
yerr = cnt.xiserr
elif kind == 'rppiA':
x = cnt.rpm
y = cnt.intpi
yerr = 0
elif kind == 'thA':
x = cnt.thm
y = cnt.dd
yerr = 0
elif kind == 'sA':
x = cnt.sm
y = cnt.dd
yerr = 0
elif kind == 'rppiC':
x = cnt.rpm
y = cnt.intpi
yerr = 0
elif kind == 'thC':
x = cnt.thm
y = cnt.dr
yerr = 0
elif kind == 'sC':
x = cnt.sm
y = cnt.dr
yerr = 0
# Do the plot
plotcf(x, y, yerr, par=cnt.par, **kwargs)
# =============================================================================
def comparecf(clist1, clist2=None, shift=0., fac=1., ploterrbar=True, fill=False,
filtneg=False, label=None, plotratio=False, ratioxrange=None,
color1=None, marker1=None, markers1=None, linestyle1=None, linewidth1=None,
color2=None, marker2=None, markers2=None, linestyle2=None, linewidth2=None,
f=None, ax1=None, ax2=None):
"""
Plot multiple correlation functions in a single figure for easy comparison.
Optionally show an additional lower panel displaying the ratio of each
function respect to a single "control" correlation (many-to-one) or to
multiple correlations (one-to-one).
.. rubric:: Parameters
clist1 : list of Munch dictionaries / list of strings
Count dictionaries or filepaths of .cnt files of correlations
clist2 : list of Munch dictionaries / list of strings. Default=None
List of control correlations. When ``plotratio=True``, the y-values
of each correlation curve in ``clist1`` are divided by those in ``clist2``
(one-to-one) and plotted in a lower panel. If ``clist2`` has a single
        element, the ratios are all ``clist1`` curves divided by that single ``clist2`` element (many-to-one).
See :ref:`Notes <notes-comparecf>` for more details
shift : float. Default=0.0
Fraction of bin size by which ``x`` values are shifted. Useful to slightly
separate overlapping curves
fac : float. Default=1.0
Multiplication factor for ``y`` and ``yerr``
ploterrbar : bool. Default=True
If ``ploterrbar=True``, plot error bars according to ``yerr``
fill : bool. Default=False
If ``fill=True``, plot a filled semi-transparent error region instead of
the usual error bars
filtneg : bool. Default=False
If ``filtneg=True``, filter out points where (y-yerr)<0, i.e. those
with large errors in a log plot
label: list
        Optional list of strings to label each correlation function. If omitted,
the values are taken from the ``outfn`` key stored in the count objects
plotratio : bool. Default=False
If ``plotratio=True``, plot also a lower panel with ratios of
``clist1`` respect to ``clist2``
ratioxrange : list of form [xmin,xmax]
Only plot the ratio between xmin and xmax
color1,marker1,markers1,linestyle1,linewidth1 : lists
List of colors, marker symbols, marker sizes, line styles and line widths
for curves in ``clist1``
color2,marker2,markers2,linestyle2,linewidth2 : lists
List of colors, marker symbols, marker sizes, line styles and line widths
for control curves in ``clist2``
f : figure instance
Handle of existing Figure instance
ax1,ax2 : axes instances
Handles of correlation plot and ratio plot axes
.. _notes-comparecf:
.. rubric:: Notes
The correlation curves in ``clist2`` are **not** plotted in the correlation
function panel while curves present in **both clists** are **not shown**
in the ratio panel (i.e. to avoid ratios of curves respect to themselves).
.. rubric:: Returns
(f,ax1,ax2) or (f,ax1) : tuple of handles
Handles of figure, correlation axis (ax1) and ratio axis (ax2), if present
.. rubric:: Examples
.. code-block:: python
# Compare two w(rp) correlations
comparecf(['galred.cnt', 'galblue.cnt'])
        # Compare one acf on disk with another passed as a counts output dictionary
comparecf(['/proj/galred.cnt', qso])
# Compare multiple samples and plot sample/control ratios
f,ax1,ax2 = comparecf(['galred.cnt', 'galblue.cnt', 'galgreen.cnt'], clist2=['allgals.cnt'], fill=True, plotratio=True)
# Add another curve to previous plot
comparecf(['qso.cnt'], clist2=['control_qso.cnt'], color2=['k'], f=f, ax1=ax1, ax2=ax2, plotratio=True)
"""
from matplotlib.pyplot import legend
n1 = len(clist1)
n2 = len(clist2) if clist2 else 0
# Define styles for cfs and ratios -----------------
cls1 = color1 if color1 else ['red','blue','green','black']
mks1 = marker1 if marker1 else ['o','s','^','d']
mkss1 = markers1 if markers1 else [4,4,4,4]
lin1 = linestyle1 if linestyle1 else ["-","-","-","-"]
lwd1 = linewidth1 if linewidth1 else [2,2,2,2]
cls2 = color2 if color2 else cls1
mks2 = marker2 if marker2 else mks1
mkss2 = markers2 if markers2 else mkss1
lin2 = linestyle2 if linestyle2 else lin1
lwd2 = linewidth2 if linewidth2 else lwd1
# Set up 1 or 2 axis ----------------------------
if plotratio :
if ax1 is None :
f,(ax1,ax2) = plt.subplots(2, 1, sharex='col', gridspec_kw={'height_ratios':[1.5,1],'hspace':0.05})
kk = f.sca(ax1) # set current axis to 1st axis
# Find out the kind of correlation ---------------
kind = readcounts(clist1[0]).par.kind if type(clist1[0]) == str else clist1[0].par.kind
for i in range(n1):
# Data comes from file or Munch object in variable
data = readcounts(clist1[i]) if type(clist1[i]) == str else clist1[i]
if kind == 'pcf':
x = data.rpm
y = data.wrp
yerr = data.wrperr
bsiz = data.par.dsepp
if kind == 'xis':
x = data.mids
y = data.xis
yerr = data.xiserr
bsiz = data.par.dseps
if kind == 'wth':
x = data.midth
y = data.wth
yerr = data.wtherr
bsiz = data.par.dsep
if shift>0:
dx = bsiz*shift*i
x = 10**(np.log10(x) + dx)
if label is not None:
lab = label[i]
else:
lab = label
pars = data.par
        # Plot the ith correlation function -----------------
plotcf(x,y,yerr,fac=fac,write=False,par=pars,
ploterrbar=ploterrbar,fill=fill,label=lab,filtneg=filtneg,
color=cls1[i],marker=mks1[i],markersize=mkss1[i],linestyle=lin1[i],linewidth=lwd1[i])
legend(frameon=False,fontsize='small')
# Plot the ratio of corr.func. Single control case -----
if plotratio and (n2==1) :
pos2in1 = [j for j, k in enumerate(clist1) if k == clist2[0]]
if pos2in1!=[i]:
kk = f.sca(ax2) # set current axis to 2nd axis
# Data comes from file or Munch object in variable
data2 = readcounts(clist2[0]) if type(clist2[0]) == str else clist2[0]
if kind == 'pcf': yc = data2.wrp
if kind == 'xis': yc = data2.xis
if kind == 'wth': yc = data2.wth
# Do ratio --------------------------------------
yy = y / yc
# Limit ratio to chosen x-axis range -----------
if ratioxrange:
good = (x>ratioxrange[0]) & (x<ratioxrange[1])
x = x[good]
yy = yy[good]
# Plot ratio -----------------------------------
plt.plot(x,yy,color=cls2[i],marker=mks2[i],markersize=mkss2[i],linestyle=lin2[i],linewidth=lwd2[i])
plt.hlines(1.,plt.gca().get_xlim()[0],plt.gca().get_xlim()[1],linestyles='dashed',linewidth=1.)
plt.xlabel(ax1.get_xlabel()) # get x-axis label as defined by plotcf()
kk = f.sca(ax1) # go back to 1st axis
# Plot the ratio of corr.func. Multiple control case -----
if plotratio and (n2>1) :
        raise NameError('Multiple control case not yet implemented')
if plotratio : ax1.set_xlabel('') # remove x-label from 1st axis
ret = (f,ax1,ax2) if plotratio else (plt.gcf(), plt.gca())
return ret
# =============================================================================
def fitpowerlaw(x, y, yerr, iguess=[1.,-1.], fitrange=None, plot=False,
markfitrange=False, **kwargs):
"""
Fit a power-law of the form :math:`ax^{\gamma}` to a correlation function over
a given x-coordinate range. Optionally plot the fitted curve
.. rubric:: Parameters
x : float array
x-coordinates such as cnt.rpm, cnt.thm, etc.
y : float array
        y-coordinates such as cnt.wrp, cnt.wth, etc.
yerr : float array
Errors in y-coordinates such as cnt.wrperr, cnt.wtherr, etc.
iguess : list of floats. Default=[1., -1.]
Initial guesses for :math:`a` and :math:`\gamma`
fitrange : float array of form [xmin,xmax]. Default=None
Restrict fit to points inside the given interval
plot : bool. Default=False
Plot the fitted power-law curve
markfitrange : bool. Default=False
Overlay marks for points actually used in the fitting
kwargs : keyword list
        Any extra [key]=value pairs are passed to :func:`matplotlib.pyplot.plot()`
Use this to customize colors, linestyles, markers, etc.
.. rubric:: Examples
.. code-block:: python
import gundam as gun
c1 = gun.readcounts('galaxies.cnt')
cntplot(c1)
gun.fitpowerlaw(c1.rpm, c1.wrp, c1.wrperr, plot=True)
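        # A hedged extra sketch: restrict the fit to 0.1-10 (in the units of x)
        # and keep the returned parameters (gamma, gammaerr, a, aerr)
        gamma, gammaerr, a, aerr = gun.fitpowerlaw(c1.rpm, c1.wrp, c1.wrperr,
                                                   fitrange=[0.1, 10.], plot=True)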
"""
import scipy.optimize
# Filter out points outside fitrange
if fitrange:
idx = np.where((x>=fitrange[0]) & (x<=fitrange[1]))
x = x[idx] ; y = y[idx] ; yerr = yerr[idx]
# Take logs
xx = np.log10(x) ; yy = np.log10(y) ; yyerr = yerr/y
# Filter out points with nan/infs/etc.
idx = np.isfinite(yy) & np.isfinite(yyerr)
xx = xx[idx] ; yy = yy[idx] ; yyerr = yyerr[idx]
# Define the (line) fitting function
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
# Carry out fitting
#pinit = [1.0, -1.0] # initial guess
out = scipy.optimize.leastsq(errfunc,iguess,args=(xx,yy,yyerr), full_output=1)
pfinal = out[0] # final parameters
covar = out[1] # covariance matrix
# Extract fit parameters and errors
gamma = pfinal[1]
a = 10.0**pfinal[0]
    # covar diagonal: [0][0] is the variance of log10(a), [1][1] that of gamma
    gammaerr = np.sqrt( covar[1][1] )
    aerr = np.sqrt( covar[0][0] ) * a
print('Fitting Power Law --> y = a*x^gamma')
print('===================================')
print(' gamma =', gamma,'\u00B1',gammaerr)
print(' a =', a,'\u00B1',aerr)
print('===================================')
if plot:
cls = 'black' if 'color' not in kwargs else kwargs.pop('color')
lin = '--' if 'linestyle' not in kwargs else kwargs.pop('linestyle')
        lwd = 1 if 'linewidth' not in kwargs else kwargs.pop('linewidth')
xdom = np.linspace(plt.xlim()[0],plt.xlim()[1],10)
ydom = a*xdom**gamma
plt.plot(xdom,ydom,color=cls,linestyle=lin,linewidth=lwd,zorder=1,**kwargs)
if markfitrange:
plt.scatter(10**xx,10**yy,s=80,facecolors='none',edgecolors='blue')
return (gamma,gammaerr,a,aerr)
# =============================================================================
def cntplot2D(cnt, estimator=None, slevel=5, write=False, figfile=None,
xlabel=None, ylabel=None, cmap='jet', **kwargs):
"""
Plot the 2D correlation function in the projected-radial space
(:math:`r_p` vs :math:`\pi` space) with optional gaussian smoothing and
contour levels
.. rubric:: Parameters
cnt : string or Munch dictionary
Filepath for the counts (.cnt) file or the **counts** output dictionary
estimator : string. Default=None
Estimator for the correlation function. Any of ('NAT','LS','HAM','DP').
If ``estimator=None``, then it is taken from ``cnt.par.estimator``
slevel : float. Default=5
        Smoothing level (namely the size of the Gaussian smoothing kernel)
write : bool. Default=False
Save the figure to disk (default format is pdf). See :ref:`Notes <notes-cntplot2D>`
to save in other graphic formats
figfile : string. Default=None
Specify an alternative file name for the figure. If ``None``, then choose
``cnt.par.outfn`` as default. Do not add extension.
xlabel, ylabel : string. Default=None
X-axis and Y-axis labels. If supplied, they override the default labels
(:math:`r_p \ [h^{-1} Mpc]` and :math:`\pi \ [h^{-1} Mpc]`)
cmap : string. Default='jet'
Colormap for the plot
kwargs : keyword list
        Any extra [key]=value pairs are passed to :func:`matplotlib.pyplot.pcolor()`
Use this to customize shading, edges, alpha, etc.
.. _notes-cntplot2D:
.. rubric:: Notes
* The graphic format can be changed by passing the ``figformat`` key in
``kwargs``, e.g. ``figformat='pdf'``. Any format supported by matplotlib
is valid.
.. rubric:: Examples
.. code-block:: python
# Check some nice Fingers of God and the Kaiser squashing
cntplot2D('lum_red_gals.cnt', cmap='viridis')
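        # A hedged variant: force the LS estimator, smooth a little more and
        # save the figure as PNG
        cntplot2D('lum_red_gals.cnt', estimator='LS', slevel=8, write=True,
                  figformat='png')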
"""
def gaussKern(size):
"""
Calculate a normalised Gaussian kernel to apply as a smoothing function
Parameters
size (int) : kernel size (how many points will be used in the smoothing operation)
Returns
g (array(size,size)) : normalised 2D kernel array for use in convolutions
"""
size = int(size)
x, y = np.mgrid[-size:size+1,-size:size+1]
g = np.exp(-(x**2/float(size)+y**2/float(size)))
return g / g.sum()
def smooth(im, n=15):
"""
Smooth a 2D array im by convolving with a Gaussian kernel of size n
Parameters
im, n : 2D array, kernel size
Returns
improc : smoothed array (same dimensions as the input array)
"""
from scipy import signal
g = gaussKern(n)
improc = signal.convolve2d(im, g, mode='same', boundary='symm')
return (improc)
# If cnt is a filepath, load data from file
if type(cnt) == str: cnt = readcounts(cnt)
# Get graphic format
figformat = 'pdf' if 'figformat' not in kwargs else kwargs.pop('figformat')
# Build 2D matrix of correlation function
est = estimator if estimator is not None else cnt.par.estimator
nd, nr = cnt.npt*1.0, cnt.npt1*1.0 # for easy typing
if est=='NAT':
nf = (nr/nd) * ((nr-1)/(nd-1)) # normalizing factor
xi = nf*(1.*cnt.dd/cnt.rr) - 1
elif est=='LS':
nf1 = (nr/nd)*((nr-1)/(nd-1)) # normalizing factor 1
nf2 = (nr-1)/(2.*nd) # normalizing factor 2
xi = (nf1*cnt.dd - nf2*2.*cnt.dr + cnt.rr) / cnt.rr
elif est=='HAM':
nf = 4*(nd/(nd-1))*(nr/(nr-1)) # normalizing factor
xi = nf*cnt.dd*cnt.rr/cnt.dr**2 - 1
elif est=='DP':
nf = (2.*nr)/(nd-1) # normalizing factor
xi = nf*cnt.dd/cnt.dr - 1
# Take log, replicate over four quadrants and smooth
logxi = np.log10(xi)
qTR = logxi.copy()
qTR[~np.isfinite(qTR)] = 0.
qTR = qTR.T # top right quadrant
qTL = np.fliplr(qTR) # top left quadrant
qBL = np.flipud(qTL) # bottom left quadrant
qBR = np.fliplr(qBL) # bottom right quadrant
qT = np.hstack((qTL,qTR)) # top half
qB = np.hstack((qBL,qBR)) # bottom half
qq = np.vstack((qB,qT)) # full array
qqs = smooth(qq,n=slevel) # smoothed full array
# Extend bins
sepp = makebins(cnt.par.nsepp,cnt.par.seppmin,cnt.par.dsepp,cnt.par.logsepp)[0]
sepv = makebins(cnt.par.nsepv,0.,cnt.par.dsepv,False)[0]
exsepv = np.concatenate([-1*sepv[cnt.par.nsepv:0:-1],sepv])
exsepp = np.concatenate([-1*sepp[cnt.par.nsepp:0:-1],sepp])
# Get bin coordinates
x,y = np.meshgrid(exsepp, exsepv)
# Plot array
plt.pcolor(x,y,qqs,cmap=cmap,**kwargs)
# Plot contours
lev = np.linspace(np.amin(qqs),np.amax(qqs),15)
plt.contour(x[0:-1,0:-1], y[0:-1,0:-1], qqs, levels=lev, colors='k',
linestyles='solid', linewidths=1)
# Plot titles
xtit = r'$r_p \ [h^{-1} Mpc]$' if xlabel is None else xlabel
ytit = r'$\pi \ [h^{-1} Mpc]$' if ylabel is None else ylabel
plt.xlabel(xtit) ; plt.ylabel(ytit)
if write :
pat = figfile if figfile is not None else cnt.par.outfn
plt.savefig(pat+'.2DCF.'+figformat, format=figformat)
# =============================================================================
def writeasc_cf(lb, mb, rb, f, ferr, par, fmt='%17.5f', altname=None):
"""
    Write an ASCII file for w(rp) / xi(s) / w(th) output counts produced by the code
.. rubric:: Parameters
lb,mb,rb : float arrays
        Left, mid and right-side of bins
f, ferr : float arrays
Correlation function and its error
par : Munch dictionary
Used to pass ``par.outfn`` to name the output file
fmt : string. Default='%17.5f'
Numeric formatting string
altname : string. Default=None
If supplied, use an alternative file name instead of ``par.outfn``
.. rubric:: Examples
.. code-block:: python
import gundam as gun
c1 = gun.readcounts('redgals.cnt')
writeasc_cf(c1.rpl, c1.rpm, c1.rpr, c1.wrp, c1.wrperr, c1.par)
"""
savename = altname if altname is not None else par.outfn
log = getLogger('cf')
if par.kind in ['pcf','pccf']:
extension = '.wrp'
kopf = ' lb mb rb wrp errwrp'
savename = savename + extension if altname is None else altname
msg = '> w(rp) PCF saved in : ' + savename
elif par.kind in ['rcf','rccf']:
extension = '.xis'
kopf = ' lb mb rb xis xiserr'
savename = savename + extension if altname is None else altname
msg = '> xi(s) RSCF saved in : ' + savename
elif par.kind in ['acf','accf']:
extension = '.wth'
kopf = ' lb mb rb wth errwth'
savename = savename + extension if altname is None else altname
msg = '> w(th) ACF saved in : ' + savename
m = np.array([lb,mb,rb,f,ferr])
m = m.transpose()
np.savetxt(savename, m, fmt=fmt, header=kopf)
log.info(msg)
# =============================================================================
def writeasc_rppicounts(lb, mb, rb, rppi, par, fmt='%17.5f', cntid=None,
altname=None):
"""
Write an ASCII file for a rp-pi count array produced by the code. This is
a 2D array of counts in the projected (rp) and radial (pi) directions.
The columns in the output will be [`lb mb rb tot_counts rppi`] where the
first 3 are the left, mid and right-side of bins, `tot_counts` are
the counts integrated for all radial bins, and `rppi` has one column for
each radial bin
.. rubric:: Parameters
    lb,mb,rb : float arrays
        Left, mid and right-side of bins
rppi : float array
2-dimensional count array. Usually this is one of the fields cnt.dd,
cnt.rr, etc. of a projected correlation run
par : Munch dictionary
Used to pass various data, including ``par.outfn`` to name the output file
fmt : string. Default='%17.5f'
Numeric formatting string
cntid : string. Default=None
ID string for column headers. Usually can be 'dd', 'rr', 'dr', etc.
        Also appended as the extension of the output file (when ``altname=None``)
altname : string. Default=None
If supplied, use an alternative file name instead of ``par.outfn`` + `.cntid`
.. rubric:: Examples
.. code-block:: python
import gundam as gun
c1 = gun.readcounts('redgals.cnt')
c1.par.outfn
'/home/myuser/sdss/redgals'
# Write the DD counts in rp-pi dimensions
gun.writeasc_rppicounts(c1.rpl, c1.rpm, c1.rpr, c1.dd, c1.par, cntid='dd')
# Inspect the output file
with open('redgals.dd', 'r') as f:
print(f.read(), end="")
# lb mb rb dd dd_001 dd_002 dd_003 ...
# 0.10000 0.12417 0.14835 11509.00 2082.00 1500.00 1168.00 ...
# 0.14835 0.18421 0.22007 20273.00 3122.00 2378.00 1899.00 ...
# 0.22007 0.27327 0.32647 36169.00 4940.00 3845.00 3283.00 ...
# 0.32647 0.40539 0.48431 64866.00 8453.00 6302.00 5236.00 ...
# ...
"""
log = getLogger('cf')
nsepv, nsepp = par.nsepv, par.nsepp
filename = altname if altname is not None else par.outfn + '.' + cntid
kopf = ' lb mb rb ' + cntid
if nsepv > 1:
for i in range(1,nsepv+1):
kopf = kopf + ' ' + cntid + '_' + format(i,'03')
totc = np.sum(rppi,axis=1)
totc.shape = (nsepp,1)
m = np.transpose(np.array([lb,mb,rb]))
if nsepv > 1:
m = np.hstack((m,totc,rppi))
else:
m = np.hstack((m,rppi))
np.savetxt(filename,m,fmt=fmt,header=kopf)
msg = '> ASCII counts saved in : ' + filename
log.info(msg)
# =============================================================================
def writeasc_counts(lb, mb, rb, c, par, fmt='%17.5f', cntid=None, altname=None):
"""
Write an ASCII file for a (1-dimensional) counts array produced by the code.
The columns in the output will be [`lb mb rb c`] where the
first 3 are the left, mid and right-side of bins and `c` are the counts
.. rubric:: Parameters
    lb,mb,rb : float arrays
        Left, mid and right-side of bins
c : float array
Counts array. Usually this is one of the fields cnt.dd, cnt.dr, cnt.rr,
etc. of a correlation run
par : Munch dictionary
Used to pass ``par.outfn`` to name the output file
fmt : string. Default='%17.5f'
Numeric formatting string
cntid : string. Default=None
ID string for column header. Usually can be 'dd', 'rr', 'dr', etc.
        Also appended as the extension of the output file (when ``altname=None``)
altname : string. Default=None
If supplied, use an alternative file name instead of ``par.outfn`` + `.cntid`
.. rubric:: Examples
.. code-block:: python
import gundam as gun
c1 = gun.readcounts('bluegals.cnt')
# Write the DD counts in angular dimensions. Use an alternative file name
gun.writeasc_counts(c1.thl, c1.thm, c1.thr, c1.dd, c1.par, cntid='dd', altname='akounts')
# Inspect the output file
with open('akounts', 'r') as f:
print(f.read(), end="")
# lb mb rb dd
# 0.01000 0.01206 0.01413 3178.00
# 0.01413 0.01704 0.01995 6198.00
# 0.01995 0.02407 0.02818 12765.00
# 0.02818 0.03400 0.03981 24888.00
# 0.03981 0.04802 0.05623 49863.00
# 0.05623 0.06783 0.07943 98883.00
...
"""
log = getLogger('cf')
filename = altname if altname is not None else par.outfn + '.' + cntid
kopf = ' lb mb rb ' + cntid
m = np.array([lb,mb,rb,c])
m = m.transpose()
np.savetxt(filename,m,fmt=fmt,header=kopf)
msg = '> ' + cntid + ' counts saved in : ' + filename
log.info(msg)
# =============================================================================
def savepars(par, altname=None):
"""
Save the parameters dictionary **par**, such as the one
generated by :func:`gundam.packpars`, in a JSON file. By default it is
named as ``par.outfn`` + `.par`
.. rubric:: Parameters
par : Munch dictionary
Input parameters dictionary for Gundam routines
altname : string. Default=None
If supplied, use an alternative file name instead of ``par.outfn`` + `.par`
.. rubric:: Examples
.. code-block:: python
import gundam as gun
# Get default values for an angular CF run and save to disk
par = gun.packpars(kind='acf', outfn='/proj/acfrun01')
gun.savepars(par)
"""
pj = par.toJSON(indent=0)
filename = altname if altname is not None else par.outfn + '.par'
with open(filename,'w') as f: f.write(pj)
msg = '> PARAMS saved in : ' + filename
log = getLogger('cf') ; log.info(msg)
# =============================================================================
def readpars(filename):
"""
Load from a JSON (.par) file the input parameters dictionary used by many
Gundam routines.
.. rubric:: Parameters
filename : string
Filepath of .par file
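    .. rubric:: Examples
    .. code-block:: python

        # A minimal sketch (paths are placeholders): write a params file with
        # savepars() and read it back
        import gundam as gun
        par = gun.packpars(kind='acf', outfn='/proj/acfrun01')
        gun.savepars(par)                          # creates /proj/acfrun01.par
        par = gun.readpars('/proj/acfrun01.par')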
"""
import munch
with open(filename,'r') as f:
s = f.read()
a = munch.json.loads(s) # convert string to dict
b = munch.munchify(a) # convert dict to bunch
print('Params read from :',filename)
return b
# =============================================================================
def tpcf(npt, nrpt, dd, bdd, rr, dr, estimator):
"""
Return the (auto)correlation function for a given estimator and arrays of
data and random counts
If DR counts are not needed (e.g. the 'NAT' estimator), just set ``dr=0``
    If bootstrap errors are not needed or available, just set ``bdd`` to a
zero-valued array with null 2nd dimension, e.g. ``bdd=np.zeros([len(dd),0])``
.. rubric :: Parameters
npt,nrpt : integer
Number of data and random particles
dd : float array
DD counts
bdd : float array
Bootstrap DD counts
rr : float array
        RR counts
    dr : float array
        DR counts
estimator : string
Statistical estimator of the correlation function. Default=`NAT`
* 'NAT' : Natural -> :math:`DD/RR - 1`
* 'HAM' : Hamilton -> :math:`DD*RR/DR^{2} - 1`
* 'LS' : Landy-Szalay -> :math:`(DD - 2DR + RR) / RR`
* 'DP' : Davis-Peebles -> :math:`DD/DR - 1`
.. rubric :: Returns
xi : float array
Correlation function
xierr : float array
        Bootstrap error estimate. Set to zero if ``bdd`` is nulled as explained
above
.. rubric :: Notes
See `this paper <http://arxiv.org/pdf/1211.6211v2.pdf>`_ for a nice review on
estimators and their normalization factors. Here, the normalization factors
are derived to : (1) keep estimator formulae clean, (2) avoid having
operations such as (npt*(npt-1)) * dd, where counts are multiplied/divided
by very big numbers when npt is large.
.. rubric :: Examples
.. code-block:: python
# Calculate the angular CF using the Landy-Szalay estimator
acf, acferr = gun.tpcf(npt,nrpt,dd,bdd,rr,dr,estimator='LS')
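        # A hedged variant: natural estimator with no bootstrap errors, so bdd
        # is nulled as described above and dr is simply set to 0
        bdd0 = np.zeros([len(dd), 0])
        acf, acferr = gun.tpcf(npt, nrpt, dd, bdd0, rr, 0, estimator='NAT')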
"""
fn = sys._getframe().f_code.co_name # get function self name
msg = 'Computing 3d/angular correlation function.....['+fn+'()]'
log = getLogger('cf') ; log.info(msg)
npt, nrpt = float(npt), float(nrpt)
nseps,nbts = bdd.shape
# Initialize ouput arrays -------------------------------------
    xi = np.zeros(nseps)      # the correlation function (initialize to -1 ?)
xierr = np.zeros(nseps) # the error
bxi = np.zeros(nbts)
# Loop over bins ----------------------------------------------
if estimator == 'HAM':
nf = 4.*(npt/(npt-1))*(nrpt/(nrpt-1)) # normalizing factor
for i in range(nseps):
if dr[i] > 0. :
xi[i] = nf*dd[i]*rr[i]/dr[i]**2 - 1.
for j in range(nbts):
bxi[j] = nf*bdd[i,j]*rr[i]/dr[i]**2 - 1.
xierr[i] = np.std(bxi) if nbts>0 else 0.
if estimator == 'NAT':
nf = (nrpt/npt)*((nrpt-1.)/(npt-1.)) # normalizing factor
for i in range(nseps):
if rr[i] > 0. :
xi[i] = nf*dd[i]/rr[i] - 1.
for j in range(nbts):
bxi[j] = nf*bdd[i,j]/rr[i] - 1.
xierr[i] = np.std(bxi) if nbts>0 else 0.
if estimator == 'LS':
nf1 = (nrpt/npt)*((nrpt-1)/(npt-1)) # normalizing factor 1
nf2 = (nrpt-1.)/(2.*npt) # normalizing factor 2
for i in range(nseps):
if rr[i] > 0. :
xi[i] = (nf1*dd[i] - nf2*2.*dr[i] + rr[i]) / rr[i]
for j in range(nbts):
bxi[j] = (nf1*bdd[i,j] - nf2*2.*dr[i] + rr[i]) / rr[i]
xierr[i] = np.std(bxi) if nbts>0 else 0.
if estimator == 'DP':
nf = (2.*nrpt)/(npt-1) # normalizing factor
for i in range(nseps):
if dr[i] > 0. :
xi[i] = nf*dd[i]/dr[i] - 1.
for j in range(nbts):
bxi[j] = nf*bdd[i,j]/dr[i] - 1.
xierr[i] = np.std(bxi) if nbts>0 else 0.
return (xi,xierr)
# =============================================================================
def tpccf(npt, nrpt, cd, bcd, cr, estimator):
"""
Return the (cross)correlation function for a given estimator and count
arrays for data (D), random (R) and cross (C) samples.
For the moment the only estimator implemented is the Davis-Peebles :
:math:`\\xi=CD/CR-1`
    If bootstrap errors are not needed or available, just set ``bcd`` to a
    zero-valued array with null 2nd dimension, e.g. ``bcd=np.zeros([len(cd),0])``
.. rubric:: Parameters
npt,nrpt : integer
Number of particles in data (D) and random (R) samples
cd : float array
CD counts
bcd : float array
Bootstrap CD counts
cr : float array
CR counts
estimator : string
* 'DP' : Davis-Peebles -> :math:`CD/CR-1`
.. rubric:: Notes
    C and D are data samples while R is the random sample corresponding to D
.. rubric:: Returns
fxi : float array
Cross-correlation function
fxierr : float array
        Bootstrap error estimates
.. rubric:: Examples
.. code-block:: python
import gundam as gun
c = gun.readcounts('qso_gal.cnt')
(ccf,ccferr) = tpccf(c.npt, c.nrpt, c.cd, c.bcd, c.cr, estimator='DP')
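        # A hedged variant: no bootstrap CD counts available, so pass a nulled
        # bcd array (assumes numpy imported as np)
        bcd0 = np.zeros([len(c.cd), 0])
        (ccf, ccferr) = tpccf(c.npt, c.nrpt, c.cd, bcd0, c.cr, estimator='DP')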
"""
fn = sys._getframe().f_code.co_name # get function self name
msg = 'Computing 3d/angular cross-correlation function....['+fn+'()]'
log = getLogger('cf') ; log.info(msg)
npt, nrpt = float(npt), float(nrpt)
nseps, nbts = bcd.shape
# Initialize output arrays ------------------------------------
fxi = np.zeros(nseps)
fxierr = np.zeros(nseps)
bxi =
|
np.zeros(nbts)
|
numpy.zeros
|
#!/usr/bin/env python
import numpy as np
from src.environment import LANDMARKS
from src.agent import Agent
from src.filters import EKF
from src import utils, timer
INPUT_OMEGA = 0.4
INITIAL_POSE = (1.0, 0.0, np.pi / 2.0)
class CircularAgent(Agent):
def get_ideal(self, current, t):
"""
Parameters:
----------
current: np.array(x, y, theta)
current pose (is not used in this agent)
t: float
elapsed time
Returns:
----------
np.array(x, y, theta)
ideal pose of t
"""
ideal = np.array((0, 0, np.pi/2.0)) + np.array((
|
np.cos(INPUT_OMEGA * t)
|
numpy.cos
|
#! /usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement datasets classes for graph and family tree tasks."""
import numpy as np
import itertools
from torch.utils.data.dataset import Dataset
#from torchvision import datasets
import torch
import jacinle.random as random
import pickle
from .family import randomly_generate_family
from ...envs.graph import get_random_graph_generator
import math
from .nqueens import NQueenSolution
from IPython.core.debugger import Pdb
import copy
from jacinle.logging import get_logger, set_output_file
from torch.distributions.categorical import Categorical
TRAIN = 0
DEV = 1
TEST = 2
logger = get_logger(__file__)
__all__ = [
'GraphOutDegreeDataset', 'GraphConnectivityDataset', 'GraphAdjacentDataset',
'FamilyTreeDataset','NQueensDataset', 'FutoshikiDataset','TowerDataset','SudokuDataset'
]
class GraphDatasetBase(Dataset):
"""Base dataset class for graphs.
Args:
epoch_size: The number of batches for each epoch.
nmin: The minimal number of nodes in the graph.
pmin: The lower bound of the parameter p of the graph generator.
nmax: The maximal number of nodes in the graph,
the same as $nmin in default.
pmax: The upper bound of the parameter p of the graph generator,
the same as $pmin in default.
    directed: Generate a directed graph if directed=True.
gen_method: Controlling the graph generation method.
If gen_method='dnc', use the similar way as in DNC paper.
        Otherwise use the Erdos-Renyi algorithm (each edge exists with probability p).
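    Example (a hedged sketch; the parameter values are placeholders):
        # GraphOutDegreeDataset, defined below, builds on this base class
        dataset = GraphOutDegreeDataset(degree=2, epoch_size=100, nmin=10, pmin=0.3)
        sample = dataset[0]   # dict with keys 'n', 'relations' and 'target'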
"""
def __init__(self,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc'):
self._epoch_size = epoch_size
self._nmin = nmin
self._nmax = nmin if nmax is None else nmax
assert self._nmin <= self._nmax
self._pmin = pmin
self._pmax = pmin if pmax is None else pmax
assert self._pmin <= self._pmax
self._directed = directed
self._gen_method = gen_method
def _gen_graph(self, item):
n = self._nmin + item % (self._nmax - self._nmin + 1)
p = self._pmin + random.rand() * (self._pmax - self._pmin)
gen = get_random_graph_generator(self._gen_method)
return gen(n, p, directed=self._directed)
def __len__(self):
return self._epoch_size
class GraphOutDegreeDataset(GraphDatasetBase):
"""The dataset for out-degree task in graphs."""
def __init__(self,
degree,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc'):
super().__init__(epoch_size, nmin, pmin, nmax, pmax, directed, gen_method)
self._degree = degree
def __getitem__(self, item):
graph = self._gen_graph(item)
# The goal is to predict whether out-degree(x) == self._degree for all x.
return dict(
n=graph.nr_nodes,
relations=np.expand_dims(graph.get_edges(), axis=-1),
target=(graph.get_out_degree() == self._degree).astype('float'),
)
class GraphConnectivityDataset(GraphDatasetBase):
"""The dataset for connectivity task in graphs."""
def __init__(self,
dist_limit,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc'):
super().__init__(epoch_size, nmin, pmin, nmax, pmax, directed, gen_method)
self._dist_limit = dist_limit
def __getitem__(self, item):
graph = self._gen_graph(item)
    # The goal is to predict whether (x, y) are connected within a limited number of steps,
# I.e. dist(x, y) <= self._dist_limit for all x, y.
return dict(
n=graph.nr_nodes,
relations=np.expand_dims(graph.get_edges(), axis=-1),
target=graph.get_connectivity(self._dist_limit, exclude_self=True),
)
class GraphAdjacentDataset(GraphDatasetBase):
"""The dataset for adjacent task in graphs."""
def __init__(self,
nr_colors,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc',
is_train=True,
is_mnist_colors=False,
mnist_dir='../data'):
super().__init__(epoch_size, nmin, pmin, nmax, pmax, directed, gen_method)
self._nr_colors = nr_colors
self._is_mnist_colors = is_mnist_colors
# When taking MNIST digits as inputs, fetch MNIST dataset.
if self._is_mnist_colors:
assert nr_colors == 10
self.mnist = datasets.MNIST(
mnist_dir, train=is_train, download=True, transform=None)
def __getitem__(self, item):
graph = self._gen_graph(item)
n = graph.nr_nodes
if self._is_mnist_colors:
m = self.mnist.__len__()
digits = []
colors = []
for i in range(n):
x = random.randint(m)
digit, color = self.mnist.__getitem__(x)
digits.append(np.array(digit)[np.newaxis])
colors.append(color)
digits, colors = np.array(digits), np.array(colors)
else:
colors = random.randint(self._nr_colors, size=n)
states = np.zeros((n, self._nr_colors))
adjacent = np.zeros((n, self._nr_colors))
# The goal is to predict whether there is a node with desired color
# as adjacent node for each node x.
for i in range(n):
states[i, colors[i]] = 1
adjacent[i, colors[i]] = 1
for j in range(n):
if graph.has_edge(i, j):
adjacent[i, colors[j]] = 1
if self._is_mnist_colors:
states = digits
return dict(
n=n,
relations=np.expand_dims(graph.get_edges(), axis=-1),
states=states,
colors=colors,
target=adjacent,
)
class FamilyTreeDataset(Dataset):
"""The dataset for family tree tasks."""
def __init__(self,
task,
epoch_size,
nmin,
nmax=None,
p_marriage=0.8,
balance_sample=False):
super().__init__()
self._task = task
self._epoch_size = epoch_size
self._nmin = nmin
self._nmax = nmin if nmax is None else nmax
assert self._nmin <= self._nmax
self._p_marriage = p_marriage
self._balance_sample = balance_sample
self._data = []
def _gen_family(self, item):
n = self._nmin + item % (self._nmax - self._nmin + 1)
return randomly_generate_family(n, self._p_marriage)
def __getitem__(self, item):
#Pdb().set_trace()
while len(self._data) == 0:
family = self._gen_family(item)
relations = family.relations[:, :, 2:]
if self._task == 'has-father':
target = family.has_father()
elif self._task == 'has-daughter':
target = family.has_daughter()
elif self._task == 'has-sister':
target = family.has_sister()
elif self._task == 'parents':
target = family.get_parents()
elif self._task == 'grandparents':
target = family.get_grandparents()
elif self._task == 'uncle':
target = family.get_uncle()
elif self._task == 'maternal-great-uncle':
target = family.get_maternal_great_uncle()
else:
assert False, '{} is not supported.'.format(self._task)
if not self._balance_sample:
return dict(n=family.nr_people, relations=relations, target=target)
# In balance_sample case, the data format is different. Not used.
def get_positions(x):
return list(np.vstack(np.where(x)).T)
def append_data(pos, target):
states = np.zeros((family.nr_people, 2))
states[pos[0], 0] = states[pos[1], 1] = 1
self._data.append(dict(n=family.nr_people,
relations=relations,
states=states,
target=target))
positive = get_positions(target == 1)
if len(positive) == 0:
continue
negative = get_positions(target == 0)
np.random.shuffle(negative)
negative = negative[:len(positive)]
for i in positive:
append_data(i, 1)
for i in negative:
append_data(i, 0)
return self._data.pop()
def __len__(self):
return self._epoch_size
class NQueensDataset(Dataset):
"""The dataset for nqueens tasks."""
def __init__(self,
epoch_size,
n=10,
num_missing = 1,
random_seed = 42,
min_loss = False,
arbit_solution = False,
train_dev_test = TRAIN,
data_file = None,
data_sampling='rs'):
super().__init__()
self._epoch_size = epoch_size
self._n = n
self.num_missing = num_missing
self.min_loss = min_loss
self.arbit_solution = arbit_solution
self.mode = train_dev_test
self.data_sampling = data_sampling
self.nqueen_solver = NQueenSolution()
self.relations = self.nqueen_solver.get_relations(n)
print("In constructor. Size: {}".format(n))
if data_file is None:
outfile = "data/nqueens_data_"+str(self._n)+"_"+str(self.num_missing)+".pkl"
else:
outfile = data_file
#
with open(outfile,"rb") as f:
self.dataset = pickle.load(f)
self.max_count = 0
self.unique_indices = []
self.ambiguous_indices = []
for i,data in enumerate(self.dataset):
self.max_count = max(self.max_count, data["count"])
if data["count"]==1:
self.unique_indices.append(i)
else:
self.ambiguous_indices.append(i)
np.random.seed(random_seed)
self.reset_sampler(data_sampling)
def reset_sampler(self,data_sampling):
self.data_sampling = data_sampling
if data_sampling == 'rsxy':
logger.info("Sampling uniformly from (x,y) tuples")
self.sampler = Categorical(probs = torch.tensor([x['count'] for x in self.dataset]).float())
else:
self.sampler = Categorical(probs = torch.tensor([1.0 for _ in self.dataset]).float())
def pad_set(self,target_set):
pad_counter = self.max_count - len(target_set)
return_set = list(target_set)
return_set.extend([target_set[-1] for _ in range(pad_counter)])
return np.array(return_set)
def sample_imbalance(self, imbalance_ratio):
if np.random.rand()<imbalance_ratio:
ind = np.random.choice(self.ambiguous_indices)
else:
ind = np.random.choice(self.unique_indices)
return ind
def __getitem__(self, item):
#ind = np.random.randint(0,len(self.dataset))
ind = self.sampler.sample().item()
if self.mode==TRAIN:
if self.data_sampling=="unique":
ind = self.sample_imbalance(0)
elif self.data_sampling=="ambiguous":
ind = self.sample_imbalance(1)
elif self.data_sampling=="one-one":
ind = self.sample_imbalance(0.5)
elif self.data_sampling=="two-one":
ind = self.sample_imbalance(0.33)
elif self.data_sampling=="three-one":
ind = self.sample_imbalance(0.25)
elif self.data_sampling=="four-one":
ind = self.sample_imbalance(0.20)
else:
ind = item%len(self.dataset)
data = self.dataset[ind]
if len(data["query"].shape)==1:
data["query"] = np.expand_dims(data["query"],1)
if self.mode==TRAIN and self.arbit_solution:
data["target"] = data["target_set"][0]
else:
data["target"] = data["target_set"][np.random.randint(len(data["target_set"]))]
#
data["target_set"] = self.pad_set(data["target_set"])
data['mask'] = np.array([1 for _ in range(data['count'])] + [0 for _ in range(data['target_set'].shape[0] - data['count'])])
#Pdb().set_trace()
data["relations"] = self.relations
data['ind'] = ind
if isinstance(data["qid"],tuple):
data["qid"] = np.array([data["qid"][0]]+list(data["qid"][1]))
return data
def __len__(self):
if self.mode==TRAIN:
return self._epoch_size
else:
return len(self.dataset)
class FutoshikiDataset(Dataset):
"""The dataset for futoshiki tasks."""
def __init__(self,
epoch_size,
n=10,
num_missing = 1,
num_constraints = 0,
data_size = -1,
random_seed = 42,
min_loss = False,
arbit_solution = False,
train_dev_test = TRAIN,
data_file = None,
data_sampling='rs',args=None):
super().__init__()
self.args = args
self._epoch_size = epoch_size
self._n = n
self.num_missing = num_missing
self.min_loss = min_loss
self.arbit_solution = arbit_solution
self.mode = train_dev_test
self.data_sampling = data_sampling
self.relations = self.get_relations()
print("In constructor. Size: {}".format(n))
if train_dev_test == TRAIN:
mode = 'train'
elif train_dev_test == DEV:
mode = 'val'
elif train_dev_test == TEST:
mode = 'test'
if data_file is None:
outfile = "data/futo_{}_{}_{}_{}.pkl".format(self._n, num_missing, num_constraints, mode)
else:
outfile = data_file
#
logger.info("data file : {}".format(outfile))
#Pdb().set_trace()
with open(outfile,"rb") as f:
self.dataset = pickle.load(f)
if data_size != -1:
self.dataset= self.dataset[:data_size]
#
self.max_count = 0
self.unique_indices = []
self.ambiguous_indices = []
for i,data in enumerate(self.dataset):
if 'count' in data:
this_count = data['count']
else:
this_count = data['target_set'].shape[0]
data['count'] = this_count
self.max_count = max(self.max_count, this_count)
if this_count == 1:
self.unique_indices.append(i)
else:
self.ambiguous_indices.append(i)
np.random.seed(random_seed)
self.reset_sampler(data_sampling)
def reset_sampler(self,data_sampling):
self.data_sampling = data_sampling
if data_sampling == 'rsxy':
logger.info("Sampling uniformly from (x,y) tuples")
self.sampler = Categorical(probs = torch.tensor([x['count'] for x in self.dataset]).float())
else:
self.sampler = Categorical(probs = torch.tensor([1.0 for _ in self.dataset]).float())
def get_relations(self):
n = self._n
n2 = self._n**2
n3 = self._n**3
relations = np.zeros((n3, n3,3))
for x in range(n3):
row = int(x/n2)
col = int((x%n2)/n)
num = int(x%n2)%n
for y in range(n):
# cell constraints
relations[x][row*n2+col*n+y][0]=1
# row constraints
relations[x][y*n2+col*n+num][1]=1
# column constraints
relations[x][row*n2+y*n+num][2]=1
return relations
def pad_set(self,target_set):
pad_counter = self.max_count - len(target_set)
if pad_counter < 0:
return target_set[:self.max_count]
return_set = list(target_set)
return_set.extend([target_set[-1] for _ in range(pad_counter)])
return np.array(return_set)
def sample_imbalance(self, imbalance_ratio):
if np.random.rand()<imbalance_ratio:
ind = np.random.choice(self.ambiguous_indices)
else:
ind = np.random.choice(self.unique_indices)
return ind
def __getitem__(self, item):
#Pdb().set_trace()
#ind = np.random.randint(0,len(self.dataset))
ind = self.sampler.sample().item()
#print(ind)
if self.mode==TRAIN:
if self.data_sampling=="unique":
ind = self.sample_imbalance(0)
elif self.data_sampling=="ambiguous":
ind = self.sample_imbalance(1)
elif self.data_sampling=="one-one":
ind = self.sample_imbalance(0.5)
elif self.data_sampling=="two-one":
ind = self.sample_imbalance(0.33)
elif self.data_sampling=="three-one":
ind = self.sample_imbalance(0.25)
elif self.data_sampling=="four-one":
ind = self.sample_imbalance(0.20)
else:
ind = item%len(self.dataset)
data = self.dataset[ind]
if self.mode==TRAIN and self.arbit_solution:
data["target"] = data["target_set"][0]
else:
data["target"] = data["target_set"][np.random.randint(data['count'])]
data["target_set"] = self.pad_set(data["target_set"])
data['n'] = self._n
data['is_ambiguous'] = int(data['count'] > 1)
data['qid'] = np.array([ind])
data['ind'] = ind
data['mask'] = np.array([1 for _ in range(data['count'])] + [0 for _ in range(data['target_set'].shape[0] - data['count'])])
if self.args.model != 'satnet' or self.args.latent_model == 'nlm':
data["relations"] = self.relations
if self.args.model == 'satnet':
data['gtlt'] = np.concatenate((data['query'][::self._n,1], data['query'][::self._n,2]),axis=0)
return data
def __len__(self):
if self.mode==TRAIN:
return self._epoch_size
else:
return len(self.dataset)
class SudokuDataset(Dataset):
"""The dataset for sudoku tasks."""
def __init__(self,
epoch_size,
data_size = -1,
arbit_solution = False,
train_dev_test = TRAIN,
data_file = None,
data_sampling='rs',args=None):
super().__init__()
self.args = args
self._epoch_size = epoch_size
self.arbit_solution = arbit_solution
self.mode = train_dev_test
self.data_sampling = data_sampling
self._n = 81
print("In constructor. {}".format(args.task))
if train_dev_test == TRAIN:
mode = 'train'
elif train_dev_test == DEV:
mode = 'val'
elif train_dev_test == TEST:
mode = 'test'
outfile = data_file
#
logger.info("data file : {}".format(outfile))
#Pdb().set_trace()
with open(outfile,"rb") as f:
self.dataset = pickle.load(f)
if data_size != -1:
self.dataset= self.dataset[:data_size]
#
np.random.seed(args.seed)
self.max_count = args.solution_count
self.unique_indices = []
self.ambiguous_indices = []
for i,data in enumerate(self.dataset):
data['query'] = (data['query']).astype(int)
if len(data["target_set"])>self.max_count:
self.dataset[i]["target_set"] = data["target_set"][:self.max_count]
self.dataset[i]["count"]=self.max_count
if 'count' in data:
this_count = data['count']
else:
this_count = data['target_set'].shape[0]
self.dataset[i]['count'] = this_count
if this_count == 1:
self.unique_indices.append(i)
else:
self.ambiguous_indices.append(i)
self.max_count += 1
self.reset_sampler(data_sampling)
def reset_sampler(self,data_sampling):
self.data_sampling = data_sampling
if data_sampling == 'rsxy':
logger.info("Sampling uniformly from (x,y) tuples")
self.sampler = Categorical(probs = torch.tensor([x['count'] for x in self.dataset]).float())
else:
self.sampler = Categorical(probs = torch.tensor([1.0 for _ in self.dataset]).float())
def pad_set(self,target_set):
pad_counter = self.max_count - len(target_set)
if pad_counter < 0:
return target_set[:self.max_count]
return_set = list(target_set)
return_set.extend([target_set[-1] for _ in range(pad_counter)])
return np.array(return_set)
def sample_imbalance(self, imbalance_ratio):
if np.random.rand()<imbalance_ratio:
ind = np.random.choice(self.ambiguous_indices)
else:
ind = np.random.choice(self.unique_indices)
return ind
def __getitem__(self, item):
#Pdb().set_trace()
#ind = np.random.randint(0,len(self.dataset))
ind = self.sampler.sample().item()
#print(ind)
if self.mode==TRAIN:
if self.data_sampling=="unique":
ind = self.sample_imbalance(0)
elif self.data_sampling=="ambiguous":
ind = self.sample_imbalance(1)
elif self.data_sampling=="one-one":
ind = self.sample_imbalance(0.5)
elif self.data_sampling=="two-one":
ind = self.sample_imbalance(0.33)
elif self.data_sampling=="three-one":
ind = self.sample_imbalance(0.25)
elif self.data_sampling=="four-one":
ind = self.sample_imbalance(0.20)
else:
ind = item%len(self.dataset)
data = self.dataset[ind]
if self.mode==TRAIN and self.arbit_solution:
data["target"] = data["target_set"][0]
else:
data["target"] = data["target_set"][np.random.randint(data['count'])]
data["target_set"] = self.pad_set(data["target_set"])
data['n'] = self._n
data['is_ambiguous'] = int(data['count'] > 1)
data['qid'] = np.array([ind])
data['ind'] = ind
data['mask'] = np.array([1 for _ in range(data['count'])] + [0 for _ in range(data['target_set'].shape[0] - data['count'])])
return data
def __len__(self):
if self.mode==TRAIN:
return self._epoch_size
else:
return len(self.dataset)
class TowerDataset(Dataset):
"""The dataset for towers tasks."""
def __init__(self,
epoch_size,
n=3,
num_missing = 4,
random_seed = 42,
arbit_solution = False,
train_dev_test = TRAIN,
data_file = None,
data_sampling='rs'):
super().__init__()
self._epoch_size = epoch_size
self._n = n
self.num_missing = num_missing
self.arbit_solution = arbit_solution
self.mode = train_dev_test
self.data_sampling = data_sampling
self.unary_relations, self.relations = self.get_relations(n)
outfile = data_file
with open(outfile,"rb") as f:
self.dataset = pickle.load(f)
self.max_count = 0
self.unique_indices = []
self.ambiguous_indices = []
for i,data in enumerate(self.dataset):
data["query"] = self.vectorize_query(data["query"])
data["target_set"] = [np.concatenate((np.zeros(4*self._n**2),self.get_one_hot(target)))
for target in data["target_set"]]
#data["target_set"] = [self.get_one_hot(target) for target in data["target_set"]]
data["count"] = len(data["target_set"])
data["is_ambiguous"] = (data["count"]>1)
self.max_count = max(self.max_count, data["count"])
if data["count"]==1:
self.unique_indices.append(i)
else:
self.ambiguous_indices.append(i)
np.random.seed(random_seed)
def get_one_hot(self,grid):
grid = grid.flatten()
expand_grid = np.zeros((grid.size, self._n+1))
expand_grid[np.arange(grid.size),grid] = 1
expand_grid = expand_grid[:,1:]
expand_grid = expand_grid.flatten()
return expand_grid
def vectorize_query(self,query):
n3 = self._n**3
exp_query = np.concatenate((self.get_one_hot(query), np.zeros(n3)))
return np.stack([exp_query]+self.unary_relations).T
def get_relations(self,n):
n2 = n**2
n3 = n**3
vector_dim = n3+4*n2
left_tower_numbers = np.array([1]*n2+[0]*(vector_dim-n2))
up_tower_numbers = np.array([0]*n2+[1]*n2+[0]*(vector_dim-2*n2))
right_tower_numbers = np.array([0]*(2*n2)+[1]*n2+[0]*(vector_dim-3*n2))
down_tower_numbers = np.array([0]*(3*n2)+[1]*n2+[0]*(vector_dim-4*n2))
unary_relations = [left_tower_numbers, up_tower_numbers, right_tower_numbers, down_tower_numbers]
relations = np.zeros((vector_dim, vector_dim,3))
prefix = 4*n2
for x in range(n3):
row = int(x/n2)
col = int((x%n2)/n)
num = int(x%n2)%n
for y in range(n):
# cell constraints
relations[prefix+x][prefix+row*n2+col*n+y][0]=1
# row constraints
relations[prefix+x][prefix+y*n2+col*n+num][1]=1
# column constraints
relations[prefix+x][prefix+row*n2+y*n+num][2]=1
for y in range(n):
relations[prefix+x][row*n+y][1]=1
relations[row*n+y][prefix+x][1]=1
relations[prefix+x][2*n2+row*n+y][1]=1
relations[2*n2+row*n+y][prefix+x][1]=1
relations[prefix+x][n2+col*n+y][2]=1
relations[n2+col*n+y][prefix+x][2]=1
relations[prefix+x][3*n2+col*n+y][2]=1
relations[3*n2+col*n+y][prefix+x][2]=1
for x in range(n2):
row = int(x/n)
cell = int(x%n)
for y in range(n):
relations[x][row*n+y][0]=1
relations[n2+x][n2+row*n+y][0]=1
relations[2*n2+x][2*n2+row*n+y][0]=1
relations[3*n2+x][3*n2+row*n+y][0]=1
return unary_relations,relations
def pad_set(self,target_set):
pad_counter = self.max_count - len(target_set)
return_set = list(target_set)
return_set.extend([target_set[-1] for _ in range(pad_counter)])
return np.array(return_set)
def sample_imbalance(self, imbalance_ratio):
if np.random.rand()<imbalance_ratio:
ind = np.random.choice(self.ambiguous_indices)
else:
ind = np.random.choice(self.unique_indices)
return ind
def __getitem__(self, item):
ind = np.random.randint(0,len(self.dataset))
if self.mode==TRAIN:
if self.data_sampling=="unique":
ind = self.sample_imbalance(0)
elif self.data_sampling=="ambiguous":
ind = self.sample_imbalance(1)
elif self.data_sampling=="one-one":
ind = self.sample_imbalance(0.5)
elif self.data_sampling=="two-one":
ind = self.sample_imbalance(0.33)
elif self.data_sampling=="three-one":
ind = self.sample_imbalance(0.25)
elif self.data_sampling=="four-one":
ind = self.sample_imbalance(0.20)
else:
ind = item%len(self.dataset)
data = self.dataset[ind]
if self.mode==TRAIN and self.arbit_solution:
data["target"] = data["target_set"][0]
else:
data["target"] = data["target_set"][np.random.randint(len(data["target_set"]))]
#
data['n']=self._n
data['qid'] = np.array([ind])
data["target_set"] = self.pad_set(data["target_set"])
data["mask"] = np.array([1 for _ in range(data['count'])] + [0 for _ in range(data['target_set'].shape[0] - data['count'])])
data["relations"] = self.relations
return data
def __len__(self):
if self.mode==TRAIN:
return self._epoch_size
else:
return len(self.dataset)
class FutoshikiDatasetDynamic(Dataset):
"""The dataset for Futoshiki tasks."""
def __init__(self,
epoch_size,
n=5,
num_missing = 1,
num_constraints = 0,
random_seed = 42,
min_loss = False,
train_dev_test = TRAIN,
data_file = None,
data_sampling='rs'):
super().__init__()
self._epoch_size = epoch_size
self._n = n
self.num_missing = num_missing
self.num_constraints = num_constraints
self.min_loss = min_loss
self.mode = train_dev_test
self.data_sampling = data_sampling
self.relations = self.get_relation()
self.max_count = 2*num_missing
if data_sampling=="unique":
self.max_count=1
outfile = data_file
with open(outfile,"rb") as f:
self.dataset = pickle.load(f)
if train_dev_test!=TRAIN:
self._epoch_size = len(self.dataset)
np.random.seed(random_seed)
def check_validity(self,grid, constraints=None):
for x in range(len(grid)):
row = set(grid[x])
if len(row)!=len(grid):
return False
col = set(grid[:,x])
if len(col)!=len(grid):
return False
if constraints is None:
return True
gt = zip(*np.nonzero(constraints[0]))
for ind in gt:
next_ind = (ind[0],ind[1]+1)
if grid[next_ind]>grid[ind]:
return False
lt = zip(*np.nonzero(constraints[1]))
for ind in lt:
next_ind = (ind[0],ind[1]+1)
if grid[next_ind]<grid[ind]:
return False
return True
def get_relation(self):
n = self._n
n2 = self._n**2
n3 = self._n**3
relations = np.zeros((n3, n3,3))
for x in range(n3):
row = int(x/n2)
col = int((x%n2)/n)
num = int(x%n2)%n
for y in range(n):
# cell constraints
relations[x][row*n2+col*n+y][0]=1
# row constraints
relations[x][y*n2+col*n+num][1]=1
# column constraints
relations[x][row*n2+y*n+num][2]=1
return relations
def get_one_hot(self,grid):
grid = grid.flatten()
expand_grid = np.zeros((grid.size, self._n+1))
expand_grid[
|
np.arange(grid.size)
|
numpy.arange
|
# importing lib
import numpy as np
from skimage import data
import matplotlib.pyplot as plt
from skimage.filters.rank import entropy
from skimage.color import rgb2hsv, rgb2gray, rgb2yuv
import cv2
from skimage.feature import greycomatrix
from matplotlib.colors import NoNorm
from skimage import io
from skimage.color import rgb2gray
def show_image(image,title ="image", cmap_type = 'gray'):
plt.imshow(image , cmap = cmap_type)
plt.title(title)
plt.axis('off')
plt.show()
def image_convert_to_grayscale(image):
return rgb2gray(image)
def image_size(image):
return (image.shape, image.size)
def image_dim(image):
    return image.ndim
def signaltonoise(a, axis=0, ddof=0):
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
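# A hedged usage sketch (illustrative helper, not part of the original script):
# signaltonoise() returns mean/std along the chosen axis and maps a zero
# standard deviation to 0 instead of dividing by zero.
def _signaltonoise_demo():
    arr = np.array([[1.0, 2.0], [3.0, 2.0]])
    # column 0 has std 1 -> ratio 2.0 ; column 1 has std 0 -> reported as 0
    return signaltonoise(arr, axis=0)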
def image_dept(image):
return image.shape
# reading image
image_path = "Images/ramdom1.jpg"
image = cv2.imread(image_path)
def Question_01(image):
image = image_convert_to_grayscale(image)
show_image(image) # display image
print(image.shape)
print("image Dim and size : {}\nImage Depth : {}".format(image_size(image),
image_dept(image)))
print(signaltonoise(image))
# Question_01(image)
def Question_02(image):
# converting into grayscale
image = image_convert_to_grayscale(image)
print(image)
# glcm = np.squeeze(greycomatrix(image, distances=[1],
# angles=[0], symmetric=True,
# normed=True))
# print(glcm)
# Question_02(image)
from skimage import color
import numpy as np
import math
class Assignmet01:
def __init__(self,image,title):
self.image = image
self.title = title
def show_image(self,image,title ="image" ,grayscale = None):
if grayscale == True:
plt.imshow(image , cmap=plt.cm.gray )
plt.title(title)
plt.show()
elif grayscale == False:
plt.imshow(image )
plt.title(title)
plt.show()
def image_depth(self,image):
one_dim = image.flatten()
max_shade = max(one_dim)
print(max_shade)
if max_shade <=1:
return 0
elif max_shade <=2 :
return 1
elif max_shade <=7 :
return 3
elif max_shade <=15 :
return 4
elif max_shade <=31 :
return 5
elif max_shade <=63 :
return 6
elif max_shade <=127 :
return 7
elif max_shade <=255 :
return 8
        # 2**0 --> 1
        # 2**1 --> 2
        # 2**2 --> 4
        # 2**3 --> 8
        # 2**4 --> 16
        # 2**5 --> 32
        # 2**6 --> 64
        # 2**7 --> 128
        # 2**8 --> 256
def image_channel(self,image):
self.show_image(image , title="Original image" ,grayscale=False)
self.hist(image , title="Original image")
red = image[:,:,0]
self.show_image(red , title="Red image" ,grayscale = True)
self.hist(red , title="red")
plt.show()
green = image[:,:,1]
self.show_image(green,title = "Green image" ,grayscale=False)
self.hist(green , title="green")
blue = image[:,:,2]
self.show_image(blue,title="Blue image" ,grayscale=False)
self.hist(blue,title="Blue")
def hist(self,image ,title = "title"):
plt.hist(image.ravel(),bins=256)
plt.title(title)
plt.show()
def brightness(self,image):
Brightness = image.mean()
# print("Brightness : ",Brightness)
return Brightness
def signaltonoise(self,a, axis=0, ddof=0):
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def conversion_to_grayscale(self,image):
grayscale = rgb2gray(image)
# print("representation of GrayScale : ", grayscale)
# print(grayscale)
rgb = color.gray2rgb(image)
# print("representation of RGB : ",rgb)
return grayscale
def Question_01(self,image):
# depth is missing
image = self.conversion_to_grayscale(image)
# display image
self.show_image(image)
# display Dimension
print("Image Dimension :{}\nImage size:{}\nSignal to Noise Ratio :{}".format(image.shape,image.size,
self.signaltonoise(image)))
def Question_02(self,image):
# 1D representation of image
oneD_image = image.flatten()
total_len = len(oneD_image)
(unique, counts) = np.unique(oneD_image, return_counts=True)
# print(unique, counts)
unique_element_len = len(unique)
entropy = 0
        for index in range(unique_element_len):  # include every unique intensity value
# print(index)
entropy += -(counts[index]/total_len)* (math.log2((counts[index]/total_len)))
print(entropy)
return entropy
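    # Note: the loop above is equivalent to the vectorized form
    #   p = counts / total_len
    #   entropy = -(p * np.log2(p)).sum()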
def Question_03(self,image):
# Flip the image in up direction
vertically_flipped = np.flipud(image)
self.show_image(vertically_flipped)
def Question_04(self,image):
pass
def log_transformation(self,value):
value =math.log(1+value)
return value
def Question_09(self,image):
""" log transformation formula is s = loge(1+r) r is the pixel value
"""
#grayscale
# image = self.conversion_to_grayscale(image)
print(image.shape)
log_image = np.uint8(np.log1p(image))
threshold = 20
img_3 = cv2.threshold(log_image , threshold ,255, cv2.THRESH_BINARY)[1]
cv2.imshow("input image" ,image)
cv2.imshow("transform image ",img_3)
cv2.waitKey(100000)
cv2.destroyAllWindows()
def Question_05(self,image):
neg_image = 255- image
cv2.imshow("Neg image ",neg_image)
cv2.imshow("orginal Image ",image)
cv2.waitKey(10000)
cv2.destroyAllWindows()
def Question_10(self,image,gamma=2):
for gamma in range(0,255):
trans_img = np.power(image,gamma)
# cv2.imshow("Input image : ",image)
# cv2.imshow("Transform Image :",trans_img)
# cv2.waitKey(10000)
# cv2.destroyAllWindows()
self.show_image(trans_img)
def Question_07(self,image):
beta = self.brightness(image) # average value
print("beta : ",beta)
        # use the vectorized std() below instead of the explicit pixel loop to keep the computation cheap
# one_D_image = image.flatten()
# print("lenght of 1 d image : ",len(one_D_image))
# print("one D image : ",one_D_image)
# M_X_N = len(image)
# count = 0
# for each_pixel_value in one_D_image:
# count += ((each_pixel_value - beta)**2 )
# contrast = math.sqrt(count/M_X_N)
# print("Contrast value ",contrast)
contrast = image.std()
print(contrast)
return contrast
def Question_11(self,image):
""" l(m,n) = (orignalPixel-image_min) * ((lmax-lmin)/image_max-image_min))+ lmin"""
min_pixel_value = image.min()
max_pixel_value = image.max()
print(min_pixel_value , max_pixel_value)
        ratio = (200 - 100) / (max_pixel_value - min_pixel_value)  # (l_max - l_min) / (image_max - image_min)
plt.figure(figsize=(12,4))
plt.subplot(1,3,1)
plt.imshow(image , cmap="gray" , norm=NoNorm())
plt.subplot(1,3,2)
trans_image = (image-
|
np.min(image)
|
numpy.min
|
# import copy
# import os
import pickle
import time
from multiprocessing import Pool
import corner
# import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as interp
from scipy.optimize import leastsq
from tqdm import tqdm
'''------------------------------------------------------------------------
cp_tools.py - a collection of functions useful for closure phase analysis
in Python. This includes mas2rad, rad2mas and phase_binary from pysco;
it depends on PyMultiNest, MultiNest and emcee
------------------------------------------------------------------------'''
def mas2rad(x):
''' Convenient little function to convert milliarcsec to radians '''
return x*np.pi/(180*3600*1000)
# =========================================================================
# =========================================================================
def rad2mas(x):
''' Convenient little function to convert radians to milliarcseconds '''
return x/np.pi*(180*3600*1000)
# =========================================================================
# =========================================================================
def phase_binary(u, v, wavel, p, return_cvis=False):
''' Calculate the phases observed by an array on a binary star
----------------------------------------------------------------
p: 3-component vector (+2 optional), the binary "parameters":
- p[0] = sep (mas)
- p[1] = PA (deg) E of N.
- p[2] = contrast ratio (primary/secondary)
optional:
- p[2:] = contrast ratio for several wavelengths that we want
to calculate the cps over
- u,v: baseline coordinates (meters)
- wavel: wavelength (meters)
---------------------------------------------------------------- '''
p = np.array(p)
# relative locations
th = (p[1] + 90.0) * np.pi / 180.0
ddec = mas2rad(p[0] * np.sin(th))
dra = -mas2rad(p[0] * np.cos(th))
# decompose into two "luminosities"
# but first, a little trick so this works whether
# p is a single value or a list of contrasts
spec = p[2:]
if len(spec) == 1:
spec = spec[0]
l2 = 1. / (spec + 1)
l1 = 1 - l2
# phase-factor
output_shape = list(u.shape)
output_shape[-1] = np.size(wavel)
phi = np.zeros(output_shape, dtype=complex)
phi.real = np.cos(-2*np.pi*(u*dra + v*ddec)/wavel)
phi.imag = np.sin(-2*np.pi*(u*dra + v*ddec)/wavel)
cvis = l1 + l2 * phi
phase = np.angle(cvis, deg=True)
if return_cvis:
return cvis
else:
return np.mod(phase + 10980., 360.) - 180.0
# =========================================================================
# =========================================================================
def cp_loglikelihood(params, u, v, wavel, t3data, t3err, model='constant'):
'''Calculate loglikelihood for closure phase data.
Used both in the MultiNest and MCMC Hammer implementations.'''
# hacky way to introduce priors
if (params[2] > 5000) or (params[2] < 0.):
return -np.inf
# if (params[0] > 250.) or (params[0] < 0.):
# return -np.inf
if (params[1] > 380.) or (params[1] < -5.):
return -np.inf
cps = cp_model(params, u, v, wavel, model=model)
chi2 = np.sum(((t3data-cps)/t3err)**2)
loglike = -chi2/2
return loglike
# =========================================================================
# =========================================================================
def cp_loglikelihood_cov(params, u, v, wavel, t3data, cov_inv, model='constant'):
'''Calculate loglikelihood for closure phase data. Uses the inverse
covariance matrix rather than the uncertainties
Used both in the MultiNest and MCMC Hammer implementations.'''
# hacky way to introduce priors
if (params[2] > 5000) or (params[2] < 0.):
return -np.inf
if (params[0] > 250.) or (params[0] < 0.):
return -np.inf
if (params[1] > 380.) or (params[1] < -5):
return -np.inf
cps = cp_model(params, u, v, wavel, model=model)
resids = t3data-cps
# Loop through wavelengths and calculate the chi2 for each one and add them
chi2 = 0
for wav in range(wavel.size):
# We want obs.T * Cov**-1 * obs
# But since obs is not a vector, we would need to loop over the second dimension.
# Instead, here's a trick to do this in a faster way
temp = resids[:, :, wav].transpose().dot(cov_inv[:, :, wav])
chi2 += np.sum(temp.transpose()*resids[:, :, wav])
loglike = -chi2/2
return loglike
# =========================================================================
# =========================================================================
def cp_loglikelihood_proj(params, u, v, wavel, proj_t3data, proj_t3err, proj, model='constant'):
'''Calculate loglikelihood for projected closure phase data.
Used both in the MultiNest and MCMC Hammer implementations.
Here proj is the eigenvector array'''
# hacky way to introduce priors
# if (params[2] > 50000) or (params[2] < 0.):
# return -np.inf
if (params[0] > 350.) or (params[0] < 0.):
return -np.inf
if (params[1] > 380.) or (params[1] < -5.):
return -np.inf
cps = cp_model(params, u, v, wavel, model=model)
proj_mod_cps = project_cps(cps, proj)
chi2 = np.sum(((proj_t3data-proj_mod_cps)/proj_t3err)**2)
loglike = -chi2/2
return loglike
# =========================================================================
# =========================================================================
def cp_loglikelihood_multiple(params, u, v, wavel, t3data, t3err, model='constant', ncomp=1):
'''Calculate loglikelihood for closure phase data and multiple companions.
Used both in the MultiNest and MCMC Hammer implementations.'''
cps = cp_model(params[0:3], u, v, wavel, model=model)
for ix in range(1, ncomp):
# 3 since there are 3 parameters per companion
cps += cp_model(params[ix*3:(ix+1)*3], u, v, wavel, model=model)
chi2 = np.sum(((t3data.ravel()-cps.ravel())/t3err.ravel())**2)
loglike = -chi2/2
return loglike
# =========================================================================
# =========================================================================
def cp_model(params, u, v, wavels, model='constant'):
'''Function to model closure phases. Takes a parameter list, u,v triangles and range of wavelengths.
Allows fitting of a model to contrast vs wavelength.
Models for contrast ratio:
constant (contrast is constant with wavelength, default)
linear (params[2,3]=contrast ratios at end wavelengths),
free (params[2:]=contrast ratios).
ndof (the wavelength channels are evenly spaced cubic interpolations in params[2:])
polynomial (of the form Sum[n] params[n+2]*(wavelength*1e6)**n )
NOTE: This doesn't allow for nonzero size of each component!'''
nwav = wavels.size
model_params = np.zeros(nwav+2)
model_params[0:2] = params[0:2]
if model == 'constant':
cons = np.repeat(params[2], nwav)
elif model == 'linear':
cons = params[2] + (params[3]-params[2]) * \
(wavels-wavels[0])/(wavels[-1]-wavels[0])
elif model == 'ndof':
ndof = params[2:].size
wavs = np.linspace(np.min(wavels), np.max((wavels)), ndof)
f = interp.interp1d(wavs, params[2:], kind='linear')
cons = f(wavels)
elif model == 'free':
# no model, crat vs wav is free to vary.
cons = params[2:]
elif model == 'polynomial':
coefficients = params[2:]
ndof = len(coefficients)
cons = np.repeat(0., nwav)
xax = (wavels-np.min(wavels))/(np.max(wavels)-np.min(wavels))
for order in range(ndof):
cons += coefficients[order]*xax**order
else:
raise NameError('Unknown model input to cp_model')
model_params[2:] = cons
# vectorize the arrays to speed up multi-wavelength calculations
u = u[..., np.newaxis] # (ncp x n_runs x 3 x 1) or (ncp x 3 x 1)
v = v[..., np.newaxis] # (ncp x n_runs x 3 x 1) or (ncp x 3 x 1)
# (1 x 1 x 1 x nwav) or (1x1xnwav)
wavels = wavels[np.newaxis, np.newaxis, :]
if u.ndim == 4:
wavels = wavels[np.newaxis]
phases = phase_binary(u, v, wavels, model_params)
cps = np.sum(phases, axis=-2)
return cps
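# --- Hypothetical demo (not part of the original module) -----------------------
# A minimal sketch of calling cp_model directly. It assumes closure triangles whose
# three baseline coordinates each sum to zero; u and v have shape (n_cp, 3) and the
# wavelength axis has shape (n_wav,). The numbers below are made up for illustration.
def _cp_model_demo():
    u_demo = np.array([[10.0, -4.0, -6.0], [12.0, -5.0, -7.0]])  # baseline u coords (m)
    v_demo = np.array([[3.0, 2.0, -5.0], [1.0, 4.0, -5.0]])      # baseline v coords (m)
    wavel_demo = np.array([1.6e-6, 2.2e-6])                      # wavelengths (m)
    # 100 mas separation, PA 45 deg, contrast ratio 50, constant with wavelength
    cps = cp_model([100.0, 45.0, 50.0], u_demo, v_demo, wavel_demo, model='constant')
    return cps  # shape (2, 2): one closure phase per triangle per wavelength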
# =========================================================================
# =========================================================================
def project_cps(cps, proj):
''' Short wrapper program to do the projection of a set of closure phases
onto another basis. proj is the projection matrix, usually the eigenvectors
for projecting onto a statistically independent basis set.'''
proj_cps = np.zeros((proj.shape[2], cps.shape[1], cps.shape[2]))
for wav in range(cps.shape[2]):
proj_cps[:, :, wav] = np.dot(proj[wav].transpose(), cps[:, :, wav])
return proj_cps
# =========================================================================
# =========================================================================
def hammer(cpo, ivar=[52., 192., 1.53], ndim='Default', nwalcps=50, plot=False,
projected=False, niters=1000, threads=1, model='constant', sep_prior=None,
pa_prior=None, crat_prior=None, err_scale=1., extra_error=0.,
use_cov=False, burn_in=0, verbose=False):
    '''Default implementation of emcee, the MCMC Hammer, for closure phase
    fitting. Requires a closure phase object cpo, and is best called with
    ivar chosen to be near the peak - it can fail to converge otherwise.
    Also allows fitting of a contrast vs wavelength model. See cp_model for details!
    Prior ranges introduce a flat (tophat) prior between the two values specified.
    burn_in = the number of iterations to discard due to burn-in.'''
    import emcee
if ndim == 'Default':
ndim = len(ivar)
ivar = np.array(ivar) # initial parameters for model-fit
# Starting parameters for the walkers
p0 = []
scatter = np.zeros(ndim) + 0.01
scatter[0] = 0.05
scatter[1] = 0.05
for walker_ix in range(nwalcps):
p0.append(ivar+ivar*scatter*np.random.rand(ndim))
# p0 = [ivar + 0.1*ivar*np.random.rand(ndim) for i in range(nwalcps)] # initialise walcps in a ball
# p0 = [ivar + 0.75*ivar*np.random.rand(ndim) for i in range(nwalcps)] # initialise walcps in a ball
# print('\n -- Running emcee --')
t3err = np.sqrt(cpo.t3err**2+extra_error**2)
t3err *= err_scale
t0 = time.time()
if projected is False:
sampler = emcee.EnsembleSampler(nwalcps, ndim, cp_loglikelihood,
args=[cpo.u, cpo.v, cpo.wavel, cpo.t3data, t3err, model], threads=threads)
elif use_cov:
sampler = emcee.EnsembleSampler(nwalcps, ndim, cp_loglikelihood_cov,
args=[cpo.u, cpo.v, cpo.wavel, cpo.t3data, cpo.cov_inv, model], threads=threads)
else:
proj_t3err = np.sqrt(cpo.proj_t3err**2 + extra_error**2)
proj_t3err *= err_scale
sampler = emcee.EnsembleSampler(nwalcps, ndim, cp_loglikelihood_proj,
args=[cpo.u, cpo.v, cpo.wavel, cpo.proj_t3data, proj_t3err, cpo.proj, model], threads=threads)
sampler.run_mcmc(p0, niters, progress=True)
tf = time.time()
if verbose:
print('Time elapsed =', tf-t0, 's')
chain = sampler.flatchain
# Remove the burn in
chain = chain[burn_in:]
seps = chain[:, 0]
ths = chain[:, 1]
cs = chain[:, 2:][:, 0]
# Now introduce the prior, by ignoring values outside of the range
if sep_prior is not None:
wh = (seps >= sep_prior[0]) & (seps <= sep_prior[1])
seps = seps[wh]
ths = ths[wh]
cs = cs[wh]
if pa_prior is not None:
wh = (ths >= pa_prior[0]) & (ths <= pa_prior[1])
seps = seps[wh]
ths = ths[wh]
cs = cs[wh]
if crat_prior is not None:
wh = (cs >= crat_prior[0]) & (cs <= crat_prior[1])
seps = seps[wh]
ths = ths[wh]
cs = cs[wh]
# if crat_prior is not None:
# # for ix in range(ndim-2):
# # c = cs[:, ix]
# wh = (cs[:, 0] >= crat_prior[0]) & (cs[:, 0] <= crat_prior[1])
# seps = seps[wh]
# ths = ths[wh]
# cs = cs[wh, :]
# check that there are still some points left!
if seps.size > 0:
ngood = len(seps)
chain = np.zeros((ngood, ndim))
chain[:, 0] = seps
chain[:, 1] = ths
chain[:, 2] = cs
else:
print('WARNING: Your priors eliminated all points!')
meansep = np.mean(seps)
dsep = np.std(seps)
meanth = np.mean(ths)
dth = np.std(ths)
meanc = np.mean(cs, axis=0)
dc = np.std(cs, axis=0)
if verbose:
print('Separation', meansep, 'pm', dsep, 'mas')
print('Position angle', meanth, 'pm', dth, 'deg')
print('Contrast', meanc[0], 'pm', dc[0])
if model == 'linear':
print('Contrast2', meanc[1], 'pm', dc[1])
extra_pars = ['Contrast ']
extra_dims = ['Ratio']
elif model == 'free':
extra_pars = np.repeat('Contrast', ndim-2)
extra_dims = np.repeat('Ratio', ndim-2)
else:
extra_pars = 'None'
extra_dims = 'None'
paramdims = ['(mas)', '(deg)', 'Ratio']
for ix, par in enumerate(extra_pars):
# paramnames.append(par)
paramdims.append(extra_dims[ix])
res_p, err_p, err_m = [], [], []
for i in range(ndim):
mcmc = np.percentile(chain[:, i], [16, 50, 84])
q = np.diff(mcmc)
res_p.append(mcmc[1])
err_m.append(q[0])
err_p.append(q[1])
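    # res_p holds the posterior medians; err_m and err_p are the 16th-50th and
    # 50th-84th percentile half-widths, i.e. an approximate 1-sigma credible interval.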
if plot:
# plt.figure(figsize=(5, 7))
# plt.subplot(3, 1, 1)
# plt.plot(sampler.chain[:, :, 0].T, color='grey', alpha=.5)
# # plt.plot(len(chain_sep), sep, marker='*', color='#0085ca', zorder=1e3)
# plt.ylabel('Separation [mas]')
# plt.subplot(3, 1, 2)
# plt.plot(sampler.chain[:, :, 1].T, color='grey', alpha=.5)
# # plt.plot(len(chain_sep), pa, marker='*', color='#0085ca', zorder=1e3)
# plt.ylabel('PA [deg]')
# plt.subplot(3, 1, 3)
# plt.plot(sampler.chain[:, :, 2].T, color='grey', alpha=.2)
# # plt.plot(len(chain_sep), cr, marker='*', color='#0085ca', zorder=1e3)
# plt.xlabel('Step')
# plt.ylabel('CR')
# plt.tight_layout()
# plt.show(block=False)
fig = corner.corner(chain, labels=['SEP [mas]', 'PA [deg]', 'CONTRAST'],
quantiles=(0.16, 0.84),
show_titles=True, title_kwargs={"fontsize": 10},
color='#096899', )
axes = np.array(fig.axes).reshape((ndim, ndim))
# Loop over the diagonal
for i in range(ndim):
ax = axes[i, i]
# ax.axvline(value1[i], color="g")
ax.axvline(res_p[i], color="#ce0056e6", lw=1)
# Loop over the histograms
for yi in range(ndim):
for xi in range(yi):
ax = axes[yi, xi]
ax.axvline(res_p[xi], color="#ce0056e6", lw=1)
ax.axhline(res_p[yi], color="#ce0056e6", lw=1)
ax.plot(res_p[xi], res_p[yi], "#ce0056e6", lw=1)
plt.tight_layout()
plt.show(block=False)
data = {'sep': meansep, 'delsep': dsep, 'pa': meanth, 'delpa': dth, 'con': meanc,
'delcon': dc, 'chain': sampler.chain}
data2 = {'sep': res_p[0], 'delsepm': err_m[0], 'delsepp': err_p[0],
'pa': res_p[1], 'delpam': err_m[1], 'delpap': err_p[1],
'cr': res_p[2], 'delcrm': err_m[2], 'delcrp': err_p[2],
'chain': sampler.chain
}
# and clean up
# if threads > 1:
# sampler.pool.terminate()
return data, data2
# =========================================================================
# =========================================================================
# def nest(cpo, paramlimits=[20., 250., 0., 360., 1.0001, 10], resume=False, eff=0.3, multi=True,
# err_scale=1., extra_error=0., plot=True, npoints=1000, logcrat_prior=True,
# use_cov=False, projected=False, model='constant'):
# '''Default implementation of a MultiNest fitting routine for closure
# phase data. Requires a closure phase cpo object, parameter limits and
# sensible keyword arguments for the multinest parameters.
# This function does very naughty things creating functions inside this
# function because PyMultiNest is very picky about how you pass it
# data.
# Optional parameter eff tunes sampling efficiency, and multi toggles multimodal
# nested sampling on and off. Turning off multimodal sampling results in a speed
# boost of ~ 20-30%.
# logcrat_prior allows the prior for the contrast ratios to be flat in logarithmic space
# paramlimits is needed here, and must be a list with length = ncomp,
# with paramlimits[companion_number]=[minsep,maxsep,minpa,maxpa,mincrat,maxcrat]
# '''
# import pymultinest
# if projected:
# proj_t3err = np.sqrt(cpo.proj_t3err**2 + extra_error**2)
# proj_t3err *= err_scale
# else:
# t3err = np.sqrt(cpo.t3err**2+extra_error**2)
# t3err *= err_scale
# if logcrat_prior:
# def myprior(cube, ndim, n_params, paramlimits=paramlimits):
# cube[0] = cube[0]*(paramlimits[1] - paramlimits[0])+paramlimits[0]
# cube[1] = cube[1]*(paramlimits[3] - paramlimits[2])+paramlimits[2]
# for ix in range(n_params-2):
# cube[ix+2] = 10**(cube[ix+2]*(np.log10(paramlimits[5]) -
# np.log10(paramlimits[4]))+np.log10(paramlimits[4]))
# else:
# def myprior(cube, ndim, n_params, paramlimits=paramlimits):
# cube[0] = cube[0]*(paramlimits[1] - paramlimits[0])+paramlimits[0]
# cube[1] = cube[1]*(paramlimits[3] - paramlimits[2])+paramlimits[2]
# for ix in range(n_params-2):
# cube[ix+2] = cube[ix+2] * \
# (paramlimits[5] - paramlimits[4])+paramlimits[4]
# if projected:
# def myloglike(cube, ndim, n_params):
# loglike = cp_loglikelihood_proj(cube[0:n_params], cpo.u, cpo.v, cpo.wavel,
# cpo.proj_t3data, proj_t3err, cpo.proj, model=model)
# return loglike
# elif use_cov:
# def myloglike(cube, ndim, n_params):
# loglike = cp_loglikelihood_cov(cube[0:n_params], cpo.u, cpo.v, cpo.wavel,
# cpo.t3data, cpo.cov_inv, model=model)
# return loglike
# else:
# def myloglike(cube, ndim, n_params):
# loglike = cp_loglikelihood(
# cube[0:n_params], cpo.u, cpo.v, cpo.wavel, cpo.t3data, t3err, model=model)
# return loglike
# # How many parameters?
# if model == 'constant':
# parameters = ['Separation', 'Position Angle', 'Contrast Ratio']
# elif model == 'free':
# parameters = ['Separation', 'Position Angle']
# parameters.extend(len(cpo.wavel)*['Contrast Ratio'])
# else:
# raise Exception('Model not yet implemented in nest!')
# n_params = len(parameters)
# ndim = n_params
# # Check that the "chains" directory exists (which multinest needs)
# if os.path.exists(os.getcwd()+'/chains/') is False:
# os.mkdir(os.getcwd()+'/chains/')
# tic = time.time() # start timing
# # ---------------------------------
# # now run MultiNest!
# # ---------------------------------
# pymultinest.run(myloglike, myprior, n_params, wrapped_params=[1], resume=resume, verbose=True,
# sampling_efficiency=eff, multimodal=multi, n_iter_before_update=1000,
# n_live_points=npoints)
# # let's analyse the results
# a = pymultinest.Analyzer(n_params=n_params)
# s = a.get_stats()
# toc = time.time()
# if toc-tic < 60.:
# print('Time elapsed =', toc-tic, 's')
# else:
# print('Time elapsed =', (toc-tic)/60., 'mins')
# print()
# print("-" * 30, 'ANALYSIS', "-" * 30)
# print("Global Evidence:\n\t%.15e +- %.15e" %
# (s['global evidence'], s['global evidence error']))
# print('')
# params = s['marginals']
# print_line = "{0}: {1:.3F} pm {2:.3F}"
# for param_ix in range(n_params):
# print(print_line.format(parameters[param_ix], params[param_ix]['median'],
# params[param_ix]['sigma']))
# if plot:
# p = pymultinest.PlotMarginalModes(a)
# plt.figure(figsize=(5*n_params, 5*n_params))
# for i in range(n_params):
# plt.subplot(n_params, n_params, n_params * i + i + 1)
# p.plot_marginal(i, with_ellipses=True,
# with_points=False, grid_points=20)
# plt.ylabel("Probability")
# plt.xlabel(parameters[i])
# for j in range(i):
# plt.subplot(n_params, n_params, n_params * j + i + 1)
# # plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
# p.plot_conditional(i, j, with_ellipses=False, with_points=True, grid_points=20)
# plt.xlabel(parameters[i])
# plt.ylabel(parameters[j])
# return [a, s]
# =========================================================================
# =========================================================================
def detec_sim_loopfit(everything):
'''Function for multiprocessing in detec_limits. Takes a
single separation and full angle, contrast lists.
For each sep,pa,contrast, it calculates 10,000 simulations of that binary
(adding noise to each). A detection is defined as having chi2_bin - chi2_null <0
    It then counts the number of detections over all noise realisations for each position angle and contrast.'''
detec_count = np.zeros((everything['nth'], everything['ncon']))
ndim = len(everything['error'].shape)
# This can be done once since it doesn't change with the binary params
# error should be ncp x nwav, rands should be ncp x nwav x n
err = everything['error']
resids = err[..., np.newaxis]*everything['rands']
for j, th in enumerate(everything['ths']):
for k, con in enumerate(everything['cons']):
bin_cp = cp_model([everything['sep'], th, con],
everything['u'], everything['v'], everything['wavel'])
rnd_cp = bin_cp[..., np.newaxis] + resids
# We want the difference in chi2 between the binary and null hypothesis.
# i.e. using rnd_cp for the single star and rnd_cp-bin_cp for the binary
# but this simplifies to the following equation
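            # Explicitly: chi2_null = sum(rnd_cp**2 / err**2) for the single-star model and
            # chi2_bin = sum((rnd_cp - bin_cp)**2 / err**2) = sum(resids**2 / err**2), so
            # chi2_bin - chi2_null = sum((resids**2 - rnd_cp**2) / err**2), as computed below.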
chi2_diff = np.sum(
(resids**2 - rnd_cp**2) / everything['error'][..., np.newaxis]**2, axis=tuple(range(ndim)))
# chi2_sngl = np.sum(np.sum((((rnd_cp)/ everything['error'][:,:,np.newaxis])**2),axis=0),axis=0)
# chi2_binr = np.sum(np.sum((((rnd_cp-bin_cp[:,:,np.newaxis]) / everything['error'][:,:,np.newaxis])**2),axis=0),axis=0)
# chi2_diff=chi2_binr-chi2_sngl
# this counts the number of detections
detec_count[j, k] = (chi2_diff < (-0.0)).sum()
# print('done one separation')
# print(err.shape,bin_cp.shape,rnd_cp.shape,everything['rands'].shape)
return detec_count
# =========================================================================
# =========================================================================
def detec_sim_loopfit_cov(everything):
'''Function for multiprocessing in detec_limits. Takes a
single separation and full angle, contrast lists.
For each sep,pa,contrast, it calculates 10,000 simulations of that binary
(adding noise to each). A detection is defined as having chi2_bin - chi2_null <0
    It then counts the number of detections over all noise realisations for each position angle and contrast.'''
detec_count = np.zeros((everything['nth'], everything['ncon']))
for j, th in enumerate(everything['ths']):
for k, con in enumerate(everything['cons']):
bin_cp = cp_model([everything['sep'], th, con],
everything['u'], everything['v'], everything['wavel'])
errs = everything['rands']
# binary cp model
# ----------------------
rnd_cp = bin_cp[:, :, np.newaxis] + errs
chi2_sngl = rnd_cp.transpose().dot(
everything['cov_inv']).dot(rnd_cp)
chi2_binr = errs.transpose().dot(everything['cov_inv']).dot(errs)
chi2_diff = chi2_binr-chi2_sngl
# this counts the number of detections
detec_count[j, k] = (chi2_diff < (-0.0)).sum()
# print('done one separation')
return detec_count
# =========================================================================
# =========================================================================
def detec_sim_loopfit_proj(everything):
'''Function for multiprocessing in detec_limits. Takes a
single separation and full angle, contrast lists. Made for projected data'''
detec_count = np.zeros((everything['nth'], everything['ncon']))
proj = everything['proj']
ndim = len(everything['error'].shape)
# This can be done once since it doesn't change with the binary params
# Note that error and rands are already in the projected basis
proj_resids = everything['error'][..., np.newaxis]*everything['rands']
for j, th in enumerate(everything['ths']):
for k, con in enumerate(everything['cons']):
bin_cp = cp_model([everything['sep'], th, con],
everything['u'], everything['v'], everything['wavel'])
# Project the data:
proj_bin_cp = project_cps(bin_cp, proj)
proj_rnd_cp = proj_bin_cp[..., np.newaxis] + proj_resids
# We want the difference in chi2 between the binary and null hypothesis.
# i.e. using rnd_cp for the single star and rnd_cp-bin_cp for the binary
# but this simplifies to the following equation
chi2_diff = np.sum((proj_resids**2 - proj_rnd_cp**2) / everything['error'][..., np.newaxis]**2,
axis=tuple(range(ndim)))
# chi2_sngl = np.sum(np.sum((((rnd_cp)/ everything['error'][:,:,np.newaxis])**2),axis=0),axis=0)
# chi2_binr = np.sum(np.sum((((rnd_cp-bin_cp[:,:,np.newaxis]) / everything['error'][:,:,np.newaxis])**2),axis=0),axis=0)
# chi2_diff=chi2_binr-chi2_sngl
# this counts the number of detections
detec_count[j, k] = (chi2_diff < (-0.0)).sum()
return detec_count
# =========================================================================
# =========================================================================
def detec_limits(cpo, nsim=2000, nsep=32, nth=20, ncon=32, smin='Default', smax='Default',
cmin=1.0001, cmax=500., extra_error=0, threads=0, save=False, projected=False,
use_cov=False, icpo=False, err_scale=1., no_plot=False,
linear_in_mags=False):
'''uses a Monte Carlo simulation to establish contrast-separation
detection limits given an array of standard deviations per closure phase.
Because different separation-contrast grid points are entirely
separate, this task is embarrassingly parallel. If you want to
speed up the calculation, use multiprocessing with a threads
argument equal to the number of available cores.
    Make nsep a multiple of threads! This uses the cores most efficiently.
Hyperthreading (2x processes per core) in my experience gets a ~20%
improvement in speed.
Written by <NAME> and <NAME>.
ACC added option for projected data and a few tweaks.
Use_cov option allows the random clps to be generated using the sample covariance matrix.
Note also that the calculation of the model closure phases could be done outside
the big loop, which would be efficient on CPU but not RAM. However, ACC
tried adding this and ran out of RAM (8GB) on GPI data (8880 clps), so removed it.'''
# Note that the accuracy of these sims are limited by the number of fake clps sets you take.
# e.g. if you only have 10 sims you can't get accuracy better than 10%
# (and even then, you will need several times more than this to get a robust 10% limit).
print('Detection limit resolution:', 100./(nsim*nth), '%')
if 100./(nsim*nth) > 0.01:
print('It is recommended that you increase nsim if you want robust 99.9% detection limits.')
# ------------------------
# first, load your data!
# ------------------------
if projected is True:
proj = cpo.proj
error = np.sqrt(cpo.proj_t3err**2+extra_error**2)*err_scale
n_clps = cpo.proj.shape[-1]
n_runs = cpo.n_runs
elif icpo is True:
proj = []
cpo.t3err = np.sqrt(cpo.t3err**2+extra_error**2)
error = cpo.t3err*err_scale
n_clps = cpo.n_clps
n_runs = cpo.n_runs
else:
proj = []
cpo.t3err = np.sqrt(cpo.t3err**2+extra_error**2)
error = cpo.t3err*err_scale
n_clps = cpo.ndata
n_runs = 1
if use_cov:
cov_inv = cpo.cov_inv
else:
cov_inv = []
# nwav = cpo.wavel.size
# ndata = cpo.u.shape[0]
# u=np.repeat(np.resize(cpo.u,[ndata,3,1]),nwav,2).ravel()
# v=np.repeat(np.resize(cpo.v,[ndata,3,1]),nwav,2).ravel()
# wavel=np.repeat(np.repeat(np.resize(cpo.wavel,[1,1,nwav]),ndata,0),3,1).ravel()
wavel = cpo.wavel
u = cpo.u
v = cpo.v
w = np.array(np.sqrt(u**2 + v**2))/np.median(wavel)
if smin == 'Default':
smin = rad2mas(1./4/np.max(w))
if smax == 'Default':
smax = rad2mas(1./np.min(w))
# ------------------------
# initialise Monte Carlo
# ------------------------
seps = smin + (smax-smin) *
|
np.linspace(0, 1, nsep)
|
numpy.linspace
|
import numpy as np
from scanorama import *
from scipy.stats import f_oneway
from sklearn.cluster import KMeans
import sys
def print_oneway(X, genes, ds_labels):
for gene_idx, gene in enumerate(genes):
ds_names = sorted(set(ds_labels))
dist = []
for ds in ds_names:
dist.append(X[ds_labels == ds, gene_idx])
sys.stdout.write('{}\t'.format(gene))
print('{}\t{}'.format(*f_oneway(*dist)))
def entropy_test(datasets_dimred, ds_labels):
ds_labels = np.array(ds_labels)
X_dimred =
|
np.concatenate(datasets_dimred)
|
numpy.concatenate
|
#!/usr/bin/env python
import sys
USAGE="""
"""
import numpy as np
file_input="data_5.txt"
fp_input=open(file_input)
lines=fp_input.readlines()
fp_input.close()
logP_list=[]
SAS_list=[]
QED_list=[]
MW_list=[]
TPSA_list=[]
for i in range(len(lines)):
line=lines[i]
if line[0]=='#':
continue
arr=line.split()
logP=float(arr[1])
SAS=float(arr[2])
QED=float(arr[3])
MW=float(arr[4])
TPSA=float(arr[5])
logP_list+=[logP]
SAS_list+=[SAS]
QED_list+=[QED]
MW_list+=[MW]
TPSA_list+=[TPSA]
logP_array=np.array(logP_list)
SAS_array=np.array(SAS_list)
QED_array=
|
np.array(QED_list)
|
numpy.array
|
from DecodingStrategies import VocabularyManager
from transformers import generation_tf_utils
from lemminflect import getAllInflections
import tensorflow as tf
import transformers
import numpy as np
import copy
OLD_REP_PENALTY_FUNC = generation_tf_utils._create_next_token_logits_penalties
TOKENIZER = transformers.AutoTokenizer.from_pretrained('gpt2')
VOCAB_MANAGER = VocabularyManager.VocabularyManager(TOKENIZER)
CURRENT_INFERENCE_CONTAINER = None
CURRENT_INCLUSION_WORDS = None
CURRENT_CONTEXT_LEN = None
CURRENT_SEQ_STATES = None
CURRENT_TARGET_LEN = None
CURRENT_NUM_BEAMS = None
class SequenceState:
class WordState:
class LemmaState:
def __init__(self, ids):
self.targetLen = len(ids)
self.counter = 0
self.ids = ids
self.canBeIncluded = False
self.wordType = VOCAB_MANAGER.getWordType(ids[0])
self.isSpacedNewWord = self.wordType != VocabularyManager.WordTypes.CONTINUATION
def getCurrentTargetToken(self):
return self.ids[self.counter]
def updateState(self, token):
if (token == self.getCurrentTargetToken()):
self.counter += 1
if (self.counter == self.targetLen):
self.canBeIncluded = True
self.counter = 0
return True
else:
self.counter = 0
return False
def hasStarted(self):
return self.counter > 0
def __init__(self, word):
self.allIDs = getPossibleIDsForWord(word)
self.lemmaStates = [self.LemmaState(ids) for ids in self.allIDs]
self.canBeIncluded = False
self.isIncluded = False
def updateState(self, recentToken):
for lemma in self.lemmaStates:
if (lemma.updateState(recentToken)):
self.canBeIncluded = True
def getBoostIDs(self, canIncludeWord=False):
# If lemma started, only recommend continuation of that lemma
if (self.hasStarted()):
return [lemma.getCurrentTargetToken() for lemma in self.lemmaStates if lemma.hasStarted()]
else:
if (canIncludeWord and self.canBeIncluded):
return []
if (canIncludeWord):
return [lemma.getCurrentTargetToken() for lemma in self.lemmaStates if lemma.isSpacedNewWord]
else:
return [lemma.getCurrentTargetToken() for lemma in self.lemmaStates]
def getBlockIDs(self):
allIDs = []
for lemma in self.lemmaStates:
allIDs.extend(lemma.ids)
return allIDs
def checkIfIncluded(self, recentToken):
if (VOCAB_MANAGER.getWordType(recentToken) != VocabularyManager.WordTypes.CONTINUATION):
self.isIncluded = True
self.canBeIncluded = False
def hasStarted(self):
return any([lemma.hasStarted() for lemma in self.lemmaStates])
def __init__(self, targetWords, targetLan, sampleID, sequenceID, numBeams=1, inclusionFactor=5.5, maxBoost=0.25,
useBoost=False, useBlocks=False):
self.wordStates = [self.WordState(w) for w in targetWords]
self.targetLan = targetLan
self.numBeams = numBeams
self.sampleID = sampleID
self.seqID = sequenceID
self.generatedLen = 0
self.finished = False
# Currently caching does not support multiple beams!
# assert numBeams == 1
self.checkFunc = self.getPenalties
# self.checkFunc = self.checkOnlyRecentSymbol if numBeams == 1 else self.checkFullSeq
self.inclusionFactor = inclusionFactor
self.maxBoost = maxBoost
self.useBoosts = useBoost
self.useBlocks = useBlocks
def checkIfFinished(self, recentToken):
if (self.finished):
CURRENT_INFERENCE_CONTAINER.promptBlockMask[self.seqID] = 0
return
for state in self.wordStates:
if (state.canBeIncluded):
state.checkIfIncluded(recentToken)
for state in self.wordStates:
if (state.isIncluded == False):
state.updateState(recentToken)
# allIncluded = all([state.isIncluded or state.canBeIncluded for state in self.wordStates])
allIncluded = all([state.isIncluded for state in self.wordStates])
# if (recentToken == 13 or allIncluded):
if (allIncluded):
CURRENT_INFERENCE_CONTAINER.promptBlockMask[self.seqID] = 0
self.finished = True
else:
CURRENT_INFERENCE_CONTAINER.promptBlockMask[self.seqID] = 1
def getPenalties(self, seqIDs):
if (self.finished):
return [], []
hasStartedWords = any([state.hasStarted() for state in self.wordStates])
hasCanBeIncluded = any([state.canBeIncluded for state in self.wordStates])
penalties, boosts = [], []
for state in self.wordStates:
if (state.isIncluded and self.useBlocks):
penalties.extend(state.getBlockIDs())
elif (self.useBoosts):
if (hasStartedWords):
if (state.hasStarted()):
boosts.extend(state.getBoostIDs(hasCanBeIncluded))
else:
boosts.extend(state.getBoostIDs(hasCanBeIncluded))
return penalties, boosts
def getCurrentlyIncludedWords(self, seqIDs):
return self.checkFunc(seqIDs)
def getBoostFactor(self, seqIDs):
completionFactor = len(seqIDs) / self.targetLan
boostFactor = np.exp(self.inclusionFactor * completionFactor) / np.exp(self.inclusionFactor)
# print("Boost Factor:", len(seqIDs), boostFactor)
return 1 + self.maxBoost * boostFactor
def getPossibleIDsForWord(word):
wordLemmas = [word]
for lemmas in getAllInflections(word).values():
wordLemmas.extend(lemmas)
wordLemmas = np.unique(wordLemmas)
contexts = (('', 0), ('The ', 1))
wordVersions = []
for lemma in wordLemmas:
wordVersions.extend([lemma.lower(), lemma.lower().capitalize(), lemma.upper()])
ids = []
for c, i in contexts:
for w in wordVersions:
ids.append(TOKENIZER.encode(c + w)[i:])
return ids
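# Example of what the helper above returns (illustrative, not part of the original code):
# getPossibleIDsForWord('run') collects GPT-2 token-id sequences for every inflection of
# "run" ("run", "runs", "ran", "running", ...), each in lower/capitalised/upper case and
# both with and without a leading-space context, so SequenceState can match the word
# wherever it appears in a generated sequence.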
def HFRepPenalty(input_ids, logits, repetition_penalty, returnTF=False):
# create logit penalties for already seen input_ids
token_penalties = np.ones(generation_tf_utils.shape_list(logits))
prev_input_ids = [np.unique(input_id) for input_id in input_ids.numpy()]
for i, prev_input_id in enumerate(prev_input_ids):
logit_penalized = logits[i].numpy()[prev_input_id]
logit_penalties =
|
np.zeros(logit_penalized.shape)
|
numpy.zeros
|
import time
import joblib
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from .base import Explainer
from .parsers import util
class LeafInfluence(Explainer):
"""
LeafInfluence: Explainer that adapts the influence functions method to tree ensembles.
Local-Influence Semantics
- Inf.(x_i, x_t) := L(y, F_{w/o x_i}(x_t)) - L(y, F(x_t))
- Pos. value means removing x_i increases the loss (i.e. adding x_i decreases loss) (helpful).
- Neg. value means removing x_i decreases the loss (i.e. adding x_i increases loss) (harmful).
Note
    - For GBDT, influence values are multiplied by -1; this makes the semantics of
LeafInfluence values more consistent with the other influence methods
that approximate changes in loss.
- Does NOT take class or instance weight into account.
Reference
- https://github.com/bsharchilev/influence_boosting/blob/master/influence_boosting/influence/leaf_influence.py
Paper
- https://arxiv.org/abs/1802.06640
Note
- Only supports GBDTs.
"""
def __init__(self, update_set=-1, atol=1e-5, n_jobs=1, logger=None):
"""
Input
update_set: int, No. neighboring leaf values to use for approximating leaf influence.
0: Use no other leaves, influence is computed independent of other trees.
-1: Use all other trees, most accurate but also most computationally expensive.
1+: Trade-off between accuracy and computational resources.
atol: float, Tolerance between actual and predicted leaf values.
n_jobs: int, No. processes to run in parallel.
-1 means use the no. of available CPU cores.
logger: object, If not None, output to logger.
"""
assert update_set >= -1
self.update_set = update_set
self.atol = atol
self.n_jobs = n_jobs
self.logger = logger
def fit(self, model, X, y):
"""
- Compute leaf values using Newton leaf estimation method;
make sure these match existing leaf values. Put into a 1d array.
- Copy leaf values and compute new 1d array of leaf values across all trees,
one new array resulting from removing each training example x in X.
- Should end up with a 2d array of shape=(no. train, no. leaves across all trees).
A bit memory intensive depending on the no. leaves, but should speed up the
explanation for ANY set of test examples. This array can also be saved to
disk to avoid recomputing these influence values.
Input
model: tree ensemble.
X: training data.
y: training targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
assert self.model_.tree_type != 'rf', 'RF not supported for LeafInfluence'
self.X_train_ = X.copy()
self.y_train_ = y.copy()
self.loss_fn_ = util.get_loss_fn(self.model_.objective, self.model_.n_class_, self.model_.factor)
# extract tree-ensemble metadata
trees = self.model_.trees
n_boost = self.model_.n_boost_
n_class = self.model_.n_class_
learning_rate = self.model_.learning_rate
l2_leaf_reg = self.model_.l2_leaf_reg
bias = self.model_.bias
# get no. leaves for each tree
leaf_counts = self.model_.get_leaf_counts() # shape=(no. boost, no. class)
# intermediate containers
current_approx = np.tile(bias, (X.shape[0], 1)).astype(util.dtype_t) # shape=(X.shape[0], no. class)
leaf2docs = [] # list of leaf_idx -> doc_ids dicts
n_prev_leaves = 0
# result containers
naive_gradient_addendum = np.zeros((X.shape[0], n_boost, n_class), dtype=util.dtype_t)
da_vector_multiplier = np.zeros((X.shape[0], n_boost, n_class), dtype=util.dtype_t)
denominator = np.zeros(np.sum(leaf_counts), dtype=util.dtype_t) # shape=(total no. leaves,)
leaf_values = np.zeros(np.sum(leaf_counts), dtype=util.dtype_t) # shape=(total no. leaves,)
n_not_close = 0
max_diff = 0
# save gradient information of leaf values for each tree
for boost_idx in range(n_boost):
doc_preds = np.zeros((X.shape[0], n_class), dtype=util.dtype_t)
# precompute gradient statistics
gradient = self.loss_fn_.gradient(y, current_approx) # shape=(X.shape[0], no. class)
hessian = self.loss_fn_.hessian(y, current_approx) # shape=(X.shape[0], no. class)
third = self.loss_fn_.third(y, current_approx) # shape=(X.shape[0], no. class)
for class_idx in range(n_class):
# get leaf values
leaf_count = leaf_counts[boost_idx, class_idx]
leaf_vals = trees[boost_idx, class_idx].get_leaf_values()
doc2leaf = trees[boost_idx, class_idx].apply(X)
leaf2doc = {}
# update predictions for this class
doc_preds[:, class_idx] = leaf_vals[doc2leaf]
# sanity check to make sure leaf values are correctly computed
# also need to save some statistics to update leaf values later
for leaf_idx in range(leaf_count):
doc_ids = np.where(doc2leaf == leaf_idx)[0]
leaf2doc[leaf_idx] = set(doc_ids)
# compute leaf values using gradients and hessians
leaf_enumerator = np.sum(gradient[doc_ids, class_idx])
leaf_denominator = np.sum(hessian[doc_ids, class_idx]) + l2_leaf_reg
leaf_prediction = -leaf_enumerator / leaf_denominator * learning_rate
# compare leaf values to actual leaf values
if not np.isclose(leaf_prediction, leaf_vals[leaf_idx], atol=1e-5):
n_not_close += 1
max_diff = max(max_diff, abs(leaf_prediction - leaf_vals[leaf_idx]))
# store statistics
denominator[n_prev_leaves + leaf_idx] = leaf_denominator
leaf_values[n_prev_leaves + leaf_idx] = leaf_prediction
n_prev_leaves += leaf_count # move to next set of tree leaves
leaf2docs.append(leaf2doc) # list of dicts, one per tree
# precompute influence statistics
naive_gradient_addendum[:, boost_idx, :] = hessian * doc_preds / learning_rate + gradient
da_vector_multiplier[:, boost_idx, :] = doc_preds / learning_rate * third + hessian
# n_prev_trees += n_class
current_approx += doc_preds # update approximation
# copy and compute new leaf values resulting from the removal of each x in X.
start = time.time()
if self.logger:
self.logger.info(f'\n[INFO] no. leaf vals not within 1e-5 tol.: {n_not_close:,}, '
f'max. diff.: {max_diff:.5f}')
self.logger.info(f'\n[INFO] computing alternate leaf values...')
# check predicted leaf values do not differ too much from actual model
if max_diff > self.atol:
raise ValueError(f'{max_diff:.5f} (max. diff.) > {self.atol} (tolerance)')
# select no. processes to run in parallel
if self.n_jobs == -1:
n_jobs = joblib.cpu_count()
else:
assert self.n_jobs >= 1
n_jobs = min(self.n_jobs, joblib.cpu_count())
if self.logger:
self.logger.info(f'[INFO] no. cpus: {n_jobs:,}...')
# process each training example removal in parallel
with joblib.Parallel(n_jobs=n_jobs) as parallel:
# result container
leaf_derivatives = np.zeros((0, np.sum(leaf_counts)), dtype=util.dtype_t)
# trackers
n_completed = 0
n_remaining = X.shape[0]
# get number of fits to perform for this iteration
while n_remaining > 0:
n = min(100, n_remaining)
results = parallel(joblib.delayed(_compute_leaf_derivatives)
(train_idx, leaf_counts, leaf2docs, denominator,
da_vector_multiplier, naive_gradient_addendum,
n_boost, n_class, X.shape[0], learning_rate,
self.update_set) for train_idx in range(n_completed,
n_completed + n))
# synchronization barrier
results = np.vstack(results) # shape=(n, 1 or X_test.shape[0])
leaf_derivatives = np.vstack([leaf_derivatives, results])
n_completed += n
n_remaining -= n
if self.logger:
cum_time = time.time() - start
self.logger.info(f'[INFO - LI] {n_completed:,} / {X.shape[0]:,}, cum. time: {cum_time:.3f}s')
# save results of this method
self.leaf_values_ = leaf_values # shape=(total no. leaves,)
self.leaf_derivatives_ = leaf_derivatives # shape=(no. train, total no. leaves)
self.leaf_counts_ = leaf_counts # shape=(no. boost, no. class)
self.bias_ = bias
self.n_boost_ = n_boost
self.n_class_ = n_class
self.n_train_ = X.shape[0]
return self
def get_local_influence(self, X, y):
"""
- Compute influence of each training example on each test example loss.
Return
- 2d array of shape=(no. train, X.shape[0])
* Train influences are in the same order as the original training order.
"""
X, y = util.check_data(X, y, objective=self.model_.objective)
influence = np.zeros((self.n_train_, X.shape[0], self.n_class_), dtype=util.dtype_t)
if self.logger:
self.logger.info('\n[INFO] computing influence for each test example...')
# compute influence of each training example on the test example
for remove_idx in range(self.n_train_):
influence[remove_idx] = self._loss_derivative(X, y, remove_idx)
# reshape result
influence = influence.sum(axis=2) # sum over class, shape=(no. train, X.shape[0])
return influence
# private
def _loss_derivative(self, X, y, remove_idx):
"""
Compute the influence on the set of examples (X, y) using the updated
set of leaf values from removing `remove_idx`.
Input
X: 2d array of test examples
y: 1d array of test targets.
remove_idx: index of removed train instance
Return
- Array of test influences of shape=(X.shape[0], no. class).
Note
- We multiply the result by -1 to have consistent semantics
with other influence methods that approx. loss.
"""
doc2leaf = self.model_.apply(X) # shape=(X.shape[0], no. boost, no. class)
og_pred = np.tile(self.bias_, (X.shape[0], 1)).astype(util.dtype_t) # shape=(X.shape[0], no. class)
new_pred = np.zeros((X.shape[0], self.n_class_), dtype=util.dtype_t) # shape=(X.shape[0], no. class)
# get prediction of each test example using the original and new leaf values
tree_idx = 0
n_prev_leaves = 0
for boost_idx in range(self.n_boost_): # per boosting iteration
for class_idx in range(self.n_class_): # per class
for test_idx in range(X.shape[0]): # per test example
leaf_idx = doc2leaf[test_idx][boost_idx][class_idx]
og_pred[test_idx, class_idx] += self.leaf_values_[n_prev_leaves + leaf_idx]
new_pred[test_idx, class_idx] += self.leaf_derivatives_[remove_idx][n_prev_leaves + leaf_idx]
n_prev_leaves += self.leaf_counts_[boost_idx, class_idx]
tree_idx += 1
return -self.loss_fn_.gradient(y, og_pred) * new_pred
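# --- Hypothetical usage sketch (assumes a fitted GBDT `model` plus train/test splits) ---
# explainer = LeafInfluence(update_set=-1, n_jobs=4).fit(model, X_train, y_train)
# influence = explainer.get_local_influence(X_test, y_test)  # shape=(no. train, no. test)
# Each entry approximates how strongly a training example influences a test example's loss.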
def _compute_leaf_derivatives(remove_idx, leaf_counts, leaf2docs, denominator,
da_vector_multiplier, naive_gradient_addendum,
n_boost, n_class, n_train, learning_rate, update_set):
"""
Compute leaf value derivatives based on the example being removed.
Return
- 1d array of leaf value derivatives of shape=(total no. leaves,).
Note
- Parallelizable method.
"""
leaf_derivatives = np.zeros(
|
np.sum(leaf_counts)
|
numpy.sum
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import obspy
from PIL import Image
from matplotlib.lines import Line2D
import os
from matplotlib.patches import Circle
from obspy.imaging.beachball import beach
import matplotlib.image as mpimg
import io
from sklearn import preprocessing
import pandas as pd
import matplotlib.cm as cm
import glob
from os.path import join as pjoin
from typing import List as _List, Union as _Union
import instaseis
from obspy import UTCDateTime as utct
from obspy.imaging.beachball import aux_plane
from sklearn.cluster import KMeans
from pyrocko import moment_tensor as mtm
import matplotlib.patches as mpatches
import glob
pyproj_datadir = os.environ["PROJ_LIB"]
from mpl_toolkits.basemap import Basemap
import re
from SS_MTI import Read_H5 as _ReadH5
from SS_MTI import MTDecompose as _MTDecompose
from SS_MTI import Forward as _Forward
from SS_MTI import PreProcess as _PreProcess
from SS_MTI import GreensFunctions as _GreensFunctions
from SS_MTI import RadiationPattern as _RadiationPattern
def Plot_veloc_models(Taup_model, depth_event=None, depth_syn=None):
depth = np.array([])
Vp = np.array([])
Vs = np.array([])
dens = np.array([])
for i, values in enumerate(Taup_model.model.s_mod.v_mod.layers):
depth = np.append(depth, values[0])
depth = np.append(depth, values[1])
Vp = np.append(Vp, values[2])
Vp = np.append(Vp, values[3])
Vs = np.append(Vs, values[4])
Vs = np.append(Vs, values[5])
dens = np.append(dens, values[6])
dens = np.append(dens, values[7])
fig, ax = plt.subplots(1, 3, sharey="all", sharex="all", figsize=(8, 6))
ax[0].plot(Vp, depth)
if depth_event is not None:
int_vp = interpolate.interp1d(depth, Vp)
event_vp = int_vp(depth_event)
ax[0].plot(event_vp, depth_event, "g*", markersize=15, label="Event Depth")
if depth_syn is not None:
for i in range(len(depth_syn)):
event_vp = int_vp(depth_syn[i])
if i == 0:
ax[0].plot(
event_vp, depth_syn[i], "r*", markersize=15, label="Synthetic Depth",
)
else:
ax[0].plot(event_vp, depth_syn[i], "r*", markersize=15, label="_hidden")
ax[0].set_title("VP", color="b", fontsize=20)
ax[0].set_ylabel("Depth [km]", fontsize=20)
ax[0].tick_params(axis="x", labelsize=18)
ax[0].tick_params(axis="y", labelsize=18)
# ax[0].ticklabel_format(style="sci", axis='y', scilimits=(-2, 2))
ax[0].grid(True)
# ax[0].set_ylim([500,0])
ax[0].set_xlim([0, 8])
ax[1].plot(Vs, depth, label="Shallow")
if depth_event is not None:
int_vs = interpolate.interp1d(depth, Vs)
event_vs = int_vs(depth_event)
ax[1].plot(event_vs, depth_event, "g*", markersize=15, label="Event Depth")
if depth_syn is not None:
for i in range(len(depth_syn)):
event_vs = int_vs(depth_syn[i])
if i == 0:
ax[1].plot(
event_vs, depth_syn[i], "r*", markersize=15, label="Synthetic Depth",
)
else:
ax[1].plot(event_vs, depth_syn[i], "r*", markersize=15, label="_hidden")
# ax[1].legend()
ax[1].set_title("VS", color="b", fontsize=20)
ax[1].tick_params(axis="x", labelsize=18)
ax[1].tick_params(axis="y", labelsize=18)
# ax[1].ticklabel_format(style="sci", axis='y', scilimits=(-2, 2))
ax[1].grid(True)
# ax[0].set_ylim([0,100])
ax[2].plot(dens, depth)
if depth_event is not None:
int_dens = interpolate.interp1d(depth, dens)
event_dens = int_dens(depth_event)
ax[2].plot(event_dens, depth_event, "g*", markersize=15, label="Event Depth")
if depth_syn is not None:
for i in range(len(depth_syn)):
event_dens = int_dens(depth_syn[i])
if i == 0:
ax[2].plot(
event_dens, depth_syn[i], "r*", markersize=15, label="Synthetic Depth",
)
else:
ax[2].plot(event_dens, depth_syn[i], "r*", markersize=15, label="_hidden")
ax[2].legend()
ax[2].set_title("Density", color="b", fontsize=20)
ax[2].tick_params(axis="x", labelsize=18)
ax[2].tick_params(axis="y", labelsize=18)
# ax[2].ticklabel_format(style="sci", axis='y', scilimits=(-2, 2))
ax[2].grid(True)
ax[2].set_ylim([0, 100])
ax[0].set_ylim(ax[0].get_ylim()[::-1])
return fig
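# Hypothetical usage sketch (assumes an obspy TauP velocity model with the layer layout
# used above; the model name and output path are placeholders):
# from obspy.taup import TauPyModel
# fig = Plot_veloc_models(TauPyModel("iasp91"), depth_event=30.0)
# fig.savefig("velocity_model.png")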
def Plot_trace_vs_depth_copy(
stream: obspy.Stream,
depth: float,
total_depths: int,
Ytick: float,
phase: str,
phase_arr: float,
t_pre: float = 10.0,
t_post: float = 50.0,
fig: plt.figure = None,
ax: plt.axes = None,
extra_phases: [str] = None,
extra_arrs: [float] = None,
phase_colors: [str] = None,
phase_labels: dict = None,
):
if fig is None and ax is None:
fig, ax = plt.subplots(
nrows=1,
ncols=len(stream),
figsize=(5 * len(stream), 2 * total_depths),
sharex="col",
sharey="all",
)
st = stream.copy()
global_max = max([tr.data.max() for tr in st])
global_min = min([tr.data.min() for tr in st])
y = global_max * 0.9 + Ytick
ymin = global_min + Ytick
ymax = global_max + Ytick
for i in range(len(stream)):
ax[i].plot(
st[i].times() - t_pre, st[i].data + Ytick, "k",
)
ax[i].plot(
[0, 0], [ymin, ymax], "grey",
)
ax[i].text(0, y, phase, verticalalignment="center", color="grey", fontsize=6)
if extra_phases is not None:
for k in range(len(extra_phases)):
if extra_arrs[k] is None:
continue
phase_t = extra_arrs[k]
if phase_colors is None:
y = global_max * 0.9 + Ytick
c = "grey"
else:
y = global_max * 0.9 + Ytick
ind = re.findall(r"\d+", extra_phases[k])
if ind:
if len(ind) == 2:
if int(ind[0]) < depth:
c = "blue"
y = global_min * 0.4 + Ytick
else:
c = "red"
y = global_min * 0.4 + Ytick
else:
c = phase_colors[k]
else:
c = phase_colors[k]
y = global_max * 0.9 + Ytick
ax[i].plot(
[phase_t, phase_t], [ymin, ymax], c,
)
ax[i].text(
phase_t + 0.1,
y,
extra_phases[k],
verticalalignment="center",
color=c,
fontsize=6,
rotation=90,
)
ax[i].set_xlim(-t_pre, t_post)
ax[i].set_title(f"{phase}-Phase channel:{st[i].stats.channel}")
if phase_colors is not None:
unique_colors = list(set(phase_colors))
# unique_list = [mpatches.Patch(color=c, label=phase_labels[c]) for c in phase_labels]
unique_list = [
Line2D([0], [0], color=c, linewidth=3, label=phase_labels[c]) for c in phase_labels
]
ax[0].legend(
handles=unique_list, prop={"size": 6}, loc="upper left", bbox_to_anchor=(0.0, 1.07),
)
# fig.legend(handles=unique_list, prop={"size": 6}, loc="upper left")
fig.text(0.04, 0.5, "Source Depth (km)", va="center", rotation="vertical")
fig.text(0.5, 0.04, "Time after arrival (s)", va="center")
return fig, ax
def Plot_trace_vs_depth(
stream: obspy.Stream,
phase: str,
total_depths: int,
Ytick: float,
t_pre: float = 10.0,
t_post: float = 50.0,
fig: plt.figure = None,
ax: plt.axes = None,
):
if fig is None and ax is None:
fig, ax = plt.subplots(
nrows=1,
ncols=len(stream),
figsize=(5 * len(stream), 2 * total_depths),
sharex="col",
sharey="all",
)
st = stream.copy()
global_max = max([tr.data.max() for tr in st])
global_min = min([tr.data.min() for tr in st])
y = global_max * 0.9 + Ytick
ymin = global_min + Ytick
ymax = global_max + Ytick
for i in range(len(stream)):
ax[i].plot(
st[i].times() - t_pre, st[i].data + Ytick, "k",
)
ax[i].set_xlim(-t_pre, t_post)
ax[i].set_title(f"{phase}-Phase channel:{st[i].stats.channel}")
fig.text(0.04, 0.5, "Source Depth (km)", va="center", rotation="vertical")
fig.text(0.5, 0.04, "Time after arrival (s)", va="center")
return fig, ax
def Plot_phases_vs_comp(
stream: obspy.Stream,
phase_cuts: [str],
phase_arrs: [float],
t_pre: float = 20.0,
t_post: float = 60.0,
extra_phases: [str] = None,
extra_arrs: [float] = None,
phase_colors: [str] = None,
phase_labels: dict = None,
):
""" Plotting function that cuts the stream the phases in phase_cuts"""
if not len(phase_cuts) == len(phase_arrs):
raise ValueError("phase_cut and phase_arrs should have same length")
if extra_phases is not None:
if not len(extra_phases) == len(extra_arrs):
raise ValueError("extra_phases and extra_arrs should have same length")
fig, ax = plt.subplots(
nrows=len(stream), ncols=len(phase_cuts), figsize=(18, 8), sharex="col", sharey="all",
)
for j in range(len(phase_cuts)):
st = stream.copy()
st.trim(
starttime=st[0].stats.starttime + phase_arrs[j] - t_pre,
endtime=st[0].stats.starttime + phase_arrs[j] + t_post,
)
for i in range(len(stream)):
ax[i, j].plot(
st[i].times() - t_pre, st[i].data, "k",
)
y = ax[i, j].get_ylim()[1] * 0.8
ax[i, j].axvline(x=0, c="grey")
ax[i, j].text(
0, y, phase_cuts[j], verticalalignment="center", color="grey", fontsize=6,
)
if extra_phases is not None:
for k in range(len(extra_phases)):
if extra_arrs[k] is None:
continue
if phase_colors is None:
c = "grey"
else:
c = phase_colors[k]
phase_t = extra_arrs[k] - phase_arrs[j]
ax[i, j].axvline(x=phase_t, c=c)
ax[i, j].text(
phase_t + 0.1,
y,
extra_phases[k],
verticalalignment="center",
color=c,
fontsize=6,
rotation=90,
)
ax[i, j].set_xlim(-t_pre, t_post)
if i == 0:
ax[i, j].set_title(f"{phase_cuts[j]}-phase")
if j == 0:
ax[i, j].set_ylabel(st[i].stats.channel)
ax[0, 0].set_ylim(-1, 1)
if phase_colors is not None:
unique_colors = list(set(phase_colors))
# unique_list = [mpatches.Patch(color=c, label=phase_labels[c]) for c in phase_labels]
unique_list = [
Line2D([0], [0], color=c, linewidth=3, label=phase_labels[c]) for c in phase_labels
]
ax[0, 0].legend(
handles=unique_list, prop={"size": 6}, loc="upper left", bbox_to_anchor=(0.0, 1.4),
)
fig.text(0.04, 0.5, "Displacement (m)", va="center", rotation="vertical")
fig.text(0.5, 0.04, "Time after arrival (s)", va="center")
return fig, ax
def Plot_event_location(
la_s: float, lo_s: float, la_r: float, lo_r: float, name: str = "test event"
):
# la_s = event.latitude
# lo_s = event.longitude
mars_dir = "/home/nienke/Documents/Research/Data/mars_pictures/Mars_lightgray.jpg"
fig = plt.figure(figsize=(10, 8))
# m = Basemap(projection='moll', lon_0=round(0.0))
m = Basemap(
projection="merc", llcrnrlat=-80, urcrnrlat=80, llcrnrlon=0, urcrnrlon=200, resolution="c",
)
# draw parallels and meridians.
par = np.arange(-90, 90, 30)
label_par = np.full(len(par), True, dtype=bool)
meridians = np.arange(-180, 180, 30)
label_meri = np.full(len(meridians), True, dtype=bool)
m.drawmeridians(np.arange(-180, 180, 30), labels=label_meri)
m.drawparallels(np.arange(-90, 90, 30), label=label_par)
m.warpimage(mars_dir)
mstatlon, mstatlat = m(lo_r, la_r)
m.plot(mstatlon, mstatlat, "k^", markersize=20, label="InSight")
EQlonA, EQlatA = m(lo_s, la_s)
# EQlonB, EQlatB = m(lo_sB, la_sB) # 235b
# EQlonC, EQlatC = m(lo_sC, la_sC)
# EQlonD, EQlatD = m(lo_sC, la_sC)
m.plot(EQlonA, EQlatA, "r*", markersize=20, zorder=10, label=name)
# m.plot(EQlonB, EQlatB, 'g*', markersize=20, zorder=10, label = event_B.name)
# m.plot(EQlonC, EQlatC, 'b*', markersize=20, zorder=10, label=event_C.name)
plt.legend(fontsize=20)
plt.tight_layout()
# plt.show()
# plt.savefig('Location_Event.pdf')
return fig
""" Plot beachballs """
def Get_bb_img(MT, color, alpha=1.0):
### FULL MOMENT TENSOR
img = None
buf = io.BytesIO()
fig_bb = plt.figure(figsize=(5, 5), dpi=200)
ax_bb_1 = fig_bb.add_axes([0.0, 0.0, 1.0, 1.0])
ax_bb_1.set_xticks([])
ax_bb_1.set_yticks([])
ax_bb_1.axis("off")
if np.count_nonzero(MT) < 6 and len(MT) == 6:
pass
else:
b = beach(
fm=MT, width=990, linewidth=0, facecolor=color, xy=(0, 0), axes=ax_bb_1, alpha=alpha,
)
ax_bb_1.add_collection(b)
ax_bb_1.set_xlim((-1, 1))
ax_bb_1.set_ylim((-1, 1))
buf.seek(0)
fig_bb.savefig(buf, format="png", dpi=200)
buf.seek(0)
if img is None:
img = mpimg.imread(buf)
else:
img += mpimg.imread(buf)
plt.close(fig_bb)
return img, buf
def Plot_Direct_BB(
MT_Full,
Eps,
MT_DC,
M0_DC,
MT_CLVD,
M0_CLVD,
azimuths,
inc_angles,
phase_names,
color,
height=None,
horizontal=False,
):
if horizontal:
width = 15.0
height = 6.0
axis_height = 5.0 / height
        resid_height = 1.0 - axis_height
        title_height = resid_height
axis_width = 5.0 / width
else:
        if height is None:
height = 19.0
axis_height = 5.0 / height
resid_height = 1.0 - 3.0 * axis_height
title_height = resid_height / 3.0
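    # Scale factors for the DC and CLVD beachballs: with Eps between 0 (pure DC) and
    # 0.5 (pure CLVD), sqrt(1 - Eps/0.5) and sqrt(Eps/0.5) shrink the plotted size so
    # it reflects each component's share of the decomposition (interpretive comment).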
DC_scal = np.sqrt(1 - Eps / 0.5)
CLVD_scal = np.sqrt(1 - (1 - Eps / 0.5))
## Full moment tensor:
img1, buf1 = Get_bb_img(MT_Full, color)
if horizontal:
fig = plt.figure(figsize=(width, height), dpi=200)
ax_1 = fig.add_axes([0.0, 0.0, axis_width, axis_height])
else:
fig = plt.figure(figsize=(5, height), dpi=200)
ax_1 = fig.add_axes([0.0, 2 * (axis_height + title_height), 1.0, axis_height])
if img1 is not None:
ax_1.imshow(img1 / np.max(img1.flatten()))
if horizontal:
ax_X = fig.add_axes([0.0, 0.0, axis_width, axis_height], label="Circle_ray")
else:
ax_X = fig.add_axes(
[0.0, 2 * (axis_height + title_height), 1.0, axis_height], label="Circle_ray",
)
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
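    # Piercing points of the named phases on the focal sphere: the incidence angle maps
    # linearly to radius (0 at the centre, 90 deg on the rim), and upgoing rays
    # (i > 90 deg) are folded back with the azimuth rotated by 180 deg (comment added;
    # inferred from the expressions below).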
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(np.deg2rad(a)) * i / 90.0
y = np.cos(np.deg2rad(a)) * i / 90.0
p = Circle(
(x, y), 0.015, linewidth=2, edgecolor="k", zorder=0, facecolor="k", fill=True,
)
ax_X.add_patch(p)
ax_X.text(x - 0.005, y + 0.03, s=phase, fontsize=40)
for a in [ax_1, ax_X]:
a.set_xticks([])
a.set_yticks([])
a.axis("off")
#
if horizontal:
title_1 = fig.add_axes([0.0, axis_height, axis_width, title_height])
else:
title_1 = fig.add_axes([0.0, 3 * axis_height + 2 * title_height, 1.0, title_height])
title_1.set_xticks([])
title_1.set_yticks([])
title_1.axis("off")
# title_1.text(
# 0.5,
# 0.2,
# "Full moment\n" r"$\epsilon=%.2f$" % Eps,
# ha="center",
# va="bottom",
# size="x-large",
# fontsize=50,
# )
    title_1.text(
        0.5, 0.2, r"$\epsilon=%.2f$" % Eps, ha="center", va="bottom", size="x-large", fontsize=50,
    )
########################
## DC moment tensor:
img2, buf2 = Get_bb_img(MT_DC, color)
if horizontal:
ax_2 = fig.add_axes(
[
axis_width + ((axis_width - (axis_width * DC_scal)) / 2),
0.0 + ((axis_height - (axis_height * DC_scal)) / 2),
axis_width * DC_scal,
axis_height * DC_scal,
]
)
else:
ax_2 = fig.add_axes([0.0, axis_height + title_height, 1.0, axis_height])
if img2 is not None:
ax_2.imshow(img2 / np.max(img2.flatten()))
if horizontal:
ax_X = fig.add_axes(
[
axis_width + ((axis_width - (axis_width * DC_scal)) / 2),
0.0 + ((axis_height - (axis_height * DC_scal)) / 2),
axis_width * DC_scal,
axis_height * DC_scal,
],
label="Circle_ray",
)
else:
ax_X = fig.add_axes(
[0.0, axis_height + title_height, 1.0, axis_height], label="Circle_ray"
)
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(np.deg2rad(a)) * i / 90.0
y = np.cos(np.deg2rad(a)) * i / 90.0
p = Circle(
(x, y), 0.015, linewidth=2, edgecolor="k", zorder=0, facecolor="k", fill=True,
)
ax_X.add_patch(p)
ax_X.text(x - 0.005, y + 0.03, s=phase, fontsize=40)
for a in [ax_2, ax_X]:
a.set_xticks([])
a.set_yticks([])
a.axis("off")
if horizontal:
title_2 = fig.add_axes([axis_width, axis_height, axis_width, title_height])
else:
title_2 = fig.add_axes([0.0, 2 * axis_height + title_height, 1.0, title_height])
title_2.set_xticks([])
title_2.set_yticks([])
title_2.axis("off")
# title_2.text(
# 0.5,
# 0.2,
# "Double-Couple \n M0: %.2e" % M0_DC,
# ha="center",
# va="bottom",
# size="x-large",
# fontsize=25,
# )
# title_2.text(0.5, 0.2, "Direct", ha="center", va="bottom", size="x-large", fontsize=40)
### CLVD
img3, buf3 = Get_bb_img(MT_CLVD, color)
if horizontal:
ax_3 = fig.add_axes(
[
2 * (axis_width) + ((axis_width - (axis_width * CLVD_scal)) / 2),
0.0 + ((axis_height - (axis_height * CLVD_scal)) / 2),
axis_width * CLVD_scal,
axis_height * CLVD_scal,
]
)
else:
ax_3 = fig.add_axes([0.0, 0.0, 1.0, axis_height])
if img3 is not None:
ax_3.imshow(img3 / np.max(img3.flatten()))
if horizontal:
ax_X = fig.add_axes(
[
2 * (axis_width) + ((axis_width - (axis_width * CLVD_scal)) / 2),
0.0 + ((axis_height - (axis_height * CLVD_scal)) / 2),
axis_width * CLVD_scal,
axis_height * CLVD_scal,
],
label="Circle_ray",
)
else:
ax_X = fig.add_axes([0.0, 0.0, 1.0, axis_height], label="Circle_ray")
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(np.deg2rad(a)) * i / 90.0
y = np.cos(np.deg2rad(a)) * i / 90.0
p = Circle(
(x, y), 0.015, linewidth=2, edgecolor="k", zorder=0, facecolor="k", fill=True,
)
ax_X.add_patch(p)
ax_X.text(x - 0.005, y + 0.03, s=phase, fontsize=40)
for a in [ax_3, ax_X]:
a.set_xticks([])
a.set_yticks([])
a.axis("off")
if horizontal:
title_3 = fig.add_axes([2 * axis_width, axis_height, axis_width, title_height])
else:
title_3 = fig.add_axes([0.0, axis_height, 1.0, title_height])
title_3.set_xticks([])
title_3.set_yticks([])
title_3.axis("off")
# title_3.text(
# 0.5,
# 0.2,
# "CLVD \n M0: %.2e" % M0_CLVD,
# ha="center",
# va="bottom",
# size="x-large",
# fontsize=25,
# )
return fig
def Plot_GS_BB(
strikes, dips, rakes, azimuths, inc_angles, phase_names, color, height=None, horizontal=True,
):
if horizontal:
width = 5.0
height = 6.0
axis_height = 5.0 / height
        resid_height = 1.0 - axis_height
        title_height = resid_height
axis_width = 5.0 / width
else:
        if height is None:
height = 19.0
axis_height = 5.0 / height
resid_height = 1.0 - 3.0 * axis_height
title_height = resid_height / 3.0
fig_bb = plt.figure(figsize=(5, 5), dpi=200)
ax_bb = fig_bb.add_axes([0.0, 0.0, 1.0, 1.0])
ax_bb.set_xticks([])
ax_bb.set_yticks([])
ax_bb.axis("off")
img = None
buf = io.BytesIO()
i = 0
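    # Each candidate (strike, dip, rake) is drawn onto the same axes; the figure is
    # rasterised to a PNG buffer and the image arrays accumulated, so overlapping
    # mechanisms appear darker (comment added; inferred from the loop below).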
for strike, dip, rake in zip(strikes, dips, rakes):
i += 1
b = beach(
fm=[strike, dip, rake],
width=990,
linewidth=0,
facecolor=color,
xy=(0, 0),
axes=ax_bb,
alpha=1,
zorder=i,
)
ax_bb.add_collection(b)
ax_bb.set_xlim((-1, 1))
ax_bb.set_ylim((-1, 1))
buf.seek(0)
fig_bb.savefig(buf, format="png", dpi=200)
buf.seek(0)
if img is None:
img = mpimg.imread(buf)
else:
img += mpimg.imread(buf)
plt.close(fig_bb)
if horizontal:
fig = plt.figure(figsize=(width, height), dpi=200)
ax_1 = fig.add_axes([0.0, 0.0, axis_width, axis_height])
else:
fig = plt.figure(figsize=(5, height), dpi=200)
ax_1 = fig.add_axes([0.0, 2 * (axis_height + title_height), 1.0, axis_height])
if img is not None:
ax_1.imshow(img / np.max(img.flatten()))
if horizontal:
ax_X = fig.add_axes([0.0, 0.0, axis_width, axis_height], label="Circle_ray")
else:
ax_X = fig.add_axes(
[0.0, 2 * (axis_height + title_height), 1.0, axis_height], label="Circle_ray",
)
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(np.deg2rad(a)) * i / 90.0
y = np.cos(np.deg2rad(a)) * i / 90.0
p = Circle(
(x, y), 0.015, linewidth=2, edgecolor="k", zorder=0, facecolor="k", fill=True,
)
ax_X.add_patch(p)
ax_X.text(x - 0.005, y + 0.03, s=phase, fontsize=40)
for a in [ax_1, ax_X]:
a.set_xticks([])
a.set_yticks([])
a.axis("off")
#
if horizontal:
title_1 = fig.add_axes([0.0, axis_height, axis_width, title_height])
else:
title_1 = fig.add_axes([0.0, 3 * axis_height + 2 * title_height, 1.0, title_height])
title_1.set_xticks([])
title_1.set_yticks([])
title_1.axis("off")
# title_1.text(0.5, 0.2, "Grid-Search", ha="center", va="bottom", size="x-large", fontsize=40)
return fig
""" Misfit analysis """
def plot_misfit_vs_depth(
baz: float,
save_paths: [str] = [],
depths: [int] = [45],
DOF: float = 700,
event_name: str = "S0235b",
misfit_name: str = "L2",
veloc_model: str = "TAYAK_BKE",
true_depth: float = None,
Moho: float = 30,
fmin: float = 1.0 / 10.0,
fmax: float = 1.0 / 2.0,
amount_of_phases: int = 5,
):
labels = ["", ""]
n_lowest = 1
fig, ax = plt.subplots(
nrows=3, ncols=1, sharex="all", figsize=(28, 17), gridspec_kw={"height_ratios": [4, 1, 1]},
)
# if event_name == "S0183a":
# fig, ax = plt.subplots(nrows=1, ncols=1, sharex="all", figsize=(28, 5.33),)
# else:
# fig, ax = plt.subplots(
# nrows=3,
# ncols=1,
# sharex="all",
# figsize=(28, 8),
# gridspec_kw={"height_ratios": [4, 1, 1]},
# )
# from matplotlib import gridspec
# gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
BB = []
Line_x = []
Line1_ymin = []
Line1_ymax = []
Line2_ymin = []
Line2_ymax = []
for i, save_path in enumerate(save_paths):
L2_GS = np.array([])
L2_Direct = np.array([])
Eps = np.array([])
cond_nrs = np.array([])
for idepth, depth in enumerate(depths):
print(i, depth)
GS_File = glob.glob(
pjoin(
save_path,
f"GS_{event_name}_{depth}_{fmin}_{fmax}_{misfit_name}_{veloc_model}.hdf5",
)
)[0]
if event_name == "S0183a":
pass
else:
Direct_File = glob.glob(
pjoin(
save_path,
f"Direct_{event_name}_{depth}_{fmin}_{fmax}_{misfit_name}_{veloc_model}.hdf5",
)
)[0]
## ================ READ GS =============================
(depth_GS, sdr, M0_GS, misfit_L2_GS,) = _ReadH5.Read_GS_h5(
Filename=GS_File, amount_of_phases=amount_of_phases
)
Total_L2_GS = np.sum(misfit_L2_GS, axis=1)
# Total_L2_norm_GS = np.sum(misfit_L2_norm_GS, axis=1)
# GOF = ( (Total_L2_GS - DOF ) * 100 ) / DOF
# GOF_GS = (Total_L2_norm_GS / DOF) * 100
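            # Goodness-of-fit used below: the summed L2 misfit over all phases divided by
            # the number of degrees of freedom (DOF), i.e. a reduced chi-square measure.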
GOF_GS = Total_L2_GS / DOF
lowest_indices = Total_L2_GS.argsort()[0:n_lowest]
# lowest_indices = GOF_GS.argsort()[0:n_lowest]
sdr = sdr[lowest_indices, :]
print("strike", sdr[0][0], "dip", sdr[0][1], "rake", sdr[0][2])
depth_GS = depth_GS[lowest_indices]
M0_GS = M0_GS[lowest_indices]
# shifts["P"] = shifts["P"][lowest_indices]
# shifts["S"] = shifts["S"][lowest_indices]
# L2_GS = np.append(L2_GS, Total_L2_GS[lowest_indices][0])
L2_GS = np.append(L2_GS, GOF_GS[lowest_indices][0])
# if depth == 8:
# lowest_indices[0] = lowest_indices[2]
# L2_GS = np.append(L2_GS, GOF_GS[lowest_indices][0])
# else:
# L2_GS = np.append(L2_GS, GOF_GS[lowest_indices][0])
if event_name == "S0183a":
pass
else:
## ============ Read Direct ========================
(
depth_Direct,
FULL_MT,
DC_MT,
CLVD_MT,
misfit_L2_Direct,
Epsilon,
M0,
M0_DC,
M0_CLVD,
angles,
cond_nr,
) = _ReadH5.Read_Direct_Inversion(Direct_File, amount_of_phases=amount_of_phases)
Total_L2_Direct = np.sum(misfit_L2_Direct)
# Total_L2_norm_Direct = np.sum(misfit_L2_norm_Direct, axis=1)
# GOF_Direct = (Total_L2_norm_Direct / DOF) * 100
GOF_Direct = Total_L2_Direct / DOF
# L2_Direct = np.append(L2_Direct, Total_L2_Direct[0])
L2_Direct = np.append(L2_Direct, GOF_Direct)
# ============== CREATE BEACHBALL PATCHES ===============
# y1 = Total_L2_GS[lowest_indices][0]
# y2 = Total_L2_Direct[0]
y2 = GOF_GS[lowest_indices][0]
if event_name == "S0183a":
ax_current = ax
pass
else:
y1 = GOF_Direct
ax_current = ax[0]
y_dist = np.log(np.abs(y1 - y2))
if y_dist < 100:
adding_value = 2e-1
Line_x.append(depth)
if y1 > y2:
y1 = y1 + adding_value
y2 = y2 - adding_value
Line1_ymin.append(GOF_Direct)
Line1_ymax.append(y1)
Line2_ymin.append(y2)
Line2_ymax.append(GOF_GS[lowest_indices][0])
else:
diff = y2 - y1
y1 = y1 + adding_value + diff
y2 = y2 - adding_value - diff
Line1_ymax.append(GOF_Direct)
Line1_ymin.append(y1)
Line2_ymax.append(y2)
Line2_ymin.append(GOF_GS[lowest_indices][0])
BB.append(
beach(
[sdr[0][0], sdr[0][1], sdr[0][2]],
xy=(depth_GS[0], y2),
width=40,
linewidth=1,
axes=ax_current,
)
)
if event_name == "S0183a":
pass
else:
BB.append(
beach(
DC_MT / M0_DC,
xy=(depth, y1),
width=40,
facecolor="r",
linewidth=1,
axes=ax_current,
)
)
Eps = np.append(Eps, Epsilon)
cond_nrs = np.append(cond_nrs, cond_nr)
ax_current.plot(depths, L2_GS, "-bo", label="Grid-Search %s" % labels[i], lw=i + 1)
if event_name == "S0183a":
pass
else:
ax_current.plot(depths, L2_Direct, "-ro", label="Direct %s" % labels[i], lw=i + 1)
if i == 0:
ax_current.axvline(x=Moho, c="grey", ls="dashed", label="Moho", lw=3)
# true_depth = 45.
if true_depth is not None:
ax_current.axvline(x=true_depth, c="green", ls="dotted", label="True Depth", lw=2)
if event_name == "S0183a":
pass
else:
ax[1].plot(depths, Eps, "--ko", label="Epsilon %s" % labels[i], lw=0.5)
if i == 0:
ax[1].axvline(x=Moho, c="grey", ls="dashed", lw=3)
if true_depth is not None:
ax[1].axvline(x=true_depth, c="green", ls="dotted", label="True Depth")
# ax[2].semilogy(depths, cond_nrs, "--ko", label="Condition number %s" % labels[i], lw=0.5)
ax[2].plot(depths, cond_nrs, "--ko", label="Condition number %s" % labels[i], lw=0.5)
ax[2].ticklabel_format(style="sci", axis="y", scilimits=(-2, 2))
# if event_name == "S0235b":
# ax[2].set_yticks([80, 100, 200, 400])
# elif event_name == "S0173a":
# ax[2].set_yticks([700, 1000, 1300])
if i == 0:
ax[2].axvline(x=Moho, c="grey", ls="dashed", lw=3)
if true_depth is not None:
ax[2].axvline(x=true_depth, c="green", ls="dotted", label="True Depth")
for iline in range(len(Line_x)):
ax_current.plot(
[Line_x[iline], Line_x[iline]],
[Line1_ymin[iline], Line1_ymax[iline]],
c="r",
ls="dashed",
alpha=0.5,
lw=0.5,
)
ax_current.plot(
[Line_x[iline], Line_x[iline]],
[Line2_ymin[iline], Line2_ymax[iline]],
c="b",
ls="dashed",
alpha=0.5,
lw=0.5,
)
for bb in BB:
ax_current.add_collection(bb)
ax_current.legend(prop={"size": 45}, loc="upper center", ncol=len(save_paths) + 1)
ax_current.set_ylabel(r"$\chi^2$", fontsize=45)
ax_current.tick_params(axis="both", which="major", labelsize=23)
ax_current.tick_params(axis="both", which="minor", labelsize=23)
ax_current.grid(True)
# ax_current.set_ylim(-0.5, 10)
if event_name == "S0183a":
pass
else:
extraticks = [0.1, 0.2, 0.3, 0.4]
ax[1].set_yticks(list(ax[1].get_yticks()) + extraticks)
# ax[1].legend(prop={"size": 15}, loc="upper right")
# ax[1].set_xlabel("Depth (km)", fontsize=20)
ax[1].set_ylabel(r"$\epsilon$", fontsize=45)
ax[1].tick_params(axis="both", which="major", labelsize=23)
ax[1].tick_params(axis="both", which="minor", labelsize=23)
ax[1].set_ylim(-0.05, 0.5)
ax[1].grid(True)
ax[2].set_xlabel("Depth (km)", fontsize=45)
ax[2].set_ylabel(r"$\kappa$", fontsize=45)
ax[2].tick_params(axis="both", which="major", labelsize=23)
ax[2].tick_params(axis="both", which="minor", labelsize=23)
# if event_name == "S0173a":
# ax[2].set_ylim(0.0, 2000.0)
ax[2].grid(True)
return fig
def plot_phases_vs_depth(
h5_file_folder: str,
method: str,
misfit_name: str,
fwd: _Forward._AbstractForward,
event: obspy.core.event.Event,
rec: instaseis.Receiver,
phases: [str],
components: [str],
t_pre: [float],
t_post: [float],
depths: [float],
phase_corrs: [float] = None,
fmin: float = None,
fmax: float = None,
zerophase: bool = None,
tstars: _Union[_List[float], _List[str]] = None,
color_plot: str = None,
pref_depth_start=[None],
pref_depth_end=[None],
):
assert method == "GS" or method == "Direct", "method needs to be either GS or Direct"
if tstars is None:
tstars = [None] * len(phases)
if phase_corrs is None:
phase_corrs = [0] * len(phases)
    if (fmin is None) or (fmax is None):
        print("Data will not be filtered because fmin or fmax is None")
filter_par = False
else:
filter_par = True
# TODO: IMPLEMENT LQT COORDINATE SYSTEM
LQT_value = False
baz = None
inc = None
SHIFT = 2.5
""" Process the observed data """
obs_tt = []
for i, phase in enumerate(phases):
obs_tt.append(utct(event.picks[phase]) - event.origin_time + phase_corrs[i])
t_post_new = [t + SHIFT for t in t_post]
st_obs, sigmas = _PreProcess.prepare_event_data(
event=event,
phases=phases,
components=components,
slice=True,
tts=obs_tt,
t_pre=t_pre,
t_post=t_post_new,
filter=filter_par,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
noise_level=False,
)
widths = [5] * len(phases) + [1]
fig, ax = plt.subplots(
nrows=1,
ncols=len(phases) + 1,
figsize=(4 * len(phases), 4 * (len(phases) + 1)),
sharex="col",
sharey="col",
gridspec_kw={"width_ratios": widths},
)
Yticks = np.arange(len(depths) + len(pref_depth_start)) * 1.8
obs = 0
    pref_depth_end.sort()
pref_depth_start.sort()
BB = []
extra_arrs = [[] for _ in range(len(depths))]
syn_tts = [[] for _ in range(len(depths))]
for idepth, depth in enumerate(depths):
print(depth)
if method == "GS":
if event.name == "S0173a":
MT_depth = 38
elif event.name == "S0235b":
MT_depth = 32 # 14
h5_file_path = pjoin(
h5_file_folder,
f"GS_{event.name}_{MT_depth}_{fmin}_{fmax}_{misfit_name}_{fwd.veloc_name}.hdf5",
)
depth_GS, sdr, M0_GS, misfit_L2_GS = _ReadH5.Read_GS_h5(
Filename=h5_file_path, amount_of_phases=5
)
Total_L2_GS = np.sum(misfit_L2_GS, axis=1)
n_lowest = 1
lowest_indices = Total_L2_GS.argsort()[0:n_lowest]
MT = sdr[lowest_indices, :][0]
# MT = [340.0, 90.0, 105.0]
print("strike", MT[0], "dip", MT[1], "rake", MT[2])
depth_GS = depth_GS[lowest_indices]
M0 = M0_GS[lowest_indices][0]
else:
h5_file_path = pjoin(
h5_file_folder,
f"Direct_{event.name}_{depth}_{fmin}_{fmax}_{misfit_name}_{fwd.veloc_name}.hdf5",
)
(
depth_Direct,
MT,
DC_MT,
CLVD_MT,
misfit_L2_Direct,
Epsilon,
M0,
M0_DC,
M0_CLVD,
angles,
cond_nr,
) = _ReadH5.Read_Direct_Inversion(h5_file_path, amount_of_phases=5)
Total_L2_Direct = np.sum(misfit_L2_Direct)
MT = DC_MT
""" Generate Green's functions per depth """
Underside_refl_src = []
Conversion_src = []
Conversion_rec = []
Reflection_phases = []
model = fwd.taup_veloc
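        # Build candidate phase names from the model's discontinuities (TauP-style
        # strings): Underside_refl_src collects underside reflections such as "p^24P",
        # while Conversion_src / Conversion_rec collect P<->S conversions at each
        # interface (comment added for readability).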
for i, values in enumerate(model.model.s_mod.critical_depths):
if values[0] < 1.0 or values[0] > 100.0:
continue
interface = str(int(values[0]))
if i > 1:
Reflection_phases.append(
"P"
+ interface
+ "s"
+ str(int(model.model.s_mod.critical_depths[i - 1][0]))
+ "p"
)
# if values[0] > 50.0 and "TAYAK" in model_name:
# continue
for down_phase in ["p^", "s^"]:
for up_phase in ["P", "S"]:
Underside_refl_src.append(down_phase + interface + up_phase)
Conversion_src.append("S" + interface + "P")
Conversion_src.append("P" + interface + "S")
Conversion_rec.append("P" + interface + "p")
Conversion_rec.append("P" + interface + "s")
# extra_phases = Conversion_src
if event.name == "S0173a":
add = ["p^24P", "p^10P", "s^24S", "s^10S", "P10s", "P24s"]
elif event.name == "S0235b":
add = ["p^24P", "p^10P", "s^24S", "s^10S", "P10s", "P24s"]
extra_phases = ["pP", "sS", "sP", "pS", "SS", "PP", "SSS", "PPP",] + add
if not os.path.exists(os.path.join(h5_file_folder, "ray_paths")):
os.mkdir(os.path.join(h5_file_folder, "ray_paths"))
for j, extraphase in enumerate(extra_phases):
arrivals = fwd.taup_veloc.get_ray_paths(
source_depth_in_km=depth,
distance_in_degree=event.distance,
phase_list=[extraphase],
)
if arrivals:
ax_ray = arrivals.plot_rays(plot_type="cartesian", show=False, legend=True)
plt.savefig(os.path.join(h5_file_folder, "ray_paths", f"d_{depth}_{extraphase}"))
plt.close()
extra_arr = fwd.get_phase_tt(phase=extraphase, depth=depth, distance=event.distance)
if extra_arr:
extra_arrs[idepth].append(extra_arr)
else:
extra_arrs[idepth].append(extra_arr)
for i, phase in enumerate(phases):
syn_tt = fwd.get_phase_tt(phase=phase, depth=depth, distance=event.distance)
syn_tts[idepth].append(syn_tt)
syn_GF = fwd.get_greens_functions(
comp=components[i],
depth=depth,
distance=event.distance,
lat_src=event.latitude,
lon_src=event.longitude,
rec=rec,
tstar=tstars[i],
LQT=LQT_value,
inc=inc,
baz=baz,
M0=1.0,
filter=filter_par,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
)
tr_syn = fwd.generate_synthetic_data(
st_GF=syn_GF,
focal_mech=MT,
M0=M0,
slice=True,
tt=syn_tt,
t_pre=t_pre[i],
t_post=t_post[i] + SHIFT,
)
color = "k"
lw = 1
            if not all(x is None for x in pref_depth_start) and not all(
                x is None for x in pref_depth_end
            ):
for d_range in range(len(pref_depth_end)):
if depth <= pref_depth_end[d_range] and depth >= pref_depth_start[d_range]:
color = "purple"
lw = 2
ytick = Yticks[idepth]
if (depth == depths[[np.abs(depths - x).argmin() + 1 for x in pref_depth_end]]).any():
obs = (
1
+ np.where(
depth == depths[[np.abs(depths - x).argmin() + 1 for x in pref_depth_end]]
)[0][0]
)
print(obs)
ytick = Yticks[idepth + obs - 1]
fig, ax[i] = Plot_phase_vs_depth_copy(
tr=st_obs[i],
depth=depth,
total_depths=len(depths) + len(pref_depth_start),
Ytick=ytick,
phase=phase,
t_pre=t_pre[i],
t_post=t_post[i],
fig=fig,
ax=ax[i],
extra_phases=None,
extra_arrs=None,
color="b",
linewidth=2,
SHIFT=SHIFT,
)
ytick = Yticks[idepth + obs]
elif (depth > depths[[np.abs(depths - x).argmin() + 1 for x in pref_depth_end]]).any():
ytick = Yticks[idepth + obs]
fig, ax[i] = Plot_phase_vs_depth_copy(
tr=tr_syn,
depth=depth,
total_depths=len(depths) + len(pref_depth_start),
Ytick=ytick,
phase=phase,
t_pre=t_pre[i],
t_post=t_post[i],
fig=fig,
ax=ax[i],
extra_phases=extra_phases,
extra_arrs=extra_arrs[idepth],
color=color,
linewidth=lw,
SHIFT=SHIFT,
)
BB.append(
beach(
MT,
xy=(0, ytick),
width=20,
linewidth=1,
alpha=0.5,
facecolor="r",
axes=ax[-1],
)
)
delta = Yticks[1] - Yticks[0]
idxs = [np.abs(depths - x).argmin() + 1 for x in pref_depth_end]
depths_ins = depths
for idx_i, idx in enumerate(idxs):
depths_ins = np.insert(depths_ins, idx + idx_i, 70)
for idx_i, idx in enumerate(idxs):
for i, phase in enumerate(phases):
if idx_i > 0:
fill_zero = idxs[idx_i]
else:
fill_zero = 0
if idx_i == len(idxs) - 1:
fill_one = -1
fill_zero = idxs[idx_i] + len(idxs)
else:
fill_one = idxs[idx_i + 1]
if phase == "P":
ymax = ax[i].get_ylim()[1]
if event.name == "S0173a":
if idx_i == 0:
ax[i].fill(
[17 - SHIFT, 40 - SHIFT, 40 - SHIFT, 17 - SHIFT],
[
Yticks[fill_zero] - 0.4 * delta,
Yticks[fill_zero] - 0.4 * delta,
Yticks[idx - (idx_i + 1)] + 0.4 * delta,
Yticks[idx - (idx_i + 1)] + 0.4 * delta,
],
facecolor="green",
alpha=0.2,
)
ax[i].fill(
[17 - SHIFT, 40 - SHIFT, 40 - SHIFT, 17 - SHIFT],
[
Yticks[idx + (idx_i + 1)] - 0.4 * delta,
Yticks[idx + (idx_i + 1)] - 0.4 * delta,
Yticks[fill_one] + 0.4 * delta,
Yticks[fill_one] + 0.4 * delta,
],
facecolor="green",
alpha=0.2,
)
ax[i].text(
17,
ymax * 0.8,
"Glitch",
verticalalignment="center",
color="green",
fontsize=8,
)
# fig, ax[i] = Plot_phase_vs_depth_copy(
# tr=st_obs[i],
# depth=depth,
# total_depths=len(depths) + 1,
# Ytick=Yticks[idepth + 1],
# phase=phase,
# t_pre=t_pre[i],
# t_post=t_post[i],
# fig=fig,
# ax=ax[i],
# extra_phases=None,
# extra_arrs=None,
# color="b",
# )
# ax[i].axvline(0.0, c="dimgrey")
if idx_i == 0:
ax[i].plot(
[0, 0],
[Yticks[fill_zero] - 0.4 * delta, Yticks[idx - 1] + 0.4 * delta],
"dimgrey",
lw=1,
)
ax[i].text(
0.1,
Yticks[idx - 1] + 0.4 * delta,
phase,
verticalalignment="center",
color="dimgrey",
fontsize=15,
weight="bold",
)
ax[i].plot(
[0, 0],
[Yticks[idx + (idx_i + 1)] - 0.4 * delta, Yticks[fill_one] + 0.4 * delta],
"dimgrey",
lw=1,
)
ax[i].text(
0.1,
Yticks[-1] + 0.4 * delta,
phase,
verticalalignment="center",
color="dimgrey",
fontsize=15,
weight="bold",
)
for k in range(len(extra_phases)):
""" Below observed plot: """
if idx_i == 0:
                    syn_tt_depth = np.asarray(
                        [arr[i] for arr in syn_tts[fill_zero:idx]], dtype=float
                    )
                    # syn_tt_depth = np.asarray([arr[i] for arr in syn_tts], dtype=float)
                    x = np.asarray([arr[k] for arr in extra_arrs[fill_zero:idx]], dtype=float)
x = x - syn_tt_depth
if np.any(x > t_post[i]):
x[x > t_post[i]] = None
if np.any(x < -t_pre[i]):
x[x < -t_pre[i]] = None
# y = np.asarray(Yticks[:-1])
y = np.asarray(Yticks[fill_zero:idx])
y = y[~np.isnan(x)]
x = x[~np.isnan(x)]
if x.size == 0:
continue
                    rotn = np.degrees(np.arctan2(y[-1:] - y[-2:-1], x[-1:] - x[-2:-1]))
if rotn.size == 0:
trans_angle = 90
ax[i].plot(
[x[0], x[0]], [y[0] - 0.4 * delta, y[0] + 0.4 * delta], "dimgrey", lw=1
)
else:
l2 = np.array((x[-1], y[-1]))
rotation = rotn[-1]
trans_angle = plt.gca().transData.transform_angles(
np.array((rotation,)), l2.reshape((1, 2))
)[0]
ax[i].plot(x, y, "-", c="dimgrey", lw=1)
ax[i].text(
x[-1],
y[-1],
extra_phases[k],
verticalalignment="center",
color="dimgrey",
fontsize=15,
rotation=trans_angle,
weight="bold",
)
""" Above observed plot"""
if idx_i == len(idxs) - 1:
                    syn_tt_depth = np.asarray([arr[i] for arr in syn_tts[idx:]], dtype=float)
                    # syn_tt_depth = np.asarray([arr[i] for arr in syn_tts], dtype=float)
                    x = np.asarray([arr[k] for arr in extra_arrs[idx:]], dtype=float)
                else:
                    syn_tt_depth = np.asarray(
                        [arr[i] for arr in syn_tts[idx:fill_one]], dtype=float
                    )
                    # syn_tt_depth = np.asarray([arr[i] for arr in syn_tts], dtype=float)
                    x = np.asarray([arr[k] for arr in extra_arrs[idx:fill_one]], dtype=float)
x = x - syn_tt_depth
if np.any(x > t_post[i]):
x[x > t_post[i]] = None
if np.any(x < -t_pre[i]):
x[x < -t_pre[i]] = None
print(idx_i)
if idx_i == len(idxs) - 1:
y = np.asarray(Yticks[idx + (idx_i + 1) :])
else:
y = np.asarray(Yticks[idx + (idx_i + 1) : fill_one + (idx_i + 1)])
y = y[~np.isnan(x)]
x = x[~np.isnan(x)]
if x.size == 0:
continue
                rotn = np.degrees(np.arctan2(y[-1:] - y[-2:-1], x[-1:] - x[-2:-1]))
if rotn.size == 0:
trans_angle = 90
ax[i].plot(
[x[0], x[0]], [y[0] - 0.4 * delta, y[0] + 0.4 * delta], "dimgrey", lw=1
)
else:
l2 = np.array((x[-1], y[-1]))
rotation = rotn[-1]
trans_angle = plt.gca().transData.transform_angles(
np.array((rotation,)), l2.reshape((1, 2))
)[0]
ax[i].plot(x, y, "-", c="dimgrey", lw=1)
ax[i].text(
x[-1],
y[-1],
extra_phases[k],
verticalalignment="center",
color="dimgrey",
fontsize=15,
rotation=trans_angle,
weight="bold",
)
# depths_ins = np.insert(depths, idx + idx_i, 70)
ax[i].yaxis.set_ticks(Yticks)
ax[i].set_yticklabels(depths_ins)
yticks = ax[i].yaxis.get_major_ticks()
yticks[idx + idx_i].set_visible(False)
ax[i].set_ylim(Yticks[0] - delta, Yticks[-1] + delta)
if not i == 0:
ax[i].get_yaxis().set_visible(False)
# # for i in range(len(phases)):
ax[-1].yaxis.set_ticks(Yticks)
ax[-1].set_yticklabels(depths_ins)
# yticks = ax[-1].yaxis.get_major_ticks()
# yticks[idx].set_visible(False)
ax[-1].set_ylim(Yticks[0] - delta, Yticks[-1] + delta)
ax[-1].axis("off")
ax[-1].set_xlim(-0.15, 0.15)
for bb in BB:
ax[-1].add_collection(bb)
fig.text(
0.5,
0.95,
f"Event {event.name}",
ha="center",
va="bottom",
size="x-large",
color="blue",
fontsize=25,
)
fig.text(0.04, 0.5, "Source Depth (km)", va="center", rotation="vertical", fontsize=25)
fig.text(0.5, 0.04, "Time after arrival (s)", va="center", fontsize=25)
return fig
def Plot_phase_vs_depth_copy(
tr: obspy.Trace,
depth: float,
total_depths: int,
Ytick: float,
phase: str,
t_pre: float = 10.0,
t_post: float = 50.0,
fig: plt.figure = None,
ax: plt.axes = None,
extra_phases: [str] = None,
extra_arrs: [float] = None,
color: str = None,
linewidth: float = 1,
SHIFT: float = 0.0,
):
if color is None:
color = "k"
st = tr.copy()
# norm = st.slice(
# starttime=st.stats.starttime, endtime=st.stats.starttime + 7.0
# ).max() # TODO: this value needs to come from the function
# st.data = st.data / norm
st.normalize()
global_max = st.data.max()
global_min = st.data.min()
y = global_max * 0.9 + Ytick
ymin = global_min + Ytick
ymax = global_max + Ytick
ax.plot(st.times() - t_pre - SHIFT, st.data + Ytick, color, lw=linewidth)
# ax.plot(
# [0, 0], [ymin, ymax], "grey",
# )
# ax.text(0, y, phase, verticalalignment="center", color="grey", fontsize=6)
if extra_phases is not None:
for k in range(len(extra_phases)):
if extra_arrs[k] is None or extra_arrs[k] > t_post or extra_arrs[k] < -t_pre:
continue
phase_t = extra_arrs[k]
y = global_max * 0.9 + Ytick
c = "grey"
ax.plot(
[phase_t, phase_t], [ymin, ymax], c,
)
ax.text(
phase_t + 0.1,
y,
extra_phases[k],
verticalalignment="center",
color=c,
fontsize=6,
rotation=90,
)
ax.set_xlim(-t_pre, t_post)
ax.set_ylim(ymin, ymax)
ax.set_title(f"{phase}-Phase channel:{st.stats.channel[-1]}")
# fig.legend(handles=unique_list, prop={"size": 6}, loc="upper left")
return fig, ax
def post_waveform_plotting(
h5_file_folder: str,
method: str,
misfit_name: str,
misfit_weight_len: float,
fwd: _Forward._AbstractForward,
event: obspy.core.event.Event,
rec: instaseis.Receiver,
phases: [str],
components: [str],
t_pre: [float],
t_post: [float],
depths: [float],
phase_corrs: [float] = None,
fmin: float = None,
fmax: float = None,
zerophase: bool = None,
tstars: _Union[_List[float], _List[str]] = None,
plot_extra_phases: [str] = None,
Ylims: [float] = None,
Return_Fig: bool = False,
):
if tstars is None:
tstars = [None] * len(phases)
if phase_corrs is None:
phase_corrs = [0] * len(phases)
    if (fmin is None) or (fmax is None):
        print("Data will not be filtered because fmin or fmax is None")
filter_par = False
else:
filter_par = True
# TODO: IMPLEMENT LQT COORDINATE SYSTEM
LQT_value = False
baz = None
inc = None
""" PRE-PROCESS THE OBSERVED Travel_times """
obs_tt = []
for i, phase in enumerate(phases):
obs_tt.append(utct(event.picks[phase]) - event.origin_time + phase_corrs[i])
for idepth, depth in enumerate(depths):
print(depth)
if method == "GS":
color_plot = "b"
h5_file_path = pjoin(
h5_file_folder,
f"GS_{event.name}_{depth}_{fmin}_{fmax}_{misfit_name}_{fwd.veloc_name}.hdf5",
)
depth_GS, sdr, M0_GS, misfit_L2_GS = _ReadH5.Read_GS_h5(
Filename=h5_file_path, amount_of_phases=len(phases)
)
Total_L2_GS = np.sum(misfit_L2_GS, axis=1)
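            # Select every mechanism whose total misfit lies within 5% of the best one;
            # only the single best of that set is used here (the [0:1] slice below).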
lowest_ind = Total_L2_GS.argsort()
Total_L2_GS.sort()
misfit_low = Total_L2_GS[:] - Total_L2_GS[0]
uncert = 0.05 * Total_L2_GS[0]
inds = np.where(misfit_low < uncert)
lowest_indices = lowest_ind[inds][0:1]
print(np.sum(misfit_L2_GS, axis=1)[lowest_indices])
# n_lowest = int(len(Total_L2_GS) * 0.05)
# lowest_indices = Total_L2_GS.argsort()[0:n_lowest:50]
# n_lowest = 10
# lowest_indices = Total_L2_GS.argsort()[0:n_lowest]
MT = sdr[lowest_indices, :]
depth_GS = depth_GS[lowest_indices]
M0 = M0_GS[lowest_indices]
""" Calculate take-off angles"""
takeoff_angles = ["P", "S", "pP"]
angles = []
for phase in takeoff_angles:
angles.append(
fwd.get_phase_tt(
phase=phase, depth=depth, distance=event.distance, takeoffs=True
)
)
""" Beachball plot """
fig = Plot_GS_BB(
MT[:, 0],
MT[:, 1],
MT[:, 2],
azimuths=[event.az, event.az, event.az],
inc_angles=angles,
phase_names=takeoff_angles,
color=color_plot,
)
plt.savefig(
pjoin(
h5_file_folder,
f"GS_BBB__{event.name}_{depth}_{misfit_name}_{fwd.veloc_name}_Post.pdf",
),
dpi=300,
)
plt.close()
else:
h5_file_path = pjoin(
h5_file_folder,
f"Direct_{event.name}_{depth}_{fmin}_{fmax}_{misfit_name}_{fwd.veloc_name}.hdf5",
)
(
depth_Direct,
MT,
DC_MT,
CLVD_MT,
misfit_L2_Direct,
Epsilon,
M0,
M0_DC,
M0_CLVD,
angles,
cond_nr,
) = _ReadH5.Read_Direct_Inversion(h5_file_path, amount_of_phases=5)
Total_L2_Direct = np.sum(misfit_L2_Direct)
color_plot = "r"
MT_FULL_ = MT # np.array([MT[0], MT[2], MT[1], MT[4], MT[3], MT[5],])
DC_MT_ = (
DC_MT # np.array([DC_MT[0], DC_MT[2], DC_MT[1], DC_MT[4], DC_MT[3], DC_MT[5],])
)
CLVD_MT_ = CLVD_MT # np.array(
# [CLVD_MT[0], CLVD_MT[2], CLVD_MT[1], CLVD_MT[4], CLVD_MT[3], CLVD_MT[5],]
# )
fig = Plot_Direct_BB(
MT_Full=MT_FULL_ / M0,
Eps=Epsilon,
MT_DC=DC_MT_ / M0_DC,
M0_DC=M0_DC,
MT_CLVD=CLVD_MT_ / M0_CLVD,
M0_CLVD=M0_CLVD,
azimuths=[event.az, event.az, event.az],
inc_angles=angles,
phase_names=["P", "S", "pP"],
color=color_plot,
height=19.0,
horizontal=True,
)
plt.savefig(
pjoin(
h5_file_folder,
f"Direct_BB_{event.name}_{depth}_{misfit_name}_{fwd.veloc_name}_Post.pdf",
),
dpi=300,
)
plt.close()
MT = np.expand_dims(DC_MT, axis=0)
M0 = np.expand_dims(M0_DC, axis=0)
# """ Generate Green's functions per depth """
# syn_tts = []
# syn_GFs = []
# for i, phase in enumerate(phases):
# syn_tt = fwd.get_phase_tt(phase=phase, depth=depth, distance=event.distance)
# syn_GF = fwd.get_greens_functions(
# comp=components[i],
# depth=depth,
# distance=event.distance,
# lat_src=event.latitude,
# lon_src=event.longitude,
# rec=rec,
# tstar=tstars[i],
# LQT=LQT_value,
# inc=inc,
# baz=baz,
# M0=1.0,
# filter=filter_par,
# fmin=fmin,
# fmax=fmax,
# zerophase=zerophase,
# )
# syn_GFs.append(syn_GF)
# syn_tts.append(syn_tt)
# if plot_extra_phases is not None:
# extra_arrs = []
# for j, extraphase in enumerate(plot_extra_phases):
# arr = fwd.get_phase_tt(phase=extraphase, depth=depth, distance=event.distance)
# extra_arrs.append(arr)
# else:
# extra_arrs = None
# fig = waveform_plot(
# syn_GFs=syn_GFs,
# syn_tts=syn_tts,
# obs_tts=obs_tt,
# fwd=fwd,
# misfit_weight_len=misfit_weight_len,
# event=event,
# phases=phases,
# components=components,
# t_pre=t_pre,
# t_post=t_post,
# MTs=MT,
# M0s=M0,
# fmin=fmin,
# fmax=fmax,
# zerophase=zerophase,
# plot_extra_phases=plot_extra_phases,
# extra_arrs=extra_arrs,
# color_plot=color_plot,
# Ylims=Ylims,
# )
# if Return_Fig:
# return fig
# else:
# plt.savefig(
# pjoin(
# h5_file_folder,
# f"{method}_waveforms_{event.name}_{depth}_{misfit_name}_{fwd.veloc_name}_Post.pdf",
# ),
# dpi=300,
# )
# plt.close()
def post_waveform_plotting_COMBINED(
h5_file_folder: str,
misfit_name: str,
misfit_weight_len: float,
fwd: _Forward._AbstractForward,
event: obspy.core.event.Event,
rec: instaseis.Receiver,
phases: [str],
components: [str],
t_pre: [float],
t_post: [float],
depths: [float],
phase_corrs: [float] = None,
fmin: float = None,
fmax: float = None,
zerophase: bool = None,
tstars: _Union[_List[float], _List[str]] = None,
plot_extra_phases: [str] = None,
Ylims: [float] = None,
Return_Fig: bool = False,
):
if tstars is None:
tstars = [None] * len(phases)
if phase_corrs is None:
phase_corrs = [0] * len(phases)
    if (fmin is None) or (fmax is None):
        print("Data will not be filtered because fmin or fmax is None")
filter_par = False
else:
filter_par = True
# TODO: IMPLEMENT LQT COORDINATE SYSTEM
LQT_value = False
baz = None
inc = None
""" PRE-PROCESS THE OBSERVED Travel_times """
obs_tt = []
for i, phase in enumerate(phases):
obs_tt.append(utct(event.picks[phase]) - event.origin_time + phase_corrs[i])
for idepth, depth in enumerate(depths):
print(depth)
color_plot = "b"
h5_file_path = pjoin(
h5_file_folder,
f"GS_{event.name}_{depth}_{fmin}_{fmax}_{misfit_name}_{fwd.veloc_name}.hdf5",
)
depth_GS, sdr, M0_GS, misfit_L2_GS = _ReadH5.Read_GS_h5(
Filename=h5_file_path, amount_of_phases=5
)
Total_L2_GS = np.sum(misfit_L2_GS, axis=1)
lowest_ind = Total_L2_GS.argsort()
Total_L2_GS.sort()
misfit_low = Total_L2_GS[:] - Total_L2_GS[0]
uncert = 0.05 * Total_L2_GS[0]
inds = np.where(misfit_low < uncert)
lowest_indices = lowest_ind[inds][:50]
# n_lowest = int(len(Total_L2_GS) * 0.05)
# lowest_indices = Total_L2_GS.argsort()[0:n_lowest:50]
# n_lowest = 10
# lowest_indices = Total_L2_GS.argsort()[0:n_lowest]
MT = sdr[lowest_indices, :]
depth_GS = depth_GS[lowest_indices]
M0 = M0_GS[lowest_indices]
""" Calculate take-off angles"""
takeoff_angles = ["P", "S", "pP"]
angles = []
for phase in takeoff_angles:
angles.append(
fwd.get_phase_tt(phase=phase, depth=depth, distance=event.distance, takeoffs=True)
)
""" Beachball plot """
fig = Plot_GS_BB(
MT[:, 0],
MT[:, 1],
MT[:, 2],
azimuths=[event.az, event.az, event.az],
inc_angles=angles,
phase_names=takeoff_angles,
color=color_plot,
)
plt.savefig(
pjoin(
h5_file_folder,
f"GS_BBB__{event.name}_{depth}_{misfit_name}_{fwd.veloc_name}_Post.svg",
),
dpi=300,
)
plt.close()
""" Generate Green's functions per depth """
syn_tts = []
syn_GFs = []
for i, phase in enumerate(phases):
syn_tt = fwd.get_phase_tt(phase=phase, depth=depth, distance=event.distance)
syn_GF = fwd.get_greens_functions(
comp=components[i],
depth=depth,
distance=event.distance,
lat_src=event.latitude,
lon_src=event.longitude,
rec=rec,
tstar=tstars[i],
LQT=LQT_value,
inc=inc,
baz=baz,
M0=1.0,
filter=filter_par,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
)
syn_GFs.append(syn_GF)
syn_tts.append(syn_tt)
if plot_extra_phases is not None:
extra_arrs = []
for j, extraphase in enumerate(plot_extra_phases):
arr = fwd.get_phase_tt(phase=extraphase, depth=depth, distance=event.distance)
extra_arrs.append(arr)
else:
extra_arrs = None
fig, ax = waveform_plot(
syn_GFs=syn_GFs,
syn_tts=syn_tts,
obs_tts=obs_tt,
fwd=fwd,
misfit_weight_len=misfit_weight_len,
event=event,
phases=phases,
components=components,
t_pre=t_pre,
t_post=t_post,
MTs=MT,
M0s=M0,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
plot_extra_phases=plot_extra_phases,
extra_arrs=extra_arrs,
color_plot=color_plot,
Ylims=Ylims,
)
""" DIRECT: """
h5_file_path = pjoin(
h5_file_folder,
f"Direct_{event.name}_{depth}_{fmin}_{fmax}_{misfit_name}_{fwd.veloc_name}.hdf5",
)
(
depth_Direct,
MT,
DC_MT,
CLVD_MT,
misfit_L2_Direct,
Epsilon,
M0,
M0_DC,
M0_CLVD,
angles,
cond_nr,
) = _ReadH5.Read_Direct_Inversion(h5_file_path, amount_of_phases=5)
Total_L2_Direct = np.sum(misfit_L2_Direct)
color_plot = "r"
MT_FULL_ = MT # np.array([MT[0], MT[2], MT[1], MT[4], MT[3], MT[5],])
DC_MT_ = DC_MT # np.array([DC_MT[0], DC_MT[2], DC_MT[1], DC_MT[4], DC_MT[3], DC_MT[5],])
CLVD_MT_ = CLVD_MT # np.array(
# [CLVD_MT[0], CLVD_MT[2], CLVD_MT[1], CLVD_MT[4], CLVD_MT[3], CLVD_MT[5],]
# )
fig = Plot_Direct_BB(
MT_Full=MT_FULL_ / M0,
Eps=Epsilon,
MT_DC=DC_MT_ / M0_DC,
M0_DC=M0_DC,
MT_CLVD=CLVD_MT_ / M0_CLVD,
M0_CLVD=M0_CLVD,
azimuths=[event.az, event.az, event.az],
inc_angles=angles,
phase_names=["P", "S", "pP"],
color=color_plot,
height=19.0,
horizontal=True,
)
plt.savefig(
pjoin(
h5_file_folder,
f"Direct_BB_{event.name}_{depth}_{misfit_name}_{fwd.veloc_name}_Post.svg",
),
dpi=300,
)
plt.close()
MT = np.expand_dims(DC_MT, axis=0)
M0 = np.expand_dims(M0_DC, axis=0)
fig = waveform_plot(
syn_GFs=syn_GFs,
syn_tts=syn_tts,
obs_tts=obs_tt,
fwd=fwd,
misfit_weight_len=misfit_weight_len,
event=event,
phases=phases,
components=components,
t_pre=t_pre,
t_post=t_post,
MTs=MT,
M0s=M0,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
plot_extra_phases=plot_extra_phases,
extra_arrs=extra_arrs,
color_plot=color_plot,
Ylims=Ylims,
fig=fig,
ax=ax,
)
if Return_Fig:
return fig
else:
plt.savefig(
pjoin(
h5_file_folder,
f"COMBINED_waveforms_{event.name}_{depth}_{misfit_name}_{fwd.veloc_name}_Post.svg",
),
dpi=300,
)
plt.close()
def waveform_plot_copy(
syn_GFs: obspy.Stream,
syn_tts: [float],
obs_tts: [float],
fwd: _Forward._AbstractForward,
misfit_weight_len: float,
event: obspy.core.event.Event,
phases: [str],
components: [float],
t_pre: [float],
t_post: [float],
MTs: _List[float],
M0s: [float],
depth: float,
rec: instaseis.Receiver,
tstar: _Union[float, str],
phase_corrs: [float],
fmin: float = None,
fmax: float = None,
zerophase: bool = None,
plot_extra_phases: [str] = None,
extra_arrs: [float] = None,
color_plot: str = None,
Ylims: [float] = None,
fig: [bool] = None,
ax: [bool] = None,
):
""" Waveform plot """
    if (fmin is None) or (fmax is None):
        print("Data will not be filtered because fmin or fmax is None")
filter_par = False
else:
filter_par = True
inv_phases = len(phases)
""" Prepare all phases for the plot """
all_phase_combs = set(["PZ", "PR", "SZ", "SR", "ST"])
pc = set([p + c for p, c in zip(phases, components)])
PorS = set([phase[0] for phase in pc])
missing = all_phase_combs - pc
phases_to_plot = []
comps_to_plot = []
corrs_missing = []
tstar_missing = []
t_pres_missing = []
t_posts_missing = []
Ylims_missing = []
for missing_phase in missing:
if missing_phase[0] in PorS:
corrs_missing.append(phase_corrs[phases.index(missing_phase[0])])
tstar_missing.append(tstar[phases.index(missing_phase[0])])
t_pres_missing.append(t_pre[phases.index(missing_phase[0])])
t_posts_missing.append(t_post[phases.index(missing_phase[0])])
Ylims_missing.append(Ylims[phases.index(missing_phase[0])])
phases_to_plot.append(missing_phase[0])
comps_to_plot.append(missing_phase[1])
""" Add missing observed arrival times"""
for i, phase in enumerate(phases_to_plot):
obs_tts.append(utct(event.picks[phase]) - event.origin_time + corrs_missing[i])
""" Add missing synthetic GF """
for i, phase in enumerate(phases_to_plot):
syn_tt = fwd.get_phase_tt(phase=phase, depth=depth, distance=event.distance)
syn_GF = fwd.get_greens_functions(
comp=comps_to_plot[i],
depth=depth,
distance=event.distance,
lat_src=event.latitude,
lon_src=event.longitude,
rec=rec,
tstar=tstar_missing[i],
LQT=False,
inc=None,
baz=None,
M0=1,
filter=filter_par,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
)
syn_GFs.append(syn_GF)
syn_tts.append(syn_tt)
phases_to_plot = phases + phases_to_plot
comps_to_plot = components + comps_to_plot
t_pres_missing = t_pre + t_pres_missing
t_posts_missing = t_post + t_posts_missing
Ylims_missing = Ylims + Ylims_missing
if fig is None and ax is None:
# fig, ax = plt.subplots(nrows=len(phases), ncols=1, sharex="all", figsize=(18, 16))
fig, ax = plt.subplots(nrows=len(phases_to_plot), ncols=1, sharex="all", figsize=(18, 20))
for i, phase in enumerate(phases_to_plot):
for n in range(len(M0s)):
tr_syn_full = fwd.generate_synthetic_data(
st_GF=syn_GFs[i], focal_mech=MTs[n], M0=M0s[n], slice=False,
)
tr_slice = tr_syn_full.slice(
starttime=fwd.or_time + syn_tts[i] - t_pres_missing[i],
endtime=fwd.or_time + syn_tts[i] + t_posts_missing[i],
)
if i < inv_phases:
if n == 0:
if color_plot == "blue":
ax[i].plot(
tr_slice.times() - t_pres_missing[i],
tr_slice.data,
lw=2,
c=color_plot,
label="Synthetic (Grid-search)",
)
else:
ax[i].plot(
tr_slice.times() - t_pres_missing[i],
tr_slice.data,
lw=3,
c=color_plot,
label="Synthetic (Direct)",
)
else:
ax[i].plot(
tr_slice.times() - t_pres_missing[i], tr_slice.data, lw=2, c=color_plot,
)
ax[i].plot(
tr_syn_full.times() - (syn_tts[i] - fwd.start_cut),
tr_syn_full.data,
lw=1,
c=color_plot,
)
# ax[i].legend()
st = obspy.Stream()
st += tr_slice
if n == len(M0s) - 1:
st_obs_full, sigmasPLOT = _PreProcess.prepare_event_data(
event=event,
phases=phases_to_plot,
components=comps_to_plot,
slice=False,
filter=filter_par,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
noise_level=False,
)
st_obs = st_obs_full.slice(
starttime=fwd.or_time + obs_tts[i] - t_pres_missing[i],
endtime=fwd.or_time + obs_tts[i] + t_posts_missing[i],
)
ax[i].plot(
st_obs_full[i].times() - obs_tts[i], st_obs_full[i].data, lw=1, c="k",
)
if i < inv_phases:
ax[i].plot(
st_obs[i].times() - t_pres_missing[i],
st_obs[i].data,
lw=3,
c="k",
label="Observed",
)
st += st_obs[i]
                global_max = max([tr.data.max() for tr in st]) * 1.2
                global_min = min([tr.data.min() for tr in st]) * 1.2
                if Ylims_missing is None:
                    ax[i].set_ylim(global_min, global_max)
                else:
                    ax[i].set_ylim(-Ylims_missing[i], Ylims_missing[i])
if i < inv_phases:
for axis in ["top", "bottom", "left", "right"]:
ax[i].spines[axis].set_linewidth(5)
ax[i].axvline(x=t_posts_missing[i], c="grey", ls="dashed")
ax[i].axvline(x=-t_pres_missing[i], c="grey", ls="dashed")
ax[i].axvspan(
-t_pres_missing[i], misfit_weight_len, facecolor="grey", alpha=0.3
)
ax[i].axvspan(
misfit_weight_len, t_posts_missing[i], facecolor="grey", alpha=0.1
)
ymin = ax[i].get_ylim()[0]
ymax = ax[i].get_ylim()[1]
if event.name == "S0173a" and phase == "P":
ax[i].axvspan(
t_posts_missing[i], t_posts_missing[i] + 23, facecolor="green", alpha=0.1
)
ax[i].text(
29,
ymin * 0.8,
"Glitch",
verticalalignment="center",
color="green",
fontsize=35,
)
ax[i].axvline(x=0.0, c="dimgrey", lw=2)
ax[i].text(
0 + 0.1,
ymax * 0.8,
phase,
verticalalignment="center",
color="dimgray",
fontsize=30,
)
ax[i].text(
s="%s%s" % (phases_to_plot[i], comps_to_plot[i]),
x=0.99,
y=0.75,
ha="right",
transform=ax[i].transAxes,
color=color_plot,
fontsize=40,
)
# Extra phase arrivals:
if plot_extra_phases is not None:
for j, extraphase in enumerate(plot_extra_phases):
arr = extra_arrs[j]
if arr:
if arr - syn_tts[i] > 0 and arr - syn_tts[i] < 31:
ax[i].axvline(x=arr - syn_tts[i], c="dimgrey", lw=2)
ax[i].text(
arr - syn_tts[i] + 0.1,
ymax * 0.75,
extraphase,
verticalalignment="center",
color="dimgrey",
fontsize=30,
rotation=90,
)
ax[i].tick_params(axis="both", which="major", labelsize=35)
ax[i].tick_params(axis="both", which="minor", labelsize=25)
ax[i].get_yaxis().get_offset_text().set_visible(False)
ax_max = max(ax[i].get_yticks())
exponent_axis = np.floor(np.log10(ax_max)).astype(int)
# ax[i].annotate(
# r"$\times$10$^{%i}$" % (exponent_axis),
# xy=(0.01, 0.75),
# xycoords="axes fraction",
# fontsize=32,
# )
fig.text(
0.9,
0.88,
"M0: %.2e" % (M0s[0]),
ha="right",
va="bottom",
size="medium",
color="black",
fontsize=40,
)
fig.text(0.01, 0.5, "Displacement (nm)", va="center", rotation="vertical", fontsize=45)
fig.text(
0.5,
0.88,
event.name,
ha="center",
va="bottom",
size="x-large",
color=color_plot,
fontsize=45,
)
ax[0].legend(
prop={"size": 35},
loc="center left",
bbox_to_anchor=(0.12, 0.93),
bbox_transform=fig.transFigure,
)
ax[-1].set_xlim(-10.0, 32.0)
ax[-1].set_xlabel("time after phase (s)", fontsize=45)
return fig, ax
def waveform_plot(
syn_GFs: obspy.Stream,
syn_tts: [float],
obs_tts: [float],
fwd: _Forward._AbstractForward,
misfit_weight_len: float,
event: obspy.core.event.Event,
phases: [str],
components: [float],
t_pre: [float],
t_post: [float],
MTs: _List[float],
M0s: [float],
fmin: float = None,
fmax: float = None,
zerophase: bool = None,
plot_extra_phases: [str] = None,
extra_arrs: [float] = None,
color_plot: str = None,
Ylims: [float] = None,
fig: [bool] = None,
ax: [bool] = None,
):
""" Waveform plot """
    if (fmin is None) or (fmax is None):
        print("Data will not be filtered because fmin or fmax is None")
filter_par = False
else:
filter_par = True
if fig is None and ax is None:
# fig, ax = plt.subplots(nrows=len(phases), ncols=1, sharex="all", figsize=(18, 16))
fig, ax = plt.subplots(nrows=len(phases), ncols=1, sharex="all", figsize=(18, 20))
for i, phase in enumerate(phases):
for n in range(len(M0s)):
tr_syn_full = fwd.generate_synthetic_data(
st_GF=syn_GFs[i], focal_mech=MTs[n], M0=M0s[n], slice=False,
)
tr_slice = tr_syn_full.slice(
starttime=fwd.or_time + syn_tts[i] - t_pre[i],
endtime=fwd.or_time + syn_tts[i] + t_post[i],
)
if n == 0:
if color_plot == "b":
ax[i].plot(
tr_slice.times() - t_pre[i],
tr_slice.data,
lw=2,
c=color_plot,
label="Synthetic (Grid-search)",
)
else:
ax[i].plot(
tr_slice.times() - t_pre[i],
tr_slice.data,
lw=3,
c=color_plot,
label="Synthetic (Direct)",
)
else:
ax[i].plot(
tr_slice.times() - t_pre[i], tr_slice.data, lw=2, c=color_plot,
)
ax[i].plot(
tr_syn_full.times() - (syn_tts[i] - fwd.start_cut),
tr_syn_full.data,
lw=1,
c=color_plot,
)
# ax[i].legend()
st = obspy.Stream()
st += tr_slice
if n == len(M0s) - 1:
st_obs_full, sigmasPLOT = _PreProcess.prepare_event_data(
event=event,
phases=phases,
components=components,
slice=False,
filter=filter_par,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
noise_level=False,
)
st_obs = st_obs_full.slice(
starttime=fwd.or_time + obs_tts[i] - t_pre[i],
endtime=fwd.or_time + obs_tts[i] + t_post[i],
)
ax[i].plot(
st_obs_full[i].times() - obs_tts[i], st_obs_full[i].data, lw=1, c="k",
)
ax[i].plot(
st_obs[i].times() - t_pre[i], st_obs[i].data, lw=3, c="k", label="Observed",
)
st += st_obs[i]
                global_max = max([tr.data.max() for tr in st]) * 1.2
                global_min = min([tr.data.min() for tr in st]) * 1.2
                if Ylims is None:
                    ax[i].set_ylim(global_min, global_max)
                else:
                    ax[i].set_ylim(-Ylims[i], Ylims[i])
ax[i].axvline(x=t_post[i], c="grey", ls="dashed")
ax[i].axvline(x=-t_pre[i], c="grey", ls="dashed")
ax[i].axvspan(-t_pre[i], misfit_weight_len, facecolor="grey", alpha=0.2)
ymin = ax[i].get_ylim()[0]
ymax = ax[i].get_ylim()[1]
if event.name == "S0173a" and phase == "P":
ax[i].axvspan(t_post[i], t_post[i] + 23, facecolor="green", alpha=0.1)
ax[i].text(
29,
ymin * 0.8,
"Glitch",
verticalalignment="center",
color="green",
fontsize=35,
)
ax[i].axvline(x=0.0, c="dimgrey", lw=2)
ax[i].text(
0 + 0.1,
ymax * 0.8,
phase,
verticalalignment="center",
color="dimgray",
fontsize=30,
)
ax[i].text(
s="%s%s" % (phases[i], components[i]),
x=0.99,
y=0.75,
ha="right",
transform=ax[i].transAxes,
color=color_plot,
fontsize=40,
)
# Extra phase arrivals:
if plot_extra_phases is not None:
for j, extraphase in enumerate(plot_extra_phases):
arr = extra_arrs[j]
if arr:
if arr - syn_tts[i] > 0 and arr - syn_tts[i] < 31:
ax[i].axvline(x=arr - syn_tts[i], c="dimgrey", lw=2)
ax[i].text(
arr - syn_tts[i] + 0.1,
ymax * 0.75,
extraphase,
verticalalignment="center",
color="dimgrey",
fontsize=30,
rotation=90,
)
ax[i].tick_params(axis="both", which="major", labelsize=35)
ax[i].tick_params(axis="both", which="minor", labelsize=25)
ax[i].get_yaxis().get_offset_text().set_visible(False)
ax_max = max(ax[i].get_yticks())
exponent_axis = np.floor(np.log10(ax_max)).astype(int)
# ax[i].annotate(
# r"$\times$10$^{%i}$" % (exponent_axis),
# xy=(0.01, 0.75),
# xycoords="axes fraction",
# fontsize=32,
# )
fig.text(
0.9,
0.88,
"M0: %.2e" % (M0s[0]),
ha="right",
va="bottom",
size="medium",
color="black",
fontsize=40,
)
fig.text(0.01, 0.5, "Displacement (nm)", va="center", rotation="vertical", fontsize=45)
fig.text(
0.5,
0.88,
event.name,
ha="center",
va="bottom",
size="x-large",
color=color_plot,
fontsize=45,
)
ax[0].legend(
prop={"size": 35},
loc="center left",
bbox_to_anchor=(0.12, 0.93),
bbox_transform=fig.transFigure,
)
ax[-1].set_xlim(-10.0, 32.0)
ax[-1].set_xlabel("time after phase (s)", fontsize=45)
return fig, ax
def Source_Uncertainty_OLD(
h5_file_folder: str,
event_name: str,
method: str,
misfit_name: str,
fwd: _Forward._AbstractForward,
phases: [str],
components: [str],
depths: [float],
DOF: float,
fmin: float = None,
fmax: float = None,
):
for idepth, depth in enumerate(depths):
print(depth)
# if method == "GS":
color_plot = "b"
h5_file_path = pjoin(
h5_file_folder,
f"GS_{event_name}_{depth}_{fmin}_{fmax}_{misfit_name}_{fwd.veloc_name}.hdf5",
)
depth_GS, sdr, M0_GS, misfit_L2_GS = _ReadH5.Read_GS_h5(
Filename=h5_file_path, amount_of_phases=len(phases)
)
Total_L2_GS = np.sum(misfit_L2_GS, axis=1)
n_lowest = 50
# n_lowest = int(len(Total_L2_GS) * 0.05)
# lowest_indices = Total_L2_GS.argsort()[0:n_lowest:50]
lowest_indices = Total_L2_GS.argsort()[0:n_lowest]
GOF_GS = (Total_L2_GS / DOF)[lowest_indices]
M0 = M0_GS[lowest_indices]
sdrs = sdr[lowest_indices, :]
MT_Full = np.zeros((sdrs.shape[0], 6))
for i in range(MT_Full.shape[0]):
MT_Full[i, :] = _GreensFunctions.convert_SDR(sdrs[i, 0], sdrs[i, 1], sdrs[i, 2], M0[i])
MT_Full[i, 3] = -MT_Full[i, 4]
MT_Full[i, 5] = -MT_Full[i, 5]
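        # The if/else below stacks results over depths and weights each solution by
        # exp(-misfit/DOF), so better-fitting mechanisms dominate the statistics.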
if idepth == 0:
M0_plot_GS = M0
MT_GS = MT_Full
MT_sdrs = sdrs
weights_GS = np.exp(-GOF_GS)
else:
M0_plot_GS = np.hstack((M0_plot_GS, M0))
MT_GS = np.vstack((MT_GS, MT_Full))
MT_sdrs = np.vstack((MT_sdrs, sdrs))
weights_GS = np.hstack((weights_GS, np.exp(-GOF_GS)))
(values, counts) = np.unique(sdrs[:, 0], return_counts=True)
ind = np.argmax(counts)
print("Strike:", values[ind])
(values, counts) = np.unique(sdrs[:, 1], return_counts=True)
ind = np.argmax(counts)
print("Dip:", values[ind])
(values, counts) = np.unique(sdrs[:, 2], return_counts=True)
        ind = np.argmax(counts)
        print("Rake:", values[ind])
import numpy as np
from scipy.fft import fftn,ifftn,fftshift
import h5py
from pathlib import Path
fileprefix_snapshot = 'snapdir_%03d/snapshot_%03d'
fileprefix_subhalo = 'groups_%03d/fof_subhalo_tab_%03d'
fileprefix_subhalo_desc = 'groups_%03d/subhalo_desc_%03d'
fileprefix_subhalo_prog = 'groups_%03d/subhalo_prog_%03d'
def gadget_to_particles(fileprefix, opts={'pos':True,'vel':True,'ID':False,'mass':True}):
'''
Read particles from GADGET HDF5 snapshot.
Parameters:
fileprefix: input file prefix (e.g., snapshot_000, not snapshot_000.0.hdf5)
opts: which fields to read and return
Returns:
pos: position array, shape (3,NP), comoving
vel: velocity array, shape (3,NP), peculiar
ID: ID array, shape (NP,)
mass: mass array, shape (NP,)
header: a dict with header info, use list(header) to see the fields
'''
filepath = [
Path(fileprefix + '.hdf5'),
Path(fileprefix + '.0.hdf5'),
Path(fileprefix),
]
if filepath[0].is_file():
filebase = fileprefix + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase = fileprefix + '.%d.hdf5'
numfiles = 2
elif filepath[2].is_file():
# exact filename was passed - will cause error if >1 files, otherwise fine
filebase = fileprefix
numfiles = 1
fileinst = 0
pinst = 0
while fileinst < numfiles:
if numfiles == 1:
filename = filebase
else:
filename = filebase%fileinst
with h5py.File(filename, 'r') as f:
print('reading %s'%filename)
header = dict(f['Header'].attrs)
MassTable = header['MassTable']
ScaleFactor = 1./(1+header['Redshift'])
NP = header['NumPart_ThisFile']
NPtot = header['NumPart_Total']
numfiles = header['NumFilesPerSnapshot']
if fileinst == 0:
# allocate full-sized memory blocks in advance, for efficiency
if opts.get('pos'): pos = np.zeros((3,np.sum(NPtot)),dtype=np.float32)
if opts.get('vel'): vel = np.zeros((3,np.sum(NPtot)),dtype=np.float32)
if opts.get('mass'): mass = np.zeros(np.sum(NPtot),dtype=np.float32)
if opts.get('ID'): ID = np.zeros(np.sum(NPtot),dtype=np.uint32)
for typ in range(len(NPtot)):
NPtyp = int(NP[typ])
if NPtyp == 0:
continue
if opts.get('pos'): pos[:,pinst:pinst+NPtyp] = np.array(f['PartType%d/Coordinates'%typ]).T
if opts.get('vel'): vel[:,pinst:pinst+NPtyp] = np.array(f['PartType%d/Velocities'%typ]).T * np.sqrt(ScaleFactor)
if opts.get('mass'):
if MassTable[typ] == 0.:
mass[pinst:pinst+NPtyp] = np.array(f['PartType%d/Masses'%typ])
else:
mass[pinst:pinst+NPtyp] = np.full(NPtyp,MassTable[typ])
if opts.get('ID'): ID[pinst:pinst+NPtyp] = np.array(f['PartType%d/ParticleIDs'%typ])
pinst += NPtyp
fileinst += 1
ret = []
if opts.get('pos'): ret += [pos]
if opts.get('vel'): ret += [vel]
if opts.get('mass'): ret += [mass]
if opts.get('ID'): ret += [ID]
ret += [header]
return tuple(ret)
def fof_to_halos(fileprefix,opts={'pos':True,'vel':True,'mass':True}):
'''
Read halos from GADGET HDF5 FOF file.
Parameters:
fileprefix: input file prefix (e.g., fof_tab_000, not fof_tab_000.0.hdf5)
opts: which fields to read and return
Returns:
pos: position array, shape (3,NH), comoving
vel: velocity array, shape (3,NH), peculiar
mass: mass array, shape (NH,)
header: a dict with header info, use list(header) to see the fields
'''
filepath = [
Path(fileprefix + '.hdf5'),
Path(fileprefix + '.0.hdf5'),
Path(fileprefix),
]
if filepath[0].is_file():
filebase = fileprefix + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase = fileprefix + '.%d.hdf5'
numfiles = 2
elif filepath[2].is_file():
# exact filename was passed - will cause error if >1 files, otherwise fine
filebase = fileprefix
numfiles = 1
fileinst = 0
if opts.get('pos'): pos = []
if opts.get('vel'): vel = []
if opts.get('mass'): mass = []
while fileinst < numfiles:
if numfiles == 1:
filename = filebase
else:
filename = filebase%fileinst
with h5py.File(filename, 'r') as f:
print('reading %s'%filename)
header = dict(f['Header'].attrs)
ScaleFactor = 1./(1+header['Redshift'])
numfiles = header['NumFiles']
if header['Ngroups_Total'] == 0:
if opts.get('pos'): pos = [[]]
if opts.get('vel'): vel = [[]]
if opts.get('mass'): mass = [[]]
break
if header['Ngroups_ThisFile'] > 0:
if opts.get('pos'): pos += [np.array(f['Group/GroupPos']).T]
if opts.get('vel'): vel += [np.array(f['Group/GroupVel']).T * np.sqrt(ScaleFactor)]
if opts.get('mass'): mass += [np.array(f['Group/GroupMass'])]
fileinst += 1
ret = []
if opts.get('pos'): ret += [np.concatenate(pos,axis=1)]
if opts.get('vel'): ret += [np.concatenate(vel,axis=1)]
if opts.get('mass'): ret += [np.concatenate(mass)]
ret += [header]
return tuple(ret)
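# --- hedged usage sketch (not part of the original module) ---
# Minimal example of calling fof_to_halos defined above. The file prefix
# 'fof_tab_005' is a made-up placeholder; any GADGET-4 FOF output prefix would do.
def _example_fof_masses(fileprefix='fof_tab_005'):
    pos, vel, mass, header = fof_to_halos(fileprefix)
    print('read %d halos at z=%.2f'%(mass.size, header['Redshift']))
    return pos, vel, mass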
def cic_bin(x,BoxSize,GridSize,weights=1,density=True):
'''
Bin particles into a density field using cloud-in-cell method
Parameters:
x: 3D positions, shape (3,NP) where NP is the number of particles
BoxSize: size of periodic region
GridSize: resolution of output density field, per dimension
weights: weight (e.g. mass) to assign to each particle, either a number or
an array of length NP
density: If False, output the total mass within each cell. If True, output
mass/volume.
Returns:
field of shape (GridSize,GridSize,GridSize)
bin edges
'''
NP = x.shape[1]
N = GridSize
dx = BoxSize / GridSize
bins = dx * np.arange(N+1)
# idea:
# i and i1 are indices of the two adjacent cells (in each dimension)
  # f is the fraction from i to i1 where the particle lies
# shapes are (3,NP)
f = x / dx
f[f < 0.5] += N
f[f >= N+0.5] -= N
i = (f-0.5).astype(np.int32)
f -= i + 0.5
i1 = i+1
i[i<0] += N
i[i>=N] -= N
i1[i1<0] += N
i1[i1>=N] -= N
# now appropriately add each particle into the 8 adjacent cells
hist = np.zeros((N,N,N))
np.add.at(hist,(i[0],i[1],i[2]),(1-f[0])*(1-f[1])*(1-f[2])*weights)
np.add.at(hist,(i1[0],i[1],i[2]),f[0]*(1-f[1])*(1-f[2])*weights)
np.add.at(hist,(i[0],i1[1],i[2]),(1-f[0])*f[1]*(1-f[2])*weights)
np.add.at(hist,(i[0],i[1],i1[2]),(1-f[0])*(1-f[1])*f[2]*weights)
np.add.at(hist,(i1[0],i1[1],i[2]),f[0]*f[1]*(1-f[2])*weights)
np.add.at(hist,(i[0],i1[1],i1[2]),(1-f[0])*f[1]*f[2]*weights)
np.add.at(hist,(i1[0],i[1],i1[2]),f[0]*(1-f[1])*f[2]*weights)
np.add.at(hist,(i1[0],i1[1],i1[2]),f[0]*f[1]*f[2]*weights)
if density:
hist /= dx**3
return hist,bins
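# --- hedged usage sketch (not part of the original module) ---
# Sanity check of cic_bin on uniform random particles: with density=False the
# cloud-in-cell weights of each particle sum to 1, so the grid total equals the
# number of particles. Box size, grid size and particle count are arbitrary
# example values.
def _example_cic():
    rng = np.random.default_rng(0)
    BoxSize, GridSize, NP = 100., 64, 10000
    x = rng.uniform(0., BoxSize, size=(3, NP))
    field, edges = cic_bin(x, BoxSize, GridSize, weights=1., density=False)
    assert np.isclose(field.sum(), NP)
    return field, edges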
def power_spectrum(delta,BoxSize,bins=None):
'''
Find spherically averaged power spectrum of density field
Parameters:
delta: input density field
BoxSize: width of periodic box
bins: desired k bin edges
Returns:
k: array of wavenumbers
P(k): array comprising the power spectrum as a function of k
'''
GridSize = delta.shape[0]
dk = 2*np.pi/BoxSize
# radial bins for k
if bins is None:
# corner of cube is at distance np.sqrt(3)/2*length from center
bins = np.arange(1,int((GridSize+1) * np.sqrt(3)/2)) * dk
# get wavenumbers associated with k-space grid
k = ((np.indices(delta.shape)+GridSize//2)%GridSize-GridSize//2) * dk
k_mag = np.sqrt(np.sum(k**2,axis=0))
# Fourier transform and get power spectrum
pk = np.abs(fftn(delta,overwrite_x=True))**2*BoxSize**3/GridSize**6
hist_pk,_ = np.histogram(k_mag,bins=bins,weights=pk)
hist_ct,_ = np.histogram(k_mag,bins=bins)
hist_k,_ = np.histogram(k_mag,bins=bins,weights=k_mag)
return hist_k/hist_ct, hist_pk/hist_ct
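# --- hedged usage sketch (not part of the original module) ---
# Combine cic_bin and power_spectrum to measure P(k) of a Poisson particle
# sample. Converting the mass grid to the overdensity delta = rho/rho_mean - 1
# is an assumption about the intended input of power_spectrum; for a Poisson
# sample the result should scatter around the shot-noise level BoxSize**3/NP
# (suppressed at high k by the CIC window).
def _example_power_spectrum():
    rng = np.random.default_rng(1)
    BoxSize, GridSize, NP = 100., 64, 100000
    x = rng.uniform(0., BoxSize, size=(3, NP))
    rho, _ = cic_bin(x, BoxSize, GridSize, density=True)
    delta = rho / rho.mean() - 1.
    k, Pk = power_spectrum(delta, BoxSize)
    return k, Pk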
def density_profile(pos,mass,bins=None,BoxSize=None):
'''
Spherically averaged density profile centered at position (0,0,0)
Parameters:
pos: 3D positions relative to center, shape (3,NP) where NP is the number of particles
mass: masses of particles, shape (NP)
bins: radial bin edges
BoxSize: size of periodic region (None if not periodic)
Returns:
radius, density
'''
NP = pos.shape[1]
# shift periodic box
if BoxSize is not None:
pos[pos >= 0.5*BoxSize] -= BoxSize
        pos[pos < -0.5*BoxSize] += BoxSize
# radii
r = np.sqrt(np.sum(pos**2,axis=0))
# radial bins
if bins is None:
rmin = np.sort(r)[100]/10
rmax = np.max(r)
bins = np.geomspace(rmin,rmax,50)
bin_volume = 4./3 * np.pi * (bins[1:]**3 - bins[:-1]**3)
hist_mass,_ = np.histogram(r,bins=bins,weights=mass)
return 0.5*(bins[1:]+bins[:-1]), hist_mass / bin_volume
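# --- hedged usage sketch (not part of the original module) ---
# Mock test of density_profile: radii drawn uniformly in r give a number
# density n(r) ~ r**-2, so the recovered profile should fall roughly as
# radius**-2 over the sampled range. Particle count and masses are arbitrary
# example values.
def _example_density_profile():
    rng = np.random.default_rng(2)
    NP = 100000
    r = rng.uniform(0.01, 1., NP)
    u = rng.normal(size=(3, NP))
    u /= np.sqrt(np.sum(u**2, axis=0))
    radius, density = density_profile(u * r, np.full(NP, 1e-3))
    return radius, density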
def subhalo_tracing_data(snapshot_number,subhalo_number):
'''
Get a subhalo's mass, position, velocity, progenitor, descendant,
and other tracking information.
Parameters:
snapshot_number
subhalo_number
Returns:
prog: best-scoring progenitor
desc: best-scoring descendant
pos: subhalo (comoving) position, shape (3,)
vel: subhalo (peculiar) velocity, shape (3,)
    mass: dict with host group mass ('group') and subhalo mass ('subhalo')
ID: dict with group ID, subhalo ID, and most bound particle ID
header: a dict with header info, use list(header) to see the fields
'''
prefix_sub = fileprefix_subhalo%(snapshot_number,snapshot_number)
prefix_desc = fileprefix_subhalo_desc%(snapshot_number,snapshot_number)
prefix_prog = fileprefix_subhalo_prog%(snapshot_number,snapshot_number)
filepath = [
Path(prefix_sub + '.hdf5'),
Path(prefix_sub + '.0.hdf5'),
]
if filepath[0].is_file():
filebase_sub = prefix_sub + '.hdf5'
filebase_desc = prefix_desc + '.hdf5'
filebase_prog = prefix_prog + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase_sub = prefix_sub + '.%d.hdf5'
filebase_desc = prefix_desc + '.%d.hdf5'
filebase_prog = prefix_prog + '.%d.hdf5'
numfiles = 2
prog, desc, pos, vel, mass, ID, header = -1, -1, np.zeros(3), np.zeros(3), 0., {}, {}
fileinst = 0
hinst = 0
ginst = 0
while fileinst < numfiles:
if numfiles == 1:
filename_sub = filebase_sub
filename_desc = filebase_desc
filename_prog = filebase_prog
else:
filename_sub = filebase_sub%fileinst
filename_desc = filebase_desc%fileinst
filename_prog = filebase_prog%fileinst
with h5py.File(filename_sub, 'r') as f:
print('reading %s'%filename_sub)
header = dict(f['Header'].attrs)
ScaleFactor = 1./(1+header['Redshift'])
numfiles = header['NumFiles']
if hinst + header['Nsubhalos_ThisFile'] > subhalo_number:
index = subhalo_number - hinst
pos = np.array(f['Subhalo/SubhaloPos'])[index]
vel = np.array(f['Subhalo/SubhaloVel'])[index] * np.sqrt(ScaleFactor)
ID = {'group':np.array(f['Subhalo/SubhaloGroupNr'])[index],
'subhalo':subhalo_number,
'particle':np.array(f['Subhalo/SubhaloIDMostbound'])[index]}
if ID['group']>=ginst:
mass = {'group':np.array(f['Group/GroupMass'])[ID['group']-ginst],
'subhalo':np.array(f['Subhalo/SubhaloMass'])[index]}
else:
ginst2 = ginst
fileinst2 = fileinst
while ID['group'] < ginst2:
fileinst2 -= 1
filename_sub2 = filebase_sub%fileinst2
with h5py.File(filename_sub2, 'r') as f2:
print('reading %s'%filename_sub2)
header2 = dict(f2['Header'].attrs)
ginst2 -= int(header2['Ngroups_ThisFile'])
if ID['group'] >= ginst2:
mass = {'group':np.array(f2['Group/GroupMass'])[ID['group']-ginst2],
'subhalo':np.array(f['Subhalo/SubhaloMass'])[index]}
try:
with h5py.File(filename_desc, 'r') as fd:
print('reading %s'%filename_desc)
if np.array(fd['Subhalo/SubhaloNr'])[index] != subhalo_number:
raise Exception('halo number mismatch, %d != %d'%(np.array(fd['Subhalo/SubhaloNr'])[index],subhalo_number))
desc = np.array(fd['Subhalo/DescSubhaloNr'])[index]
except Exception as e:
print(str(e))
desc = -1
try:
with h5py.File(filename_prog, 'r') as fp:
print('reading %s'%filename_prog)
if np.array(fp['Subhalo/SubhaloNr'])[index] != subhalo_number:
raise Exception('halo number mismatch, %d != %d'%(np.array(fp['Subhalo/SubhaloNr'])[index],subhalo_number))
prog = np.array(fp['Subhalo/ProgSubhaloNr'])[index]
except Exception as e:
print(str(e))
prog = -1
break
hinst += int(header['Nsubhalos_ThisFile'])
ginst += int(header['Ngroups_ThisFile'])
fileinst += 1
else:
print('Warning: halo %d not found'%subhalo_number)
return prog, desc, pos, vel, mass, ID, header
def trace_subhalo(snapshot_number,subhalo_number):
'''
Trace a subhalo's position, mass, and other tracking information across snapshots.
Parameters:
snapshot_number
subhalo_number
Returns:
num: snapshot number
time: scale factor array, shape (NT,)
pos: position array, shape (NT,3), comoving
vel: velocity array, shape (NT,3), peculiar
mass: mass array, shape (NT,)
group: host group, shape (NT,)
ID: list of dicts with group ID, subhalo ID, and most bound particle ID; shape (NT,)
'''
prog, desc, pos_, vel_, mass_, ID_, header_ = subhalo_tracing_data(snapshot_number,subhalo_number)
print('halo: %d in snapshot %d'%(subhalo_number,snapshot_number))
pos = [pos_]
vel = [vel_]
mass = [mass_]
ID = [ID_]
time = [header_['Time']]
num = [snapshot_number]
shift = 0
while prog >= 0:
shift += 1
print('progenitor: %d in snapshot %d'%(prog,snapshot_number-shift))
prog, _, pos_, vel_, mass_, ID_, header_ = subhalo_tracing_data(snapshot_number-shift,prog)
pos += [pos_]
vel += [vel_]
mass += [mass_]
ID += [ID_]
time += [header_['Time']]
num += [snapshot_number-shift]
pos = pos[::-1]
vel = vel[::-1]
mass = mass[::-1]
ID = ID[::-1]
time = time[::-1]
num = num[::-1]
shift = 0
while desc >= 0:
shift += 1
print('descendant: %d in snapshot %d'%(desc,snapshot_number+shift))
_, desc, pos_, vel_, mass_, ID_, header_ = subhalo_tracing_data(snapshot_number+shift,desc)
pos += [pos_]
vel += [vel_]
mass += [mass_]
ID += [ID_]
time += [header_['Time']]
num += [snapshot_number+shift]
return np.array(num), np.array(time), np.array(pos), np.array(vel), mass, ID
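# --- hedged usage sketch (not part of the original module) ---
# Trace one subhalo through the merger tree and print its mass history.
# Snapshot 5 and subhalo 0 are arbitrary example indices and assume the
# module-level fileprefix_subhalo* templates point at existing SUBFIND outputs.
def _example_trace():
    num, time, pos, vel, mass, ID = trace_subhalo(5, 0)
    for n, a, m in zip(num, time, mass):
        print('snapshot %d: a=%.3f mass=%s'%(n, a, m))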
def subhalo_group_data(fileprefix,opts={'mass':True,'len':False,'pos':False},parttype=None):
'''
Read halos from GADGET HDF5 FOF+subhalo file and return data relevant to group membership.
Parameters:
fileprefix: input file prefix (e.g., fof_subhalo_tab_000, not fof_subhalo_tab_000.0.hdf5)
opts: which fields to read and return
parttype: if not None, consider only particles of the given type for certain outputs
Returns:
group: host group number
rank: rank of subhalo within host group
parentrank: rank of parent subhalo within host group
mass: subhalo mass
groupmass: mass of host group
length: subhalo particle count
grouplength: host group particle count
pos: subhalo position (NH,3)
grouppos: host group position (NH,3)
header: a dict with header info, use list(header) to see the fields
'''
filepath = [
Path(fileprefix + '.hdf5'),
Path(fileprefix + '.0.hdf5'),
Path(fileprefix),
]
if filepath[0].is_file():
filebase = fileprefix + '.hdf5'
numfiles = 1
elif filepath[1].is_file():
filebase = fileprefix + '.%d.hdf5'
numfiles = 2
elif filepath[2].is_file():
# exact filename was passed - will cause error if >1 files, otherwise fine
filebase = fileprefix
numfiles = 1
fileinst = 0
group = []
rank = []
parentrank = []
if opts.get('mass'):
mass = []
_groupmass = []
if opts.get('len'):
length = []
_grouplength = []
if opts.get('pos'):
pos = []
_grouppos = []
while fileinst < numfiles:
if numfiles == 1:
filename = filebase
else:
filename = filebase%fileinst
with h5py.File(filename, 'r') as f:
print('reading %s'%filename)
header = dict(f['Header'].attrs)
ScaleFactor = 1./(1+header['Redshift'])
numfiles = header['NumFiles']
group += [np.array(f['Subhalo/SubhaloGroupNr'])]
rank += [np.array(f['Subhalo/SubhaloRankInGr'])]
parentrank += [np.array(f['Subhalo/SubhaloParentRank'])]
if parttype is None:
if opts.get('mass'):
mass += [np.array(f['Subhalo/SubhaloMass'])]
_groupmass += [np.array(f['Group/GroupMass'])]
if opts.get('len'):
length += [np.array(f['Subhalo/SubhaloLen'])]
_grouplength += [np.array(f['Group/GroupLen'])]
else:
if opts.get('mass'):
mass += [np.array(f['Subhalo/SubhaloMassType'][:,parttype])]
_groupmass += [np.array(f['Group/GroupMassType'][:,parttype])]
if opts.get('len'):
length += [np.array(f['Subhalo/SubhaloLenType'][:,parttype])]
_grouplength += [np.array(f['Group/GroupLenType'][:,parttype])]
if opts.get('pos'):
pos += [np.array(f['Subhalo/SubhaloPos'])]
_grouppos += [
|
np.array(f['Group/GroupPos'])
|
numpy.array
|
from PIL import Image
import glob
import numpy as np
import os
import shutil
import sys
sys.path.append("..")
from config import ConfigOptions
from Utils.misc import *
config = ConfigOptions()
if os.path.exists("../../Data/NumpyDataDepth"):
shutil.rmtree("../../Data/NumpyDataDepth")
os.makedirs('../../Data/NumpyDataDepth/{}/'.format(config.img_dim))
os.makedirs('../../Data/NumpyDataDepth/Centroids/')
imgs = sorted(glob.glob("../"+config.raw_data_dir+"*.png"),key=natural_keys)
data_length = len(imgs)
print(data_length)
all_centroids =
|
np.zeros((data_length,2))
|
numpy.zeros
|
import numpy as np
import functools
def dominates(fitnesses_1,fitnesses_2):
    # fitnesses_1 is an array of objectives of solution 1 [objective1, objective2 ...]
larger_or_equal = fitnesses_1 >= fitnesses_2
larger = fitnesses_1 > fitnesses_2
if np.all(larger_or_equal) and np.any(larger):
return True
return False
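# hedged usage sketch (not part of the original module): dominates() follows a
# maximization convention, i.e. larger objective values are better; the fitness
# vectors below are arbitrary example values.
def _example_dominates():
    assert dominates(np.array([1.0, 2.0]), np.array([1.0, 1.0]))
    # a genuine trade-off: neither solution dominates the other
    assert not dominates(np.array([1.0, 0.0]), np.array([0.0, 1.0]))
    assert not dominates(np.array([0.0, 1.0]), np.array([1.0, 0.0]))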
# returns a matrix of shape (pop_size,pop_size) answering the question: does individual i dominate individual j -> domination_matrix[i,j]
# much faster than calling dominates() in two nested for loops
def calculate_domination_matrix(fitnesses):
pop_size = fitnesses.shape[0]
num_objectives = fitnesses.shape[1]
# numpy meshgrid does not work if original array is 2d, so we have to build the mesh grid manually
fitness_grid_x = np.zeros([pop_size,pop_size,num_objectives])
fitness_grid_y = np.zeros([pop_size,pop_size,num_objectives])
for i in range(pop_size):
fitness_grid_x[i,:,:] = fitnesses[i]
fitness_grid_y[:,i,:] = fitnesses[i]
larger_or_equal = fitness_grid_x >= fitness_grid_y
larger = fitness_grid_x > fitness_grid_y
return np.logical_and(
|
np.all(larger_or_equal,axis=2)
|
numpy.all
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# rppy - a geophysical library for Python
# Copyright (c) 2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rppy
import numpy as np
# Test reflectivity.py
def test_shuey():
err = 0.005
Vp1 = 3000
Vp2 = 4000
Vs1 = 1500
Vs2 = 2000
p1 = 2000
p2 = 2200
theta1 = np.array([32])
exp = 0.151
Rpp = rppy.reflectivity.shuey(Vp1, Vs1, p1,
Vp2, Vs2, p2,
theta1)
assert np.abs(Rpp - exp)/exp < err
def test_aki_richards():
err = 0.05
Vp1 = 3000
Vp2 = 4000
Vs1 = 1500
Vs2 = 2000
p1 = 2000
p2 = 2200
theta1 = np.array([32])
exp = 0.15351
Rpp = rppy.reflectivity.aki_richards(Vp1, Vs1, p1,
Vp2, Vs2, p2,
theta1)
assert np.abs(Rpp - exp)/exp < err
def test_bortfeld():
err = 0.01
Vp1 = 3000.
Vp2 = 4000.
Vs1 = 1500.
Vs2 = 2000.
p1 = 2000.
p2 = 2200.
theta1 = np.array([32])
exp = 0.15469135
Rpp = rppy.reflectivity.bortfeld(Vp1, Vs1, p1,
Vp2, Vs2, p2,
theta1)
assert np.abs(Rpp - exp)/exp < err
def test_snell():
err = 0.01
vp1 = 2500
vs1 = 1725
vp2 = 3800
vs2 = 1900
theta1 = np.array([30])
theta2E = 49.46
thetas1E = 20.18
thetas2E = 22.33
theta2, thetas1, thetas2, p = rppy.reflectivity.snell(vp1, vp2,
vs1, vs2,
np.radians(theta1))
assert np.abs(np.rad2deg(theta2) - theta2E) < err
assert np.abs(np.rad2deg(thetas1) - thetas1E) < err
assert np.abs(np.rad2deg(thetas2) - thetas2E) < err
def test_thomsen():
err = 0.05
C = np.zeros(shape=(6, 6))
C[0][0] = 87.26e9
C[1][1] = 87.26e9
C[2][2] = 105.8e9
C[3][3] = 57.15e9
C[4][4] = 57.15e9
C[5][5] = 40.35e9
C[0][2] = 11.95e9
C[0][1] = 6.57e9
C[0][3] = -17.18e9
p = 2646.6
eexp = -0.08762
yexp = -0.14698
dexp = -0.031453
vp, vs, e1, d1, y1, e2, d2, y2, d3 = rppy.reflectivity.thomsen(C, p)
assert np.abs(e1 - eexp)/eexp < err
assert np.abs(y1 - yexp)/yexp < err
assert np.abs(d1 - dexp)/dexp < err
def test_Cij():
err = 0.05
C = np.zeros(shape=(6, 6))
C[0][0] = 87.26e9
C[1][1] = 87.26e9
C[2][2] = 105.8e9
C[3][3] = 57.15e9
C[4][4] = 57.15e9
C[5][5] = 40.35e9
C[0][2] = 11.95e9
C[0][1] = 6.57e9
C[0][3] = -17.18e9
p = 2646.6
vp, vs, e1, d1, y1, e2, d2, y2, d3 = rppy.reflectivity.thomsen(C, p)
C2 = rppy.reflectivity.Cij(vp, vs, p, e1, d1, y1, e2, d2, y2, d3)
assert np.abs(C[0][0] - C2[0][0])/C2[0][0] < err
assert np.abs(C[2][2] - C2[2][2])/C2[2][2] < err
assert np.abs(C[5][5] - C2[5][5])/C2[5][5] < err
assert
|
np.abs(C[0][2] - C2[0][2])
|
numpy.abs
|
import numpy as np, numpy.linalg as LA
from numpy.linalg import multi_dot
from random import randint
"""
Implemented from Moreno et al 2012
"""
class CB_TRANSFER():
def __init__(self,Xtgt,B,max_iter):
# B is a list of codebooks
self.Xtgt = Xtgt
self.B = B
self.p, self.q = Xtgt.shape
        self.k, self.l = B[0].shape  # assume all source codebooks share the same shape
self.max_iter = max_iter
self.N = len(B)
#@staticmethod
#def return_min_idx(a):
#a is a list with numbers
# return min(range(len(a)), key=a.__getitem__)
@staticmethod
def non_j_idx(j,idx_range):
non_j = list(range(idx_range))
non_j.remove(j)
return non_j
def mask_nan(self):
# save a copy of unmasked Xtgt
self.Xtgt_unmasked = np.copy(self.Xtgt)
# mask
self.Xtgt = np.nan_to_num(self.Xtgt)
def initialize_Vtgt(self):
rows = []
for i in range(self.q):
row = np.zeros((1,self.l))
j = randint(0,self.l-1)
row[0,j] = 1
rows.append(row)
Vn = np.concatenate(rows,axis = 0)
self.Vtgt = dict()
for n in range(self.N):
self.Vtgt['V_{}'.format(n)] = Vn
def initialize_Utgt(self):
Un = np.zeros((self.p,self.k))
self.Utgt = dict()
for n in range(self.N):
self.Utgt['U_{}'.format(n)] = Un
    def update_Utgt(self):
        for i in range(self.p):
            # weighted reconstruction B_n V_n^T summed over the N source codebooks, shape (k,q)
            predict_sum = np.sum([self.alpha['alpha_{}'.format(n)]*np.dot(self.B[n],self.Vtgt['V_{}'.format(n)].T) for n in range(self.N)],axis=0)
            err = [LA.norm(self.Xtgt[i,:]-predict_sum[j,:]) for j in range(self.k)]
            j = np.nanargmin(err)
            for n in range(self.N):
                self.Utgt['U_{}'.format(n)][i,j] = 1
                self.Utgt['U_{}'.format(n)][i,self.non_j_idx(j,self.k)] = 0
    def update_Vtgt(self,B,n):
        for i in range(self.q):
            # weighted reconstruction U_n B_n summed over the N source codebooks, shape (p,l)
            predict_sum = np.sum([self.alpha['alpha_{}'.format(n)]*np.dot(self.Utgt['U_{}'.format(n)],self.B[n]) for n in range(self.N)],axis=0)
            err = [LA.norm(self.Xtgt[:,i]-predict_sum[:,j]) for j in range(self.l)]
j =
|
np.nanargmin(err)
|
numpy.nanargmin
|
import numpy as np
import os, glob
import struct
from corr_baryon import Corr_2pt_Baryon
def main():
# test code
mxi = 0.665 # in lattice unit
m_red = 0.5 * mxi # reduced mass
hal = HAL_pot(m_red=m_red, result_dir='../data/sample_data',
binned_nbs_dir='sample.nbs.binned', decompressed_nbs_dir='sample.nbs.decomp',
pot_output_dir='sample.results.pot', binned_rcorr_dir='sample.rcorr.binned',
it0=10, channel='xixi',
Ns=48, bin_size=2)
# calculate Vc(r) for XiXi(1S0)
hal.calc_pot(pot_type='cen', spin='1s0')
class HAL_pot(object):
"""
the HAL QCD potential code for V_C in 1S0 and V_C, V_T, V_eff_cen in 3S1
Parameters:
m_red : reduced mass [required]
result_dir : directory including BBwave.dir.Sx.xx and correlator.PS.dir
it0 : euclidean time (integer)
channel : only "nn" or "xixi" are supported
Ns : size of lattice
bin_size : size of jack-knife samples
pot_output_dir : directory for potential (default "results.pot")
binned_rcorr_dir : directory for binned Rcorr (default "results.rcorr.binned")
binned_nbs_dir : directory for binned NBS (default "results.nbs.binned")
decompressed_nbs_dir : directory for decompressed binned NBS (default "results.nbs.decomp")
confMax : default None (use all available files in result_dir dir)
reload_nbs : default True (if False recalculate R-correlator)
"""
def __init__(self, m_red, result_dir='results',
it0=10, channel='xixi', Ns=48, bin_size=1,
binned_nbs_dir='results.nbs.binned', decompressed_nbs_dir='results.nbs.decomp',
pot_output_dir='results.pot', binned_rcorr_dir='results.rcorr.binned',
confMax=None, reload_nbs=True):
self.m_red = m_red # reduced mass
self.it0 = it0
self.channel = channel # xixi or nn
self.Ns = Ns
self.bin_size = bin_size
self.result_dir = result_dir
self.binned_nbs_dir = binned_nbs_dir
self.decompressed_nbs_dir = decompressed_nbs_dir
self.pot_output_dir = pot_output_dir
self.binned_rcorr_dir = binned_rcorr_dir
self.confMax = confMax
self.reload_nbs = reload_nbs
single_lbl = {'xixi': 'Xi_CG05_CG05', 'nn': 'proton_CG05_CG05'}[channel]
self.C_N = Corr_2pt_Baryon(single_lbl, bin_size, result_dir, confMax=confMax)
self.bin_num = self.C_N.bin_num
for _dir in [binned_nbs_dir, decompressed_nbs_dir, pot_output_dir, binned_rcorr_dir]:
if not os.path.isdir(_dir): os.mkdir(_dir)
def calc_pot(self, pot_type='cen', spin='1s0'):
"""
calculate the HAL QCD potential
Parameters
pot_type : cen (default) or ten
spin : 1s0 (default) or 3s1_+1, 3s1_+0, 3s1_-1
Returns
pot_jk (and pot_ten_jk) : dictionary
jack-knife samples of (effective)central (tensor) potential
keys
lap : H0-term
dt : d/dt-term
dt2 : d2/dt2-term
tot : sum of these terms
rs : r (distance in lattice unit)
"""
self.spin = spin
Rcorr_tm_jk = self.load_Rcorr(self.it0-1, spin)
Rcorr_t_jk = self.load_Rcorr(self.it0, spin)
Rcorr_tp_jk = self.load_Rcorr(self.it0+1, spin)
if pot_type == 'cen':
self.pot_jk = self.calc_t_dep_HAL(np.real(Rcorr_tm_jk[spin]),
np.real(Rcorr_t_jk[spin]),
np.real(Rcorr_tp_jk[spin]))
print('>> return jackknife samples of (effective) central')
if '3s1' in spin:
self.save_pot(self.pot_jk, spin + '_eff')
else:
self.save_pot(self.pot_jk, spin + '_cen')
return self.pot_jk
elif pot_type == 'ten':
if spin == '1s0': print('for tensor force use spin = 3s1')
self.pot_jk, self.pot_ten_jk = self.calc_vc_vt(Rcorr_tm_jk, Rcorr_t_jk, Rcorr_tp_jk)
print('>> return jackknife samples of central and tensor')
self.save_pot(self.pot_jk, spin + '_cen')
self.save_pot(self.pot_ten_jk, spin + '_ten')
return self.pot_jk, self.pot_ten_jk
else:
print('>> select pot_type = cen or ten')
def calc_t_dep_HAL(self, R_tm_jk, R_t_jk, R_tp_jk):
Ns = self.Ns
lap = lambda vec: - 6.0*vec + ( np.roll(vec,+1,0) + np.roll(vec,-1,0)
+ np.roll(vec,+1,1) + np.roll(vec,-1,1)
+ np.roll(vec,+1,2) + np.roll(vec,-1,2))
pot_jk = {}
pot_jk['lap'] = np.array([ A1_proj(lap(R_t_jk[ibin,:,:,:])/R_t_jk[ibin,:,:,:])/(2.0*self.m_red)
for ibin in range(self.bin_num)]).reshape(self.bin_num,Ns**3)
pot_jk['dt'] = np.array([ - A1_proj(R_tp_jk[ibin,:,:,:] - R_tm_jk[ibin,:,:,:])/(2.0*R_t_jk[ibin,:,:,:])
for ibin in range(self.bin_num)]).reshape(self.bin_num,Ns**3)
pot_jk['dt2'] = np.array([
A1_proj((R_tp_jk[ibin,:,:,:] - 2.0*R_t_jk[ibin,:,:,:] + R_tm_jk[ibin,:,:,:]) /(R_t_jk[ibin,:,:,:]))
/(8.0*self.m_red)
for ibin in range(self.bin_num)]).reshape(self.bin_num,Ns**3)
rs = np.array([np.sqrt(x**2 + y**2 + z**2) for z in range(-Ns//2+1,Ns//2+1)
for y in range(-Ns//2+1,Ns//2+1)
for x in range(-Ns//2+1,Ns//2+1)]).reshape(Ns,Ns,Ns)
pot_jk['rs'] = np.roll(rs,(Ns//2+1,Ns//2+1,Ns//2+1),(0,1,2)).flatten()
pot_jk['tot'] = pot_jk['lap'] + pot_jk['dt'] + pot_jk['dt2']
return pot_jk
def calc_vc_vt(self, R_tm_jk, R_t_jk, R_tp_jk):
Ns = self.Ns
wave_p_jk = self.get_p_phi(R_tm_jk, R_t_jk, R_tp_jk)
wave_q_jk = self.get_q_phi(R_tm_jk, R_t_jk, R_tp_jk)
det = wave_p_jk['wave'] * wave_q_jk['ten'] - wave_q_jk['wave'] * wave_p_jk['ten']
pot_vc_jk = {}
pot_vt_jk = {}
with np.errstate(divide='ignore',invalid='ignore'):
for t in ['lap', 'dt', 'dt2']:
pot_vc_jk[t] = np.real(1.0/det * (wave_q_jk['ten'] * wave_p_jk[t] - wave_p_jk['ten'] * wave_q_jk[t]))
pot_vc_jk[t][:,0] = np.real(wave_p_jk[t][:,0]/wave_p_jk['wave'][:,0])
pot_vt_jk[t] = np.real(-1.0/det * (wave_q_jk['wave'] * wave_p_jk[t] - wave_p_jk['wave'] * wave_q_jk[t]))
pot_vt_jk[t][:,0] = 0.0
pot_vc_jk['tot'] = pot_vc_jk['lap'] + pot_vc_jk['dt'] + pot_vc_jk['dt2']
pot_vt_jk['tot'] = pot_vt_jk['lap'] + pot_vt_jk['dt'] + pot_vt_jk['dt2']
rs = np.array([np.sqrt(x**2 + y**2 + z**2) for z in range(-Ns//2+1,Ns//2+1)
for y in range(-Ns//2+1,Ns//2+1)
for x in range(-Ns//2+1,Ns//2+1)]).reshape(Ns,Ns,Ns)
pot_vc_jk['rs'] = np.roll(rs,(Ns//2+1,Ns//2+1,Ns//2+1),(0,1,2)).flatten()
pot_vt_jk['rs'] = np.roll(rs,(Ns//2+1,Ns//2+1,Ns//2+1),(0,1,2)).flatten()
return pot_vc_jk, pot_vt_jk
def save_pot(self, pot_jk, spin):
Ns = self.Ns
with open('{}/pot_{}_{}_t{:03d}_{:03d}conf_{:03d}bin.dat'.format(self.pot_output_dir,
spin, self.channel, self.it0, self.bin_num * self.bin_size, self.bin_size), 'w') as fout:
print('# output potential >> ', fout.name)
uniq_a1 = [ix + Ns*(iy + Ns*iz) for iz in range(0,Ns//2+1)
for iy in range(iz,Ns//2+1) for ix in range(iy,Ns//2+1)]
fout.write('# M_red = {:4.3f}\n'.format(self.m_red))
fout.write('# r H0R/R dR/R d2R/R tot.\n')
for r in uniq_a1:
vtot_av, vtot_err = pot_jk['tot'][:,r].mean(axis=0), pot_jk['tot'][:,r].std(axis=0)*np.sqrt(self.bin_num - 1)
vlap_av, vlap_err = pot_jk['lap'][:,r].mean(axis=0), pot_jk['lap'][:,r].std(axis=0)*np.sqrt(self.bin_num - 1)
vdt_av, vdt_err = pot_jk['dt'][:,r].mean(axis=0), pot_jk['dt'][:,r].std(axis=0)*np.sqrt(self.bin_num - 1)
vdt2_av, vdt2_err = pot_jk['dt2'][:,r].mean(axis=0), pot_jk['dt2'][:,r].std(axis=0)*np.sqrt(self.bin_num - 1)
fout.write('{:e} {:e} {:e} {:e} {:e} {:e} {:e} {:e} {:e}\n'.format(pot_jk['rs'][r],
vlap_av, vlap_err, vdt_av, vdt_err, vdt2_av, vdt2_err, vtot_av, vtot_err))
def get_p_phi(self, R_tm_jk, R_t_jk, R_tp_jk):
Ns = self.Ns
lap = lambda vec: - 6.0*vec + ( np.roll(vec,+1,0) + np.roll(vec,-1,0)
+ np.roll(vec,+1,1) + np.roll(vec,-1,1)
+ np.roll(vec,+1,2) + np.roll(vec,-1,2))
wave_p_jk = {}
wave_p_jk['wave'] = np.array([A1_proj(R_t_jk[self.spin][ibin,:,:,:])
for ibin in range(self.bin_num)]).reshape(self.bin_num, Ns**3)
wave_p_jk['lap'] = np.array([ A1_proj(lap(R_t_jk[self.spin][ibin,:,:,:]))/(2.0*self.m_red)
for ibin in range(self.bin_num)]).reshape(self.bin_num, Ns**3)
wave_p_jk['dt'] = np.array([ - A1_proj(0.5*(R_tp_jk[self.spin][ibin,:,:,:] - R_tm_jk[self.spin][ibin,:,:,:]))
for ibin in range(self.bin_num)]).reshape(self.bin_num,Ns**3)
wave_p_jk['dt2'] = np.array([ A1_proj((R_tp_jk[self.spin][ibin,:,:,:] - 2.0*R_t_jk[self.spin][ibin,:,:,:] + R_tm_jk[self.spin][ibin,:,:,:])) /(8.0*self.m_red)
for ibin in range(self.bin_num)]).reshape(self.bin_num,Ns**3)
wave_p_jk['ten'] = np.array([A1_proj(R_t_jk['ten_' + self.spin][ibin,:,:,:]) for ibin in range(self.bin_num)]).reshape(self.bin_num, Ns**3)
return wave_p_jk
def get_q_phi(self, R_tm_jk, R_t_jk, R_tp_jk):
Ns = self.Ns
unit = lambda vec: vec
lap = lambda vec: - 6.0*vec + ( np.roll(vec,+1,0) + np.roll(vec,-1,0)
+ np.roll(vec,+1,1) + np.roll(vec,-1,1)
+ np.roll(vec,+1,2) + np.roll(vec,-1,2))
wave_q_jk = {}
for kind, R_jk in zip(['wave', 'wave_p', 'wave_m'], [R_t_jk, R_tp_jk, R_tm_jk]):
wave_q_jk[kind] = self.sum_d_waves(R_jk, unit)
wave_q_jk['ten'] = self.sum_d_waves(R_t_jk, unit, ten='ten_').reshape(self.bin_num, Ns**3)
wave_q_jk['lap'] = self.sum_d_waves(R_t_jk, lap).reshape(self.bin_num, Ns**3)/(2.0*self.m_red)
wave_q_jk['dt'] = np.array([ -(0.5*(wave_q_jk['wave_p'][ibin,:,:,:] - wave_q_jk['wave_m'][ibin,:,:,:]))
for ibin in range(self.bin_num)]).reshape(self.bin_num,Ns**3)
wave_q_jk['dt2'] = np.array([ ((wave_q_jk['wave_p'][ibin,:,:,:] - 2.0*wave_q_jk['wave'][ibin,:,:,:] + wave_q_jk['wave_m'][ibin,:,:,:])) /(8.0*self.m_red)
for ibin in range(self.bin_num)]).reshape(self.bin_num,Ns**3)
wave_q_jk['wave'] = wave_q_jk['wave'].reshape(self.bin_num, Ns**3)
return wave_q_jk
def sum_d_waves(self, Rcorr, op, ten=''):
Ns = self.Ns
Ylms = spherical_harmonics(Ns)
jz0 = int(self.spin.split('_')[1]) # +1, +0, -1
label = lambda jz: '3d{:+d}_y2{:+d}'.format(jz0, jz0 + jz)
tmp_q = [np.array(
[A1_subt(op(Rcorr[ten + label(m)][ibin,:,:,:]))
* np.conjugate(Ylms[jz0+m]) for ibin in range(self.bin_num)]).reshape(self.bin_num, Ns, Ns, Ns)
for m in [+1, 0, -1]]
coeff = {'3s1_+1': [np.sqrt(6), - np.sqrt(3), 1.0],
'3s1_+0': [np.sqrt(3), -2.0, np.sqrt(3)],
'3s1_-1': [1.0, - np.sqrt(3), np.sqrt(6)]}[self.spin]
return (coeff[0] * tmp_q[0] + coeff[1] * tmp_q[1] + coeff[2] * tmp_q[2]) / np.sqrt(10)
def load_Rcorr(self, it, spin):
ch_index = {'nn': '0.00', 'xixi': '4.00'}[self.channel]
mode_list = {'1s0': ['1s0'],
'3s1_+1': ['3s1_+1', 'ten_3s1_+1', '3d+1_y2+2', 'ten_3d+1_y2+2',
'3d+1_y2+1', 'ten_3d+1_y2+1', '3d+1_y2+0', 'ten_3d+1_y2+0'],
'3s1_+0': ['3s1_+0', 'ten_3s1_+0', '3d+0_y2+1', 'ten_3d+0_y2+1',
'3d+0_y2+0', 'ten_3d+0_y2+0', '3d+0_y2-1', 'ten_3d+0_y2-1'],
'3s1_-1': ['3s1_-1', 'ten_3s1_-1', '3d-1_y2+0', 'ten_3d-1_y2+0',
'3d-1_y2-1', 'ten_3d-1_y2-1', '3d-1_y2-2', 'ten_3d-1_y2-2'] }[spin]
f_name = lambda ch: '{}/Rcorr_{}_{}_t{:03d}_{:d}bin_{:d}conf.dat'.format(self.binned_rcorr_dir,
ch, self.channel, it, self.bin_num, self.bin_num * self.bin_size)
fsize = self.Ns**3*self.bin_num * 2
Rcorr_jk = {}
if os.path.isfile(f_name(mode_list[0])) and self.reload_nbs:
for ch in mode_list:
print('# load Rcorr ', f_name(ch))
with open(f_name(ch), 'rb') as infile:
tmpr = np.array(struct.unpack('{:d}d'.format(fsize), infile.read(8*fsize))).reshape(self.bin_num,self.Ns,self.Ns,self.Ns,2)
Rcorr_jk[ch] = tmpr[:,:,:,:,0] + tmpr[:,:,:,:,1]*1j
else:
print('# calc. Rcorr ', f_name(mode_list[0]))
flist = glob.glob(f'{self.decompressed_nbs_dir}/NBSwave.S{ch_index}.t{it:03d}.binned.*.decomp.dat')
if len(flist) == 0: # call binning NBS
print('# binning and decompress NBS')
nbs_bin = NBS_binning(self.channel, it, result_dir=self.result_dir,
binned_nbs_dir=self.binned_nbs_dir, decompressed_nbs_dir=self.decompressed_nbs_dir,
bin_size=self.bin_size, confMax=self.confMax)
nbs = NBSwave(Ns=self.Ns, channel=self.channel, spin=spin, it=it, decompressed_nbs_dir=self.decompressed_nbs_dir)
Rcorr_jk = {}
for ch in mode_list:
if 'ten' in ch:
Rcorr_jk[ch] = np.array([nbs.wave_wf_ten_jk[ch][ibin,:,:,:]/
self.C_N.corr_jk[ibin,it]**2 for ibin in range(self.bin_num)])
else:
Rcorr_jk[ch] = np.array([nbs.wave_wf_jk[ch][ibin,:,:,:]/
self.C_N.corr_jk[ibin,it]**2 for ibin in range(self.bin_num)])
with open(f_name(ch), 'wb') as fout:
print(f'# save {f_name(ch)}')
fout.write(bytearray(Rcorr_jk[ch].flatten()))
return Rcorr_jk
class NBSwave(object):
def __init__(self, Ns, channel, spin, it, decompressed_nbs_dir):
self.Ns = Ns
self.spin = spin
ch_index = {'nn': '0.00', 'xixi': '4.00'}[channel]
flist = glob.glob(f'{decompressed_nbs_dir}/NBSwave.S{ch_index}.t{it:03d}.binned.*.decomp.dat')
flist.sort()
waves_bin = []
for fname in flist:
print('# load ', fname)
waves_bin.append(self.load_wavefunc(fname))
waves_bin = np.array(waves_bin)
self.bin_num = waves_bin.shape[0]
wave_jk = (np.sum(waves_bin[:,:,:,:,:,:], axis=0)
- waves_bin)/float(self.bin_num - 1)
wave_ten_jk = np.array([self.mult_tensor(wave_jk[ibin,:,:,:,:,:])
for ibin in range(self.bin_num)])
self.calc_wave_func(wave_jk, wave_ten_jk)
def load_wavefunc(self, fname):
ntotal = self.Ns**3 * 2 * 2*2 * 2*2
with open(fname, 'rb') as infile:
tmpw = np.array(struct.unpack('>{:d}d'.format(ntotal), infile.read(8*ntotal)))
wave_spin_proj = self.spin_projection(tmpw)
# parity proj.
tmpw = wave_spin_proj.reshape(self.Ns,self.Ns,self.Ns,2,2)
wave_proj = 0.5 * (tmpw[:,:,:,:,:] + np.roll(tmpw[::-1,::-1,::-1,:,:],(1,1,1),(0,1,2)))
return wave_proj.reshape(self.Ns, self.Ns, self.Ns,2,2)
def spin_projection(self, wave_in):
wave_fw = wave_in.reshape(2,2,self.Ns**3,2,2,2)
wave_fw = wave_fw[:,:,:,:,:,0] + wave_fw[:,:,:,:,:,1]*1j
if self.spin == '1s0':
return 1.0/np.sqrt(2.0) * (wave_fw[1,0,:,:,:] - wave_fw[0,1,:,:,:])
elif self.spin == '3s1_+1':
return wave_fw[0,0,:,:,:]
elif self.spin == '3s1_+0':
return 1.0/np.sqrt(2.0) * (wave_fw[1,0,:,:,:] + wave_fw[0,1,:,:,:])
elif self.spin == '3s1_-1':
return wave_fw[1,1,:,:,:]
def mult_tensor(self, wave_in):
"""
! S12 = 3/r^2 (sigma1.r)(sigma2.r) - (sigma1).(sigma2)
! S12 * ( A, B ) = \sqrt(pi/5) * ( A', B' )
! ( B, C ) ( B', D' )
! S12 * ( 0, B ) = 0
! (-B, 0 )
"""
A = wave_in[:,:,:,0,0]
B = 0.5 * (wave_in[:,:,:,0,1] + wave_in[:,:,:,1,0])
C = wave_in[:,:,:,1,1]
Ylms = spherical_harmonics(self.Ns)
wave_ten = np.zeros((self.Ns,self.Ns,self.Ns,2,2), dtype=complex)
wave_ten[:,:,:,0,0] = np.sqrt(np.pi/5.0) * (
4.0*Ylms[0]*A
+ 4.0*np.sqrt(6.0)*Ylms[-1]*B
+ 4.0*np.sqrt(6.0)*Ylms[-2]*C)
wave_ten[:,:,:,0,1] = np.sqrt(np.pi/5.0) * (
- 2.0*np.sqrt(6.0)*Ylms[1]*A
- 8.0*Ylms[0]*B
- 2.0*np.sqrt(6.0)*Ylms[-1]*C)
wave_ten[:,:,:,1,0] = wave_ten[:,:,:,0,1]
wave_ten[:,:,:,1,1] = np.sqrt(np.pi/5.0) * (
4.0*np.sqrt(6.0)*Ylms[2]*A
+ 4.0*np.sqrt(6.0)*Ylms[1]*B
+ 4.0*Ylms[0]*C)
return wave_ten
def calc_wave_func(self, wave_jk, wave_ten_jk):
self.wave_wf_jk = {}
self.wave_wf_ten_jk = {}
if self.spin == '1s0':
self.wave_wf_jk[self.spin] = 1.0/np.sqrt(2.0) * np.array(
[A1_proj(wave_jk[ibin,:,:,:,1,0] - wave_jk[ibin,:,:,:,0,1])
for ibin in range(self.bin_num)])
self.wave_wf_ten_jk[self.spin] = 1.0/np.sqrt(2.0) * np.array(
[A1_proj(wave_ten_jk[ibin,:,:,:,1,0] - wave_ten_jk[ibin,:,:,:,0,1])
for ibin in range(self.bin_num)])
elif self.spin == '3s1_+1':
self.wave_wf_jk[self.spin] = np.array(
[A1_proj(wave_jk[ibin,:,:,:,0,0]) for ibin in range(self.bin_num)])
self.wave_wf_ten_jk['ten_' + self.spin] = np.array(
[A1_proj(wave_ten_jk[ibin,:,:,:,0,0]) for ibin in range(self.bin_num)])
elif self.spin == '3s1_+0':
self.wave_wf_jk[self.spin] = 1.0/np.sqrt(2.0) * np.array(
[A1_proj(wave_jk[ibin,:,:,:,0,1] + wave_jk[ibin,:,:,:,1,0])
for ibin in range(self.bin_num)])
self.wave_wf_ten_jk['ten_' + self.spin] = 1.0/np.sqrt(2.0) * np.array(
[A1_proj(wave_ten_jk[ibin,:,:,:,0,1] + wave_ten_jk[ibin,:,:,:,1,0])
for ibin in range(self.bin_num)])
elif self.spin == '3s1_-1':
self.wave_wf_jk[self.spin] = np.array(
[A1_proj(wave_jk[ibin,:,:,:,1,1]) for ibin in range(self.bin_num)])
self.wave_wf_ten_jk['ten_' + self.spin] = np.array(
[A1_proj(wave_ten_jk[ibin,:,:,:,1,1]) for ibin in range(self.bin_num)])
if '3s1' in self.spin:
jz0 = int(self.spin.split('_')[1])
label = lambda jz: '3d{:+d}_y2{:+d}'.format(jz0, jz0 + jz)
self.wave_wf_jk[label(1)] = np.array([A1_subt(wave_jk[ibin,:,:,:,1,1]) for ibin in range(self.bin_num)])
self.wave_wf_ten_jk['ten_' + label(1)] = np.array([A1_subt(wave_ten_jk[ibin,:,:,:,1,1]) for ibin in range(self.bin_num)])
self.wave_wf_jk[label(0)] = 1.0/np.sqrt(2.0) * np.array(
[A1_subt(wave_jk[ibin,:,:,:,1,0] + wave_jk[ibin,:,:,:,0,1]) for ibin in range(self.bin_num)])
self.wave_wf_ten_jk['ten_' + label(0)] = 1.0/np.sqrt(2.0) * np.array(
[A1_subt(wave_ten_jk[ibin,:,:,:,1,0] + wave_ten_jk[ibin,:,:,:,0,1]) for ibin in range(self.bin_num)])
self.wave_wf_jk[label(-1)] = np.array([A1_subt(wave_jk[ibin,:,:,:,0,0]) for ibin in range(self.bin_num)])
self.wave_wf_ten_jk['ten_' + label(-1)] = np.array([A1_subt(wave_ten_jk[ibin,:,:,:,0,0]) for ibin in range(self.bin_num)])
def spherical_harmonics(Ns):
tmp_ind = np.array([[ix, iy, iz]
for iz in np.arange(-Ns//2+1,Ns//2+1)
for iy in np.arange(-Ns//2+1,Ns//2+1)
for ix in np.arange(-Ns//2+1,Ns//2+1)]).reshape(Ns,Ns,Ns,3)
xyz = np.roll(np.roll(np.roll(tmp_ind,Ns//2+1,0),
Ns//2+1,1),Ns//2+1,2).reshape(Ns**3,3)
# x = 0, 1, ..., N/2, -N/2+1, ...
def spherical_harmonics_Dwave(m):
Ylm = np.zeros(Ns**3,dtype=complex)
rsq = xyz[:,0]**2+xyz[:,1]**2+xyz[:,2]**2
if m == -2:
Ylm[1:] = 1.0/4.0 * ( np.sqrt(7.5/np.pi)
* (xyz[1:,0]**2
- 2.0j*xyz[1:,0]*xyz[1:,1]
- xyz[1:,1]**2) / rsq[1:])
elif m == -1:
Ylm[1:] = 1.0/2.0 * ( np.sqrt(7.5/np.pi)
* (xyz[1:,0]*xyz[1:,2]
- 1.0j*xyz[1:,1]*xyz[1:,2])
/ rsq[1:])
elif m == 0:
Ylm[1:] = 1.0/4.0 * ( np.sqrt(5.0/np.pi)
* (-xyz[1:,0]**2 -xyz[1:,1]**2
+2.0*xyz[1:,2]**2) / rsq[1:])
elif m == 1:
Ylm[1:] = -1.0/2.0 * ( np.sqrt(7.5/np.pi)
* (xyz[1:,0]*xyz[1:,2]
+ 1.0j*xyz[1:,1]*xyz[1:,2])
/ rsq[1:])
elif m == 2:
Ylm[1:] = 1.0/4.0 * ( np.sqrt(7.5/np.pi)
* (xyz[1:,0]**2
+ 2.0j*xyz[1:,0]*xyz[1:,1]
- xyz[1:,1]**2) / rsq[1:])
return Ylm.reshape(Ns,Ns,Ns)
Ylms = {m: spherical_harmonics_Dwave(m)
for m in [-2,-1,0,1,2]}
return Ylms
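# --- hedged usage sketch (not part of the original module) ---
# Quick consistency check of the lattice Y_2m above: the usual relation
# Y_{2,-m} = (-1)**m * conj(Y_{2,m}) should hold point by point. The lattice
# size Ns=8 is an arbitrary example value.
def _check_ylm_symmetry(Ns=8):
    Ylms = spherical_harmonics(Ns)
    for m in (1, 2):
        assert np.allclose(Ylms[-m], (-1)**m * np.conjugate(Ylms[m]))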
def A1_subt(wave_in):
return wave_in - A1_proj(wave_in)
def A1_proj(wave_in):
wave_tmp0 = wave_in
wave_tmp1 = (wave_tmp0[:,:,:] + np.roll(wave_tmp0,-1,0)[::-1,:,:]
+ np.roll(wave_tmp0,-1,1)[:,::-1,:]
+ np.roll(wave_tmp0,-1,2)[:,:,::-1]
+ np.roll(np.roll(wave_tmp0,-1,0),-1,1)[::-1,::-1,:]
+ np.roll(np.roll(wave_tmp0,-1,1),-1,2)[:,::-1,::-1]
+ np.roll(np.roll(wave_tmp0,-1,2),-1,0)[::-1,:,::-1]
+ np.roll(np.roll(np.roll(wave_tmp0,-1,0),-1,1),-1,2)[::-1,::-1,::-1])/8.0
wave_tmp2 = (wave_tmp1
+ np.swapaxes(wave_tmp1,0,1)
+ np.swapaxes(wave_tmp1,1,2)
+
|
np.swapaxes(wave_tmp1,2,0)
|
numpy.swapaxes
|
# -*- coding: utf-8 -*-
u"""
.. _glow:
xrtGlow -- an interactive 3D beamline viewer
--------------------------------------------
The beamline created in xrtQook can be interactively viewed in an OpenGL based
widget xrtGlow. It visualizes beams, footprints, surfaces, apertures and
screens. The brightness represents intensity and the color represents an
auxiliary user-selected distribution, typically energy. A virtual screen can be
put at any position and dragged by mouse with simultaneous observation of the
beam distribution on it. See two example screenshots below (click to expand and
read the captions).
The primary purpose of xrtGlow is to demonstrate the correctness of the
beamline alignment, given that xrtQook can automatically calculate several
positional and angular parameters.
See also :ref:`Notes on using xrtGlow <glow_notes>`.
+-------------+-------------+
| |glow1| | |glow2| |
+-------------+-------------+
.. |glow1| imagezoom:: _images/xrtGlow1.png
   :alt: A view of xrtQook with embedded xrtGlow. Visible is a virtual
screen draggable by mouse, a curved mirror surface with a footprint on
it and the color (energy) distribution on the virtual screen. The scale
along the beamline is compressed by a factor of 100.
.. |glow2| imagezoom:: _images/xrtGlow2.png
:loc: upper-right-corner
   :alt: xrtGlow with three double-paraboloid lenses. The scaling on
this image is isotropic. The source (on the left) is a parallel
geometric source. The coloring is by axial divergence (red=0), showing
the effect of refractive focusing.
"""
from __future__ import print_function
__author__ = "<NAME>, <NAME>"
import sys
import os
import numpy as np
from functools import partial
import matplotlib as mpl
# import inspect
import re
import copy
# import time
from collections import OrderedDict
from ...backends import raycing
from ...backends.raycing import sources as rsources
from ...backends.raycing import screens as rscreens
from ...backends.raycing import oes as roes
from ...backends.raycing import apertures as rapertures
from ...backends.raycing import materials as rmats
from ..commons import qt
from ..commons import gl
from ...plotter import colorFactor, colorSaturation
_DEBUG_ = False # If False, exceptions inside the module are ignored
class xrtGlow(qt.QWidget):
def __init__(self, arrayOfRays, parent=None, progressSignal=None):
super(xrtGlow, self).__init__()
self.parentRef = parent
self.cAxisLabelSize = 10
mplFont = {'size': self.cAxisLabelSize}
mpl.rc('font', **mplFont)
self.setWindowTitle('xrtGlow')
iconsDir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'_icons')
self.setWindowIcon(qt.QIcon(os.path.join(iconsDir, 'icon-GLow.ico')))
self.populateOEsList(arrayOfRays)
self.segmentsModel = self.initSegmentsModel()
self.segmentsModelRoot = self.segmentsModel.invisibleRootItem()
self.populateSegmentsModel(arrayOfRays)
self.fluxDataModel = qt.QStandardItemModel()
for colorField in raycing.allBeamFields:
self.fluxDataModel.appendRow(qt.QStandardItem(colorField))
self.customGlWidget = xrtGlWidget(self, arrayOfRays,
self.segmentsModelRoot,
self.oesList,
self.beamsToElements,
progressSignal)
self.customGlWidget.rotationUpdated.connect(self.updateRotationFromGL)
self.customGlWidget.scaleUpdated.connect(self.updateScaleFromGL)
self.customGlWidget.histogramUpdated.connect(self.updateColorMap)
self.customGlWidget.setContextMenuPolicy(qt.CustomContextMenu)
self.customGlWidget.customContextMenuRequested.connect(self.glMenu)
self.makeNavigationPanel()
self.makeTransformationPanel()
self.makeColorsPanel()
self.makeGridAndProjectionsPanel()
self.makeScenePanel()
mainLayout = qt.QHBoxLayout()
sideLayout = qt.QVBoxLayout()
tabs = qt.QTabWidget()
tabs.addTab(self.navigationPanel, "Navigation")
tabs.addTab(self.transformationPanel, "Transformations")
tabs.addTab(self.colorOpacityPanel, "Colors")
tabs.addTab(self.projectionPanel, "Grid/Projections")
tabs.addTab(self.scenePanel, "Scene")
sideLayout.addWidget(tabs)
self.canvasSplitter = qt.QSplitter()
self.canvasSplitter.setChildrenCollapsible(False)
self.canvasSplitter.setOrientation(qt.Horizontal)
mainLayout.addWidget(self.canvasSplitter)
sideWidget = qt.QWidget()
sideWidget.setLayout(sideLayout)
self.canvasSplitter.addWidget(self.customGlWidget)
self.canvasSplitter.addWidget(sideWidget)
self.setLayout(mainLayout)
self.customGlWidget.oesList = self.oesList
toggleHelp = qt.QShortcut(self)
toggleHelp.setKey(qt.Key_F1)
toggleHelp.activated.connect(self.customGlWidget.toggleHelp)
fastSave = qt.QShortcut(self)
fastSave.setKey(qt.Key_F5)
fastSave.activated.connect(partial(self.saveScene, '_xrtScnTmp_.npy'))
fastLoad = qt.QShortcut(self)
fastLoad.setKey(qt.Key_F6)
fastLoad.activated.connect(partial(self.loadScene, '_xrtScnTmp_.npy'))
startMovie = qt.QShortcut(self)
startMovie.setKey(qt.Key_F7)
startMovie.activated.connect(self.startRecordingMovie)
toggleScreen = qt.QShortcut(self)
toggleScreen.setKey(qt.Key_F3)
toggleScreen.activated.connect(self.customGlWidget.toggleVScreen)
self.dockToQook = qt.QShortcut(self)
self.dockToQook.setKey(qt.Key_F4)
self.dockToQook.activated.connect(self.toggleDock)
tiltScreen = qt.QShortcut(self)
tiltScreen.setKey(qt.CTRL + qt.Key_T)
tiltScreen.activated.connect(self.customGlWidget.switchVScreenTilt)
def makeNavigationPanel(self):
self.navigationLayout = qt.QVBoxLayout()
centerCBLabel = qt.QLabel('Center view at:')
self.centerCB = qt.QComboBox()
self.centerCB.setMaxVisibleItems(48)
for key in self.oesList.keys():
self.centerCB.addItem(str(key))
# centerCB.addItem('customXYZ')
self.centerCB.currentIndexChanged['QString'].connect(self.centerEl)
self.centerCB.setCurrentIndex(0)
layout = qt.QHBoxLayout()
layout.addWidget(centerCBLabel)
layout.addWidget(self.centerCB)
layout.addStretch()
self.navigationLayout.addLayout(layout)
self.oeTree = qt.QTreeView()
self.oeTree.setModel(self.segmentsModel)
self.oeTree.setContextMenuPolicy(qt.CustomContextMenu)
self.oeTree.customContextMenuRequested.connect(self.oeTreeMenu)
self.oeTree.resizeColumnToContents(0)
self.navigationLayout.addWidget(self.oeTree)
self.navigationPanel = qt.QWidget(self)
self.navigationPanel.setLayout(self.navigationLayout)
def makeTransformationPanel(self):
self.zoomPanel = qt.QGroupBox(self)
self.zoomPanel.setFlat(False)
self.zoomPanel.setTitle("Log scale")
zoomLayout = qt.QVBoxLayout()
fitLayout = qt.QHBoxLayout()
scaleValidator = qt.QDoubleValidator()
scaleValidator.setRange(0, 7, 7)
self.zoomSliders = []
self.zoomEditors = []
for iaxis, axis in enumerate(['x', 'y', 'z']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit()
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(0, 7, 0.01)
value = 1 if iaxis == 1 else 3
axSlider.setValue(value)
axEdit.setText("{0:.2f}".format(value))
axEdit.setValidator(scaleValidator)
axEdit.editingFinished.connect(
partial(self.updateScaleFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateScale, iaxis, axEdit))
self.zoomSliders.append(axSlider)
self.zoomEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(12)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
zoomLayout.addLayout(layout)
for iaxis, axis in enumerate(['x', 'y', 'z', 'all']):
fitX = qt.QPushButton("fit {}".format(axis))
dim = [iaxis] if iaxis < 3 else [0, 1, 2]
fitX.clicked.connect(partial(self.fitScales, dim))
fitLayout.addWidget(fitX)
zoomLayout.addLayout(fitLayout)
self.zoomPanel.setLayout(zoomLayout)
self.rotationPanel = qt.QGroupBox(self)
self.rotationPanel.setFlat(False)
self.rotationPanel.setTitle("Rotation (deg)")
rotationLayout = qt.QVBoxLayout()
fixedViewsLayout = qt.QHBoxLayout()
# rotModeCB = qt.QCheckBox('Use Eulerian rotation')
# rotModeCB.setCheckState(2)
# rotModeCB.stateChanged.connect(self.checkEulerian)
# rotationLayout.addWidget(rotModeCB, 0, 0)
rotValidator = qt.QDoubleValidator()
rotValidator.setRange(-180., 180., 9)
self.rotationSliders = []
self.rotationEditors = []
for iaxis, axis in enumerate(['pitch (Rx)', 'roll (Ry)', 'yaw (Rz)']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit("0.")
axEdit.setValidator(rotValidator)
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(-180, 180, 0.01)
axSlider.setValue(0)
axEdit.editingFinished.connect(
partial(self.updateRotationFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateRotation, iaxis, axEdit))
self.rotationSliders.append(axSlider)
self.rotationEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(64)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
rotationLayout.addLayout(layout)
for axis, angles in zip(['Side', 'Front', 'Top', 'Isometric'],
[[[0.], [0.], [0.]],
[[0.], [0.], [90.]],
[[0.], [90.], [0.]],
[[0.], [35.264], [-45.]]]):
setView = qt.QPushButton(axis)
setView.clicked.connect(partial(self.updateRotationFromGL, angles))
fixedViewsLayout.addWidget(setView)
rotationLayout.addLayout(fixedViewsLayout)
self.rotationPanel.setLayout(rotationLayout)
self.transformationPanel = qt.QWidget(self)
transformationLayout = qt.QVBoxLayout()
transformationLayout.addWidget(self.zoomPanel)
transformationLayout.addWidget(self.rotationPanel)
transformationLayout.addStretch()
self.transformationPanel.setLayout(transformationLayout)
def fitScales(self, dims):
for dim in dims:
dimMin = np.min(self.customGlWidget.footprintsArray[:, dim])
dimMax = np.max(self.customGlWidget.footprintsArray[:, dim])
newScale = 1.9 * self.customGlWidget.aPos[dim] /\
(dimMax - dimMin) * self.customGlWidget.maxLen
self.customGlWidget.tVec[dim] = -0.5 * (dimMin + dimMax)
self.customGlWidget.scaleVec[dim] = newScale
self.updateScaleFromGL(self.customGlWidget.scaleVec)
def makeColorsPanel(self):
self.opacityPanel = qt.QGroupBox(self)
self.opacityPanel.setFlat(False)
self.opacityPanel.setTitle("Opacity")
opacityLayout = qt.QVBoxLayout()
self.opacitySliders = []
self.opacityEditors = []
for iaxis, (axis, rstart, rend, rstep, val) in enumerate(zip(
('Line opacity', 'Line width', 'Point opacity', 'Point size'),
(0, 0, 0, 0), (1., 20., 1., 20.), (0.001, 0.01, 0.001, 0.01),
(0.2, 2., 0.25, 3.))):
axLabel = qt.QLabel(axis)
opacityValidator = qt.QDoubleValidator()
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(rstart, rend, rstep)
axSlider.setValue(val)
axEdit = qt.QLineEdit()
opacityValidator.setRange(rstart, rend, 5)
self.updateOpacity(iaxis, axEdit, val)
axEdit.setValidator(opacityValidator)
axEdit.editingFinished.connect(
partial(self.updateOpacityFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateOpacity, iaxis, axEdit))
self.opacitySliders.append(axSlider)
self.opacityEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(80)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
opacityLayout.addLayout(layout)
self.opacityPanel.setLayout(opacityLayout)
self.colorPanel = qt.QGroupBox(self)
self.colorPanel.setFlat(False)
self.colorPanel.setTitle("Color")
colorLayout = qt.QVBoxLayout()
self.mplFig = mpl.figure.Figure(dpi=self.logicalDpiX()*0.8)
self.mplFig.patch.set_alpha(0.)
self.mplFig.subplots_adjust(left=0.15, bottom=0.15, top=0.92)
self.mplAx = self.mplFig.add_subplot(111)
self.mplFig.suptitle("")
self.drawColorMap('energy')
self.paletteWidget = qt.FigCanvas(self.mplFig)
self.paletteWidget.setSizePolicy(qt.QSizePolicy.Maximum,
qt.QSizePolicy.Maximum)
self.paletteWidget.span = mpl.widgets.RectangleSelector(
self.mplAx, self.updateColorSelFromMPL, drawtype='box',
useblit=True, rectprops=dict(alpha=0.4, facecolor='white'),
button=1, interactive=True)
layout = qt.QHBoxLayout()
self.colorControls = []
colorCBLabel = qt.QLabel('Color Axis:')
colorCB = qt.QComboBox()
colorCB.setMaxVisibleItems(48)
colorCB.setModel(self.fluxDataModel)
colorCB.setCurrentIndex(colorCB.findText('energy'))
colorCB.currentIndexChanged['QString'].connect(self.changeColorAxis)
self.colorControls.append(colorCB)
layout.addWidget(colorCBLabel)
layout.addWidget(colorCB)
layout.addStretch()
colorLayout.addLayout(layout)
colorLayout.addWidget(self.paletteWidget)
layout = qt.QHBoxLayout()
for icSel, cSelText in enumerate(['Color Axis min', 'Color Axis max']):
if icSel > 0:
layout.addStretch()
selLabel = qt.QLabel(cSelText)
selValidator = qt.QDoubleValidator()
selValidator.setRange(-1.0e20 if icSel == 0 else
self.customGlWidget.colorMin,
self.customGlWidget.colorMax if icSel == 0
else 1.0e20, 5)
selQLE = qt.QLineEdit()
selQLE.setValidator(selValidator)
selQLE.setText('{0:.6g}'.format(
self.customGlWidget.colorMin if icSel == 0 else
self.customGlWidget.colorMax))
selQLE.editingFinished.connect(
partial(self.updateColorAxis, icSel))
selQLE.setMaximumWidth(80)
self.colorControls.append(selQLE)
layout.addWidget(selLabel)
layout.addWidget(selQLE)
colorLayout.addLayout(layout)
layout = qt.QHBoxLayout()
for icSel, cSelText in enumerate(['Selection min', 'Selection max']):
if icSel > 0:
layout.addStretch()
selLabel = qt.QLabel(cSelText)
selValidator = qt.QDoubleValidator()
selValidator.setRange(self.customGlWidget.colorMin,
self.customGlWidget.colorMax, 5)
selQLE = qt.QLineEdit()
selQLE.setValidator(selValidator)
selQLE.setText('{0:.6g}'.format(
self.customGlWidget.colorMin if icSel == 0 else
self.customGlWidget.colorMax))
selQLE.editingFinished.connect(
partial(self.updateColorSelFromQLE, icSel))
selQLE.setMaximumWidth(80)
self.colorControls.append(selQLE)
layout.addWidget(selLabel)
layout.addWidget(selQLE)
colorLayout.addLayout(layout)
selSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
rStep = (self.customGlWidget.colorMax -
self.customGlWidget.colorMin) / 100.
rValue = (self.customGlWidget.colorMax +
self.customGlWidget.colorMin) * 0.5
selSlider.setRange(self.customGlWidget.colorMin,
self.customGlWidget.colorMax, rStep)
selSlider.setValue(rValue)
selSlider.sliderMoved.connect(self.updateColorSel)
self.colorControls.append(selSlider)
colorLayout.addWidget(selSlider)
layout = qt.QHBoxLayout()
axLabel = qt.QLabel("Intensity cut-off")
axEdit = qt.QLineEdit("0.01")
cutValidator = qt.QDoubleValidator()
cutValidator.setRange(0, 1, 3)
axEdit.setValidator(cutValidator)
axEdit.editingFinished.connect(self.updateCutoffFromQLE)
axLabel.setMinimumWidth(144)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addStretch()
colorLayout.addLayout(layout)
layout = qt.QHBoxLayout()
explLabel = qt.QLabel("Color bump height, mm")
explEdit = qt.QLineEdit("0.0")
explValidator = qt.QDoubleValidator()
explValidator.setRange(-1000, 1000, 3)
explEdit.setValidator(explValidator)
explEdit.editingFinished.connect(self.updateExplosionDepth)
explLabel.setMinimumWidth(144)
layout.addWidget(explLabel)
explEdit.setMaximumWidth(48)
layout.addWidget(explEdit)
layout.addStretch()
colorLayout.addLayout(layout)
# axSlider = qt.glowSlider(
# self, qt.Horizontal, qt.glowTopScale)
# axSlider.setRange(0, 1, 0.001)
# axSlider.setValue(0.01)
# axSlider.valueChanged.connect(self.updateCutoff)
# colorLayout.addWidget(axSlider, 3+3, 0, 1, 2)
glNormCB = qt.QCheckBox('Global Normalization')
glNormCB.setChecked(True)
glNormCB.stateChanged.connect(self.checkGNorm)
colorLayout.addWidget(glNormCB)
self.glNormCB = glNormCB
iHSVCB = qt.QCheckBox('Intensity as HSV Value')
iHSVCB.setChecked(False)
iHSVCB.stateChanged.connect(self.checkHSV)
colorLayout.addWidget(iHSVCB)
self.iHSVCB = iHSVCB
self.colorPanel.setLayout(colorLayout)
self.colorOpacityPanel = qt.QWidget(self)
colorOpacityLayout = qt.QVBoxLayout()
colorOpacityLayout.addWidget(self.colorPanel)
colorOpacityLayout.addWidget(self.opacityPanel)
colorOpacityLayout.addStretch()
self.colorOpacityPanel.setLayout(colorOpacityLayout)
def makeGridAndProjectionsPanel(self):
self.gridPanel = qt.QGroupBox(self)
self.gridPanel.setFlat(False)
self.gridPanel.setTitle("Show coordinate grid")
self.gridPanel.setCheckable(True)
self.gridPanel.toggled.connect(self.checkDrawGrid)
scaleValidator = qt.QDoubleValidator()
scaleValidator.setRange(0, 7, 7)
xyzGridLayout = qt.QVBoxLayout()
self.gridSliders = []
self.gridEditors = []
for iaxis, axis in enumerate(['x', 'y', 'z']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit("0.9")
axEdit.setValidator(scaleValidator)
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(0, 10, 0.01)
axSlider.setValue(0.9)
axEdit.editingFinished.connect(
partial(self.updateGridFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateGrid, iaxis, axEdit))
self.gridSliders.append(axSlider)
self.gridEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(20)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
xyzGridLayout.addLayout(layout)
checkBox = qt.QCheckBox('Fine grid')
checkBox.setChecked(False)
checkBox.stateChanged.connect(self.checkFineGrid)
xyzGridLayout.addWidget(checkBox)
self.checkBoxFineGrid = checkBox
self.gridControls = []
projectionLayout = qt.QVBoxLayout()
checkBox = qt.QCheckBox('Perspective')
checkBox.setChecked(True)
checkBox.stateChanged.connect(self.checkPerspect)
self.checkBoxPerspective = checkBox
projectionLayout.addWidget(self.checkBoxPerspective)
self.gridControls.append(self.checkBoxPerspective)
self.gridControls.append(self.gridPanel)
self.gridControls.append(self.checkBoxFineGrid)
self.gridPanel.setLayout(xyzGridLayout)
self.projVisPanel = qt.QGroupBox(self)
self.projVisPanel.setFlat(False)
self.projVisPanel.setTitle("Projections visibility")
projVisLayout = qt.QVBoxLayout()
self.projLinePanel = qt.QGroupBox(self)
self.projLinePanel.setFlat(False)
self.projLinePanel.setTitle("Projections opacity")
self.projectionControls = []
for iaxis, axis in enumerate(['Side (YZ)', 'Front (XZ)', 'Top (XY)']):
checkBox = qt.QCheckBox(axis)
checkBox.setChecked(False)
checkBox.stateChanged.connect(partial(self.projSelection, iaxis))
self.projectionControls.append(checkBox)
projVisLayout.addWidget(checkBox)
self.projLinePanel.setEnabled(False)
self.projVisPanel.setLayout(projVisLayout)
projLineLayout = qt.QVBoxLayout()
self.projectionOpacitySliders = []
self.projectionOpacityEditors = []
for iaxis, axis in enumerate(
['Line opacity', 'Line width', 'Point opacity', 'Point size']):
axLabel = qt.QLabel(axis)
projectionValidator = qt.QDoubleValidator()
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
if iaxis in [0, 2]:
axSlider.setRange(0, 1., 0.001)
axSlider.setValue(0.1)
axEdit = qt.QLineEdit("0.1")
projectionValidator.setRange(0, 1., 5)
else:
axSlider.setRange(0, 20, 0.01)
axSlider.setValue(1.)
axEdit = qt.QLineEdit("1")
projectionValidator.setRange(0, 20., 5)
axEdit.setValidator(projectionValidator)
axEdit.editingFinished.connect(
partial(self.updateProjectionOpacityFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateProjectionOpacity, iaxis, axEdit))
self.projectionOpacitySliders.append(axSlider)
self.projectionOpacityEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(80)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
projLineLayout.addLayout(layout)
self.projLinePanel.setLayout(projLineLayout)
self.projectionPanel = qt.QWidget(self)
projectionLayout.addWidget(self.gridPanel)
projectionLayout.addWidget(self.projVisPanel)
projectionLayout.addWidget(self.projLinePanel)
projectionLayout.addStretch()
self.projectionPanel.setLayout(projectionLayout)
def makeScenePanel(self):
sceneLayout = qt.QVBoxLayout()
self.sceneControls = []
for iCB, (cbText, cbFunc) in enumerate(zip(
['Enable antialiasing',
'Enable blending',
'Depth test for Lines',
'Depth test for Points',
'Invert scene color',
'Use scalable font',
'Show Virtual Screen label',
'Virtual Screen for Indexing',
'Show lost rays',
'Show local axes'],
[self.checkAA,
self.checkBlending,
self.checkLineDepthTest,
self.checkPointDepthTest,
self.invertSceneColor,
self.checkScalableFont,
self.checkShowLabels,
self.checkVSColor,
self.checkShowLost,
self.checkShowLocalAxes])):
aaCheckBox = qt.QCheckBox(cbText)
aaCheckBox.setChecked(iCB in [1, 2])
aaCheckBox.stateChanged.connect(cbFunc)
self.sceneControls.append(aaCheckBox)
sceneLayout.addWidget(aaCheckBox)
axLabel = qt.QLabel('Font Size')
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(1, 20, 0.5)
axSlider.setValue(5)
axSlider.valueChanged.connect(self.updateFontSize)
layout = qt.QHBoxLayout()
layout.addWidget(axLabel)
layout.addWidget(axSlider)
sceneLayout.addLayout(layout)
labelPrec = qt.QComboBox()
for order in range(5):
labelPrec.addItem("{}mm".format(10**-order))
labelPrec.setCurrentIndex(1)
labelPrec.currentIndexChanged['int'].connect(self.setLabelPrec)
aaLabel = qt.QLabel('Label Precision')
layout = qt.QHBoxLayout()
aaLabel.setMinimumWidth(100)
layout.addWidget(aaLabel)
labelPrec.setMaximumWidth(120)
layout.addWidget(labelPrec)
layout.addStretch()
sceneLayout.addLayout(layout)
oeTileValidator = qt.QIntValidator()
oeTileValidator.setRange(1, 20)
for ia, axis in enumerate(['OE tessellation X', 'OE tessellation Y']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit("2")
axEdit.setValidator(oeTileValidator)
axEdit.editingFinished.connect(partial(self.updateTileFromQLE, ia))
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(100)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addStretch()
sceneLayout.addLayout(layout)
self.scenePanel = qt.QWidget(self)
sceneLayout.addStretch()
self.scenePanel.setLayout(sceneLayout)
def toggleDock(self):
if self.parentRef is not None:
self.parentRef.catchViewer()
self.parentRef = None
def initSegmentsModel(self, isNewModel=True):
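        """Create the QStandardItemModel behind the beamline tree: four
        checkable columns (Rays, Footprint, Surface, Label). For a brand
        new model a header row is appended with the first two columns
        checked and itemChanged is wired to updateRaysList."""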
newModel = qt.QStandardItemModel()
newModel.setHorizontalHeaderLabels(['Rays',
'Footprint',
'Surface',
'Label'])
if isNewModel:
headerRow = []
for i in range(4):
child = qt.QStandardItem("")
child.setEditable(False)
child.setCheckable(True)
child.setCheckState(2 if i < 2 else 0)
headerRow.append(child)
newModel.invisibleRootItem().appendRow(headerRow)
newModel.itemChanged.connect(self.updateRaysList)
return newModel
def updateOEsList(self, arrayOfRays):
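        """Rebuild the element and beam dictionaries from a new ray-tracing
        result, refresh the segments model and the centering combo box, and
        push the updated data to the GL widget before redrawing."""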
self.oesList = None
self.beamsToElements = None
self.populateOEsList(arrayOfRays)
self.updateSegmentsModel(arrayOfRays)
self.oeTree.resizeColumnToContents(0)
self.centerCB.blockSignals(True)
tmpIndex = self.centerCB.currentIndex()
for i in range(self.centerCB.count()):
self.centerCB.removeItem(0)
for key in self.oesList.keys():
self.centerCB.addItem(str(key))
# self.segmentsModel.layoutChanged.emit()
try:
self.centerCB.setCurrentIndex(tmpIndex)
except: # analysis:ignore
pass
self.centerCB.blockSignals(False)
self.customGlWidget.arrayOfRays = arrayOfRays
self.customGlWidget.beamsDict = arrayOfRays[1]
self.customGlWidget.oesList = self.oesList
self.customGlWidget.beamsToElements = self.beamsToElements
# self.customGlWidget.newColorAxis = True
# self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.changeColorAxis(None)
self.customGlWidget.positionVScreen()
self.customGlWidget.glDraw()
def populateOEsList(self, arrayOfRays):
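        """Fill self.oesList with per-element records
        [oe object, beam name at the element, global center, is2ndXtal]
        and self.beamsToElements with the reverse beam-to-element mapping.
        Double-crystal elements are split into '_Entrance' and '_Exit'
        entries."""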
self.oesList = OrderedDict()
self.beamsToElements = OrderedDict()
oesList = arrayOfRays[2]
for segment in arrayOfRays[0]:
if segment[0] == segment[2]:
oesList[segment[0]].append(segment[1])
oesList[segment[0]].append(segment[3])
for segOE, oeRecord in oesList.items():
if len(oeRecord) > 2: # DCM
elNames = [segOE+'_Entrance', segOE+'_Exit']
else:
elNames = [segOE]
for elName in elNames:
self.oesList[elName] = [oeRecord[0]] # pointer to object
if len(oeRecord) < 3 or elName.endswith('_Entrance'):
center = list(oeRecord[0].center)
is2ndXtal = False
else:
is2ndXtal = True
# center = arrayOfRays[1][oeRecord[3]].wCenter
gb = self.oesList[elName][0].local_to_global(
rsources.Beam(nrays=2), returnBeam=True,
is2ndXtal=is2ndXtal)
center = [gb.x[0], gb.y[0], gb.z[0]]
for segment in arrayOfRays[0]:
ind = oeRecord[1]*2
if str(segment[ind]) == str(segOE):
if len(oeRecord) < 3 or\
(elName.endswith('Entrance') and
str(segment[3]) == str(oeRecord[2])) or\
(elName.endswith('Exit') and
str(segment[3]) == str(oeRecord[3])):
if len(self.oesList[elName]) < 2:
self.oesList[elName].append(
str(segment[ind+1]))
self.beamsToElements[segment[ind+1]] =\
elName
break
else:
self.oesList[elName].append(None)
self.oesList[elName].append(center)
self.oesList[elName].append(is2ndXtal)
def createRow(self, text, segMode):
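        """Build one tree row of four QStandardItems. segMode=1 makes the
        Footprint/Surface/Label columns checkable (an element row),
        segMode=3 makes only column 0 checkable (a 'to <element>' segment
        row); columns 0 and 1 start checked, the rest unchecked."""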
newRow = []
for iCol in range(4):
newItem = qt.QStandardItem(str(text) if iCol == 0 else "")
newItem.setCheckable(True if (segMode == 3 and iCol == 0) or
(segMode == 1 and iCol > 0) else False)
if newItem.isCheckable():
newItem.setCheckState(2 if iCol < 2 else 0)
newItem.setEditable(False)
newRow.append(newItem)
return newRow
def updateSegmentsModel(self, arrayOfRays):
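        """Rebuild the segments model for a new ray-tracing result, copying
        check states from rows that already exist so that user selections
        survive a re-run."""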
def copyRow(item, row):
newRow = []
for iCol in range(4):
oldItem = item.child(row, iCol)
newItem = qt.QStandardItem(str(oldItem.text()))
newItem.setCheckable(oldItem.isCheckable())
if newItem.isCheckable():
newItem.setCheckState(oldItem.checkState())
newItem.setEditable(oldItem.isEditable())
newRow.append(newItem)
return newRow
newSegmentsModel = self.initSegmentsModel(isNewModel=False)
newSegmentsModel.invisibleRootItem().appendRow(
copyRow(self.segmentsModelRoot, 0))
for element, elRecord in self.oesList.items():
for iel in range(self.segmentsModelRoot.rowCount()):
elItem = self.segmentsModelRoot.child(iel, 0)
elName = str(elItem.text())
if str(element) == elName:
elRow = copyRow(self.segmentsModelRoot, iel)
for segment in arrayOfRays[0]:
if segment[3] is not None:
endBeamText = "to {}".format(
self.beamsToElements[segment[3]])
if str(segment[1]) == str(elRecord[1]):
if elItem.hasChildren():
for ich in range(elItem.rowCount()):
if str(elItem.child(ich, 0).text()) ==\
endBeamText:
elRow[0].appendRow(
copyRow(elItem, ich))
break
else:
elRow[0].appendRow(self.createRow(
endBeamText, 3))
else:
elRow[0].appendRow(self.createRow(
endBeamText, 3))
newSegmentsModel.invisibleRootItem().appendRow(elRow)
break
else:
elRow = self.createRow(str(element), 1)
for segment in arrayOfRays[0]:
if str(segment[1]) == str(elRecord[1]) and\
segment[3] is not None:
endBeamText = "to {}".format(
self.beamsToElements[segment[3]])
elRow[0].appendRow(self.createRow(endBeamText, 3))
newSegmentsModel.invisibleRootItem().appendRow(elRow)
self.segmentsModel = newSegmentsModel
self.segmentsModelRoot = self.segmentsModel.invisibleRootItem()
self.oeTree.setModel(self.segmentsModel)
def populateSegmentsModel(self, arrayOfRays):
for element, elRecord in self.oesList.items():
newRow = self.createRow(element, 1)
for segment in arrayOfRays[0]:
cond = str(segment[1]) == str(elRecord[1]) # or\
# str(segment[0])+"_Entrance" == element
if cond:
try: # if segment[3] is not None:
endBeamText = "to {}".format(
self.beamsToElements[segment[3]])
newRow[0].appendRow(self.createRow(endBeamText, 3))
except: # analysis:ignore
continue
self.segmentsModelRoot.appendRow(newRow)
def drawColorMap(self, axis):
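        """Draw the HSV palette (hue = color axis value, brightness =
        intensity) as the background image of the matplotlib color axes
        and label the x axis with the current color quantity."""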
xv, yv = np.meshgrid(np.linspace(0, colorFactor, 200),
np.linspace(0, 1, 200))
xv = xv.flatten()
yv = yv.flatten()
self.im = self.mplAx.imshow(mpl.colors.hsv_to_rgb(np.vstack((
xv, np.ones_like(xv)*colorSaturation, yv)).T).reshape((
200, 200, 3)),
aspect='auto', origin='lower',
extent=(self.customGlWidget.colorMin,
self.customGlWidget.colorMax,
0, 1))
self.mplAx.set_xlabel(axis)
self.mplAx.set_ylabel('Intensity')
def updateColorMap(self, histArray):
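        """Paint the ray histogram into the palette image and set a
        'center \u00b1 HWHM' title derived from the region above half
        maximum; with no histogram data the plain HSV palette and an empty
        title are restored."""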
if histArray[0] is not None:
size = len(histArray[0])
histImage = np.zeros((size, size, 3))
colorMin = self.customGlWidget.colorMin
colorMax = self.customGlWidget.colorMax
            hMax = float(np.max(histArray[0]))  # np.float was removed from numpy
intensity = np.float64(np.array(histArray[0]) / hMax)
histVals = np.int32(intensity * (size-1))
for col in range(size):
histImage[0:histVals[col], col, :] = mpl.colors.hsv_to_rgb(
(colorFactor * (histArray[1][col] - colorMin) /
(colorMax - colorMin),
colorSaturation, intensity[col]))
self.im.set_data(histImage)
try:
topEl = np.where(intensity >= 0.5)[0]
hwhm = (np.abs(histArray[1][topEl[0]] -
histArray[1][topEl[-1]])) * 0.5
cntr = (histArray[1][topEl[0]] + histArray[1][topEl[-1]]) * 0.5
newLabel = u"{0:.3f}\u00b1{1:.3f}".format(cntr, hwhm)
self.mplAx.set_title(newLabel, fontsize=self.cAxisLabelSize)
except: # analysis:ignore
pass
self.mplFig.canvas.draw()
self.mplFig.canvas.blit()
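            # re-assigning the extents is intended to force the span
            # selector to redraw on top of the updated image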
self.paletteWidget.span.extents = self.paletteWidget.span.extents
else:
xv, yv = np.meshgrid(np.linspace(0, colorFactor, 200),
np.linspace(0, 1, 200))
xv = xv.flatten()
yv = yv.flatten()
self.im.set_data(mpl.colors.hsv_to_rgb(np.vstack((
xv, np.ones_like(xv)*colorSaturation, yv)).T).reshape((
200, 200, 3)))
self.mplAx.set_title("")
self.mplFig.canvas.draw()
self.mplFig.canvas.blit()
if self.paletteWidget.span.visible:
self.paletteWidget.span.extents =\
self.paletteWidget.span.extents
self.mplFig.canvas.blit()
def checkGNorm(self, state):
self.customGlWidget.globalNorm = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkHSV(self, state):
self.customGlWidget.iHSV = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkDrawGrid(self, state):
self.customGlWidget.drawGrid = True if state > 0 else False
self.customGlWidget.glDraw()
def checkFineGrid(self, state):
self.customGlWidget.fineGridEnabled = True if state > 0 else False
self.customGlWidget.glDraw()
def checkPerspect(self, state):
self.customGlWidget.perspectiveEnabled = True if state > 0 else False
self.customGlWidget.glDraw()
def checkAA(self, state):
self.customGlWidget.enableAA = True if state > 0 else False
self.customGlWidget.glDraw()
def checkBlending(self, state):
self.customGlWidget.enableBlending = True if state > 0 else False
self.customGlWidget.glDraw()
def checkLineDepthTest(self, state):
self.customGlWidget.linesDepthTest = True if state > 0 else False
self.customGlWidget.glDraw()
def checkPointDepthTest(self, state):
self.customGlWidget.pointsDepthTest = True if state > 0 else False
self.customGlWidget.glDraw()
def invertSceneColor(self, state):
self.customGlWidget.invertColors = True if state > 0 else False
self.customGlWidget.glDraw()
def checkScalableFont(self, state):
self.customGlWidget.useScalableFont = True if state > 0 else False
self.customGlWidget.glDraw()
def checkShowLabels(self, state):
self.customGlWidget.showOeLabels = True if state > 0 else False
self.customGlWidget.glDraw()
def checkVSColor(self, state):
self.customGlWidget.vScreenForColors = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkShowLost(self, state):
self.customGlWidget.showLostRays = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkShowLocalAxes(self, state):
self.customGlWidget.showLocalAxes = True if state > 0 else False
self.customGlWidget.glDraw()
def setSceneParam(self, iAction, state):
self.sceneControls[iAction].setChecked(state)
def setProjectionParam(self, iAction, state):
self.projectionControls[iAction].setChecked(state)
def setGridParam(self, iAction, state):
self.gridControls[iAction].setChecked(state)
def setLabelPrec(self, prec):
self.customGlWidget.labelCoordPrec = prec
self.customGlWidget.glDraw()
def updateColorAxis(self, icSel):
if icSel == 0:
txt = re.sub(',', '.', str(self.colorControls[1].text()))
if txt == "{0:.3f}".format(self.customGlWidget.colorMin):
return
newColorMin = float(txt)
self.customGlWidget.colorMin = newColorMin
self.colorControls[2].validator().setBottom(newColorMin)
else:
txt = re.sub(',', '.', str(self.colorControls[2].text()))
if txt == "{0:.3f}".format(self.customGlWidget.colorMax):
return
newColorMax = float(txt)
self.customGlWidget.colorMax = newColorMax
self.colorControls[1].validator().setTop(newColorMax)
self.changeColorAxis(None, newLimits=True)
def changeColorAxis(self, selAxis, newLimits=False):
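        """Switch the coloring quantity (raycing.get_<selAxis>), repopulate
        the vertex arrays and synchronize the color limits, line edits,
        selection slider and palette image with the new range."""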
if selAxis is None:
selAxis = self.colorControls[0].currentText()
self.customGlWidget.newColorAxis = False if\
self.customGlWidget.selColorMin is not None else True
else:
self.customGlWidget.getColor = getattr(
raycing, 'get_{}'.format(selAxis))
self.customGlWidget.newColorAxis = True
oldColorMin = self.customGlWidget.colorMin
oldColorMax = self.customGlWidget.colorMax
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.mplAx.set_xlabel(selAxis)
if oldColorMin == self.customGlWidget.colorMin and\
oldColorMax == self.customGlWidget.colorMax and not newLimits:
return
self.customGlWidget.selColorMin = self.customGlWidget.colorMin
self.customGlWidget.selColorMax = self.customGlWidget.colorMax
extents = (self.customGlWidget.colorMin,
self.customGlWidget.colorMax, 0, 1)
self.im.set_extent(extents)
self.mplFig.gca().ticklabel_format(useOffset=True)
# self.mplFig.gca().autoscale_view()
extents = list(extents)
self.colorControls[1].setText(
'{0:.3f}'.format(self.customGlWidget.colorMin))
self.colorControls[2].setText(
'{0:.3f}'.format(self.customGlWidget.colorMax))
self.colorControls[3].setText(
'{0:.3f}'.format(self.customGlWidget.colorMin))
self.colorControls[4].setText(
'{0:.3f}'.format(self.customGlWidget.colorMax))
self.colorControls[3].validator().setRange(
self.customGlWidget.colorMin, self.customGlWidget.colorMax, 5)
self.colorControls[4].validator().setRange(
self.customGlWidget.colorMin, self.customGlWidget.colorMax, 5)
slider = self.colorControls[5]
center = 0.5 * (extents[0] + extents[1])
newMin = self.customGlWidget.colorMin
newMax = self.customGlWidget.colorMax
newRange = (newMax - newMin) * 0.01
slider.setRange(newMin, newMax, newRange)
slider.setValue(center)
self.mplFig.canvas.draw()
self.paletteWidget.span.active_handle = None
self.paletteWidget.span.to_draw.set_visible(False)
self.customGlWidget.glDraw()
def updateColorSelFromMPL(self, eclick, erelease):
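        """Span-selector callback: copy the selected extents into
        selColorMin/selColorMax, update the corresponding line edits and
        slider and re-filter the plotted rays."""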
try:
extents = list(self.paletteWidget.span.extents)
self.customGlWidget.selColorMin = np.min([extents[0], extents[1]])
self.customGlWidget.selColorMax = np.max([extents[0], extents[1]])
self.colorControls[3].setText(
"{0:.3f}".format(self.customGlWidget.selColorMin))
self.colorControls[4].setText(
"{0:.3f}".format(self.customGlWidget.selColorMax))
self.colorControls[3].validator().setTop(
self.customGlWidget.selColorMax)
self.colorControls[4].validator().setBottom(
self.customGlWidget.selColorMin)
slider = self.colorControls[5]
center = 0.5 * (extents[0] + extents[1])
halfWidth = (extents[1] - extents[0]) * 0.5
newMin = self.customGlWidget.colorMin + halfWidth
newMax = self.customGlWidget.colorMax - halfWidth
newRange = (newMax - newMin) * 0.01
slider.setRange(newMin, newMax, newRange)
slider.setValue(center)
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateColorSel(self, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
try:
extents = list(self.paletteWidget.span.extents)
width = np.abs(extents[1] - extents[0])
self.customGlWidget.selColorMin = position - 0.5*width
self.customGlWidget.selColorMax = position + 0.5*width
self.colorControls[3].setText('{0:.3f}'.format(position-0.5*width))
self.colorControls[4].setText('{0:.3f}'.format(position+0.5*width))
self.colorControls[3].validator().setTop(position + 0.5*width)
self.colorControls[4].validator().setBottom(position - 0.5*width)
newExtents = (position - 0.5*width, position + 0.5*width,
extents[2], extents[3])
self.paletteWidget.span.extents = newExtents
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateColorSelFromQLE(self, icSel):
try:
editor = self.sender()
txt = str(editor.text())
value = float(txt)
extents = list(self.paletteWidget.span.extents)
if icSel == 0:
if txt == "{0:.3f}".format(self.customGlWidget.selColorMin):
return
if value < self.customGlWidget.colorMin:
self.im.set_extent(
[value, self.customGlWidget.colorMax, 0, 1])
self.customGlWidget.colorMin = value
self.customGlWidget.selColorMin = value
newExtents = (value, extents[1], extents[2], extents[3])
# self.colorControls[2].validator().setBottom(value)
else:
if txt == "{0:.3f}".format(self.customGlWidget.selColorMax):
return
if value > self.customGlWidget.colorMax:
self.im.set_extent(
[self.customGlWidget.colorMin, value, 0, 1])
self.customGlWidget.colorMax = value
self.customGlWidget.selColorMax = value
newExtents = (extents[0], value, extents[2], extents[3])
# self.colorControls[1].validator().setTop(value)
center = 0.5 * (newExtents[0] + newExtents[1])
halfWidth = (newExtents[1] - newExtents[0]) * 0.5
newMin = self.customGlWidget.colorMin + halfWidth
newMax = self.customGlWidget.colorMax - halfWidth
newRange = (newMax - newMin) * 0.01
slider = self.colorControls[5]
slider.setRange(newMin, newMax, newRange)
slider.setValue(center)
self.paletteWidget.span.extents = newExtents
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
self.mplFig.canvas.draw()
except: # analysis:ignore
pass
def projSelection(self, ind, state):
self.customGlWidget.projectionsVisibility[ind] = state
self.customGlWidget.glDraw()
anyOf = False
for proj in self.projectionControls:
anyOf = anyOf or proj.isChecked()
if anyOf:
break
self.projLinePanel.setEnabled(anyOf)
def updateRotation(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
editor.setText("{0:.2f}".format(position))
self.customGlWidget.rotations[iax][0] = np.float32(position)
self.customGlWidget.updateQuats()
self.customGlWidget.glDraw()
def updateRotationFromGL(self, actPos):
for iaxis, (slider, editor) in\
enumerate(zip(self.rotationSliders, self.rotationEditors)):
value = actPos[iaxis][0]
slider.setValue(value)
editor.setText("{0:.2f}".format(value))
def updateRotationFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
def updateScale(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
editor.setText("{0:.2f}".format(position))
self.customGlWidget.scaleVec[iax] = np.float32(np.power(10, position))
self.customGlWidget.glDraw()
def updateScaleFromGL(self, scale):
if isinstance(scale, (int, float)):
scale = [scale, scale, scale]
for iaxis, (slider, editor) in \
enumerate(zip(self.zoomSliders, self.zoomEditors)):
value = np.log10(scale[iaxis])
slider.setValue(value)
editor.setText("{0:.2f}".format(value))
def updateScaleFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
def updateFontSize(self, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
self.customGlWidget.fontSize = position
self.customGlWidget.glDraw()
def updateRaysList(self, item):
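        """React to check-state changes in the segments model: the header
        row propagates its state to all rows and their children, element
        and segment rows update the header (partially checked when they
        disagree). Column 3 only collects the labels to plot; any other
        change rebuilds the vertex arrays."""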
if item.parent() is None:
if item.row() == 0:
                if item.checkState() != 1:  # ignore the partially checked state
model = item.model()
column = item.column()
model.blockSignals(True)
parent = self.segmentsModelRoot
try:
for iChild in range(parent.rowCount()):
if iChild > 0:
cItem = parent.child(iChild, column)
if cItem.isCheckable():
cItem.setCheckState(
item.checkState())
if cItem.hasChildren():
for iGChild in range(cItem.rowCount()):
gcItem = cItem.child(iGChild, 0)
if gcItem.isCheckable():
gcItem.setCheckState(
item.checkState())
finally:
model.blockSignals(False)
model.layoutChanged.emit()
else:
parent = self.segmentsModelRoot
model = item.model()
for iChild in range(parent.rowCount()):
outState = item.checkState()
if iChild > 0:
cItem = parent.child(iChild, item.column())
if item.column() > 0:
if cItem.checkState() != item.checkState():
outState = 1
break
model.blockSignals(True)
parent.child(0, item.column()).setCheckState(outState)
model.blockSignals(False)
model.layoutChanged.emit()
else:
parent = self.segmentsModelRoot
model = item.model()
for iChild in range(parent.rowCount()):
outState = item.checkState()
if iChild > 0:
cItem = parent.child(iChild, item.column())
if cItem.hasChildren():
for iGChild in range(cItem.rowCount()):
gcItem = cItem.child(iGChild, 0)
if gcItem.isCheckable():
if gcItem.checkState() !=\
item.checkState():
outState = 1
break
if outState == 1:
break
model.blockSignals(True)
parent.child(0, item.column()).setCheckState(outState)
model.blockSignals(False)
model.layoutChanged.emit()
if item.column() == 3:
self.customGlWidget.labelsToPlot = []
for ioe in range(self.segmentsModelRoot.rowCount() - 1):
if self.segmentsModelRoot.child(ioe + 1, 3).checkState() == 2:
self.customGlWidget.labelsToPlot.append(str(
self.segmentsModelRoot.child(ioe + 1, 0).text()))
else:
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def oeTreeMenu(self, position):
indexes = self.oeTree.selectedIndexes()
level = 100
if len(indexes) > 0:
level = 0
index = indexes[0]
selectedItem = self.segmentsModel.itemFromIndex(index)
while index.parent().isValid():
index = index.parent()
level += 1
if level == 0:
menu = qt.QMenu()
menu.addAction('Center here',
partial(self.centerEl, str(selectedItem.text())))
menu.exec_(self.oeTree.viewport().mapToGlobal(position))
else:
pass
def updateGrid(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
editor.setText("{0:.2f}".format(position))
if position != 0:
self.customGlWidget.aPos[iax] = np.float32(position)
self.customGlWidget.glDraw()
def updateGridFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
def updateGridFromGL(self, aPos):
for iaxis, (slider, editor) in\
enumerate(zip(self.gridSliders, self.gridEditors)):
value = aPos[iaxis]
slider.setValue(value)
editor.setText("{0:.2f}".format(value))
def glMenu(self, position):
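        """Build the right-click context menu of the GL widget: a File
        submenu (export image, save/load scene geometry), the virtual
        screen toggles, and checkable copies of the grid, projection and
        scene controls."""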
menu = qt.QMenu()
subMenuF = menu.addMenu('File')
for actText, actFunc in zip(['Export to image', 'Save scene geometry',
'Load scene geometry'],
[self.exportToImage, self.saveSceneDialog,
self.loadSceneDialog]):
mAction = qt.QAction(self)
mAction.setText(actText)
mAction.triggered.connect(actFunc)
subMenuF.addAction(mAction)
menu.addSeparator()
mAction = qt.QAction(self)
mAction.setText("Show Virtual Screen")
mAction.setCheckable(True)
mAction.setChecked(False if self.customGlWidget.virtScreen is None
else True)
mAction.triggered.connect(self.customGlWidget.toggleVScreen)
menu.addAction(mAction)
for iAction, actCnt in enumerate(self.sceneControls):
if 'Virtual Screen' not in actCnt.text():
continue
mAction = qt.QAction(self)
mAction.setText(actCnt.text())
mAction.setCheckable(True)
mAction.setChecked(bool(actCnt.checkState()))
mAction.triggered.connect(partial(self.setSceneParam, iAction))
menu.addAction(mAction)
menu.addSeparator()
for iAction, actCnt in enumerate(self.gridControls):
mAction = qt.QAction(self)
if actCnt.staticMetaObject.className() == 'QCheckBox':
actText = actCnt.text()
actCheck = bool(actCnt.checkState())
else:
actText = actCnt.title()
actCheck = actCnt.isChecked()
mAction.setText(actText)
mAction.setCheckable(True)
mAction.setChecked(actCheck)
mAction.triggered.connect(
partial(self.setGridParam, iAction))
if iAction == 0: # perspective
menu.addAction(mAction)
elif iAction == 1: # show grid
subMenuG = menu.addMenu('Coordinate grid')
subMenuG.addAction(mAction)
elif iAction == 2: # fine grid
subMenuG.addAction(mAction)
menu.addSeparator()
subMenuP = menu.addMenu('Projections')
for iAction, actCnt in enumerate(self.projectionControls):
mAction = qt.QAction(self)
mAction.setText(actCnt.text())
mAction.setCheckable(True)
mAction.setChecked(bool(actCnt.checkState()))
mAction.triggered.connect(
partial(self.setProjectionParam, iAction))
subMenuP.addAction(mAction)
menu.addSeparator()
subMenuS = menu.addMenu('Scene')
for iAction, actCnt in enumerate(self.sceneControls):
if 'Virtual Screen' in actCnt.text():
continue
mAction = qt.QAction(self)
mAction.setText(actCnt.text())
mAction.setCheckable(True)
mAction.setChecked(bool(actCnt.checkState()))
mAction.triggered.connect(partial(self.setSceneParam, iAction))
subMenuS.addAction(mAction)
menu.addSeparator()
menu.exec_(self.customGlWidget.mapToGlobal(position))
def exportToImage(self):
saveDialog = qt.QFileDialog()
saveDialog.setFileMode(qt.QFileDialog.AnyFile)
saveDialog.setAcceptMode(qt.QFileDialog.AcceptSave)
saveDialog.setNameFilter("BMP files (*.bmp);;JPG files (*.jpg);;JPEG files (*.jpeg);;PNG files (*.png);;TIFF files (*.tif)") # analysis:ignore
saveDialog.selectNameFilter("JPG files (*.jpg)")
if (saveDialog.exec_()):
image = self.customGlWidget.grabFrameBuffer(withAlpha=True)
filename = saveDialog.selectedFiles()[0]
extension = str(saveDialog.selectedNameFilter())[-5:-1].strip('.')
if not filename.endswith(extension):
filename = "{0}.{1}".format(filename, extension)
image.save(filename)
def saveSceneDialog(self):
saveDialog = qt.QFileDialog()
saveDialog.setFileMode(qt.QFileDialog.AnyFile)
saveDialog.setAcceptMode(qt.QFileDialog.AcceptSave)
saveDialog.setNameFilter("Numpy files (*.npy)") # analysis:ignore
if (saveDialog.exec_()):
filename = saveDialog.selectedFiles()[0]
extension = 'npy'
if not filename.endswith(extension):
filename = "{0}.{1}".format(filename, extension)
self.saveScene(filename)
def loadSceneDialog(self):
loadDialog = qt.QFileDialog()
loadDialog.setFileMode(qt.QFileDialog.AnyFile)
loadDialog.setAcceptMode(qt.QFileDialog.AcceptOpen)
loadDialog.setNameFilter("Numpy files (*.npy)") # analysis:ignore
if (loadDialog.exec_()):
filename = loadDialog.selectedFiles()[0]
extension = 'npy'
if not filename.endswith(extension):
filename = "{0}.{1}".format(filename, extension)
self.loadScene(filename)
def saveScene(self, filename):
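        """Collect the view-related attributes of the GL widget together
        with the window geometry, splitter sizes and color axis into a
        dict and save it with np.save."""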
params = dict()
for param in ['aspect', 'cameraAngle', 'projectionsVisibility',
'lineOpacity', 'lineWidth', 'pointOpacity', 'pointSize',
'lineProjectionOpacity', 'lineProjectionWidth',
'pointProjectionOpacity', 'pointProjectionSize',
'coordOffset', 'cutoffI', 'drawGrid', 'aPos', 'scaleVec',
'tVec', 'cameraPos', 'rotations',
'visibleAxes', 'signs', 'selColorMin', 'selColorMax',
'colorMin', 'colorMax', 'fineGridEnabled',
'useScalableFont', 'invertColors', 'perspectiveEnabled',
'globalNorm', 'viewPortGL', 'iHSV']:
params[param] = getattr(self.customGlWidget, param)
params['size'] = self.geometry()
params['sizeGL'] = self.canvasSplitter.sizes()
params['colorAxis'] = str(self.colorControls[0].currentText())
try:
np.save(filename, params)
except: # analysis:ignore
print('Error saving file')
return
print('Saved scene to {}'.format(filename))
def loadScene(self, filename):
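        """Load a scene dict saved by saveScene and push every restored
        attribute back into the GL widget and the control panels."""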
try:
            # the scene file stores a pickled dict, hence allow_pickle
            params = np.load(filename, allow_pickle=True).item()
except: # analysis:ignore
print('Error loading file')
return
for param in ['aspect', 'cameraAngle', 'projectionsVisibility',
'lineOpacity', 'lineWidth', 'pointOpacity', 'pointSize',
'lineProjectionOpacity', 'lineProjectionWidth',
'pointProjectionOpacity', 'pointProjectionSize',
'coordOffset', 'cutoffI', 'drawGrid', 'aPos', 'scaleVec',
'tVec', 'cameraPos', 'rotations',
'visibleAxes', 'signs', 'selColorMin', 'selColorMax',
'colorMin', 'colorMax', 'fineGridEnabled',
'useScalableFont', 'invertColors', 'perspectiveEnabled',
'globalNorm', 'viewPortGL', 'iHSV']:
setattr(self.customGlWidget, param, params[param])
self.setGeometry(params['size'])
self.canvasSplitter.setSizes(params['sizeGL'])
self.updateScaleFromGL(self.customGlWidget.scaleVec)
self.blockSignals(True)
self.updateRotationFromGL(self.customGlWidget.rotations)
self.updateOpacityFromGL([self.customGlWidget.lineOpacity,
self.customGlWidget.lineWidth,
self.customGlWidget.pointOpacity,
self.customGlWidget.pointSize])
for iax, checkBox in enumerate(self.projectionControls):
checkBox.setChecked(self.customGlWidget.projectionsVisibility[iax])
self.gridPanel.setChecked(self.customGlWidget.drawGrid)
self.checkBoxFineGrid.setChecked(self.customGlWidget.fineGridEnabled)
self.checkBoxPerspective.setChecked(
self.customGlWidget.perspectiveEnabled)
self.updateProjectionOpacityFromGL(
[self.customGlWidget.lineProjectionOpacity,
self.customGlWidget.lineProjectionWidth,
self.customGlWidget.pointProjectionOpacity,
self.customGlWidget.pointProjectionSize])
self.updateGridFromGL(self.customGlWidget.aPos)
self.sceneControls[4].setChecked(self.customGlWidget.invertColors)
self.sceneControls[5].setChecked(self.customGlWidget.useScalableFont)
self.glNormCB.setChecked(self.customGlWidget.globalNorm)
self.iHSVCB.setChecked(self.customGlWidget.iHSV)
self.blockSignals(False)
self.mplFig.canvas.draw()
colorCB = self.colorControls[0]
colorCB.setCurrentIndex(colorCB.findText(params['colorAxis']))
newExtents = list(self.paletteWidget.span.extents)
newExtents[0] = params['selColorMin']
newExtents[1] = params['selColorMax']
try:
self.paletteWidget.span.extents = newExtents
except: # analysis:ignore
pass
self.updateColorSelFromMPL(0, 0)
print('Loaded scene from {}'.format(filename))
def startRecordingMovie(self): # by F7
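        """Frame-by-frame movie recording: iterate the user generator,
        re-propagate the beamline at every step, refresh the scene and
        save each frame (plus its color histogram) to bl.glowFrameName."""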
if self.generator is None:
return
startFrom = self.startFrom if hasattr(self, 'startFrom') else 0
for it in self.generator(*self.generatorArgs):
self.bl.propagate_flow(startFrom=startFrom)
rayPath = self.bl.export_to_glow()
self.updateOEsList(rayPath)
self.customGlWidget.glDraw()
if self.isHidden():
self.show()
image = self.customGlWidget.grabFrameBuffer(withAlpha=True)
try:
image.save(self.bl.glowFrameName)
cNameSp = os.path.splitext(self.bl.glowFrameName)
cName = cNameSp[0] + "_color" + cNameSp[1]
self.mplFig.savefig(cName)
except AttributeError:
print('no glowFrameName was given!')
print("Finished with the movie.")
def centerEl(self, oeName):
self.customGlWidget.coordOffset = list(self.oesList[str(oeName)][2])
self.customGlWidget.tVec = np.float32([0, 0, 0])
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def updateCutoffFromQLE(self):
try:
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
extents = list(self.paletteWidget.span.extents)
self.customGlWidget.cutoffI = np.float32(value)
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
newExtents = (extents[0], extents[1],
self.customGlWidget.cutoffI, extents[3])
self.paletteWidget.span.extents = newExtents
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateExplosionDepth(self):
try:
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
self.customGlWidget.depthScaler = np.float32(value)
if self.customGlWidget.virtScreen is not None:
self.customGlWidget.populateVScreen()
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateOpacity(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
editor.setText("{0:.2f}".format(position))
if iax == 0:
self.customGlWidget.lineOpacity = np.float32(position)
elif iax == 1:
self.customGlWidget.lineWidth = np.float32(position)
elif iax == 2:
self.customGlWidget.pointOpacity = np.float32(position)
elif iax == 3:
self.customGlWidget.pointSize = np.float32(position)
self.customGlWidget.glDraw()
def updateOpacityFromQLE(self, slider):
editor = self.sender()
value = float(str(editor.text()))
slider.setValue(value)
self.customGlWidget.glDraw()
def updateOpacityFromGL(self, ops):
for iaxis, (slider, editor, op) in\
enumerate(zip(self.opacitySliders, self.opacityEditors, ops)):
slider.setValue(op)
editor.setText("{0:.2f}".format(op))
def updateTileFromQLE(self, ia):
editor = self.sender()
value = float(str(editor.text()))
        self.customGlWidget.tiles[ia] = int(value)  # np.int was removed from numpy
self.customGlWidget.glDraw()
def updateProjectionOpacity(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
editor.setText("{0:.2f}".format(position))
if iax == 0:
self.customGlWidget.lineProjectionOpacity = np.float32(position)
elif iax == 1:
self.customGlWidget.lineProjectionWidth = np.float32(position)
elif iax == 2:
self.customGlWidget.pointProjectionOpacity = np.float32(position)
elif iax == 3:
self.customGlWidget.pointProjectionSize = np.float32(position)
self.customGlWidget.glDraw()
def updateProjectionOpacityFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
self.customGlWidget.glDraw()
def updateProjectionOpacityFromGL(self, ops):
for iaxis, (slider, editor, op) in\
enumerate(zip(self.projectionOpacitySliders,
self.projectionOpacityEditors, ops)):
slider.setValue(op)
editor.setText("{0:.2f}".format(op))
class xrtGlWidget(qt.QGLWidget):
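    """OpenGL widget that renders the beamline: ray segments, footprints,
    optical element surfaces, projections, labels and the coordinate grid,
    with quaternion-based rotation and per-axis scaling of the view."""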
rotationUpdated = qt.pyqtSignal(np.ndarray)
scaleUpdated = qt.pyqtSignal(np.ndarray)
histogramUpdated = qt.pyqtSignal(tuple)
def __init__(self, parent, arrayOfRays, modelRoot, oesList, b2els, signal):
qt.QGLWidget.__init__(self, parent)
self.QookSignal = signal
self.virtScreen = None
self.virtBeam = None
self.virtDotsArray = None
self.virtDotsColor = None
self.vScreenForColors = False
self.globalColorIndex = None
self.isVirtScreenNormal = False
self.segmentModel = modelRoot
self.vScreenSize = 0.5
self.setMinimumSize(400, 400)
self.aspect = 1.
self.depthScaler = 0.
self.viewPortGL = [0, 0, 500, 500]
self.perspectiveEnabled = True
self.cameraAngle = 60
self.setMouseTracking(True)
self.surfCPOrder = 4
self.oesToPlot = []
self.labelsToPlot = []
self.tiles = [2, 2]
self.arrayOfRays = arrayOfRays
self.beamsDict = arrayOfRays[1]
self.oesList = oesList
self.oeContour = dict()
self.slitEdges = dict()
self.beamsToElements = b2els
self.slitThickness = 2. # mm
self.contourWidth = 2
self.projectionsVisibility = [0, 0, 0]
self.lineOpacity = 0.1
self.lineWidth = 1
self.pointOpacity = 0.1
self.pointSize = 1
self.linesDepthTest = True
self.pointsDepthTest = False
self.labelCoordPrec = 1
self.lineProjectionOpacity = 0.1
self.lineProjectionWidth = 1
self.pointProjectionOpacity = 0.1
self.pointProjectionSize = 1
self.coordOffset = [0., 0., 0.]
self.enableAA = False
self.enableBlending = True
self.cutoffI = 0.01
self.getColor = raycing.get_energy
self.globalNorm = True
self.iHSV = False
self.newColorAxis = True
self.colorMin = -1e20
self.colorMax = 1e20
self.selColorMin = None
self.selColorMax = None
self.scaleVec = np.array([1e3, 1e1, 1e3])
self.maxLen = 1.
self.showLostRays = False
self.showLocalAxes = False
self.populateVerticesArray(modelRoot)
self.drawGrid = True
self.fineGridEnabled = False
self.showOeLabels = False
self.aPos = [0.9, 0.9, 0.9]
self.prevMPos = [0, 0]
self.prevWC = np.float32([0, 0, 0])
self.coordinateGridLineWidth = 1
# self.fixedFontType = 'GLUT_BITMAP_TIMES_ROMAN'
self.fixedFontType = 'GLUT_BITMAP_HELVETICA'
self.fixedFontSize = '12' # 10, 12, 18 for Helvetica; 10, 24 for Roman
self.fixedFont = getattr(gl, "{0}_{1}".format(self.fixedFontType,
self.fixedFontSize))
self.useScalableFont = False
self.fontSize = 5
self.scalableFontType = gl.GLUT_STROKE_ROMAN
# self.scalableFontType = gl.GLUT_STROKE_MONO_ROMAN
self.scalableFontWidth = 1
self.useFontAA = False
self.tVec = np.array([0., 0., 0.])
self.cameraTarget = [0., 0., 0.]
self.cameraPos = np.float32([3.5, 0., 0.])
self.isEulerian = False
self.rotations = np.float32([[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
self.textOrientation = [0.5, 0.5, 0.5, 0.5]
self.updateQuats()
pModelT = np.identity(4)
self.visibleAxes = np.argmax(np.abs(pModelT), axis=1)
self.signs = np.ones_like(pModelT)
self.invertColors = False
self.showHelp = False
# self.glDraw()
def eulerToQ(self, rotMatrXYZ):
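        """Convert the three rotation angles (degrees, first element of
        each row of rotMatrXYZ) into a unit quaternion [w, x, y, z]."""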
hPitch = np.radians(rotMatrXYZ[0][0]) * 0.5
hRoll = np.radians(rotMatrXYZ[1][0]) * 0.5
hYaw = np.radians(rotMatrXYZ[2][0]) * 0.5
cosPitch = np.cos(hPitch)
sinPitch = np.sin(hPitch)
cosRoll = np.cos(hRoll)
sinRoll = np.sin(hRoll)
cosYaw = np.cos(hYaw)
sinYaw = np.sin(hYaw)
return [cosPitch*cosRoll*cosYaw - sinPitch*sinRoll*sinYaw,
sinRoll*sinYaw*cosPitch + sinPitch*cosRoll*cosYaw,
sinRoll*cosPitch*cosYaw - sinPitch*sinYaw*cosRoll,
sinYaw*cosPitch*cosRoll + sinPitch*sinRoll*cosYaw]
def qToVec(self, quat):
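        """Convert a quaternion into axis-angle form [angle in degrees,
        x, y, z] as expected by glRotatef."""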
angle = 2 * np.arccos(quat[0])
q2v = np.sin(angle * 0.5)
qbt1 = quat[1] / q2v if q2v != 0 else 0
qbt2 = quat[2] / q2v if q2v != 0 else 0
qbt3 = quat[3] / q2v if q2v != 0 else 0
return [np.degrees(angle), qbt1, qbt2, qbt3]
def rotateZYX(self):
if self.isEulerian:
gl.glRotatef(*self.rotations[0])
gl.glRotatef(*self.rotations[1])
gl.glRotatef(*self.rotations[2])
else:
gl.glRotatef(*self.rotationVec)
def updateQuats(self):
self.qRot = self.eulerToQ(self.rotations)
self.rotationVec = self.qToVec(self.qRot)
self.qText = self.qToVec(
self.quatMult([self.qRot[0], -self.qRot[1],
-self.qRot[2], -self.qRot[3]],
self.textOrientation))
def vecToQ(self, vec, alpha):
""" Quaternion from vector and angle"""
return np.insert(vec*np.sin(alpha*0.5), 0, np.cos(alpha*0.5))
def rotateVecQ(self, vec, q):
qn = np.copy(q)
qn[1:] *= -1
return self.quatMult(self.quatMult(
q, self.vecToQ(vec, np.pi*0.25)), qn)[1:]
def setPointSize(self, pSize):
self.pointSize = pSize
self.glDraw()
def setLineWidth(self, lWidth):
self.lineWidth = lWidth
self.glDraw()
def populateVerticesOnly(self, segmentsModelRoot):
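        """Walk the segments model and rebuild the ray and footprint vertex
        arrays and their HSV-derived colors for all checked items, applying
        the intensity cutoff, the selected color range and, optionally, the
        lost-ray and virtual-screen color filters. Also updates
        colorMin/colorMax and the scene extent maxLen."""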
if segmentsModelRoot is None:
return
self.segmentModel = segmentsModelRoot
# signal = self.QookSignal
self.verticesArray = None
self.footprintsArray = None
self.oesToPlot = []
self.labelsToPlot = []
self.footprints = dict()
colorsRays = None
alphaRays = None
colorsDots = None
alphaDots = None
globalColorsDots = None
globalColorsRays = None
verticesArrayLost = None
colorsRaysLost = None
footprintsArrayLost = None
colorsDotsLost = None
maxLen = 1.
tmpMax = -1.0e12 * np.ones(3)
tmpMin = -1. * tmpMax
if self.newColorAxis:
newColorMax = -1e20
newColorMin = 1e20
# self.selColorMax = newColorMax
# self.selColorMin = newColorMin
else:
newColorMax = self.colorMax
newColorMin = self.colorMin
# totalOEs = range(segmentsModelRoot.rowCount() - 2)
for ioe in range(segmentsModelRoot.rowCount() - 1):
ioeItem = segmentsModelRoot.child(ioe + 1, 0)
# try:
# if signal is not None:
# signalStr = "Plotting beams for {}, %p% done.".format(
# str(ioeItem.text()))
# signal.emit((float(ioe) / float(totalOEs),
# signalStr))
# except:
# pass
if segmentsModelRoot.child(ioe + 1, 2).checkState() == 2:
self.oesToPlot.append(str(ioeItem.text()))
self.footprints[str(ioeItem.text())] = None
if segmentsModelRoot.child(ioe + 1, 3).checkState() == 2:
self.labelsToPlot.append(str(ioeItem.text()))
try:
startBeam = self.beamsDict[
self.oesList[str(ioeItem.text())][1]]
# lostNum = self.oesList[str(ioeItem.text())][0].lostNum
# good = startBeam.state > 0
good = (startBeam.state == 1) | (startBeam.state == 2)
if len(startBeam.state[good]) > 0:
for tmpCoord, tAxis in enumerate(['x', 'y', 'z']):
axMin = np.min(getattr(startBeam, tAxis)[good])
axMax = np.max(getattr(startBeam, tAxis)[good])
if axMin < tmpMin[tmpCoord]:
tmpMin[tmpCoord] = axMin
if axMax > tmpMax[tmpCoord]:
tmpMax[tmpCoord] = axMax
newColorMax = max(np.max(
self.getColor(startBeam)[good]),
newColorMax)
newColorMin = min(np.min(
self.getColor(startBeam)[good]),
newColorMin)
except: # analysis:ignore
if _DEBUG_:
raise
else:
continue
if self.newColorAxis:
if newColorMin != self.colorMin:
self.colorMin = newColorMin
self.selColorMin = self.colorMin
if newColorMax != self.colorMax:
self.colorMax = newColorMax
self.selColorMax = self.colorMax
if ioeItem.hasChildren():
for isegment in range(ioeItem.rowCount()):
segmentItem0 = ioeItem.child(isegment, 0)
if segmentItem0.checkState() == 2:
endBeam = self.beamsDict[
self.oesList[str(segmentItem0.text())[3:]][1]]
# good = startBeam.state > 0
good = (startBeam.state == 1) | (startBeam.state == 2)
if len(startBeam.state[good]) == 0:
continue
intensity = startBeam.Jss + startBeam.Jpp
intensityAll = intensity / np.max(intensity[good])
good = np.logical_and(good,
intensityAll >= self.cutoffI)
goodC = np.logical_and(
self.getColor(startBeam) <= self.selColorMax,
self.getColor(startBeam) >= self.selColorMin)
good = np.logical_and(good, goodC)
if self.vScreenForColors and\
self.globalColorIndex is not None:
good = np.logical_and(good, self.globalColorIndex)
globalColorsRays = np.repeat(
self.globalColorArray[good], 2, axis=0) if\
globalColorsRays is None else np.concatenate(
(globalColorsRays,
np.repeat(self.globalColorArray[good], 2,
axis=0)))
else:
if self.globalNorm:
alphaMax = 1.
else:
if len(intensity[good]) > 0:
alphaMax = np.max(intensity[good])
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaRays = np.repeat(intensity[good] / alphaMax,
2).T\
if alphaRays is None else np.concatenate(
(alphaRays.T,
np.repeat(intensity[good] / alphaMax,
2).T))
colorsRays = np.repeat(np.array(self.getColor(
startBeam)[good]), 2).T if\
colorsRays is None else np.concatenate(
(colorsRays.T,
np.repeat(np.array(self.getColor(
startBeam)[good]), 2).T))
vertices = np.array(
[startBeam.x[good] - self.coordOffset[0],
endBeam.x[good] - self.coordOffset[0]]).flatten(
'F')
vertices = np.vstack((vertices, np.array(
[startBeam.y[good] - self.coordOffset[1],
endBeam.y[good] - self.coordOffset[1]]).flatten(
'F')))
vertices = np.vstack((vertices, np.array(
[startBeam.z[good] - self.coordOffset[2],
endBeam.z[good] - self.coordOffset[2]]).flatten(
'F')))
self.verticesArray = vertices.T if\
self.verticesArray is None else\
np.vstack((self.verticesArray, vertices.T))
if self.showLostRays:
try:
lostNum = self.oesList[str(
segmentItem0.text())[3:]][0].lostNum
except: # analysis:ignore
lostNum = 1e3
lost = startBeam.state == lostNum
try:
lostOnes = len(startBeam.x[lost]) * 2
except: # analysis:ignore
lostOnes = 0
colorsRaysLost = lostOnes if colorsRaysLost is\
None else colorsRaysLost + lostOnes
if lostOnes > 0:
verticesLost = np.array(
[startBeam.x[lost] - self.coordOffset[0],
endBeam.x[lost] -
self.coordOffset[0]]).flatten('F')
verticesLost = np.vstack((verticesLost, np.array( # analysis:ignore
[startBeam.y[lost] - self.coordOffset[1],
endBeam.y[lost] -
self.coordOffset[1]]).flatten('F')))
verticesLost = np.vstack((verticesLost, np.array( # analysis:ignore
[startBeam.z[lost] - self.coordOffset[2],
endBeam.z[lost] -
self.coordOffset[2]]).flatten('F')))
verticesArrayLost = verticesLost.T if\
verticesArrayLost is None else\
np.vstack((verticesArrayLost, verticesLost.T)) # analysis:ignore
if segmentsModelRoot.child(ioe + 1, 1).checkState() == 2:
# good = startBeam.state > 0
good = (startBeam.state == 1) | (startBeam.state == 2)
if len(startBeam.state[good]) == 0:
continue
intensity = startBeam.Jss + startBeam.Jpp
try:
intensityAll = intensity / np.max(intensity[good])
good = np.logical_and(good, intensityAll >= self.cutoffI)
goodC = np.logical_and(
self.getColor(startBeam) <= self.selColorMax,
self.getColor(startBeam) >= self.selColorMin)
good = np.logical_and(good, goodC)
except: # analysis:ignore
if _DEBUG_:
raise
else:
continue
if self.vScreenForColors and self.globalColorIndex is not None:
good = np.logical_and(good, self.globalColorIndex)
globalColorsDots = self.globalColorArray[good] if\
globalColorsDots is None else np.concatenate(
(globalColorsDots, self.globalColorArray[good]))
else:
if self.globalNorm:
alphaMax = 1.
else:
if len(intensity[good]) > 0:
alphaMax = np.max(intensity[good])
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaDots = intensity[good].T / alphaMax if\
alphaDots is None else np.concatenate(
(alphaDots.T, intensity[good].T / alphaMax))
colorsDots = np.array(self.getColor(
startBeam)[good]).T if\
colorsDots is None else np.concatenate(
(colorsDots.T, np.array(self.getColor(
startBeam)[good]).T))
vertices = np.array(startBeam.x[good] - self.coordOffset[0])
vertices = np.vstack((vertices, np.array(
startBeam.y[good] - self.coordOffset[1])))
vertices = np.vstack((vertices, np.array(
startBeam.z[good] - self.coordOffset[2])))
self.footprintsArray = vertices.T if\
self.footprintsArray is None else\
np.vstack((self.footprintsArray, vertices.T))
if self.showLostRays:
try:
lostNum = self.oesList[str(ioeItem.text())][0].lostNum
except: # analysis:ignore
lostNum = 1e3
lost = startBeam.state == lostNum
try:
lostOnes = len(startBeam.x[lost])
except: # analysis:ignore
lostOnes = 0
colorsDotsLost = lostOnes if\
colorsDotsLost is None else\
colorsDotsLost + lostOnes
if lostOnes > 0:
verticesLost = np.array(startBeam.x[lost] -
self.coordOffset[0])
verticesLost = np.vstack((verticesLost, np.array(
startBeam.y[lost] - self.coordOffset[1])))
verticesLost = np.vstack((verticesLost, np.array(
startBeam.z[lost] - self.coordOffset[2])))
footprintsArrayLost = verticesLost.T if\
footprintsArrayLost is None else\
np.vstack((footprintsArrayLost, verticesLost.T))
try:
if self.colorMin == self.colorMax:
if self.colorMax == 0: # and self.colorMin == 0 too
self.colorMin, self.colorMax = -0.1, 0.1
else:
self.colorMin = self.colorMax * 0.99
self.colorMax *= 1.01
if self.vScreenForColors and self.globalColorIndex is not None:
self.raysColor = globalColorsRays
elif colorsRays is not None:
colorsRays = colorFactor * (colorsRays-self.colorMin) /\
(self.colorMax - self.colorMin)
colorsRays = np.dstack((colorsRays,
np.ones_like(alphaRays)*colorSaturation, # analysis:ignore
alphaRays if self.iHSV else
np.ones_like(alphaRays)))
colorsRGBRays = np.squeeze(mpl.colors.hsv_to_rgb(colorsRays))
if self.globalNorm and len(alphaRays) > 0:
alphaMax = np.max(alphaRays)
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaColorRays = np.array([alphaRays / alphaMax]).T
self.raysColor = np.float32(np.hstack([colorsRGBRays,
alphaColorRays]))
if self.showLostRays:
if colorsRaysLost is not None:
lostColor = np.zeros((colorsRaysLost, 4))
lostColor[:, 0] = 0.5
lostColor[:, 3] = 0.25
self.raysColor = np.float32(np.vstack((self.raysColor,
lostColor)))
if verticesArrayLost is not None:
self.verticesArray = np.float32(np.vstack((
self.verticesArray, verticesArrayLost)))
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
try:
if self.colorMin == self.colorMax:
if self.colorMax == 0: # and self.colorMin == 0 too
self.colorMin, self.colorMax = -0.1, 0.1
else:
self.colorMin = self.colorMax * 0.99
self.colorMax *= 1.01
if self.vScreenForColors and self.globalColorIndex is not None:
self.dotsColor = globalColorsDots
elif colorsDots is not None:
colorsDots = colorFactor * (colorsDots-self.colorMin) /\
(self.colorMax - self.colorMin)
colorsDots = np.dstack((colorsDots,
np.ones_like(alphaDots)*colorSaturation, # analysis:ignore
alphaDots if self.iHSV else
np.ones_like(alphaDots)))
colorsRGBDots = np.squeeze(mpl.colors.hsv_to_rgb(colorsDots))
if self.globalNorm and len(alphaDots) > 0:
alphaMax = np.max(alphaDots)
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaColorDots = np.array([alphaDots / alphaMax]).T
self.dotsColor = np.float32(np.hstack([colorsRGBDots,
alphaColorDots]))
if self.showLostRays:
if colorsDotsLost is not None:
lostColor = np.zeros((colorsDotsLost, 4))
lostColor[:, 0] = 0.5
lostColor[:, 3] = 0.25
self.dotsColor = np.float32(np.vstack((self.dotsColor,
lostColor)))
if footprintsArrayLost is not None:
self.footprintsArray = np.float32(np.vstack((
self.footprintsArray, footprintsArrayLost)))
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
tmpMaxLen = np.max(tmpMax - tmpMin)
if tmpMaxLen > maxLen:
maxLen = tmpMaxLen
self.maxLen = maxLen
self.newColorAxis = False
def populateVerticesArray(self, segmentsModelRoot):
self.populateVerticesOnly(segmentsModelRoot)
self.populateVScreen()
if self.vScreenForColors:
self.populateVerticesOnly(segmentsModelRoot)
def modelToWorld(self, coords, dimension=None):
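        """Map beamline coordinates (already shifted by coordOffset) into
        normalized scene coordinates as (coords + tVec) * scaleVec / maxLen;
        'dimension' restricts the transform to a single axis."""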
self.maxLen = self.maxLen if self.maxLen != 0 else 1.
if dimension is None:
return np.float32(((coords + self.tVec) * self.scaleVec) /
self.maxLen)
else:
return np.float32(((coords[dimension] + self.tVec[dimension]) *
self.scaleVec[dimension]) / self.maxLen)
def worldToModel(self, coords):
return np.float32(coords * self.maxLen / self.scaleVec - self.tVec)
def drawText(self, coord, text, noScalable=False, alignment=None,
useCaption=False):
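        """Render 'text' at 'coord' with the fixed bitmap font or, when
        scalable fonts are enabled (and not suppressed by noScalable), with
        GLUT stroke characters; 'useCaption' searches for a free spot on
        screen and connects the label to the point with a leader line."""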
useScalableFont = False if noScalable else self.useScalableFont
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
if not useScalableFont:
gl.glRasterPos3f(*coord)
for symbol in text:
gl.glutBitmapCharacter(self.fixedFont, ord(symbol))
else:
tLineWidth = gl.glGetDoublev(gl.GL_LINE_WIDTH)
tLineAA = gl.glIsEnabled(gl.GL_LINE_SMOOTH)
if self.useFontAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
else:
gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glLineWidth(self.scalableFontWidth)
fontScale = self.fontSize / 12500.
coordShift = np.zeros(3, dtype=np.float32)
fontSizeLoc = np.float32(np.array([104.76, 119.05, 0])*fontScale)
if alignment is not None:
if alignment[0] == 'left':
coordShift[0] = -fontSizeLoc[0] * len(text)
else:
coordShift[0] = fontSizeLoc[0]
if alignment[1] == 'top':
vOffset = 0.5
elif alignment[1] == 'bottom':
vOffset = -1.5
else:
vOffset = -0.5
coordShift[1] = vOffset * fontSizeLoc[1]
if useCaption:
textWidth = 0
for symbol in text.strip(" "):
textWidth += gl.glutStrokeWidth(self.scalableFontType,
ord(symbol))
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*self.qText)
gl.glTranslatef(*coordShift)
gl.glScalef(fontScale, fontScale, fontScale)
depthCounter = 1
spaceFound = False
while not spaceFound:
depthCounter += 1
for dy in [-1, 1]:
for dx in [1, -1]:
textShift = (depthCounter+0.5*dy) * 119.05*1.5
gl.glPushMatrix()
textPos = [dx*depthCounter * 119.05*1.5 +
(0 if dx > 0 else -1) * textWidth,
dy*textShift, 0]
gl.glTranslatef(*textPos)
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
bottomLeft = np.array(gl.gluProject(
*[0, 0, 0], model=pModel, proj=pProjection,
view=pView)[:-1])
topRight = np.array(gl.gluProject(
*[textWidth, 119.05*2.5, 0],
model=pModel, proj=pProjection,
view=pView)[:-1])
gl.glPopMatrix()
spaceFound = True
for oeLabel in list(self.labelsBounds.values()):
if not (bottomLeft[0] > oeLabel[1][0] or
bottomLeft[1] > oeLabel[1][1] or
topRight[0] < oeLabel[0][0] or
topRight[1] < oeLabel[0][1]):
spaceFound = False
if spaceFound:
self.labelsBounds[text] = [0]*2
self.labelsBounds[text][0] = bottomLeft
self.labelsBounds[text][1] = topRight
break
if spaceFound:
break
gl.glPopMatrix()
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*self.qText)
gl.glScalef(fontScale, fontScale, fontScale)
captionPos = depthCounter * 119.05*1.5
gl.glBegin(gl.GL_LINE_STRIP)
gl.glVertex3f(0, 0, 0)
gl.glVertex3f(captionPos*dx, captionPos*dy, 0)
gl.glVertex3f(captionPos*dx + textWidth*dx,
captionPos*dy, 0)
gl.glEnd()
gl.glTranslatef(*textPos)
for symbol in text.strip(" "):
gl.glutStrokeCharacter(self.scalableFontType, ord(symbol))
gl.glPopMatrix()
else:
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*self.qText)
gl.glTranslatef(*coordShift)
gl.glScalef(fontScale, fontScale, fontScale)
for symbol in text:
gl.glutStrokeCharacter(self.scalableFontType, ord(symbol))
gl.glPopMatrix()
gl.glLineWidth(tLineWidth)
if tLineAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
else:
gl.glDisable(gl.GL_LINE_SMOOTH)
def setMaterial(self, mat):
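        """Set the OpenGL material properties for one of the named presets
        ('Cu', 'magRed', 'magBlue', 'semiSi') or a neutral default."""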
if mat == 'Cu':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.3, 0.15, 0.15, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.4, 0.25, 0.15, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.7, 0.3, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
elif mat == 'magRed':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.6, 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.8, 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
elif mat == 'magBlue':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.1, 0.1, 0.6, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.1, 0.1, 0.8, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[0.1, 0.1, 1., 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
elif mat == 'semiSi':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.1, 0.1, 0.1, 0.75])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.3, 0.3, 0.3, 0.75])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.9, 0.8, 0.75])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 0.75])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
else:
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.1, 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.3, 0.3, 0.3, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.9, 0.8, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
def paintGL(self):
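        """Main draw routine: set up the projection and camera, then draw
        projections, rays, footprints, optical element surfaces, the
        virtual screen, labels, local axes and the coordinate grid
        according to the current view settings."""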
def makeCenterStr(centerList, prec):
retStr = '('
for dim in centerList:
retStr += '{0:.{1}f}, '.format(dim, prec)
return retStr[:-2] + ')'
if self.invertColors:
gl.glClearColor(1.0, 1.0, 1.0, 1.)
else:
gl.glClearColor(0.0, 0.0, 0.0, 1.)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
if self.perspectiveEnabled:
gl.gluPerspective(self.cameraAngle, self.aspect, 0.001, 10000)
else:
orthoView = self.cameraPos[0]*0.45
gl.glOrtho(-orthoView*self.aspect, orthoView*self.aspect,
-orthoView, orthoView, -100, 100)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.gluLookAt(self.cameraPos[0], self.cameraPos[1], self.cameraPos[2],
self.cameraTarget[0], self.cameraTarget[1],
self.cameraTarget[2],
0.0, 0.0, 1.0)
if self.enableBlending:
gl.glEnable(gl.GL_MULTISAMPLE)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
# gl.glBlendFunc(gl.GL_SRC_ALPHA, GL_ONE)
gl.glEnable(gl.GL_POINT_SMOOTH)
gl.glHint(gl.GL_POINT_SMOOTH_HINT, gl.GL_NICEST)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
self.rotateZYX()
pModel = np.array(gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX))[:-1, :-1]
self.visibleAxes = np.argmax(np.abs(pModel), axis=0)
self.signs = np.sign(pModel)
self.axPosModifier = np.ones(3)
if self.enableAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POLYGON_SMOOTH_HINT, gl.GL_NICEST)
# gl.glHint(GL_PERSPECTIVE_CORRECTION_HINT, gl.GL_NICEST)
for dim in range(3):
for iAx in range(3):
self.axPosModifier[iAx] = (self.signs[iAx][2] if
self.signs[iAx][2] != 0 else 1)
if self.projectionsVisibility[dim] > 0:
if self.lineProjectionWidth > 0 and\
self.lineProjectionOpacity > 0 and\
self.verticesArray is not None:
projectionRays = self.modelToWorld(
np.copy(self.verticesArray))
projectionRays[:, dim] =\
-self.aPos[dim] * self.axPosModifier[dim]
self.drawArrays(
0, gl.GL_LINES, projectionRays, self.raysColor,
self.lineProjectionOpacity, self.lineProjectionWidth)
if self.pointProjectionSize > 0 and\
self.pointProjectionOpacity > 0:
if self.footprintsArray is not None:
projectionDots = self.modelToWorld(
np.copy(self.footprintsArray))
projectionDots[:, dim] =\
-self.aPos[dim] * self.axPosModifier[dim]
self.drawArrays(
0, gl.GL_POINTS, projectionDots, self.dotsColor,
self.pointProjectionOpacity,
self.pointProjectionSize)
if self.virtDotsArray is not None:
projectionDots = self.modelToWorld(
np.copy(self.virtDotsArray))
projectionDots[:, dim] =\
-self.aPos[dim] * self.axPosModifier[dim]
self.drawArrays(
0, gl.GL_POINTS, projectionDots,
self.virtDotsColor,
self.pointProjectionOpacity,
self.pointProjectionSize)
if self.enableAA:
gl.glDisable(gl.GL_LINE_SMOOTH)
if self.linesDepthTest:
gl.glEnable(gl.GL_DEPTH_TEST)
if self.enableAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POLYGON_SMOOTH_HINT, gl.GL_NICEST)
if self.lineWidth > 0 and self.lineOpacity > 0 and\
self.verticesArray is not None:
self.drawArrays(1, gl.GL_LINES, self.verticesArray, self.raysColor,
self.lineOpacity, self.lineWidth)
if self.linesDepthTest:
gl.glDisable(gl.GL_DEPTH_TEST)
if self.enableAA:
gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
if len(self.oesToPlot) > 0: # Surfaces of optical elements
gl.glEnableClientState(gl.GL_NORMAL_ARRAY)
gl.glEnable(gl.GL_NORMALIZE)
self.addLighting(3.)
for oeString in self.oesToPlot:
try:
oeToPlot = self.oesList[oeString][0]
is2ndXtal = self.oesList[oeString][3]
if isinstance(oeToPlot, roes.OE):
self.plotOeSurface(oeToPlot, is2ndXtal)
elif isinstance(oeToPlot, rscreens.HemisphericScreen):
self.setMaterial('semiSi')
self.plotHemiScreen(oeToPlot)
elif isinstance(oeToPlot, rscreens.Screen):
self.setMaterial('semiSi')
self.plotScreen(oeToPlot)
if isinstance(oeToPlot, (rapertures.RectangularAperture,
rapertures.RoundAperture)):
self.setMaterial('Cu')
self.plotAperture(oeToPlot)
else:
continue
except: # analysis:ignore
if _DEBUG_:
raise
else:
continue
gl.glDisable(gl.GL_LIGHTING)
gl.glDisable(gl.GL_NORMALIZE)
gl.glDisableClientState(gl.GL_NORMAL_ARRAY)
gl.glDisable(gl.GL_DEPTH_TEST)
if self.enableAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glEnable(gl.GL_DEPTH_TEST)
if len(self.oesToPlot) > 0:
for oeString in self.oesToPlot:
oeToPlot = self.oesList[oeString][0]
if isinstance(oeToPlot, (rsources.BendingMagnet,
rsources.Wiggler,
rsources.Undulator)):
self.plotSource(oeToPlot)
# elif isinstance(oeToPlot, rscreens.HemisphericScreen):
# self.plotHemiScreen(oeToPlot)
# elif isinstance(oeToPlot, rscreens.Screen):
# self.plotScreen(oeToPlot)
# elif isinstance(oeToPlot, roes.OE):
# self.drawOeContour(oeToPlot)
# elif isinstance(oeToPlot, rapertures.RectangularAperture):
# self.drawSlitEdges(oeToPlot)
else:
continue
if self.virtScreen is not None:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POLYGON_SMOOTH_HINT, gl.GL_NICEST)
self.plotScreen(self.virtScreen, [self.vScreenSize]*2,
[1, 0, 0, 1], plotFWHM=True)
# if not self.enableAA:
# gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
if self.pointsDepthTest:
gl.glEnable(gl.GL_DEPTH_TEST)
if self.pointSize > 0 and self.pointOpacity > 0:
if self.footprintsArray is not None:
self.drawArrays(1, gl.GL_POINTS, self.footprintsArray,
self.dotsColor, self.pointOpacity,
self.pointSize)
if self.virtDotsArray is not None:
self.drawArrays(1, gl.GL_POINTS, self.virtDotsArray,
self.virtDotsColor, self.pointOpacity,
self.pointSize)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
if self.enableAA:
gl.glDisable(gl.GL_LINE_SMOOTH)
if self.pointsDepthTest:
gl.glDisable(gl.GL_DEPTH_TEST)
# oeLabels = OrderedDict()
self.labelsBounds = OrderedDict()
if len(self.labelsToPlot) > 0:
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
gl.glLineWidth(1)
# for oeKey, oeValue in self.oesList.items():
for oeKey in self.labelsToPlot:
oeValue = self.oesList[oeKey]
oeCenterStr = makeCenterStr(oeValue[2],
self.labelCoordPrec)
oeCoord = np.array(oeValue[2])
oeCenterStr = ' {0}: {1}mm'.format(
oeKey, oeCenterStr)
oeLabelPos = self.modelToWorld(oeCoord - self.coordOffset)
self.drawText(oeLabelPos, oeCenterStr, useCaption=True)
if self.showOeLabels and self.virtScreen is not None:
vsCenterStr = ' {0}: {1}mm'.format(
'Virtual Screen', makeCenterStr(self.virtScreen.center,
self.labelCoordPrec))
try:
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
m1 = self.modelToWorld(
self.virtScreen.frame[1] - self.coordOffset)
m2 = self.modelToWorld(
self.virtScreen.frame[2] - self.coordOffset)
scr1 = gl.gluProject(
*m1, model=pModel,
proj=pProjection, view=pView)[0]
scr2 = gl.gluProject(
*m2, model=pModel,
proj=pProjection, view=pView)[0]
lblCenter = self.virtScreen.frame[1] if scr1 > scr2 else\
self.virtScreen.frame[2]
except: # analysis:ignore
if _DEBUG_:
raise
else:
lblCenter = self.virtScreen.center
vsLabelPos = self.modelToWorld(lblCenter - self.coordOffset)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
gl.glLineWidth(1)
self.drawText(vsLabelPos, vsCenterStr)
if len(self.oesToPlot) > 0 and self.showLocalAxes: # Local axes
for oeString in self.oesToPlot:
try:
oeToPlot = self.oesList[oeString][0]
is2ndXtal = self.oesList[oeString][3]
if hasattr(oeToPlot, 'local_to_global'):
self.drawLocalAxes(oeToPlot, is2ndXtal)
except:
if _DEBUG_:
raise
else:
continue
gl.glEnable(gl.GL_DEPTH_TEST)
if self.drawGrid: # Coordinate grid box
self.drawCoordinateGrid()
gl.glFlush()
self.drawDirectionAxes()
if self.showHelp:
self.drawHelp()
if self.enableBlending:
gl.glDisable(gl.GL_MULTISAMPLE)
gl.glDisable(gl.GL_BLEND)
gl.glDisable(gl.GL_POINT_SMOOTH)
gl.glFlush()
def quatMult(self, qf, qt):
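# Hamilton product qf*qt of two quaternions given as [w, x, y, z] lists.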
return [qf[0]*qt[0]-qf[1]*qt[1]-qf[2]*qt[2]-qf[3]*qt[3],
qf[0]*qt[1]+qf[1]*qt[0]+qf[2]*qt[3]-qf[3]*qt[2],
qf[0]*qt[2]-qf[1]*qt[3]+qf[2]*qt[0]+qf[3]*qt[1],
qf[0]*qt[3]+qf[1]*qt[2]-qf[2]*qt[1]+qf[3]*qt[0]]
def drawCoordinateGrid(self):
def populateGrid(grids):
axisLabelC = []
axisLabelC.extend([np.vstack(
(self.modelToWorld(grids, 0),
np.ones(len(grids[0]))*self.aPos[1]*self.axPosModifier[1],
np.ones(len(grids[0]))*-self.aPos[2]*self.axPosModifier[2]
))])
axisLabelC.extend([np.vstack(
(np.ones(len(grids[1]))*self.aPos[0]*self.axPosModifier[0],
self.modelToWorld(grids, 1),
np.ones(len(grids[1]))*-self.aPos[2]*self.axPosModifier[2]
))])
zAxis = np.vstack(
(np.ones(len(grids[2]))*-self.aPos[0]*self.axPosModifier[0],
np.ones(len(grids[2]))*self.aPos[1]*self.axPosModifier[1],
self.modelToWorld(grids, 2)))
xAxisB = np.vstack(
(self.modelToWorld(grids, 0),
np.ones(len(grids[0]))*-self.aPos[1]*self.axPosModifier[1],
np.ones(len(grids[0]))*-self.aPos[2]*self.axPosModifier[2]))
yAxisB = np.vstack(
(np.ones(len(grids[1]))*-self.aPos[0]*self.axPosModifier[0],
self.modelToWorld(grids, 1),
np.ones(len(grids[1]))*-self.aPos[2]*self.axPosModifier[2]))
zAxisB = np.vstack(
(np.ones(len(grids[2]))*-self.aPos[0]*self.axPosModifier[0],
np.ones(len(grids[2]))*-self.aPos[1]*self.axPosModifier[1],
self.modelToWorld(grids, 2)))
xAxisC = np.vstack(
(self.modelToWorld(grids, 0),
np.ones(len(grids[0]))*-self.aPos[1]*self.axPosModifier[1],
np.ones(len(grids[0]))*self.aPos[2]*self.axPosModifier[2]))
yAxisC = np.vstack(
(np.ones(len(grids[1]))*-self.aPos[0]*self.axPosModifier[0],
self.modelToWorld(grids, 1),
np.ones(len(grids[1]))*self.aPos[2]*self.axPosModifier[2]))
axisLabelC.extend([np.vstack(
(np.ones(len(grids[2]))*self.aPos[0]*self.axPosModifier[0],
np.ones(len(grids[2]))*-self.aPos[1]*self.axPosModifier[1],
self.modelToWorld(grids, 2)))])
xLines = np.vstack(
(axisLabelC[0], xAxisB, xAxisB, xAxisC)).T.flatten().reshape(
4*xAxisB.shape[1], 3)
yLines = np.vstack(
(axisLabelC[1], yAxisB, yAxisB, yAxisC)).T.flatten().reshape(
4*yAxisB.shape[1], 3)
zLines = np.vstack(
(zAxis, zAxisB, zAxisB, axisLabelC[2])).T.flatten().reshape(
4*zAxisB.shape[1], 3)
return axisLabelC, np.vstack((xLines, yLines, zLines))
def drawGridLines(gridArray, lineWidth, lineOpacity, figType):
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gridColor = np.ones((len(gridArray), 4)) * lineOpacity
gridArrayVBO = gl.vbo.VBO(np.float32(gridArray))
gridArrayVBO.bind()
gl.glVertexPointerf(gridArrayVBO)
gridColorArray = gl.vbo.VBO(np.float32(gridColor))
gridColorArray.bind()
gl.glColorPointerf(gridColorArray)
gl.glLineWidth(lineWidth)
gl.glDrawArrays(figType, 0, len(gridArrayVBO))
gridArrayVBO.unbind()
gridColorArray.unbind()
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
def getAlignment(point, hDim, vDim=None):
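# Project the point and slightly offset copies to screen space to decide on
# which side of the tick the label should be placed.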
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
sp0 = np.array(gl.gluProject(
*point, model=pModel, proj=pProjection, view=pView))
pointH = np.copy(point)
pointH[hDim] *= 1.1
spH = np.array(gl.gluProject(*pointH, model=pModel,
proj=pProjection, view=pView))
pointV = np.copy(point)
if vDim is None:
vAlign = 'middle'
else:
pointV[vDim] *= 1.1
spV = np.array(gl.gluProject(*pointV, model=pModel,
proj=pProjection, view=pView))
vAlign = 'top' if spV[1] - sp0[1] > 0 else 'bottom'
hAlign = 'left' if spH[0] - sp0[0] < 0 else 'right'
return (hAlign, vAlign)
back = np.array([[-self.aPos[0], self.aPos[1], -self.aPos[2]],
[-self.aPos[0], self.aPos[1], self.aPos[2]],
[-self.aPos[0], -self.aPos[1], self.aPos[2]],
[-self.aPos[0], -self.aPos[1], -self.aPos[2]]])
side = np.array([[self.aPos[0], -self.aPos[1], -self.aPos[2]],
[-self.aPos[0], -self.aPos[1], -self.aPos[2]],
[-self.aPos[0], -self.aPos[1], self.aPos[2]],
[self.aPos[0], -self.aPos[1], self.aPos[2]]])
bottom = np.array([[self.aPos[0], -self.aPos[1], -self.aPos[2]],
[self.aPos[0], self.aPos[1], -self.aPos[2]],
[-self.aPos[0], self.aPos[1], -self.aPos[2]],
[-self.aPos[0], -self.aPos[1], -self.aPos[2]]])
back[:, 0] *= self.axPosModifier[0]
side[:, 1] *= self.axPosModifier[1]
bottom[:, 2] *= self.axPosModifier[2]
# Calculating regular grids in world coordinates
limits = np.array([-1, 1])[:, np.newaxis] * np.array(self.aPos)
allLimits = limits * self.maxLen / self.scaleVec - self.tVec\
+ self.coordOffset
axisGridArray = []
gridLabels = []
precisionLabels = []
if self.fineGridEnabled:
fineGridArray = []
for iAx in range(3):
m2 = self.aPos[iAx] / 0.9
dx1 = np.abs(allLimits[:, iAx][0] - allLimits[:, iAx][1]) / m2
order = np.floor(np.log10(dx1))
m1 = dx1 * 10**-order
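# Pick a 'nice' tick step (0.2, 0.5 or 1 times a power of ten) from the
# mantissa m1 of the axis span.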
if (m1 >= 1) and (m1 < 2):
step = 0.2 * 10**order
elif (m1 >= 2) and (m1 < 4):
step = 0.5 * 10**order
else:
step = 10**order
if step < 1:
decimalX = int(np.abs(order)) + 1 if m1 < 4 else\
int(np.abs(order))
else:
decimalX = 0
gridX = np.arange(int(allLimits[:, iAx][0]/step)*step,
allLimits[:, iAx][1], step)
gridX = gridX if gridX[0] >= allLimits[:, iAx][0] else\
gridX[1:]
gridLabels.extend([gridX])
precisionLabels.extend([np.ones_like(gridX)*decimalX])
axisGridArray.extend([gridX - self.coordOffset[iAx]])
if self.fineGridEnabled:
fineStep = step * 0.2
fineGrid = np.arange(
int(allLimits[:, iAx][0]/fineStep)*fineStep,
allLimits[:, iAx][1], fineStep)
fineGrid = fineGrid if\
fineGrid[0] >= allLimits[:, iAx][0] else fineGrid[1:]
fineGridArray.extend([fineGrid - self.coordOffset[iAx]])
axisL, axGrid = populateGrid(axisGridArray)
if self.fineGridEnabled:
tmp, fineAxGrid = populateGrid(fineGridArray)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
for iAx in range(3):
if not (not self.perspectiveEnabled and
iAx == self.visibleAxes[2]):
tAlign = None
midp = int(len(axisL[iAx][0, :])/2)
if iAx == self.visibleAxes[1]: # Side plane,
if self.useScalableFont:
tAlign = getAlignment(axisL[iAx][:, midp],
self.visibleAxes[0])
else:
axisL[iAx][self.visibleAxes[2], :] *= 1.05 # depth
axisL[iAx][self.visibleAxes[0], :] *= 1.05 # side
if iAx == self.visibleAxes[0]: # Bottom plane, left-right
if self.useScalableFont:
tAlign = getAlignment(axisL[iAx][:, midp],
self.visibleAxes[2],
self.visibleAxes[1])
else:
axisL[iAx][self.visibleAxes[1], :] *= 1.05 # height
axisL[iAx][self.visibleAxes[2], :] *= 1.05 # side
if iAx == self.visibleAxes[2]: # Bottom plane, left-right
if self.useScalableFont:
tAlign = getAlignment(axisL[iAx][:, midp],
self.visibleAxes[0],
self.visibleAxes[1])
else:
axisL[iAx][self.visibleAxes[1], :] *= 1.05 # height
axisL[iAx][self.visibleAxes[0], :] *= 1.05 # side
for tick, tText, pcs in list(zip(axisL[iAx].T, gridLabels[iAx],
precisionLabels[iAx])):
valueStr = "{0:.{1}f}".format(tText, int(pcs))
self.drawText(tick, valueStr, alignment=tAlign)
# if not self.enableAA:
# gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POINT_SMOOTH_HINT, gl.GL_NICEST)
tLineWidth = gl.glGetDoublev(gl.GL_LINE_WIDTH)
drawGridLines(np.vstack((back, side, bottom)),
self.coordinateGridLineWidth * 2, 0.75, gl.GL_QUADS)
drawGridLines(axGrid, self.coordinateGridLineWidth, 0.5, gl.GL_LINES)
if self.fineGridEnabled:
drawGridLines(fineAxGrid, self.coordinateGridLineWidth, 0.25,
gl.GL_LINES)
gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glLineWidth(tLineWidth)
def drawArrays(self, tr, geom, vertices, colors, lineOpacity, lineWidth):
if vertices is None or colors is None:
return
if bool(tr):
vertexArray = gl.vbo.VBO(self.modelToWorld(vertices))
else:
vertexArray = gl.vbo.VBO(vertices)
vertexArray.bind()
gl.glVertexPointerf(vertexArray)
pureOpacity = np.copy(colors[:, 3])
colors[:, 3] = np.float32(pureOpacity * lineOpacity)
colorArray = gl.vbo.VBO(colors)
colorArray.bind()
gl.glColorPointerf(colorArray)
if geom == gl.GL_LINES:
gl.glLineWidth(lineWidth)
else:
gl.glPointSize(lineWidth)
gl.glDrawArrays(geom, 0, len(vertices))
colors[:, 3] = pureOpacity
colorArray.unbind()
vertexArray.unbind()
def plotSource(self, oe):
# gl.glEnable(gl.GL_MAP2_VERTEX_3)
# gl.glEnable(gl.GL_MAP2_NORMAL)
nPeriods = int(oe.Np) if hasattr(oe, 'Np') else 0.5
if hasattr(oe, 'L0'):
lPeriod = oe.L0
maghL = 0.25 * lPeriod * 0.5
else:
try:
lPeriod = (oe.Theta_max - oe.Theta_min) * oe.ro * 1000
except AttributeError:
if _DEBUG_:
raise
else:
lPeriod = 500.
maghL = lPeriod
maghH = 10 * 0.5
maghW = 10 * 0.5
surfRot = [[0, 0, 0, 1], [180, 0, 1, 0],
[-90, 0, 1, 0], [90, 0, 1, 0],
[-90, 1, 0, 0], [90, 1, 0, 0]]
surfTrans = np.array([[0, 0, maghH], [0, 0, -maghH],
[-maghW, 0, 0], [maghW, 0, 0],
[0, maghL, 0], [0, -maghL, 0]])
surfScales = np.array([[maghW*2, maghL*2, 0], [maghW*2, maghL*2, 0],
[0, maghL*2, maghH*2], [0, maghL*2, maghH*2],
[maghW*2, 0, maghH*2], [maghW*2, 0, maghH*2]])
# deltaX = 1. / 2. # float(self.tiles[0])
# deltaY = 1. / 2. # float(self.tiles[1])
magToggle = True
gl.glLineWidth(1)
gl.glPushMatrix()
gl.glTranslatef(*(self.modelToWorld(np.array(oe.center) -
self.coordOffset)))
gl.glRotatef(np.degrees(oe.pitch * self.scaleVec[2] /
self.scaleVec[1]), 1, 0, 0)
yaw = oe.yaw
try:
az = oe.bl.azimuth
except: # analysis:ignore
if _DEBUG_:
raise
else:
az = 0
gl.glRotatef(np.degrees((yaw-az) * self.scaleVec[0] /
self.scaleVec[1]), 0, 0, 1)
gl.glTranslatef(*(-1. * self.modelToWorld(np.array(oe.center) -
self.coordOffset)))
for period in range(int(nPeriods) if nPeriods > 0.5 else 1):
for hp in ([0, 0.5] if nPeriods > 0.5 else [0.25]):
pY = list(oe.center)[1] - lPeriod * (0.5 * nPeriods -
period - hp)
magToggle = not magToggle
for gap in [maghH*1.25, -maghH*1.25]:
cubeCenter = np.array([oe.center[0], pY, oe.center[2]+gap])
# self.setMaterial('magRed' if magToggle else 'magBlue')
magColor = [0.7, 0.1, 0.1, 1.] if magToggle \
else [0.1, 0.1, 0.7, 1.]
magToggle = not magToggle
for surf in range(6):
gl.glPushMatrix()
gl.glTranslatef(*(self.modelToWorld(
cubeCenter + surfTrans[surf] - self.coordOffset)))
gl.glScalef(*(self.modelToWorld(surfScales[surf] -
self.tVec)))
gl.glRotatef(*surfRot[surf])
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(*magColor)
gl.glVertex3f(-0.5, -0.5, 0)
gl.glVertex3f(-0.5, 0.5, 0)
gl.glVertex3f(0.5, 0.5, 0)
gl.glVertex3f(0.5, -0.5, 0)
gl.glEnd()
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(0, 0, 0, 1.)
gl.glVertex3f(-0.5, -0.5, 0)
gl.glVertex3f(-0.5, 0.5, 0)
gl.glVertex3f(0.5, 0.5, 0)
gl.glVertex3f(0.5, -0.5, 0)
gl.glEnd()
# for i in range(2):
# xGridOe = np.linspace(-0.5 + i*deltaX,
# -0.5 + (i+1)*deltaX,
# self.surfCPOrder)
# for k in range(2):
# yGridOe = np.linspace(-0.5 + k*deltaY,
# -0.5 + (k+1)*deltaY,
# self.surfCPOrder)
# xv, yv = np.meshgrid(xGridOe, yGridOe)
# xv = xv.flatten()
# yv = yv.flatten()
# zv = np.zeros_like(xv)
#
# surfCP = np.vstack((xv, yv, zv)).T
# surfNorm = np.vstack((np.zeros_like(xv),
# np.zeros_like(xv),
# np.ones_like(zv),
# np.ones_like(zv))).T
#
# gl.glMap2f(gl.GL_MAP2_VERTEX_3, 0, 1, 0, 1,
# surfCP.reshape(
# self.surfCPOrder,
# self.surfCPOrder, 3))
#
# gl.glMap2f(gl.GL_MAP2_NORMAL, 0, 1, 0, 1,
# surfNorm.reshape(
# self.surfCPOrder,
# self.surfCPOrder, 4))
#
# gl.glMapGrid2f(self.surfCPOrder, 0.0, 1.0,
# self.surfCPOrder, 0.0, 1.0)
#
# gl.glEvalMesh2(gl.GL_FILL, 0,
# self.surfCPOrder,
# 0, self.surfCPOrder)
gl.glPopMatrix()
gl.glPopMatrix()
# gl.glDisable(gl.GL_MAP2_VERTEX_3)
# gl.glDisable(gl.GL_MAP2_NORMAL)
def plotCurvedMesh(self, x, y, z, a, b, c, shift):
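# Render one Bezier surface patch through the given control points with GL
# evaluators; (a, b, c) are the per-point surface normals and 'shift' is an
# extra offset (used for stacked CRL surfaces).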
surfCP = np.vstack((x - self.coordOffset[0] - shift[0],
y - self.coordOffset[1] - shift[1],
z - self.coordOffset[2] - shift[2])).T
gl.glMap2f(gl.GL_MAP2_VERTEX_3, 0, 1, 0, 1,
self.modelToWorld(surfCP.reshape(
self.surfCPOrder,
self.surfCPOrder, 3)))
surfNorm = np.vstack((a, b, c,
np.ones_like(a))).T
gl.glMap2f(gl.GL_MAP2_NORMAL, 0, 1, 0, 1,
surfNorm.reshape(
self.surfCPOrder,
self.surfCPOrder, 4))
gl.glMapGrid2f(self.surfCPOrder, 0.0, 1.0,
self.surfCPOrder, 0.0, 1.0)
gl.glEvalMesh2(gl.GL_FILL, 0, self.surfCPOrder,
0, self.surfCPOrder)
def plotOeSurface(self, oe, is2ndXtal):
def getThickness(element):
thickness = 0
if isinstance(oe, roes.Plate):
if oe.t is not None:
return oe.t
if hasattr(oe, "material"):
if oe.material is not None:
thickness = 10.
if hasattr(oe.material, "t"):
thickness = oe.material.t if oe.material.t is not None\
else thickness
elif isinstance(oe.material, rmats.Multilayer):
if oe.material.substrate is not None:
if hasattr(oe.material.substrate, 't'):
if oe.material.substrate.t is not None:
thickness = oe.material.substrate.t
return thickness
thickness = getThickness(oe)
self.setMaterial('Si')
gl.glEnable(gl.GL_MAP2_VERTEX_3)
gl.glEnable(gl.GL_MAP2_NORMAL)
# Top and Bottom Surfaces
nsIndex = int(is2ndXtal)
if is2ndXtal:
xLimits = list(oe.limPhysX2)
# xLimits = list(oe.limOptX2) if\
# oe.limOptX2 is not None else oe.limPhysX2
if np.any(np.abs(xLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
xLimits = oe.footprint[nsIndex][:, 0]
yLimits = list(oe.limPhysY2)
# yLimits = list(oe.limOptY2) if\
# oe.limOptY2 is not None else oe.limPhysY2
if np.any(np.abs(yLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
yLimits = oe.footprint[nsIndex][:, 1]
else:
xLimits = list(oe.limPhysX)
# xLimits = list(oe.limOptX) if\
# oe.limOptX is not None else oe.limPhysX
if np.any(np.abs(xLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
xLimits = oe.footprint[nsIndex][:, 0]
yLimits = list(oe.limPhysY)
# yLimits = list(oe.limOptY) if\
# oe.limOptY is not None else oe.limPhysY
if np.any(np.abs(yLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
yLimits = oe.footprint[nsIndex][:, 1]
localTiles = np.array(self.tiles)
if oe.shape == 'round':
rX = np.abs((xLimits[1] - xLimits[0]))*0.5
rY = np.abs((yLimits[1] - yLimits[0]))*0.5
cX = (xLimits[1] + xLimits[0])*0.5
cY = (yLimits[1] + yLimits[0])*0.5
xLimits = [0, 1.]
yLimits = [0, 2*np.pi]
localTiles[1] *= 3
for i in range(localTiles[0]):
deltaX = (xLimits[1] - xLimits[0]) /\
float(localTiles[0])
xGridOe = np.linspace(xLimits[0] + i*deltaX,
xLimits[0] + (i+1)*deltaX,
self.surfCPOrder) + oe.dx
for k in range(localTiles[1]):
deltaY = (yLimits[1] - yLimits[0]) /\
float(localTiles[1])
yGridOe = np.linspace(yLimits[0] + k*deltaY,
yLimits[0] + (k+1)*deltaY,
self.surfCPOrder)
xv, yv = np.meshgrid(xGridOe, yGridOe)
if oe.shape == 'round':
xv, yv = rX*xv*np.cos(yv)+cX, rY*xv*np.sin(yv)+cY
xv = xv.flatten()
yv = yv.flatten()
if is2ndXtal:
zExt = '2'
else:
zExt = '1' if hasattr(oe, 'local_z1') else ''
local_z = getattr(oe, 'local_r{}'.format(zExt)) if\
oe.isParametric else getattr(oe, 'local_z{}'.format(zExt))
local_n = getattr(oe, 'local_n{}'.format(zExt))
xv = np.copy(xv)
yv = np.copy(yv)
zv = np.zeros_like(xv)
if oe.isParametric:
xv, yv, zv = oe.xyz_to_param(xv, yv, zv)
zv = local_z(xv, yv)
nv = local_n(xv, yv)
gbT = rsources.Beam(nrays=len(xv))
if oe.isParametric:
xv, yv, zv = oe.param_to_xyz(xv, yv, zv)
gbT.x = xv
gbT.y = yv
gbT.z = zv
gbT.a = nv[0] * np.ones_like(zv)
gbT.b = nv[1] * np.ones_like(zv)
gbT.c = nv[2] * np.ones_like(zv)
if thickness > 0:
gbB = rsources.Beam(copyFrom=gbT)
if isinstance(oe, roes.LauePlate):
gbB.z[:] = gbT.z - thickness
gbB.a = -gbT.a
gbB.b = -gbT.b
gbB.c = -gbT.c
else:
gbB.z[:] = -thickness
gbB.a[:] = 0
gbB.b[:] = 0
gbB.c[:] = -1.
oe.local_to_global(gbB, is2ndXtal=is2ndXtal)
oe.local_to_global(gbT, is2ndXtal=is2ndXtal)
if hasattr(oe, '_nCRL'):
cShift = oe.centerShift
nSurf = oe._nCRL
else:
cShift = np.zeros(3)
nSurf = 1
for iSurf in range(nSurf):
dC = cShift * iSurf
self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
gbT.a, gbT.b, gbT.c, dC)
if thickness > 0 and\
not isinstance(oe, roes.DoubleParaboloidLens):
self.plotCurvedMesh(gbB.x, gbB.y, gbB.z,
gbB.a, gbB.b, gbB.c, dC)
# Side faces
if isinstance(oe, roes.Plate):
self.setMaterial('semiSi')
if thickness > 0:
for ie, yPos in enumerate(yLimits):
for i in range(localTiles[0]):
if oe.shape == 'round':
continue
deltaX = (xLimits[1] - xLimits[0]) /\
float(localTiles[0])
xGridOe = np.linspace(xLimits[0] + i*deltaX,
xLimits[0] + (i+1)*deltaX,
self.surfCPOrder) + oe.dx
edgeX = xGridOe
edgeY = np.ones_like(xGridOe)*yPos
edgeZ = np.zeros_like(xGridOe)
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.xyz_to_param(
edgeX, edgeY, edgeZ)
edgeZ = local_z(edgeX, edgeY)
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.param_to_xyz(
edgeX, edgeY, edgeZ)
gridZ = None
for zTop in edgeZ:
gridZ = np.linspace(-thickness, zTop,
self.surfCPOrder) if\
gridZ is None else np.concatenate((
gridZ, np.linspace(-thickness, zTop,
self.surfCPOrder)))
gridX = np.repeat(edgeX, len(edgeZ))
gridY = np.ones_like(gridX) * yPos
xN = np.zeros_like(gridX)
yN = (1 if ie == 1 else -1)*np.ones_like(gridX)
zN = np.zeros_like(gridX)
faceBeam = rsources.Beam(nrays=len(gridX))
faceBeam.x = gridX
faceBeam.y = gridY
faceBeam.z = gridZ
faceBeam.a = xN
faceBeam.b = yN
faceBeam.c = zN
oe.local_to_global(faceBeam, is2ndXtal=is2ndXtal)
self.plotCurvedMesh(faceBeam.x, faceBeam.y, faceBeam.z,
faceBeam.a, faceBeam.b, faceBeam.c,
[0]*3)
for ie, xPos in enumerate(xLimits):
if ie == 0 and oe.shape == 'round':
continue
for i in range(localTiles[1]):
deltaY = (yLimits[1] - yLimits[0]) /\
float(localTiles[1])
yGridOe = np.linspace(yLimits[0] + i*deltaY,
yLimits[0] + (i+1)*deltaY,
self.surfCPOrder)
edgeY = yGridOe
edgeX = np.ones_like(yGridOe)*xPos
edgeZ = np.zeros_like(xGridOe)
if oe.shape == 'round':
edgeX, edgeY = rX*edgeX*np.cos(edgeY)+cX,\
rY*edgeX*np.sin(edgeY)+cY
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.xyz_to_param(
edgeX, edgeY, edgeZ)
edgeZ = local_z(edgeX, edgeY)
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.param_to_xyz(
edgeX, edgeY, edgeZ)
zN = 0
gridZ = None
for zTop in edgeZ:
gridZ = np.linspace(-thickness, zTop,
self.surfCPOrder) if\
gridZ is None else np.concatenate((
gridZ, np.linspace(-thickness, zTop,
self.surfCPOrder)))
gridY = np.repeat(edgeY, len(edgeZ))
if oe.shape == 'round':
yN = (gridY-cY) / rY
gridX = np.repeat(edgeX, len(edgeZ))
xN = (gridX-cX) / rX
else:
gridX = np.repeat(edgeX, len(edgeZ))
yN = np.zeros_like(gridX)
xN = (1 if ie == 1 else -1) * np.ones_like(gridX)
zN = np.zeros_like(gridX)
faceBeam = rsources.Beam(nrays=len(gridX))
faceBeam.x = gridX
faceBeam.y = gridY
faceBeam.z = gridZ
faceBeam.a = xN
faceBeam.b = yN
faceBeam.c = zN
oe.local_to_global(faceBeam, is2ndXtal=is2ndXtal)
self.plotCurvedMesh(faceBeam.x, faceBeam.y, faceBeam.z,
faceBeam.a, faceBeam.b, faceBeam.c,
[0]*3)
gl.glDisable(gl.GL_MAP2_VERTEX_3)
gl.glDisable(gl.GL_MAP2_NORMAL)
# Contour
# xBound = np.linspace(xLimits[0], xLimits[1],
# self.surfCPOrder*(localTiles[0]+1))
# yBound = np.linspace(yLimits[0], yLimits[1],
# self.surfCPOrder*(localTiles[1]+1))
# if oe.shape == 'round':
# oeContour = [0]
# oneEdge = [0]
# else:
# oeContour = [0]*4
# oneEdge = [0]*4
# oeContour[0] = np.array([xBound,
# yBound[0]*np.ones_like(xBound)]) # bottom
# oeContour[1] = np.array([xBound[-1]*np.ones_like(yBound),
# yBound]) # left
# oeContour[2] = np.array([np.flip(xBound, 0),
# yBound[-1]*np.ones_like(xBound)]) # top
# oeContour[3] = np.array([xBound[0]*np.ones_like(yBound),
# np.flip(yBound, 0)]) # right
#
# for ie, edge in enumerate(oeContour):
# if oe.shape == 'round':
# edgeX, edgeY = rX*np.cos(yBound)+cX, rY*np.sin(yBound)+cY
# else:
# edgeX = edge[0, :]
# edgeY = edge[1, :]
# edgeZ = np.zeros_like(edgeX)
#
# if oe.isParametric:
# edgeX, edgeY, edgeZ = oe.xyz_to_param(edgeX, edgeY,
# edgeZ)
#
# edgeZ = local_z(edgeX, edgeY)
# if oe.isParametric:
# edgeX, edgeY, edgeZ = oe.param_to_xyz(
# edgeX, edgeY, edgeZ)
# edgeBeam = rsources.Beam(nrays=len(edgeX))
# edgeBeam.x = edgeX
# edgeBeam.y = edgeY
# edgeBeam.z = edgeZ
#
# oe.local_to_global(edgeBeam, is2ndXtal=is2ndXtal)
# oneEdge[ie] = np.vstack((edgeBeam.x - self.coordOffset[0],
# edgeBeam.y - self.coordOffset[1],
# edgeBeam.z - self.coordOffset[2])).T
#
# self.oeContour[oe.name] = oneEdge
# def drawOeContour(self, oe):
# gl.glEnable(gl.GL_MAP1_VERTEX_3)
# gl.glLineWidth(self.contourWidth)
# gl.glColor4f(0.0, 0.0, 0.0, 1.0)
# cpo = self.surfCPOrder
# for ie in range(len(self.oeContour[oe.name])):
# edge = self.oeContour[oe.name][ie]
# nTiles = self.tiles[0] if ie in [0, 2] else self.tiles[1]
# nTiles = self.tiles[1]*3 if oe.shape == 'round' else nTiles
# for tile in range(nTiles+1):
# gl.glMap1f(gl.GL_MAP1_VERTEX_3, 0, 1,
# self.modelToWorld(edge[tile*cpo:(tile+1)*cpo+1, :]))
# gl.glMapGrid1f(cpo, 0.0, 1.0)
# gl.glEvalMesh1(gl.GL_LINE, 0, cpo)
#
# gl.glDisable(gl.GL_MAP1_VERTEX_3)
# def drawSlitEdges(self, oe):
# gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
# gl.glLineWidth(self.contourWidth)
# gl.glColor4f(0.0, 0.0, 0.0, 1.0)
# gl.glBegin(gl.GL_QUADS)
# for edge in self.modelToWorld(np.array(self.slitEdges[oe.name]) -
# np.array(self.coordOffset)):
# gl.glVertex3f(*edge[0, :])
# gl.glVertex3f(*edge[1, :])
# gl.glVertex3f(*edge[3, :])
# gl.glVertex3f(*edge[2, :])
#
# gl.glVertex3f(*edge[0, :])
# gl.glVertex3f(*edge[1, :])
# gl.glVertex3f(*edge[5, :])
# gl.glVertex3f(*edge[4, :])
#
# gl.glVertex3f(*edge[5, :])
# gl.glVertex3f(*edge[1, :])
# gl.glVertex3f(*edge[3, :])
# gl.glVertex3f(*edge[7, :])
#
# gl.glVertex3f(*edge[4, :])
# gl.glVertex3f(*edge[5, :])
# gl.glVertex3f(*edge[7, :])
# gl.glVertex3f(*edge[6, :])
#
# gl.glVertex3f(*edge[0, :])
# gl.glVertex3f(*edge[4, :])
# gl.glVertex3f(*edge[6, :])
# gl.glVertex3f(*edge[2, :])
#
# gl.glVertex3f(*edge[2, :])
# gl.glVertex3f(*edge[3, :])
# gl.glVertex3f(*edge[7, :])
# gl.glVertex3f(*edge[6, :])
# gl.glEnd()
def plotAperture(self, oe):
surfCPOrder = self.surfCPOrder
gl.glEnable(gl.GL_MAP2_VERTEX_3)
gl.glEnable(gl.GL_MAP2_NORMAL)
plotVolume = False
# slitT = self.slitThickness
if oe.shape == 'round':
r = oe.r
isBeamStop = len(re.findall('Stop', str(type(oe)))) > 0
if isBeamStop:
limits = [[0, r, 0, 2*np.pi]]
else:
wf = max(r*0.25, 2.5)
limits = [[r, r+wf, 0, 2*np.pi]]
tiles = self.tiles[1] * 5
else:
try:
left, right, bottom, top = oe.spotLimits
except: # analysis:ignore
if _DEBUG_:
raise
else:
left, right, bottom, top = 0, 0, 0, 0
for akind, d in zip(oe.kind, oe.opening):
if akind.startswith('l'):
left = d
elif akind.startswith('r'):
right = d
elif akind.startswith('b'):
bottom = d
elif akind.startswith('t'):
top = d
w = right - left
h = top - bottom
wf = max(min(w, h)*0.5, 2.5)
limits = []
for akind, d in zip(oe.kind, oe.opening):
if akind.startswith('l'):
limits.append([left-wf, left, bottom-wf, top+wf])
elif akind.startswith('r'):
limits.append([right, right+wf, bottom-wf, top+wf])
elif akind.startswith('b'):
limits.append([left-wf, right+wf, bottom-wf, bottom])
elif akind.startswith('t'):
limits.append([left-wf, right+wf, top, top+wf])
tiles = self.tiles[1]
if not plotVolume:
for xMin, xMax, yMin, yMax in limits:
xGridOe = np.linspace(xMin, xMax, surfCPOrder)
deltaY = (yMax - yMin) / float(tiles)
for k in range(tiles):
yMinT = yMin + k*deltaY
yMaxT = yMinT + deltaY
yGridOe = np.linspace(yMinT, yMaxT, surfCPOrder)
xv, yv = np.meshgrid(xGridOe, yGridOe)
if oe.shape == 'round':
xv, yv = xv*np.cos(yv), xv*np.sin(yv)
xv = xv.flatten()
yv = yv.flatten()
gbT = rsources.Beam(nrays=len(xv))
gbT.x = xv
gbT.y = np.zeros_like(xv)
gbT.z = yv
gbT.a = np.zeros_like(xv)
gbT.b = np.ones_like(xv)
gbT.c = np.zeros_like(xv)
oe.local_to_global(gbT)
for surf in [1, -1]:
self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
gbT.a, gbT.b[:]*surf, gbT.c,
[0, 0, 0])
# else:
# self.slitEdges[oe.name] = []
# for iface, face in enumerate(limits):
# dT = slitT if iface < 2 else -slitT # Slit thickness
# # front
# xGridOe = np.linspace(face[0], face[1], surfCPOrder)
# zGridOe = np.linspace(face[2], face[3], surfCPOrder)
# yGridOe = np.linspace(0, -dT, surfCPOrder)
# xVert, yVert, zVert = np.meshgrid([face[0], face[1]],
# [0, -dT],
# [face[2], face[3]])
# bladeVertices = np.vstack((xVert.flatten(),
# yVert.flatten(),
# zVert.flatten())).T
# gbt = rsources.Beam(nrays=8)
# gbt.x = bladeVertices[:, 0]
# gbt.y = bladeVertices[:, 1]
# gbt.z = bladeVertices[:, 2]
# oe.local_to_global(gbt)
#
# self.slitEdges[oe.name].append(np.vstack((gbt.x, gbt.y,
# gbt.z)).T)
#
# xv, zv = np.meshgrid(xGridOe, zGridOe)
# xv = xv.flatten()
# zv = zv.flatten()
#
# gbT = rsources.Beam(nrays=len(xv))
# gbT.x = xv
# gbT.y = np.zeros_like(xv)
# gbT.z = zv
#
# gbT.a = np.zeros_like(xv)
# gbT.b = np.ones_like(xv)
# gbT.c = np.zeros_like(xv)
#
# oe.local_to_global(gbT)
#
# for ysurf in [0, dT]:
# nsurf = 1. if (dT > 0 and ysurf != 0) or\
# (ysurf == 0 and dT < 0) else -1.
# self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
# gbT.a, gbT.b[:]*nsurf, gbT.c,
# [0, ysurf, 0])
#
# # side
# zv, yv = np.meshgrid(zGridOe, yGridOe)
# zv = zv.flatten()
# yv = yv.flatten()
#
# gbT = rsources.Beam(nrays=len(yv))
# gbT.y = yv
# gbT.x = np.zeros_like(yv)
# gbT.z = zv
#
# gbT.a = np.ones_like(yv)
# gbT.b = np.zeros_like(yv)
# gbT.c = np.zeros_like(yv)
#
# oe.local_to_global(gbT)
#
# for isurf, xsurf in enumerate([face[0], face[1]]):
# nsurf = 1. if isurf == 0 else -1
# self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
# gbT.a[:]*nsurf, gbT.b, gbT.c,
# [xsurf, 0, 0])
#
# # top
# xv, yv = np.meshgrid(xGridOe, yGridOe)
# xv = xv.flatten()
# yv = yv.flatten()
#
# gbT = rsources.Beam(nrays=len(yv))
# gbT.x = xv
# gbT.y = yv
# gbT.z = np.zeros_like(xv)
#
# gbT.a = np.zeros_like(yv)
# gbT.b = np.zeros_like(yv)
# gbT.c = np.ones_like(yv)
#
# oe.local_to_global(gbT)
#
# for isurf, zsurf in enumerate([face[2], face[3]]):
# nsurf = 1. if isurf == 0 else -1
# self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
# gbT.a, gbT.b, gbT.c[:]*nsurf,
# [0, 0, zsurf])
gl.glDisable(gl.GL_MAP2_VERTEX_3)
gl.glDisable(gl.GL_MAP2_NORMAL)
def plotScreen(self, oe, dimensions=None, frameColor=None, plotFWHM=False):
scAbsZ = np.linalg.norm(oe.z * self.scaleVec)
scAbsX = np.linalg.norm(oe.x * self.scaleVec)
if dimensions is not None:
vScrHW = dimensions[0]
vScrHH = dimensions[1]
else:
vScrHW = self.vScreenSize
vScrHH = self.vScreenSize
dX = vScrHW * np.array(oe.x) * self.maxLen / scAbsX
dZ = vScrHH * np.array(oe.z) * self.maxLen / scAbsZ
vScreenBody = np.zeros((4, 3))
vScreenBody[0, :] = vScreenBody[1, :] = oe.center - dX
vScreenBody[2, :] = vScreenBody[3, :] = oe.center + dX
vScreenBody[0, :] -= dZ
vScreenBody[1, :] += dZ
vScreenBody[2, :] += dZ
vScreenBody[3, :] -= dZ
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
# if self.invertColors:
# gl.glColor4f(0.0, 0.0, 0.0, 0.2)
# else:
# gl.glColor4f(1.0, 1.0, 1.0, 0.2)
for i in range(4):
gl.glVertex3f(*self.modelToWorld(vScreenBody[i, :] -
self.coordOffset))
gl.glEnd()
if frameColor is not None:
self.virtScreen.frame = vScreenBody
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glLineWidth(2)
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(*frameColor)
for i in range(4):
gl.glVertex3f(*self.modelToWorld(vScreenBody[i, :] -
self.coordOffset))
gl.glEnd()
if plotFWHM:
gl.glLineWidth(1)
gl.glDisable(gl.GL_LINE_SMOOTH)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
startVec = np.array([0, 1, 0])
destVec = np.array(oe.y / self.scaleVec)
rotVec = np.cross(startVec, destVec)
rotAngle = np.degrees(np.arccos(
np.dot(startVec, destVec) /
np.linalg.norm(startVec) / np.linalg.norm(destVec)))
rotVecGL = np.float32(np.hstack((rotAngle, rotVec)))
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
scr = np.zeros((3, 3))
for iAx in range(3):
scr[iAx] = np.array(gl.gluProject(
*(self.modelToWorld(vScreenBody[iAx] - self.coordOffset)),
model=pModel, proj=pProjection, view=pView))
vFlip = 2. if scr[0, 1] > scr[1, 1] else 0.
hFlip = 2. if scr[1, 0] > scr[2, 0] else 0.
for iAx, text in enumerate(oe.FWHMstr):
fontScale = self.fontSize / 12500.
coord = self.modelToWorld(
(vScreenBody[iAx + 1] + vScreenBody[iAx + 2]) * 0.5 -
self.coordOffset)
coordShift = np.zeros(3, dtype=np.float32)
if iAx == 0: # Horizontal Label
coordShift[0] = (hFlip - 1.) * fontScale *\
len(text) * 104.76 * 0.5
coordShift[2] = fontScale * 200.
else: # Vertical Label
coordShift[0] = fontScale * 200.
coordShift[2] = (vFlip - 1.) * fontScale *\
len(text) * 104.76 * 0.5
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*rotVecGL)
gl.glTranslatef(*coordShift)
gl.glRotatef(180.*(vFlip*0.5), 1, 0, 0)
gl.glRotatef(180.*(hFlip*0.5), 0, 0, 1)
if iAx > 0:
gl.glRotatef(-90, 0, 1, 0)
if iAx == 0: # Horizontal Label to half height
gl.glTranslatef(0, 0, -50. * fontScale)
else: # Vertical Label to half height
gl.glTranslatef(-50. * fontScale, 0, 0)
gl.glRotatef(90, 1, 0, 0)
gl.glScalef(fontScale, fontScale, fontScale)
for symbol in text:
gl.glutStrokeCharacter(
gl.GLUT_STROKE_MONO_ROMAN, ord(symbol))
gl.glPopMatrix()
gl.glEnable(gl.GL_LINE_SMOOTH)
def plotHemiScreen(self, oe, dimensions=None):
try:
rMajor = oe.R
except: # analysis:ignore
rMajor = 1000.
if dimensions is not None:
rMinor = dimensions
else:
rMinor = self.vScreenSize
if rMinor > rMajor:
rMinor = rMajor
yVec = np.array(oe.x)
sphereCenter = np.array(oe.center)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 0.2)
else:
gl.glColor4f(1.0, 1.0, 1.0, 0.2)
gl.glEnable(gl.GL_MAP2_VERTEX_3)
dAngle = np.arctan2(rMinor, rMajor)
xLimits = [-dAngle + yVec[0], dAngle + yVec[0]]
yLimits = [-dAngle + yVec[2], dAngle + yVec[2]]
for i in range(self.tiles[0]):
deltaX = (xLimits[1] - xLimits[0]) /\
float(self.tiles[0])
xGridOe = np.linspace(xLimits[0] + i*deltaX,
xLimits[0] + (i+1)*deltaX,
self.surfCPOrder)
for k in range(self.tiles[1]):
deltaY = (yLimits[1] - yLimits[0]) /\
float(self.tiles[1])
yGridOe = np.linspace(yLimits[0] + k*deltaY,
yLimits[0] + (k+1)*deltaY,
self.surfCPOrder)
xv, yv = np.meshgrid(xGridOe, yGridOe)
xv = xv.flatten()
yv = yv.flatten()
ibp = rsources.Beam(nrays=len(xv))
ibp.x[:] = sphereCenter[0]
ibp.y[:] = sphereCenter[1]
ibp.z[:] = sphereCenter[2]
ibp.b[:] = yVec[1]
ibp.a = xv
ibp.c = yv
ibp.state[:] = 1
gbp = oe.expose_global(beam=ibp)
surfCP = np.vstack((gbp.x - self.coordOffset[0],
gbp.y - self.coordOffset[1],
gbp.z - self.coordOffset[2])).T
gl.glMap2f(gl.GL_MAP2_VERTEX_3, 0, 1, 0, 1,
self.modelToWorld(surfCP.reshape(
self.surfCPOrder,
self.surfCPOrder, 3)))
gl.glMapGrid2f(self.surfCPOrder, 0.0, 1.0,
self.surfCPOrder, 0.0, 1.0)
gl.glEvalMesh2(gl.GL_FILL, 0, self.surfCPOrder,
0, self.surfCPOrder)
gl.glDisable(gl.GL_MAP2_VERTEX_3)
def addLighting(self, pos):
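# Place six positional lights on the coordinate axes at distance 'pos', each
# spot pointing back toward the origin.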
spot = 60
exp = 30
ambient = [0.2, 0.2, 0.2, 1]
diffuse = [0.5, 0.5, 0.5, 1]
specular = [1.0, 1.0, 1.0, 1]
gl.glEnable(gl.GL_LIGHTING)
# corners = [[-pos, pos, pos, 1], [-pos, -pos, -pos, 1],
# [-pos, pos, -pos, 1], [-pos, -pos, pos, 1],
# [pos, pos, -pos, 1], [pos, -pos, pos, 1],
# [pos, pos, pos, 1], [pos, -pos, -pos, 1]]
corners = [[0, 0, pos, 1], [0, pos, 0, 1],
[pos, 0, 0, 1], [-pos, 0, 0, 1],
[0, -pos, 0, 1], [0, 0, -pos, 1]]
gl.glLightModeli(gl.GL_LIGHT_MODEL_TWO_SIDE, 0)
for iLight in range(len(corners)):
light = gl.GL_LIGHT0 + iLight
gl.glEnable(light)
gl.glLightfv(light, gl.GL_POSITION, corners[iLight])
gl.glLightfv(light, gl.GL_SPOT_DIRECTION,
np.array(corners[len(corners)-iLight-1])/pos)
gl.glLightfv(light, gl.GL_SPOT_CUTOFF, spot)
gl.glLightfv(light, gl.GL_SPOT_EXPONENT, exp)
gl.glLightfv(light, gl.GL_AMBIENT, ambient)
gl.glLightfv(light, gl.GL_DIFFUSE, diffuse)
gl.glLightfv(light, gl.GL_SPECULAR, specular)
# gl.glBegin(gl.GL_LINES)
# glVertex4f(*corners[iLight])
# glVertex4f(*corners[len(corners)-iLight-1])
# gl.glEnd()
def toggleHelp(self):
self.showHelp = not self.showHelp
self.glDraw()
def drawHelp(self):
hHeight = 300
hWidth = 500
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
gl.glViewport(0, self.viewPortGL[3]-hHeight, hWidth, hHeight)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(-1, 1, -1, 1, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
if self.invertColors:
gl.glColor4f(1.0, 1.0, 1.0, 0.9)
else:
gl.glColor4f(0.0, 0.0, 0.0, 0.9)
backScreen = [[1, 1], [1, -1],
[-1, -1], [-1, 1]]
for corner in backScreen:
gl.glVertex3f(corner[0], corner[1], 0)
gl.glEnd()
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.0)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.0)
gl.glLineWidth(3)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glBegin(gl.GL_QUADS)
backScreen = [[1, 1], [1, -1],
[-1, -1], [-1, 1]]
for corner in backScreen:
gl.glVertex3f(corner[0], corner[1], 0)
gl.glEnd()
helpList = [
'F1: Open/Close this help window',
'F3: Add/Remove Virtual Screen',
'F4: Dock/Undock xrtGlow if launched from xrtQook',
'F5/F6: Quick Save/Load Scene']
if hasattr(self, 'generator'):
helpList += ['F7: Start recording movie']
helpList += [
'LeftMouse: Rotate the Scene',
'SHIFT+LeftMouse: Translate perpendicular to the shortest view axis', # analysis:ignore
'ALT+LeftMouse: Translate parallel to the shortest view axis', # analysis:ignore
'CTRL+LeftMouse: Drag Virtual Screen',
'ALT+WheelMouse: Scale Virtual Screen',
'CTRL+SHIFT+LeftMouse: Translate the Beamline around Virtual Screen', # analysis:ignore
' (with Beamline along the longest view axis)', # analysis:ignore
'CTRL+ALT+LeftMouse: Translate the Beamline around Virtual Screen', # analysis:ignore
' (with Beamline along the shortest view axis)', # analysis:ignore
'CTRL+T: Toggle Virtual Screen orientation (vertical/normal to the beam)', # analysis:ignore
'WheelMouse: Zoom the Beamline',
'CTRL+WheelMouse: Zoom the Scene']
for iLine, text in enumerate(helpList):
self.drawText([-1. + 0.05,
1. - 2. * (iLine + 1) / float(len(helpList)+1), 0],
text, True)
gl.glFlush()
gl.glViewport(*pView)
def drawCone(self, z, r, nFacets, color):
phi = np.linspace(0, 2*np.pi, nFacets)
xp = r * np.cos(phi)
yp = r * np.sin(phi)
base = np.vstack((xp, yp, np.zeros_like(xp)))
coneVertices = np.hstack((np.array([0, 0, z]).reshape(3, 1),
base)).T
gridColor = np.zeros((len(coneVertices), 4))
gridColor[:, color] = 1
gridColor[:, 3] = 0.75
gridArray = gl.vbo.VBO(np.float32(coneVertices))
gridArray.bind()
gl.glVertexPointerf(gridArray)
gridColorArray = gl.vbo.VBO(np.float32(gridColor))
gridColorArray.bind()
gl.glColorPointerf(gridColorArray)
gl.glDrawArrays(gl.GL_TRIANGLE_FAN, 0, len(gridArray))
gridArray.unbind()
gridColorArray.unbind()
def drawLocalAxes(self, oe, is2ndXtal):
def drawArrow(color, arrowArray, yText='hkl'):
gridColor = np.zeros((len(arrowArray) - 1, 4))
gridColor[:, 3] = 0.75
if color == 4:
gridColor[:, 0] = 1
gridColor[:, 1] = 1
elif color == 5:
gridColor[:, 0] = 1
gridColor[:, 1] = 0.5
else:
gridColor[:, color] = 1
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gridArray = gl.vbo.VBO(np.float32(arrowArray[1:, :]))
gridArray.bind()
gl.glVertexPointerf(gridArray)
gridColorArray = gl.vbo.VBO(np.float32(gridColor))
gridColorArray.bind()
gl.glColorPointerf(gridColorArray)
gl.glDrawArrays(gl.GL_TRIANGLE_FAN, 0, len(gridArray))
gridArray.unbind()
gridColorArray.unbind()
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glBegin(gl.GL_LINES)
colorVec = [0, 0, 0, 0.75]
if color == 4:
colorVec[0] = 1
colorVec[1] = 1
elif color == 5:
colorVec[0] = 1
colorVec[1] = 0.5
else:
colorVec[color] = 1
gl.glColor4f(*colorVec)
gl.glVertex3f(*arrowArray[0, :])
gl.glVertex3f(*arrowArray[1, :])
gl.glEnd()
gl.glColor4f(*colorVec)
gl.glRasterPos3f(*arrowArray[1, :])
if color == 0:
axSymb = 'Z'
elif color == 1:
axSymb = 'Y'
elif color == 2:
axSymb = 'X'
elif color == 4:
axSymb = yText
else:
axSymb = ''
for symbol in " {}".format(axSymb):
gl.glutBitmapCharacter(self.fixedFont, ord(symbol))
gl.glDisable(gl.GL_LINE_SMOOTH)
z, r, nFacets = 0.25, 0.02, 20
phi = np.linspace(0, 2*np.pi, nFacets)
xp = np.insert(r *
|
np.cos(phi)
|
numpy.cos
|
import numpy as np
class Variable:
def __init__(self, data):
self.data = data
self.grad = None
self.creator = None
def set_creator(self, func):
self.creator = func
def backward(self):
f = self.creator # 1. Get the function that created this variable
if f is not None:
x = f.input # 2. Get the function's input
x.grad = f.backward(self.grad) # 3. Call the function's backward
x.backward() # 4. Recursively call backward on the preceding variable
class Function:
def __call__(self, input):
x = input.data
y = self.forward(x)
output = Variable(y)
output.set_creator(self) # Set parent(function)
self.input = input
self.output = output # Set output
return output
def forward(self, x):
raise NotImplementedError()
def backward(self, gy):
raise NotImplementedError()
class Square(Function):
def forward(self, x):
y = x ** 2
return y
def backward(self, gy):
x = self.input.data
gx = 2 * x * gy
return gx
class Exp(Function):
def forward(self, x):
y = np.exp(x)
return y
def backward(self, gy):
x = self.input.data
gx =
|
np.exp(x)
|
numpy.exp
|
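A minimal usage sketch for the Variable/Function classes above, assuming Exp.backward completes as gx = np.exp(x) * gy:
x = Variable(np.array(0.5))
a = Square()(x)
b = Exp()(a)
y = Square()(b)
y.grad = np.array(1.0)
y.backward() # walks the creator chain back to x
print(x.grad) # d/dx (exp(x**2))**2 at x = 0.5, i.e. 4*x*exp(2*x**2) ~ 3.30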
# Third experiment
# Copyright holders: <NAME>, <NAME>
# 2021
#
# Choose fineLevel=9, coarseLevel=3, and maxIt=8 for the experiment in the paper.
# This will take time since the algorithm below does not exploit the potential
# parallelization and computes the different approximations one after the other.
# To obtain a first qualitative impression, it is recommended to use the parameters
# fineLevel=7, coarseLevel=3, and maxIt=12.
#
# Note that the level of data oscillations is automatically set to fineLevel-2
import numpy as np
import scipy.sparse as sparse
import matplotlib.pyplot as plt
from gridlod import pglod, util, lod, interp, coef, fem, func
from gridlod.world import World, Patch
fineLevel = 9
coarseLevel = 3
maxIt = 8
def coeffi(x,y,fineExp):
epslevel = fineExp-2
epslevel2 = fineExp-4
alpha = 0.
beta = 1.
res = np.ones(x.shape)
np.random.seed(123)
ran = np.random.uniform(0,1,x.shape[0]//(2**(2*epslevel2)))
eps = 1e-15
indices = (x<1+eps)*(x>-eps)*(y<1+eps)*(y>-eps)*(np.floor(2**(epslevel)*x)+2**epslevel*(np.floor(2**(epslevel)*y)))
for i in range(indices.shape[0]):
res[i] = alpha + (beta-alpha)*(1+np.sin(ran[int(indices[i])//64]*1.29408694*(indices[i])))
return res
def drawCoefficient(N, a):
aCube = a.reshape(N, order='F')
aCube = np.ascontiguousarray(aCube.T)
plt.figure(2)
cmap = plt.cm.get_cmap('binary')
plt.imshow(aCube,
origin='lower',
interpolation='none',
cmap=cmap,
alpha=1)
plt.title('Coefficient - Ex 3')
#positions = (0, 127, 255)
#labels = ("0", "0.5", "1")
#plt.xticks(positions, labels)
#plt.yticks(positions, labels)
def helmholtz_nonlinear_adaptive(mapper,fineLvl,coarseLvl,maxit):
fineExp = fineLvl
NFine = np.array([2 ** fineLvl, 2 ** fineLvl])
NpFine = np.prod(NFine + 1)
N = 2**coarseLvl
tolList = [2.0,1.0,0.5,0.25,0.125,0.0625,0.]
ell = 2 # localization parameter
k = 15. # wavenumber
maxit_Fine = 200
xt = util.tCoordinates(NFine)
xp = util.pCoordinates(NFine)
# multiscale coefficients on the scale NFine-2
np.random.seed(444)
sizeK = np.size(xt[:, 0])
nFine = NFine[0]
# determine domain D_eps = supp(1-n) = supp(1-A) (all equal for the moment)
indicesIn = (xt[:, 0] > 0.15) & (xt[:, 0] < 0.85) & (xt[:, 1] > 0.15) & (xt[:, 1] < 0.85)
indicesInEps = (xt[:, 0] > 0.15) & (xt[:, 0] < 0.85) & (xt[:, 1] > 0.15) & (xt[:, 1] < 0.85)
# coefficients
aFine = np.ones(xt.shape[0])
cn = .05 # lower bound on n
Cn = 1. # upper bound on n
nEpsPro = coeffi(xt[:,0],xt[:,1],fineLvl)
k2Fine = k ** 2 * np.ones(xt.shape[0])
k2Fine[indicesIn] = k ** 2 * ((Cn - cn) * nEpsPro[indicesIn] + cn)
kFine = k * np.ones(xt.shape[0])
Ceps = 0.3 # upper bound on eps (lower bound is 0)
epsEpsPro = np.ones(sizeK)
epsFine = np.zeros(xt.shape[0])
epsFine[indicesInEps] = Ceps * epsEpsPro[indicesInEps] # 0 OR Ceps
plotC = np.ones(sizeK)
plotC[indicesIn] = nEpsPro[indicesIn]
drawCoefficient(NFine,plotC)
xC = xp[:, 0]
yC = xp[:, 1]
# define right-hand side and boundary condition
def funcF(x, y):
res = 100*np.ones(x.shape, dtype='complex128')
return res
f = funcF(xC, yC)
# reference solution
uSol = np.zeros(NpFine, dtype='complex128')
# boundary conditions
boundaryConditions = np.array([[1, 1], [1, 1]])
worldFine = World(NFine, np.array([1, 1]), boundaryConditions)
# fine matrices
BdFineFEM = fem.assemblePatchBoundaryMatrix(NFine, fem.localBoundaryMassMatrixGetter(NFine))
MFineFEM = fem.assemblePatchMatrix(NFine, fem.localMassMatrix(NFine))
KFineFEM = fem.assemblePatchMatrix(NFine, fem.localStiffnessMatrix(NFine)) # , aFine)
kBdFine = fem.assemblePatchBoundaryMatrix(NFine, fem.localBoundaryMassMatrixGetter(NFine), kFine)
KFine = fem.assemblePatchMatrix(NFine, fem.localStiffnessMatrix(NFine), aFine)
print('***computing reference solution***')
uOldFine = np.zeros(NpFine, dtype='complex128')
for it in np.arange(maxit_Fine):
print('-- itFine = %d' % it)
knonlinUpreFine = np.abs(uOldFine)
knonlinUFine = func.evaluateCQ1(NFine, knonlinUpreFine, xt)
k2FineUfine = np.copy(k2Fine)
k2FineUfine[indicesInEps] *= (1. + epsFine[indicesInEps] * knonlinUFine[indicesInEps] ** 2) # full coefficient, including nonlinearity
k2MFine = fem.assemblePatchMatrix(NFine, fem.localMassMatrix(NFine), k2FineUfine) # weighted mass matrix, updated in every iteration
nodesFine = np.arange(worldFine.NpFine)
fixFine = util.boundarypIndexMap(NFine, boundaryConditions == 0)
freeFine = np.setdiff1d(nodesFine, fixFine)
# right-hand side
fhQuad = MFineFEM * f
# fine system
lhsh = KFine[freeFine][:, freeFine] - k2MFine[freeFine][:, freeFine] + 1j * kBdFine[freeFine][:,freeFine]
rhsh = fhQuad[freeFine]
xFreeFine = sparse.linalg.spsolve(lhsh, rhsh)
xFullFine = np.zeros(worldFine.NpFine, dtype='complex128')
xFullFine[freeFine] = xFreeFine
uOldFine = np.copy(xFullFine)
# residual - used as stopping criterion
knonlinU = np.abs(uOldFine)
knonlinUFineIt = func.evaluateCQ1(NFine, knonlinU, xt)
k2FineUfineIt = np.copy(k2Fine)
k2FineUfineIt[indicesInEps] *= (1. + epsFine[indicesInEps] * knonlinUFineIt[indicesInEps] ** 2) # update full coefficient, including nonlinearity
k2MFineIt = fem.assemblePatchMatrix(NFine, fem.localMassMatrix(NFine), k2FineUfineIt)
Ares = KFine - k2MFineIt + 1j * kBdFine
residual = np.linalg.norm(Ares * xFullFine - fhQuad)/np.linalg.norm(Ares * xFullFine)
print('---- residual = %.4e' % residual)
if residual < 1e-12:
break # stopping criterion
uSol = xFullFine # final fine reference solution
print('***reference solution computed***\n')
counter = 0 # for figures
print('***computing multiscale approximations***')
relErrEnergy = np.zeros([len(tolList),maxit])
for tol in tolList:
counter += 1
print('H = %.4e, tol = %.4e' % (1./N,tol))
NWorldCoarse = np.array([N, N])
NCoarseElement = NFine // NWorldCoarse
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
NpCoarse = np.prod(NWorldCoarse + 1)
uOldUps = np.zeros(NpFine, dtype='complex128')
for it in np.arange(maxit):
print('-- it = %d:' % it)
knonlinUpre = np.abs(uOldUps)
knonlinU = func.evaluateCQ1(NFine, knonlinUpre, xt)
k2FineU = np.copy(k2Fine)
k2FineU[indicesInEps] *= (1. + epsFine[indicesInEps] * knonlinU[indicesInEps] ** 2)
print('---- starting computation of correctors')
def computeLocalContribution(TInd):
patch = Patch(world, ell, TInd)
IPatch = lambda: interp.L2ProjectionPatchMatrix(patch, boundaryConditions)
aPatch = lambda: coef.localizeCoefficient(patch, aFine)
kPatch = lambda: coef.localizeCoefficient(patch, kFine)
k2Patch = lambda: coef.localizeCoefficient(patch, k2FineU)
correctorsList = lod.computeBasisCorrectors_helmholtz(patch, IPatch, aPatch, kPatch, k2Patch) # adapted for Helmholtz setting
csi = lod.computeBasisCoarseQuantities_helmholtz(patch, correctorsList, aPatch, kPatch, k2Patch) # adapted for Helmholtz setting
return patch, correctorsList, csi.Kmsij, csi.Mmsij, csi.Bdmsij, csi.muTPrime
def computeIndicators(TInd):
k2FineUPatch = lambda: coef.localizeCoefficient(patchT[TInd], k2FineU)
k2FineUOldPatch = lambda: coef.localizeCoefficient(patchT[TInd], k2FineUOld)
E_vh = lod.computeErrorIndicatorCoarse_helmholtz(patchT[TInd],muTPrime[TInd],k2FineUOldPatch,k2FineUPatch)
return E_vh
def UpdateCorrectors(TInd):
patch = Patch(world, ell, TInd)
IPatch = lambda: interp.L2ProjectionPatchMatrix(patch, boundaryConditions)
aPatch = lambda: coef.localizeCoefficient(patch, aFine)
kPatch = lambda: coef.localizeCoefficient(patch, kFine)
k2Patch = lambda: coef.localizeCoefficient(patch, k2FineU)
correctorsList = lod.computeBasisCorrectors_helmholtz(patch, IPatch, aPatch, kPatch, k2Patch)
csi = lod.computeBasisCoarseQuantities_helmholtz(patch, correctorsList, aPatch, kPatch, k2Patch) # adapted for Helmholtz setting
return patch, correctorsList, csi.Kmsij, csi.Mmsij, csi.Bdmsij, csi.muTPrime
def UpdateElements(tol, E, Kmsij_old, Mmsij_old, Bdmsij_old, correctors_old, mu_old):
print('---- apply tolerance')
Elements_to_be_updated = []
for (i, eps) in E.items():
if eps > tol*k**2 :
Elements_to_be_updated.append(i)
if len(E) > 0:
print('---- percentage of non-zero element correctors to be updated: %.4f' % (100 * np.size(Elements_to_be_updated) / len(E)), flush=True)
print('---- total percentage of element correctors to be updated: %.4f' % (100 * np.size(Elements_to_be_updated) / len(mu_old)), flush=True)
print('---- update local contributions')
KmsijT_list = list(np.copy(Kmsij_old))
MmsijT_list = list(np.copy(Mmsij_old))
BdmsijT_list = list(np.copy(Bdmsij_old))
muT_list = np.copy(mu_old)
for T in np.setdiff1d(range(world.NtCoarse), Elements_to_be_updated):
patch = Patch(world, ell, T)
aPatch = lambda: coef.localizeCoefficient(patch, aFine)
kPatch = lambda: coef.localizeCoefficient(patch, kFine)
k2Patch = lambda: coef.localizeCoefficient(patch, k2FineU)
csi = lod.computeBasisCoarseQuantities_helmholtz(patch, correctors_old[T], aPatch, kPatch, k2Patch)
KmsijT_list[T] = csi.Kmsij
MmsijT_list[T] = csi.Mmsij
BdmsijT_list[T] = csi.Bdmsij
muT_list[T] = csi.muTPrime
if np.size(Elements_to_be_updated) != 0:
#print('---- update correctors')
patchT_irrelevant, correctorsListTNew, KmsijTNew, MmsijTNew, BdmsijTNew, muTPrimeNew = zip(*mapper(UpdateCorrectors,Elements_to_be_updated))
#print('---- update correctorsList')
correctorsListT_list = list(np.copy(correctors_old))
i = 0
for T in Elements_to_be_updated:
KmsijT_list[T] = KmsijTNew[i]
correctorsListT_list[T] = correctorsListTNew[i]
MmsijT_list[T] = MmsijTNew[i]
BdmsijT_list[T] = BdmsijTNew[i]
muT_list[T] = muTPrimeNew[i]
i += 1
KmsijT = tuple(KmsijT_list)
correctorsListT = tuple(correctorsListT_list)
MmsijT = tuple(MmsijT_list)
BdmsijT = tuple(BdmsijT_list)
muTPrime = tuple(muT_list)
return correctorsListT, KmsijT, MmsijT, BdmsijT, muTPrime
else:
KmsijT = tuple(KmsijT_list)
MmsijT = tuple(MmsijT_list)
BdmsijT = tuple(BdmsijT_list)
muTPrime = tuple(muT_list)
return correctors_old, KmsijT, MmsijT, BdmsijT, muTPrime
if it == 0:
patchT, correctorsListT, KmsijT, MmsijT, BdmsijT, muTPrime = zip(
*mapper(computeLocalContribution, range(world.NtCoarse)))
else:
E_vh = list(mapper(computeIndicators, range(world.NtCoarse)))
print('---- maximal value error estimator for basis correctors {}'.format(np.max(E_vh)))
E = {i: E_vh[i] for i in range(np.size(E_vh)) if E_vh[i] > 0 }
# loop over elements with possible recomputation of correctors
correctorsListT, KmsijT, MmsijT, BdmsijT, muTPrime = UpdateElements(tol, E, KmsijT, MmsijT, BdmsijT, correctorsListT, muTPrime) # tol scaled by maximal error indicator
print('---- finished computation of correctors')
KLOD = pglod.assembleMsStiffnessMatrix(world, patchT, KmsijT) # ms stiffness matrix
k2MLOD = pglod.assembleMsStiffnessMatrix(world, patchT, MmsijT) # ms mass matrix
kBdLOD = pglod.assembleMsStiffnessMatrix(world, patchT, BdmsijT) # ms boundary matrix
MFEM = fem.assemblePatchMatrix(NWorldCoarse, world.MLocCoarse)
BdFEM = fem.assemblePatchBoundaryMatrix(NWorldCoarse, fem.localBoundaryMassMatrixGetter(NWorldCoarse))
print('---- coarse matrices assembled')
nodes = np.arange(world.NpCoarse)
fix = util.boundarypIndexMap(NWorldCoarse, boundaryConditions == 0)
free = np.setdiff1d(nodes, fix)
assert np.array_equal(nodes, free)  # no Dirichlet nodes, so all coarse nodes are free
# compute global interpolation matrix
patchGlobal = Patch(world, NFine[0] + 2, 0)
IH = interp.L2ProjectionPatchMatrix(patchGlobal, boundaryConditions)
assert (IH.shape[0] == NpCoarse)
basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
fHQuad = basis.T * MFineFEM * f
print('---- solving coarse system')
# coarse system
lhsH = KLOD[free][:, free] - k2MLOD[free][:, free] + 1j * kBdLOD[free][:,free]
rhsH = fHQuad[free]
xFree = sparse.linalg.spsolve(lhsH, rhsH)
basisCorrectors = pglod.assembleBasisCorrectors(world, patchT, correctorsListT)
modifiedBasis = basis - basisCorrectors
xFull = np.zeros(world.NpCoarse, dtype='complex128')
xFull[free] = xFree
uLodCoarse = basis * xFull
uLodFine = modifiedBasis * xFull
uOldUps = np.copy(uLodFine)
k2FineUOld = np.copy(k2FineU)
Err = np.sqrt(np.dot((uSol - uLodFine).conj(), KFineFEM * (uSol - uLodFine)) + k**2*np.dot((uSol - uLodFine).conj(), MFineFEM * (uSol - uLodFine)))
ErrEnergy = Err / np.sqrt(np.dot((uSol).conj(), KFineFEM * (uSol)) + k**2*np.dot((uSol).conj(), MFineFEM * (uSol)))
print('---- ',np.abs(ErrEnergy), '\n***********************************************')
# save errors in arrays
relErrEnergy[counter-1,it] = ErrEnergy
print('\n')
its =
|
np.arange(1,maxit+1)
|
numpy.arange
|
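A hypothetical driver call for the Helmholtz experiment above; the mapper argument is assumed here to be Python's built-in map for a serial run (the original script may use a parallel mapper), and the parameters follow the quick qualitative setting recommended in the header comment:
helmholtz_nonlinear_adaptive(map, fineLvl=7, coarseLvl=3, maxit=12)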
def ie_nn(dvm_otput_file_path, a0 = 3.545):
'''
Interatomic energies between each atom and its nearest-neighbor atoms.
This module has been tested with the source_23oct05 version of the DVM program.
dvm_otput_file_path: the *.otput file of the DVM output
'''
args_dict = locals()
import os
import numpy as np
from .. import funcs
from .. import convert
from ..vasp import vasp_read
from ..vasp import vasp_analyze
from . import dvm_read
from .. import default_params
defaults_dict = default_params.default_params()
logfile = defaults_dict['logfile']
output_dir = os.path.join(os.getcwd(), defaults_dict['output_dir_name'])
funcs.mkdir(output_dir)
dvm_otput_file_path = os.path.abspath(dvm_otput_file_path)
# designate the working directory
workdir, dvm_otput_file = funcs.file_path_name(dvm_otput_file_path)
dvm_incar_file_path = os.path.join(workdir, dvm_otput_file[0:-6] + '.incar')
convert.dvmincar2poscar(dvm_incar_file_path)
dvmincar2poscar_poscar_file_path = os.path.join(workdir, dvm_otput_file[0:-6] + '_dvmincar2poscar.vasp')
poscar_dict = vasp_read.read_poscar(dvmincar2poscar_poscar_file_path)
atom_name_list = poscar_dict['atomname_list']
atom_indx_arr = poscar_dict['atom_indx_arr']
n_atoms = poscar_dict['n_atoms']
added_atom_data_arr = poscar_dict['added_atom_data']
dvm_otput_dict = dvm_read.read_otput(dvm_otput_file_path)
if dvm_otput_dict['spin'] == 0:
ie_au_arr = np.array([None] * n_atoms * n_atoms)
ie_au_arr.shape = n_atoms, n_atoms
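# Map the DVM pair interaction energies (in atomic units) onto an
# (n_atoms x n_atoms) matrix ordered as in the POSCAR file; added_atom_data_arr[:, 3]
# is taken to hold each atom's DVM index (1-based).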
for i_atom in range(0, n_atoms):
for j_atom in range(0, n_atoms):
ie_au_arr[i_atom, j_atom] = dvm_otput_dict['ie_arr'][int(added_atom_data_arr[i_atom, 3]) - 1, int(added_atom_data_arr[j_atom, 3]) - 1]
if dvm_otput_dict['spin'] == 1:
ie_au_up_arr = np.array([None] * n_atoms * n_atoms)
ie_au_up_arr.shape = n_atoms, n_atoms
ie_au_dw_arr = np.array([None] * n_atoms * n_atoms)
ie_au_dw_arr.shape = n_atoms, n_atoms
for i_atom in range(0, n_atoms):
for j_atom in range(0, n_atoms):
ie_au_up_arr[i_atom, j_atom] = dvm_otput_dict['ie_up_arr'][int(added_atom_data_arr[i_atom, 3]) - 1, int(added_atom_data_arr[j_atom, 3]) - 1]
ie_au_dw_arr[i_atom, j_atom] = dvm_otput_dict['ie_dw_arr'][int(added_atom_data_arr[i_atom, 3]) - 1, int(added_atom_data_arr[j_atom, 3]) - 1]
#########################
# IE for 1NN results
#########################
# related files
ie_au_nn_file_path = os.path.join(workdir, 'ie_nn_au.txt')
ie_ev_nn_file_path = os.path.join(workdir, 'ie_nn_ev.txt')
# call nearest neighbor analysis
vasp_analyze.nn_map(
poscar_file_path = dvmincar2poscar_poscar_file_path,
a0 = a0,
n_shell = 1
)
nn_atomname_list_file_path = os.path.join(workdir, 'nn_atomname_list_without_mirror_label_1NN.txt')
if funcs.file_status(nn_atomname_list_file_path) != 1:
quit()
# prepare the data
first_nn_ie_au_arr = None
first_nn_ie_au_up_arr = None
first_nn_ie_au_dw_arr = None
with open(nn_atomname_list_file_path, 'r') as f_1nn_atomname:
lines = f_1nn_atomname.readlines()
# initializetion
max_num_first_nn = max([len(funcs.split_line(x, ',')[1:]) for x in lines])
first_nn_atom_name_arr =
|
np.array([None] * n_atoms * max_num_first_nn)
|
numpy.array
|
import numpy
import math
import matplotlib.pyplot as plt
from uncertaincolors import to_rgb, all_to_rgb
x = numpy.linspace(-1,1, 50)
y = numpy.linspace(-1,1, 50)
X, Y = numpy.meshgrid(x, y)
value = numpy.abs(X) #**0.5 * X/numpy.abs(X) #+ 0.25
error = (X**2 + Y**2)**0.5
value = (1-Y)/2.
error = (X+1)/2.
# define b and r
b = value # * 0.114 * 3
r = 1 + 0 * value * 0.299 * 3
# we know what grayscale values we want
luma = error**0.5
# we also know how luma is defined:
# luma = (r**2 * 0.299 + g**2 * 0.587 + b**2 * 0.114)**0.5
# this defines one color given the other two.
# solve for green:
#g = ((luma**2 - b**2 * 0.114 - r**2 * 0.299) / 0.587)**0.5
g = (luma - b*0.114 - r*0.299) / 0.587
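# The active line solves the linear luma definition 0.299*r + 0.587*g + 0.114*b
# for g; the commented line above would instead solve the quadratic form.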
total =
|
numpy.dstack((r, g, b))
|
numpy.dstack
|
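A plausible way to display the stacked RGB array from the snippet above (assumption: channels are clipped to [0, 1] before plotting; the original script may post-process the colors differently):
rgb = numpy.clip(total, 0.0, 1.0) # keep all channels in the displayable range
plt.imshow(rgb, origin='lower', extent=(-1, 1, -1, 1))
plt.show()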
"""test base, using np"""
import numpy as np
from sana import base
TESTN = [2, 3, 5, 10, 45, 990]
TESTNPAIR = [1, 3, 10, 45, 990, 489555]
def test_npairs2n():
assert np.all(base.npairs2n(np.array(TESTNPAIR)).astype(int) == np.array(TESTN))
def test_n2npairs():
assert np.all(base.n2npairs(
|
np.array(TESTN)
|
numpy.array
|
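For context, the identity these tests encode (a sketch of the assumed relation, not the sana.base source): n items form n*(n-1)/2 unordered pairs, and the inverse map is n = (1 + sqrt(1 + 8*npairs)) / 2.
import numpy as np
n = np.array([2, 3, 5, 10, 45, 990])
npairs = n * (n - 1) // 2
assert np.all(npairs == np.array([1, 3, 10, 45, 990, 489555]))
assert np.all(((1 + np.sqrt(1 + 8 * npairs)) / 2).astype(int) == n)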
"""Useful functions for plotting images -- both from fits cutouts
and the preprocessed ones in the IMG binary file.
"""
import os
import numpy as np
from itertools import product
from astropy import units as u
from astropy.table import Table
from astropy.wcs import WCS
from astropy.io import fits
from astropy.visualization import ImageNormalize, LogStretch, AsinhStretch
import pandas as pd
import matplotlib.pyplot as plt
import pyink as pu
# from astropy.coordinates import SkyCoord, search_around_sky, match_coordinates_sky
# import seaborn as sns
# from astroquery.vizier import Vizier
# import vlass_data_loader as vdl
def load_fits(filename, ext=0):
hdulist = fits.open(filename)
d = hdulist[ext]
return d
def load_radio_fits(filename, ext=0):
hdu = load_fits(filename, ext=ext)
wcs = WCS(hdu.header).celestial
hdu.data = np.squeeze(hdu.data)
hdu.header = wcs.to_header()
return hdu
def scale_img(radio_file):
"""Scale a radio image for convenient plotting"""
radio_data = load_radio_fits(radio_file).data
vmin =
|
np.percentile(radio_data, 0.1)
|
numpy.percentile
|
# -*- coding: utf-8 -*-
import os
os.chdir('D:/Python/TAR/system')
import numpy as np
from load_data import load_data, get_nn_predict_values
#from nn_values import get_nn_predict_values
from sklearn.metrics import f1_score, precision_score, recall_score
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
from keras import regularizers
def concatenate(m1, m2): # expects train/test_S as m1 and freq_train/test type as m2
matrix = []
for i in range(m1.shape[0]): # rows
row = []
for j in range(m1.shape[1]):
row.append(m1[i][j])
row.append(m2[0][i])
matrix.append(np.array(row))
return np.array(matrix)
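# Example (a sketch with hypothetical inputs): concatenate(np.array([[1, 2], [3, 4]]),
# np.array([[9, 8]])) returns [[1, 2, 9], [3, 4, 8]] -- the first row of m2 is
# appended to m1 as one extra feature column per row.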
def concatenate2(m1, m2):
matrix = []
for i in range(m1.shape[0]):
row = []
for j in range(m1.shape[1]):
row.append(m1[i][j])
for j in range(m2.shape[1]):
row.append(m2[i][j])
matrix.append(np.array(row))
return np.array(matrix)
def prep_data(train_S, test_S, freq_train, freq_test, chi2_train, chi2_test, tfidf_train, tfidf_test):
nn1_train = concatenate(train_S, freq_train)
nn2_train = concatenate(train_S, chi2_train)
nn3_train = concatenate(train_S, tfidf_train)
nn1_test = concatenate(test_S, freq_test)
nn2_test = concatenate(test_S, chi2_test)
nn3_test = concatenate(test_S, tfidf_test)
return nn1_train, nn2_train, nn3_train, nn1_test, nn2_test, nn3_test
def shuffle_data(X, y):
rng_state = np.random.get_state()
np.random.shuffle(X)
np.random.set_state(rng_state)
np.random.shuffle(y)
return X, y
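# Note on shuffle_data above: saving and restoring NumPy's global RNG state makes the
# two in-place shuffles apply the same permutation, so X and y stay aligned.
# A self-contained alternative (a sketch) would be:
#   perm = np.random.permutation(len(X)); X, y = X[perm], y[perm]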
def run_model(filename, train_X, train_y, test_X, test_y):
train_X, train_y = shuffle_data(train_X, train_y)
from sklearn import preprocessing
#train_X = preprocessing.scale(train_X)
#test_X = preprocessing.scale(test_X)
scaler = preprocessing.StandardScaler().fit(train_X)
    train_X = scaler.transform(train_X)
    test_X = scaler.transform(test_X)
from sklearn import svm
model = svm.SVC()
y = np.argmax(train_y, axis=1)
model.fit(train_X, y)
p = model.predict(test_X)
print('svm f1 =', f1_score(np.argmax(test_y, axis=1), p)) # for comparison purposes
model = Sequential()
model.add(Dense(70, input_dim=204, activation='relu', kernel_regularizer=regularizers.l2(0.00))) # change input dim as necessary, it is kept this way here to showcase the dimensionality of best presented model in the paper
model.add(Dense(60, activation='relu', kernel_regularizer=regularizers.l2(0.00)))
model.add(Dense(50, activation='relu', kernel_regularizer=regularizers.l2(0.00)))
model.add(Dense(40, activation='relu', kernel_regularizer=regularizers.l2(0.00)))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x=train_X, y=train_y, batch_size=32, epochs=15, shuffle=True)
os.chdir('D:/Python/TAR/system/models')
model.save(filename + '.h5') # manually move to folder based on neural network type produced
#
p = model.predict(test_X)
print('f1', filename, ':', f1_score(np.argmax(test_y, axis=1), np.argmax(p, axis=1)))
def accuracy_score(y_true, y_pred):
correct_count = 0
for i in range(len(y_true)):
if y_true[i] == y_pred[i]:
correct_count += 1
accuracy = float(correct_count) / float(len(y_true))
return accuracy
def get_p_value(y_true, p1, p2, n_r_others=10000):
    # init seed using local time in milliseconds
import datetime
np.random.seed(datetime.datetime.now().microsecond)
'''
calculate R_real and then an array of R_1, R_2, ..., R_n
calculate a p value based on a placement of R_real in the sorted R_list
'''
R_real = get_R(y_true, p1, p2)
R = get_R(y_true, p1, p2, real=False, repeats=n_r_others)
print('R_real =', R_real)
position = n_r_others # init starting position of R_real in list to last place
# find the real position of R_real in list of R
for i in range(len(R) - 1):
if R_real < R[0]:
position = 0
break
if R_real >= R[i] and R_real <= R[i + 1]:
position = i
break
print('position =', position)
return abs(((position / n_r_others) * 2) - 1)
def get_R(y_true, p1, p2, real=True, repeats=10000):
    if real == True: # get initial and real R value
R = f1_score(y_true, p1) - f1_score(y_true, p2)
return R
# else, R_1, R_2, ..., R_n values by shuffling predictions
# n is 10 000 by default
# return sorted np.ndarray object
R = []
for i in range(repeats):
change = np.random.randint(0, 2, size=len(p1))
t_p1 = []
t_p2 = []
# exchange places in predictions with a chance of 50%
for j in range(len(p1)):
if change[j]:
t_p1.append(p2[j])
t_p2.append(p1[j])
else:
t_p1.append(p1[j])
t_p2.append(p2[j])
# calculate R score and append it to the list
R.append(f1_score(y_true, t_p1) - f1_score(y_true, t_p2))
# track percentage done
#if i % 100 == 0:
# print('{}%'.format(i//100))
#print('100% - done!')
R = np.array(R)
R = np.sort(R)
print(R)
return R
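# Illustrative use of the permutation test above (hypothetical labels and predictions,
# not taken from the experiments): R_real is ranked against the sorted null
# distribution returned by get_R(..., real=False).
#   y_true = [0, 1, 1, 0, 1]; p_a = [0, 1, 1, 0, 0]; p_b = [0, 0, 1, 0, 1]
#   p = get_p_value(y_true, p_a, p_b, n_r_others=1000)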
def print_p_values():
'''
calculates the p_value of various network combinations
    prints everything to the screen for demonstration purposes
'''
print()
print()
# get y_true scores
train_X, train_S, train_y, test_X, test_S, test_y = load_data()
y_true = np.argmax(test_y, axis=1)
# get ensamble predictions
import pickle
os.chdir('D:/Python/TAR/system')
    # predictions_ensamble is a numpy array of loaded ensemble predictions on
    # the unshuffled test set, like all other predictions written below
with open('predicted_by_ensamble.pkl', 'rb') as pickle_file:
predictions_ensamble = pickle.load(pickle_file)
#print(predictions_ensamble)
# CHAPTER I
# special NN vs bare NN improvement
#
# 1) get predictions on the test set
# 1.1) get nn_bare predictions
os.chdir('D:/Python/TAR/system/models/bare')
nn_bare = load_model('model_chi2.h5')
train_X, train_S, train_y, test_X, test_S, test_y = load_data('chi2', get_all_special_data=False)
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = get_nn_predict_values()
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = prep_data(
train_S, test_S, freq_train, freq_test, chi2_train, chi2_test, tfidf_train, tfidf_test
)
predictions_bare = np.argmax(nn_bare.predict(test_X), axis=1)
# 1.2) get nn_augmented predictions
os.chdir('D:/Python/TAR/system/models/special')
nn_augmented = load_model('special_chi2.h5')
train_X, train_S, train_y, test_X, test_S, test_y = load_data('chi2', get_all_special_data=False)
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = get_nn_predict_values()
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = prep_data(
train_S, test_S, freq_train, freq_test, chi2_train, chi2_test, tfidf_train, tfidf_test
)
predictions_augmented = np.argmax(nn_augmented.predict(concatenate2(test_X, test_S)), axis=1)
p_value = get_p_value(y_true[-54:], predictions_augmented[-54:], predictions_bare[-54:], n_r_others=10000)
#p_value = get_p_value(y_true[:-54], predictions_augmented[:-54], predictions_bare[:-54], n_r_others=10000)
#p_value = get_p_value(y_true, predictions_augmented, predictions_bare, n_r_others=10000)
#print(R)
print('-------------')
print('p_value =', p_value)
print('-------------')
p_value = get_p_value(y_true[-54:], predictions_augmented[-54:], predictions_ensamble[-54:], n_r_others=10000)
#p_value = get_p_value(y_true[:-54], predictions_augmented[:-54], predictions_ensamble[:-54], n_r_others=10000)
#p_value = get_p_value(y_true, predictions_augmented, predictions_ensamble, n_r_others=10000)
print('-------------')
    print('p_value (ensemble)', p_value)
print('-------------')
print()
print()
from scipy import stats
print('t-test', stats.ttest_ind(predictions_augmented[-54:], predictions_bare[-54:]))
#print('t-test', stats.ttest_ind(predictions_augmented[:-54], predictions_bare[:-54]))
def final_testing():
print()
# load bare neural networks
os.chdir('D:/Python/TAR/system/models/bare')
freq_bare = load_model('model_freq.h5')
chi2_bare = load_model('model_chi2.h5')
tfidf_bare = load_model('model_tfidf.h5')
# load augmented neural networks
os.chdir('D:/Python/TAR/system/models/special')
freq = load_model('special_freq.h5')
chi2 = load_model('special_chi2.h5')
tfidf = load_model('special_tfidf.h5')
# -- EXTENSIVE MODEL TESTING --
# CHAPTER I
# Bare neural networks, train and test accuracy and f1 scores
print('Bare neural networks, train and test accuracy and f1 scores')
# 1) freq
train_X, train_S, train_y, test_X, test_S, test_y = load_data('freq', get_all_special_data=False)
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = get_nn_predict_values()
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = prep_data(
train_S, test_S, freq_train, freq_test, chi2_train, chi2_test, tfidf_train, tfidf_test
)
p_train = freq_bare.predict(train_X)
p_test = freq_bare.predict(test_X)
print('freq')
print('\tacc: train {} test {}'.format(accuracy_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
accuracy_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\tf1: train {} test {}'.format(f1_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
f1_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\tprecision: train {} test {}'.format(precision_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
precision_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\trecall: train {} test {}'.format(recall_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
recall_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
# 2) chi2
train_X, train_S, train_y, test_X, test_S, test_y = load_data('chi2', get_all_special_data=False)
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = get_nn_predict_values()
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = prep_data(
train_S, test_S, freq_train, freq_test, chi2_train, chi2_test, tfidf_train, tfidf_test
)
p_train = chi2_bare.predict(train_X)
p_test = chi2_bare.predict(test_X)
print('chi2')
print('\tacc: train {} test {}'.format(accuracy_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
accuracy_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\tf1: train {} test {}'.format(f1_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
f1_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\tprecision: train {} test {}'.format(precision_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
precision_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\trecall: train {} test {}'.format(recall_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
recall_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
# 3) tfidf
train_X, train_S, train_y, test_X, test_S, test_y = load_data('chi2', get_all_special_data=False)
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = get_nn_predict_values()
freq_train, chi2_train, tfidf_train, freq_test, chi2_test, tfidf_test = prep_data(
train_S, test_S, freq_train, freq_test, chi2_train, chi2_test, tfidf_train, tfidf_test
)
p_train = tfidf_bare.predict(train_X)
p_test = tfidf_bare.predict(test_X)
print('tfidf')
print('\tacc: train {} test {}'.format(accuracy_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
accuracy_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\tf1: train {} test {}'.format(f1_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
f1_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\tprecision: train {} test {}'.format(precision_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
precision_score(np.argmax(test_y, axis=1), np.argmax(p_test, axis=1))))
print('\trecall: train {} test {}'.format(recall_score(np.argmax(train_y, axis=1), np.argmax(p_train, axis=1)),
recall_score(np.argmax(test_y, axis=1),
|
np.argmax(p_test, axis=1)
|
numpy.argmax
|
# ENPM661 Project 3 Phase 2
# <NAME> & <NAME>
import math
import time
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
w = int(10.2)
h = 10
w_radius=int(0.1)
sep_dis=1
def convert_RPM_mps (RPM):
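    # 1 RPM corresponds to 2*pi radians per 60 s, so V below is the wheel's angular
    # velocity in rad/s; multiplying by the wheel radius would give linear speed in m/s.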
V=(RPM*2*np.pi)/60
return V
####IF WE HAVE TIME WE SHOULD CHANGE THIS INTO A GUI AND GET ALL INPUTS ONE TIME
##### Input functions #####
def get_parameters():
## print("Please enter the rigid robot parameters.")
## ans=(input("Enter the radius (default=3): "))
## if ans=='': radius=3
## else: radius=int(ans)
ans=(input("Enter the obstacle clearance (default=2): "))
if ans=='': clearance=0.2
else: clearance=int(ans)
## ans=(input("Enter the robot step size (1-10, default=1): "))
## if ans=='' or int(ans)<1: step=1
## elif int(ans)>10: step=10
## else: step=int(ans)
ans=(input("Enter the left wheel speed in RPM (default=5): "))
if ans=='': RPM1=5
else: RPM1=int(ans)
ans=(input("Enter the right wheel speed in RPM (default=5): "))
if ans=='': RPM2=5
else: RPM2=int(ans)
## return radius, clearance, step, RPM1, RPM2
return clearance, RPM1, RPM2
def get_start():
print("\nPlease enter the initial coordinates of the robot.")
ans=(input("Enter the x coordinate (default=50): "))
if ans=='': x=7
else: x=int(ans)
ans=(input("Enter the y coordinate (default=30): "))
if ans=='': y=5
else: y=int(ans)
ans=(input("Enter the starting theta (30-deg increments, default=60): "))
if ans=='': theta_s=45
else: theta_s=int(ans)
return [x, y], theta_s
def get_goal():
print("\nPlease enter the coordinates of the robot's goal.")
ans=(input("Enter the target x coordinate (default=7): "))
if ans=='': x=7
else: x=int(ans)
ans=(input("Enter the target y coordinate (default=5): "))
    if ans=='': y=5
    else: y=int(ans)
return [x, y]
def drotmatrix(point,angle):
R=np.array([[np.cos(np.deg2rad(angle)),-(np.sin(np.deg2rad(angle)))],[np.sin(np.deg2rad(angle)),np.cos(np.deg2rad(angle))]])
b=
|
np.array(point)
|
numpy.array
|
import numpy as np
a0 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
a1 = a0
a2 = a0
a3 = a0
a4 = a0
a5 = a0
a6 = a0
a7 = a0
pieces=[a0]
#pieces.append(a1)
#pieces.append(a2)
#pieces.append(a3)
#pieces.append(a4)
#pieces.append(a5)
#pieces.append(a6)
#pieces.append(a7)
b0 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
b1 = b0
b2 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,0,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
b3 = b2
b4 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
b5 = b4
b6 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,0,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
b7 = b6
pieces.append(b0)
#pieces.append(b1)
pieces.append(b2)
#pieces.append(b3)
pieces.append(b4)
#pieces.append(b5)
pieces.append(b6)
#pieces.append(b7)
c0 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
c1 = c0
c2 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,9,8,0],[0,9,1,1,1,9,0],[0,8,9,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
c3 = c2
c4 = c0
c5 = c0
c6 = c2
c7 = c2
pieces.append(c0)
#pieces.append(c1)
pieces.append(c2)
#pieces.append(c3)
#pieces.append(c4)
#pieces.append(c5)
#pieces.append(c6)
#pieces.append(c7)
d0 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
d1 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,9,1,1,9,0,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
d2 = d1
d3 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,0,0],[0,8,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
d4 = d3
d5 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,0,9,1,1,9,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
d6 = d5
d7 = d0
#pieces.append(d0)
pieces.append(d1)
#pieces.append(d2)
pieces.append(d3)
#pieces.append(d4)
pieces.append(d5)
#pieces.append(d6)
pieces.append(d7)
e0 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0]])
e1 = e0
e2 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,9,9,8],[0,9,1,1,1,1,9],[0,8,9,9,9,9,8],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
e3 = e2
e4 = np.array([[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
e5 = e4
e6 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[8,9,9,9,9,8,0],[9,1,1,1,1,9,0],[8,9,9,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
e7 = e6
pieces.append(e0)
#pieces.append(e1)
pieces.append(e2)
#pieces.append(e3)
pieces.append(e4)
#pieces.append(e5)
pieces.append(e6)
#pieces.append(e7)
f0 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,8,9,1,9,0,0],[0,9,1,1,9,0,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0]])
f1 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0]])
f2 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,9,8,0],[0,9,1,1,1,9,0],[0,8,9,9,1,9,0],[0,0,0,8,9,8,0],[0,0,0,0,0,0,0]])
f3 = np.array([[0,0,0,0,0,0,0],[0,0,0,8,9,8,0],[0,8,9,9,1,9,0],[0,9,1,1,1,9,0],[0,8,9,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
f4 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,0,9,1,1,9,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
f5 = np.array([[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
f6 = np.array([[0,0,0,0,0,0,0],[0,8,9,8,0,0,0],[0,9,1,9,9,8,0],[0,9,1,1,1,9,0],[0,8,9,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
f7 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,9,8,0],[0,9,1,1,1,9,0],[0,9,1,9,9,8,0],[0,8,9,8,0,0,0],[0,0,0,0,0,0,0]])
pieces.append(f0)
pieces.append(f1)
pieces.append(f2)
pieces.append(f3)
pieces.append(f4)
pieces.append(f5)
pieces.append(f6)
pieces.append(f7)
g0 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,8,0],[0,0,9,1,1,9,0],[0,0,9,1,9,8,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
g1 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,8,9,1,9,0,0],[0,9,1,1,9,0,0],[0,8,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
g2 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,8,9,1,9,8,0],[0,9,1,1,1,9,0],[0,8,9,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
g3 = g0
g4 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,9,8,0],[0,9,1,1,1,9,0],[0,8,9,1,9,8,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
g5 = g2
g6 = g1
g7 = g4
pieces.append(g0)
pieces.append(g1)
pieces.append(g2)
pieces.append(g3)
#pieces.append(g4)
#pieces.append(g5)
#pieces.append(g6)
#pieces.append(g7)
h0 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,0,9,1,1,9,0],[0,0,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0]])
h1 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,0,0],[0,9,1,1,9,0,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0]])
h2 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,0,9,1,1,9,0],[0,0,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
h3 = h0
h4 = np.array([[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,0,0],[0,9,1,1,9,0,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
h5 = h2
h6 = h1
h7 = h4
pieces.append(h0)
#pieces.append(h1)
pieces.append(h2)
#pieces.append(h3)
pieces.append(h4)
#pieces.append(h5)
pieces.append(h6)
#pieces.append(h7)
i0 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,8,0],[0,8,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0]])
i1 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,8,9,1,1,9,0],[0,9,1,1,9,8,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0]])
i2 = np.array([[0,0,0,0,0,0,0],[0,0,0,8,9,8,0],[0,0,8,9,1,9,0],[0,0,9,1,1,9,0],[0,0,9,1,9,8,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
i3 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,0,9,1,9,8,0],[0,0,9,1,1,9,0],[0,0,8,9,1,9,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
i4 = np.array([[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,8,0],[0,8,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
i5 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,8,9,1,1,9,0],[0,9,1,1,9,8,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
i6 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,8,0,0],[0,8,9,1,9,0,0],[0,9,1,1,9,0,0],[0,9,1,9,8,0,0],[0,8,9,8,0,0,0],[0,0,0,0,0,0,0]])
i7 = np.array([[0,0,0,0,0,0,0],[0,8,9,8,0,0,0],[0,9,1,9,8,0,0],[0,9,1,1,9,0,0],[0,8,9,1,9,0,0],[0,0,8,9,8,0,0],[0,0,0,0,0,0,0]])
pieces.append(i0)
pieces.append(i1)
pieces.append(i2)
pieces.append(i3)
pieces.append(i4)
pieces.append(i5)
pieces.append(i6)
pieces.append(i7)
j0 = np.array([[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0]])
j1 = j0
j2 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[8,9,9,9,9,9,8],[9,1,1,1,1,1,9],[8,9,9,9,9,9,8],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
j3 = j2
j4 = j0
j5 = j0
j6 = j2
j7 = j2
pieces.append(j0)
#pieces.append(j1)
pieces.append(j2)
#pieces.append(j3)
#pieces.append(j4)
#pieces.append(j5)
#pieces.append(j6)
#pieces.append(j7)
k0 = np.array([[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,8,9,1,9,0,0],[0,9,1,1,9,0,0],[0,8,9,9,8,0,0],[0,0,0,0,0,0,0]])
k1 = np.array([[0,0,8,9,8,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,8,0],[0,0,9,1,1,9,0],[0,0,8,9,9,8,0],[0,0,0,0,0,0,0]])
k2 = np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[8,9,9,9,9,8,0],[9,1,1,1,1,9,0],[8,9,9,9,1,9,0],[0,0,0,8,9,8,0],[0,0,0,0,0,0,0]])
k3 = np.array([[0,0,0,0,0,0,0],[0,0,0,8,9,8,0],[8,9,9,9,1,9,0],[9,1,1,1,1,9,0],[8,9,9,9,9,8,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
k4 = np.array([[0,0,0,0,0,0,0],[0,0,8,9,9,8,0],[0,0,9,1,1,9,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0]])
k5 = np.array([[0,0,0,0,0,0,0],[0,8,9,9,8,0,0],[0,9,1,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,9,1,9,0,0],[0,0,8,9,8,0,0]])
k6 = np.array([[0,0,0,0,0,0,0],[0,8,9,8,0,0,0],[0,9,1,9,9,9,8],[0,9,1,1,1,1,9],[0,8,9,9,9,9,8],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]])
k7 =
|
np.array([[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,8,9,9,9,9,8],[0,9,1,1,1,1,9],[0,9,1,9,9,9,8],[0,8,9,8,0,0,0],[0,0,0,0,0,0,0]])
pieces.append(k0)
pieces.append(k1)
pieces.append(k2)
|
numpy.array
|
'''
This is just a wrapper around seaborn's clustermap.
@author: <NAME>
inputs:
1. means: 2d array (rows: number of features, columns: number of clusters)
2. variance: the variances associated with the means, 2d array (rows: number of features, columns: number of clusters)
3. featureslice: 1d array of features used as row labels (dimension matches the rows of means and variance)
4. clustpop: sample size of each cluster, 1d array (dimension matches the number of columns of means and variance)
5. normtype: integer, normalization (scale) type: none, row=0, column=1
(these 3 cases do not require the inputs variance and clustpop; pass variance=None, clustpop=None)
2-4: matrix entries are p-values of a t-test
# 2 : 1 - max(list of pvals), meaning a given feature of a cluster has to differ from all of the other clusters;
# 3 : 1 - average(list of pvals), meaning on average a given feature of a cluster has to differ from the other clusters;
# 4 : 1 - min(list of pvals), meaning a given feature of a cluster has to differ from at least one of the other clusters;
A cmap feature was also added for marking value types (positive, negative, and zero) with 3 colors.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
import seaborn as sns
import scipy.stats
# plot a simple nonscaled clustermap
def plotclustmap_simple(means,featureslice):
cg = sns.clustermap(means,yticklabels = featureslice, xticklabels = np.arange(means.shape[0]),standard_scale=None,col_cluster=False, figsize =(14,14))
cg.ax_row_dendrogram.set_visible(False)
return
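# Illustrative call (a sketch with synthetic data and hypothetical feature names):
#   means = np.random.rand(6, 6)                      # rows: features, columns: clusters
#   plotclustmap_simple(means, ['feat%d' % i for i in range(6)])
#   plt.show()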
def plotclustmap(means,variance,featureslice,clustpop,normtype,clust_name=None,**kwargs):
# map 3 levels
if clust_name:
clustname = clust_name
else:
clustname = np.arange(means.shape[1])
if normtype == None:
cg = sns.clustermap(means,yticklabels = featureslice, xticklabels = clustname,standard_scale=None,col_cluster=False, figsize =(14,14),**kwargs)
plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=45)
cg.ax_row_dendrogram.set_visible(False)
cg.cax.set_visible(False)
# row scaled
elif normtype == 0:
sns.clustermap(means,yticklabels = featureslice, xticklabels = clustname,standard_scale=0,col_cluster=False, **kwargs)
# column scaled
elif normtype == 1:
sns.clustermap(means,yticklabels = featureslice, xticklabels = clustname,standard_scale=1,col_cluster=False, **kwargs)
# 2 : 1- max (list of pvals)
elif normtype == 2:
stdev = np.sqrt(variance)
#pval =np.zeros(means.shape[1]-1,)
pval_table = np.zeros((means.shape[0],means.shape[1]))
for i in range(means.shape[0]):
pval_temp = np.zeros(means.shape[1])
pval_temp = np.diag(pval_temp)
for j in range(means.shape[1]-1):
for k in range(j+1,means.shape[1]):
tpval=scipy.stats.ttest_ind_from_stats(mean1=means[i,j], std1=stdev[i,j], nobs1=clustpop[j],
mean2=means[i,k], std2=stdev[i,k], nobs2=clustpop[k], equal_var=False)
#print(tpval)
pval_temp[j,k]= tpval[1]
pval_temp = pval_temp + pval_temp.T
pval_table[i,:] = 1-pval_temp.max(axis=1)
sns.clustermap(pval_table,yticklabels = featureslice,
xticklabels =np.arange(means.shape[0]),standard_scale=None,col_cluster=False, **kwargs)
# 3 : 1- average (list of pvals)
elif normtype == 3:
stdev = np.sqrt(variance)
#pval =np.zeros(means.shape[1]-1,)
pval_table = np.zeros((means.shape[0],means.shape[1]))
for i in range(means.shape[0]):
pval_temp = np.zeros(means.shape[1])
pval_temp = np.diag(pval_temp)
for j in range(means.shape[1]-1):
for k in range(j+1,means.shape[1]):
tpval=scipy.stats.ttest_ind_from_stats(mean1=means[i,j], std1=stdev[i,j], nobs1=clustpop[j],
mean2=means[i,k], std2=stdev[i,k], nobs2=clustpop[k], equal_var=False)
#print(tpval)
pval_temp[j,k]= tpval[1]
pval_temp = pval_temp + pval_temp.T
pval_table[i,:] = 1-pval_temp.mean(axis=1)
sns.clustermap(pval_table,yticklabels = featureslice,
xticklabels =np.arange(means.shape[0]),standard_scale=None,col_cluster=False, **kwargs)
# 4 : 1 - min(list of pvals)
elif normtype == 4:
stdev = np.sqrt(variance)
#pval =np.zeros(means.shape[1]-1,)
pval_table = np.zeros((means.shape[0],means.shape[1]))
for i in range(means.shape[0]):
pval_temp =
|
np.ones(means.shape[1])
|
numpy.ones
|
import numpy as np
import tensorflow as tf
from collections import defaultdict
class Greedy_Tracker(object):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
self.network_type = cfg_tracker.network_type
self.cls_thr = cfg_tracker.nn_gating_thr
self.det_ratio_thr = cfg_tracker.det_ratio
self.N_miss_max = cfg_tracker.N_miss_max
self.img_height = cfg_tracker.IMAGE_HEIGHT
self.img_width = cfg_tracker.IMAGE_WIDTH
self.all_tracks = defaultdict(lambda: defaultdict(defaultdict))
self.track_num = 0
self.model_info = {}
self.model_info['app_hidden_dim'] = cfg_train.APP_HIDDEN_DIM
self.model_info['mot_hidden_dim'] = cfg_train.MOT_HIDDEN_DIM
self.model_info['mot_input_dim'] = cfg_train.MOT_INPUT_DIM
self.result = []
self.cfg_train = cfg_train
self.cfg_tracker = cfg_tracker
self.sess = session
self.tf_ops = tf_ops
self.tf_plh = tf_placeholders
self.neg_mem_indices = self.precompute_neg_mem_indices()
def precompute_neg_mem_indices(self):
# get indices for online negative examples (i.e. other tracks in the scene) for each track
# NOTE: need to be set again when the code is used for tracking more objects
max_track_num = 200
max_det_num = 200
neg_mem_ind = np.zeros((max_track_num, max_det_num, max_track_num-1, 2))
for i in range(100):
for j in range(100):
xy_ind_tmp = np.zeros((max_track_num - 1, 2))
x_ind_tmp = np.arange(max_track_num, dtype=np.int32)
xy_ind_tmp[:, 0] = x_ind_tmp[x_ind_tmp != i]
xy_ind_tmp[:, 1] = j
neg_mem_ind[i, j, :, :] = xy_ind_tmp
return neg_mem_ind
def build_neg_mem_indices(self, track_num, det_num):
if track_num > 1:
neg_mem_inds = self.neg_mem_indices[:track_num, :det_num, :(track_num-1), :]
elif track_num == 1:
neg_mem_inds = None
else:
raise NotImplementedError
return neg_mem_inds
def get_lstm_states(self, h_np, c_np, cur_detbb_num, is_track_state):
h_np = np.reshape(h_np, (cur_detbb_num, cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, cur_detbb_num, -1))
if is_track_state == True:
h_np = np.transpose(h_np, (1, 0, 2))
c_np = np.transpose(c_np, (1, 0, 2))
# loop can be commented out later to improve processing time
# check lstm states
h_np = np.reshape(h_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(h_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
h_np[:cur_detbb_num, :]))
h_np = h_np[:cur_detbb_num, :]
# check lstm states
c_np = np.reshape(c_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(c_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
c_np[:cur_detbb_num, :]))
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_new(self, h_np, c_np, cur_detbb_num):
h_np = np.reshape(h_np, (cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, -1))
h_np = h_np[:cur_detbb_num, :]
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_for_matched_tracks(self, matching, model_dim, h_np, c_np, trk_num, det_num):
inds_sel1 = []
track_i_sel = []
# select lstm states for matched tracks
if len(matching) > 0:
h_np_tmp = np.zeros((len(matching), model_dim))
c_np_tmp = np.zeros((len(matching), 2 * model_dim))
h_np = np.reshape(h_np, (trk_num, det_num, -1))
c_np = np.reshape(c_np, (trk_num, det_num, -1))
for kkk in range(0, len(matching)):
track_i = int(matching[kkk][0, 0])
detbb_i = int(matching[kkk][0, 1])
h_np_tmp[kkk, :] = h_np[track_i, detbb_i, :]
c_np_tmp[kkk, :] = c_np[track_i, detbb_i, :]
inds_sel1.append(detbb_i)
track_i_sel.append(track_i)
h_np = h_np_tmp
c_np = c_np_tmp
else:
h_np = []
c_np = []
return (h_np, c_np, inds_sel1, track_i_sel)
def precompute_app_features(self, imgs, bbs):
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])
feed_dict = {
self.tf_plh['detbb_num']: cur_detbb_num,
self.tf_plh['images']:imgs,
self.tf_plh['is_training']: False,
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['valid_app_data']: np.ones((cur_detbb_num, 1, 1), dtype=np.int32),
self.tf_plh['indices_for_mapping']: np.reshape(np.arange(cur_detbb_num * 1, dtype=np.int32), (-1, 1)),
self.tf_plh['image_batch_shape']: np.array([cur_detbb_num * 1, self.cfg_train.APP_LAYER_DIM])
}
app_embed_np = self.sess.run(self.tf_ops['app_embed'], feed_dict=feed_dict)
return app_embed_np
def initialize_tracks(
self,
h,
c,
memory,
bbs,
bbs_norm,
det_ids,
frame,
hidden_dim,
is_dummy,
network
):
h = np.reshape(h, (-1, hidden_dim))
if network == 'app_blstm':
assert(np.shape(memory)[0] == np.shape(h)[0])
assert(np.shape(memory)[0] == np.shape(c)[0])
assert(np.array_equal(h, c[:, hidden_dim:]))
assert(np.shape(h)[0] == np.shape(c)[0])
if is_dummy == False:
for i in range(0, np.shape(h)[0]):
self.track_num += 1
# 1 x d
self.all_tracks[self.track_num]['h_states'] = h[i, :]
# 1 x d
self.all_tracks[self.track_num]['c_states'] = c[i, :]
self.all_tracks[self.track_num]['real_det_num'] = 1
self.all_tracks[self.track_num]['miss_det_num'] = 0
self.all_tracks[self.track_num]['last_miss_det_num'] = 0
self.all_tracks[self.track_num]['bb'] = bbs[det_ids[i], :]
self.all_tracks[self.track_num]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[self.track_num]['frame'] = frame
self.all_tracks[self.track_num]['th'] = [self.cls_thr]
if network == 'app_blstm':
# 1 x 1 x d
self.all_tracks[self.track_num]['mem'] = memory[i, :, :]
self.result.append((frame, det_ids[i], 1.0, self.track_num))
elif is_dummy == True:
ct = -1
for i in range(0, np.shape(memory)[0]):
ct -= 1
# 1 x d
self.all_tracks[ct]['h_states'] = h[i, :]
# 1 x d
self.all_tracks[ct]['c_states'] = c[i, :]
self.all_tracks[ct]['real_det_num'] = 1
self.all_tracks[ct]['miss_det_num'] = 0
self.all_tracks[ct]['last_miss_det_num'] = 0
self.all_tracks[ct]['bb'] = bbs[det_ids[i], :]
self.all_tracks[ct]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[ct]['frame'] = frame
self.all_tracks[ct]['th'] = [self.cls_thr]
if network == 'app_blstm':
# 1 x 1 x d
self.all_tracks[ct]['mem'] = memory[i, :, :]
else:
raise NotImplementedError
def delete_dummy_tracks(self, frame):
        for i in list(self.all_tracks.keys()):
if i < 0:
del self.all_tracks[i]
for i in self.all_tracks.keys():
assert(i > 0)
def update_tracks(
self,
h,
c,
memory,
bbs,
bbs_norm,
track_ids,
matching,
matching_score,
frame,
hidden_dim,
network,
missdet_tracks
):
h = np.reshape(h, (-1, hidden_dim))
if np.shape(c)[0] != 0:
if network == 'app_blstm':
assert((np.shape(memory)[0] == np.shape(h)[0]))
assert((np.shape(memory)[0] == np.shape(c)[0]))
assert(np.array_equal(h, c[:, hidden_dim:]))
assert(len(matching) == len(matching_score))
track_ids_sel1 = []
for i in range(0, len(matching)):
track_i = int(matching[i][0, 0])
detbb_i = int(matching[i][0, 1])
if network == 'app_blstm':
self.all_tracks[track_ids[track_i]]['mem'] = memory[i, :, :]
self.all_tracks[track_ids[track_i]]['h_states'] = h[i, :]
self.all_tracks[track_ids[track_i]]['c_states'] = c[i, :]
self.all_tracks[track_ids[track_i]]['real_det_num'] += 1
self.all_tracks[track_ids[track_i]]['last_miss_det_num'] = 0
self.all_tracks[track_ids[track_i]]['bb'] = bbs[detbb_i, :]
self.all_tracks[track_ids[track_i]]['bb_norm'] = bbs_norm[detbb_i, :]
self.all_tracks[track_ids[track_i]]['frame'] = frame
self.all_tracks[track_ids[track_i]]['th'] = self.all_tracks[track_ids[track_i]]['th'] \
+ [matching_score[i]]
self.result.append((frame, detbb_i, 1.0, track_ids[track_i]))
track_ids_sel1.append(track_ids[track_i])
# update non matched tracks with dummy detections
track_ids_sel2 = np.setdiff1d(track_ids, track_ids_sel1)
if network == 'mot_lstm' and len(track_ids_sel2) > 0:
assert(np.array_equal(track_ids_sel2, missdet_tracks['track_ids']))
for i in range(0, len(track_ids_sel2)):
# skip dummy track
if track_ids_sel2[i] < 0:
continue
self.all_tracks[track_ids_sel2[i]]['miss_det_num'] += 1
self.all_tracks[track_ids_sel2[i]]['last_miss_det_num'] += 1
self.result.append((frame, None, None, track_ids_sel2[i]))
if network == 'mot_lstm' and len(track_ids_sel2) > 0:
self.all_tracks[track_ids_sel2[i]]['h_states'] = missdet_tracks['h_states'][i, :]
self.all_tracks[track_ids_sel2[i]]['c_states'] = missdet_tracks['c_states'][i, :]
assert(track_ids_sel2[i] == missdet_tracks['track_ids'][i])
def compute_iou(self, bb_p, bb_n):
bb_px_min = bb_p[0]
bb_py_min = bb_p[1]
bb_pw = bb_p[2]
bb_ph = bb_p[3]
bb_px_max = bb_px_min + bb_pw
bb_py_max = bb_py_min + bb_ph
bb_nx_min = bb_n[0]
bb_ny_min = bb_n[1]
bb_nw = bb_n[2]
bb_nh = bb_n[3]
bb_nx_max = bb_nx_min + bb_nw
bb_ny_max = bb_ny_min + bb_nh
bb_p_area = (bb_px_max - bb_px_min)*(bb_py_max - bb_py_min)
bb_n_area = (bb_nx_max - bb_nx_min)*(bb_ny_max - bb_ny_min)
x1 = np.maximum(bb_px_min, bb_nx_min)
y1 = np.maximum(bb_py_min, bb_ny_min)
x2 = np.minimum(bb_px_max, bb_nx_max)
y2 = np.minimum(bb_py_max, bb_ny_max)
w = np.maximum(0.0, x2 - x1)
h = np.maximum(0.0, y2 - y1)
intersection = np.multiply(w, h)
union = np.add(bb_p_area, bb_n_area) - intersection
IoU = np.divide(intersection, union)
return IoU
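    # Example (a sketch with hypothetical boxes in (x, y, w, h) format): two unit boxes
    # offset by half a width share intersection 0.5 and union 1.5, so
    # compute_iou(np.array([0, 0, 1, 1]), np.array([0.5, 0, 1, 1])) is ~0.333.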
def solve_greedy_matching(self, softmax, m_states, track_num, detbb_num, track_ids, bbs, frame):
col1 = np.arange(track_num)
col2 = np.arange(detbb_num)
col1 = np.expand_dims(col1, axis=1)
col2 = np.expand_dims(col2, axis=0)
col1 = np.reshape(np.tile(col1, (1, detbb_num)), (-1, 1))
col2 = np.reshape(np.tile(col2, (track_num, 1)), (-1, 1))
track_detbb_pair_ind = np.concatenate((col1, col2), axis=1)
assert(np.shape(track_detbb_pair_ind)[0] == track_num * detbb_num)
motion_gating_mask = np.ones((track_num, detbb_num, 1))
if self.cfg_tracker.IS_NAIVE_GATING_ON == True:
for i in range(0, track_num):
bb_p = self.all_tracks[track_ids[i]]['bb']
bb_n = bbs
if track_ids[i] < 0:
motion_gating_mask[i, :, 0] = 0
else:
fr_diff = (frame - self.all_tracks[track_ids[i]]['frame'])
motion_gating_mask[i, :, 0] = self.naive_motion_gating(bb_p, bb_n, fr_diff)
motion_gating_mask = np.reshape(motion_gating_mask, (track_num * detbb_num, 1))
# (N1 * N2) x 1
softmax_pos = softmax[:, 1]
softmax_pos = np.reshape(softmax_pos, (-1, 1))
softmax_pos_org = softmax_pos
softmax_pos = np.multiply(softmax_pos, motion_gating_mask)
matching = []
matching_score = []
while True:
max_p = np.amax(softmax_pos, axis=0)
max_i = np.argmax(softmax_pos, axis=0)
assert(softmax_pos[max_i] == max_p)
assert(np.shape(softmax_pos)[0] == np.shape(track_detbb_pair_ind)[0])
if max_p > self.cls_thr:
matching.append(track_detbb_pair_ind[max_i, :])
matching_score.append(softmax_pos_org[max_i])
del_ind1 = track_detbb_pair_ind[:, 1] == track_detbb_pair_ind[max_i, 1]
del_ind2 = track_detbb_pair_ind[:, 0] == track_detbb_pair_ind[max_i, 0]
del_ind = np.where(np.logical_or(del_ind1, del_ind2))[0]
track_detbb_pair_ind_tmp = np.delete(track_detbb_pair_ind, del_ind, axis=0)
softmax_pos = np.delete(softmax_pos, del_ind, axis=0)
softmax_pos_org = np.delete(softmax_pos_org, del_ind, axis=0)
assert(len(np.where(track_detbb_pair_ind_tmp[:, 1] == track_detbb_pair_ind[max_i, 1])[0]) == 0)
assert(len(np.where(track_detbb_pair_ind_tmp[:, 0] == track_detbb_pair_ind[max_i, 0])[0]) == 0)
track_detbb_pair_ind = track_detbb_pair_ind_tmp
# out of the loop when there is no good match left
else:
break
# out of the loop when all detections are taken
if np.shape(track_detbb_pair_ind)[0] == 0:
break
return (matching, matching_score)
def pick_imgs(self, imgs, imgs_inds):
imgs_sel = np.zeros((len(imgs_inds), self.img_height, self.img_width, 3))
for i in range(0, len(imgs_inds)):
imgs_sel[i, :, :, :] = imgs[imgs_inds[i], :, :, :]
return imgs_sel
def pick_dets(self, dets, dets_inds):
dets_sel = np.zeros((len(dets_inds), self.model_info['mot_input_dim']))
for i in range(0, len(dets_inds)):
dets_sel[i, :] = dets[dets_inds[i], :]
return dets_sel
def get_gating_result(self, x_diff, y_diff, w_diff, h_diff, gating_factor):
# NOTE: These parameters are tuned for the MOT Challenge datasets.
x_diff_th = 3.5
y_diff_th = 2.0
w_diff_th = 1.8
h_diff_th = 1.8
return np.logical_and(np.logical_and(x_diff < x_diff_th, y_diff < y_diff_th),
np.logical_and(w_diff < w_diff_th, h_diff < h_diff_th))
def naive_motion_gating(self, bb_p, bb_n, gating_factor):
bb_px = bb_p[0]
bb_py = bb_p[1]
bb_pw = bb_p[2]
bb_ph = bb_p[3]
bb_nx = bb_n[:, 0]
bb_ny = bb_n[:, 1]
bb_nw = bb_n[:, 2]
bb_nh = bb_n[:, 3]
x_diff = np.divide(np.abs(bb_px - bb_nx), bb_pw)
y_diff = np.divide(np.abs(bb_py - bb_ny), bb_ph)
w_diff = np.maximum(np.divide(bb_pw, bb_nw), np.divide(bb_nw, bb_pw))
h_diff = np.maximum(np.divide(bb_ph, bb_nh), np.divide(bb_nh, bb_ph))
return self.get_gating_result(x_diff, y_diff, w_diff, h_diff, gating_factor)
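    # Example (a sketch): for equally sized boxes whose positions differ by one box
    # width, x_diff = 1.0 < 3.5 and the size ratios are 1.0 < 1.8, so the pair
    # passes the naive motion gate above.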
def get_result(self):
return self.result
class Greedy_Tracker_APP_BLSTM(Greedy_Tracker):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
super(Greedy_Tracker_APP_BLSTM, self).__init__(cfg_tracker, cfg_train, tf_ops, tf_placeholders, session)
def run(self, bbs, bbs_norm, imgs, frame_num):
# first frame
if len(self.all_tracks.keys()) == 0 and imgs is not None:
mem_np = self.initialize_track_mems(imgs, bbs)
h_np, c_np, memory_np = mem_np
cur_detbb_num = np.shape(imgs)[0]
self.initialize_tracks(
h_np,
c_np,
memory_np,
bbs,
bbs_norm,
np.array(range(cur_detbb_num)),
frame_num,
self.model_info['app_hidden_dim'],
is_dummy=False,
network='app_blstm'
)
elif len(self.all_tracks.keys()) != 0:
bookkeeping = {}
self.data_association(imgs, bbs, bbs_norm, frame_num, bookkeeping)
self.update_existing_tracks(bbs, bbs_norm, frame_num, bookkeeping)
self.start_new_tracks(imgs, bbs, bbs_norm, frame_num, bookkeeping)
def initialize_track_mems(self, imgs, bbs):
cur_detbb_num =
|
np.shape(imgs)
|
numpy.shape
|
# -*- coding: utf-8 -*-
##################################################
# © 2017 ETH Zurich, Swiss Seismological Service #
# <NAME>' - wavedec at gmail dot com #
##################################################
"""
Here are estimation routines used in WaveDec
"""
from scipy.optimize import minimize # , approx_fprime
import numpy as np
from numpy import shape, fft, zeros, real, imag, eye, linalg, ceil, linspace, pi, \
meshgrid, concatenate, sqrt, array, sum, conjugate, log, mean, size, dot, \
arctan2, sin, cos, argmin, reshape, matrix, transpose, arange
import DataUtils as db
import logging
from wdSettings import MODEL_NOISE, MODEL_VERTICAL, MODEL_RAYLEIGH, MODEL_LOVE
from wdSettings import EWX, NSY, UDZ, ROTX, ROTY, ROTZ
def decomposeWavefield(conn, y, WindowId, Ts, Fvec_ndx, Kmax, Kstep, Estep, Vmin, WavesToModel, MaxWaves, MaxIterations, ArrayInfo, Gamma):
""" Fit different wave types at several frequencies.
Parameters
----------
conn :
SQL database connection
y : float array
input signal
WindowId : int
Unique indentifier of the window
Ts : float
Sampling time [s]
Fvec_ndx : int array
...
Kmax : float
Largest wavenumber [1/m] to analyze
Kstep : float
Grid step in the wavenumber plane [1/m]
Vmin : float
Smallest velocity [m/s] to analyze
WavesToModel :
Describe which wave types should be fitted
MaxWaves : int
Maximum number of waves to model at each frequency in the time window
MaxIterations : int
        Maximum number of iterations. In each iteration parameters of each wave are re-estimated.
ArrayInfo : float array
An array containing information about sensor location and channels.
It has L rows, where L is the number of channels.
Each rows has the following form:
pos_x, pos_y, pos_z, cmp, Ts
where the first three fields are the position of the sensor in [m].
cmp is the component code.
Ts is the sampling time as read from the SAC file.
Gamma : float
Controls model complexity. (0 for pure ML, 1 for BIC, other values for intermediate strategies)
"""
K = shape(y)[0]
L = shape(y)[1]
Fvec_fft = fft.fftfreq(K, Ts);
# Fvec = Fvec_fft[Fvec_ndx]
(Sm_bw, Sw_bw, Swm_bw) = bwMessages(y, Ts)
Sm_fw_all = zeros(shape(Sm_bw))
F=shape(Sm_bw)[2]
    NumK = int(2*ceil(Kmax/Kstep)) # number of points in the wavenumber search grid. Even, so that we do not have 0 in the final search grid
    NumE = int(ceil(np.pi/Estep)) # number of points in the ellipticity search grid. Even, so that we have 0 in the final search grid
Wavenumber_x = linspace(-Kmax, Kmax, NumK)
Wavenumber_y = linspace(-Kmax, Kmax, NumK)
EllipticityAngle = linspace(-pi/2, pi/2, NumE, endpoint=False)
xx, yy, zz = meshgrid(Wavenumber_x,Wavenumber_y,EllipticityAngle)
xx=concatenate(xx); yy=concatenate(yy); zz=concatenate(zz);
ndx_ok = sqrt( xx**2 + yy**2 ) <= Kmax # only wavenumber smaller than Kmax
xx = xx[ndx_ok]; yy = yy[ndx_ok]; zz = zz[ndx_ok];
X_grid_R = array([xx, yy, zz]) # Rayleigh waves
xx, yy = meshgrid(Wavenumber_x,Wavenumber_y)
xx=concatenate(xx); yy=concatenate(yy);
ndx_ok = sqrt( xx**2 + yy**2 ) <= Kmax # only wavenumber smaller than Kmax
xx = xx[ndx_ok]; yy = yy[ndx_ok];
X_grid_L = array([xx, yy]) # Love waves
# Fndx
for ff in Fvec_ndx:
X_grid_R_f = X_grid_R
X_grid_L_f = X_grid_L
X_grid_V_f = X_grid_L
if Vmin != None and Vmin > 0: # further restrict search grid
if WavesToModel[MODEL_LOVE] or WavesToModel[MODEL_VERTICAL]:
ndx_ok = sqrt( X_grid_L[0,:]**2 + X_grid_L[1,:]**2 ) <= Fvec_fft[ff]/Vmin
if WavesToModel[MODEL_LOVE]:
X_grid_L_f = X_grid_L[:,ndx_ok]
if WavesToModel[MODEL_VERTICAL]:
X_grid_V_f = X_grid_L[:,ndx_ok]
if WavesToModel[MODEL_RAYLEIGH]:
ndx_ok = sqrt( X_grid_R[0,:]**2 + X_grid_R[1,:]**2 ) <= Fvec_fft[ff]/Vmin
X_grid_R_f = X_grid_R[:,ndx_ok]
Sm_fw = zeros((2,L,F,MaxWaves)) # shape() = (2,L,F)
WaveModel = zeros((MaxWaves,))
WaveAmplitude = zeros((MaxWaves,))
WavePhase = zeros((MaxWaves,))
WaveX_ML = [None]* MaxWaves
# gradually increase the number of waves modeled
NumWaves = 0 # number of waves modeled
for mm in range(0,MaxWaves):
logging.debug("Fitting {0}-th model (at {1:.3f} [Hz])".format(mm+1, Fvec_fft[ff]))
tmpBIC=[]
tmpModel=[]
tmpSm_bw = Sm_bw - sum(Sm_fw ,3) + Sm_fw[:,:,:,mm]
# how about variance messages?
# Parameter estimation: attempt to fit different possible models
if WavesToModel[MODEL_NOISE]:
(BIC_N, sigma2_ML) = fitNoise(tmpSm_bw, L, K, Gamma)
tmpBIC.append(BIC_N); tmpModel.append(MODEL_NOISE);
if WavesToModel[MODEL_VERTICAL] and size(X_grid_V_f) > 0:
(BIC_V, Sm_fw_V, Amplitude_V, Phase_V, X_ML_V) = fitVerticalWave(X_grid_V_f, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
tmpBIC.append(BIC_V); tmpModel.append(MODEL_VERTICAL);
if WavesToModel[MODEL_LOVE] and size(X_grid_L_f) > 0:
(BIC_L, Sm_fw_L, Amplitude_L, Phase_L, X_ML_L) = fitLoveWave(X_grid_L_f, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
tmpBIC.append(BIC_L); tmpModel.append(MODEL_LOVE);
if WavesToModel[MODEL_RAYLEIGH] and size(X_grid_R_f) > 0:
(BIC_R, Sm_fw_R, Amplitude_R, Phase_R, X_ML_R) = fitRayleighWave(X_grid_R_f, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
tmpBIC.append(BIC_R); tmpModel.append(MODEL_RAYLEIGH);
# Model selection: choose wave with smallest BIC
if len(tmpBIC) > 0:
WaveModel[mm] = tmpModel[np.argmin(tmpBIC)]
histBIC = zeros((MaxIterations+1,))
histBIC[0] = np.min(tmpBIC)
if WaveModel[mm] == MODEL_NOISE:
break # when a noise model is chosen no more waves need to be added
if WaveModel[mm] == 0:
break # Nothing was modeled. Perhaps because of stringent Vmin at this frequency
elif WaveModel[mm] == MODEL_VERTICAL:
Sm_fw[:,:,:,mm] = Sm_fw_V
WaveAmplitude[mm] = Amplitude_V
WavePhase[mm] = Phase_V
WaveX_ML[mm] = X_ML_V
elif WaveModel[mm] == MODEL_LOVE:
Sm_fw[:,:,:,mm] = Sm_fw_L
WaveAmplitude[mm] = Amplitude_L
WavePhase[mm] = Phase_L
WaveX_ML[mm] = X_ML_L
elif WaveModel[mm] == MODEL_RAYLEIGH:
Sm_fw[:,:,:,mm] = Sm_fw_R
WaveAmplitude[mm] = Amplitude_R
WavePhase[mm] = Phase_R
WaveX_ML[mm] = X_ML_R
else:
logging.warning("Warning: unrecognized wave model")
# refine existing estimates, if needed
NumWaves = sum( (WaveModel > 0) & (WaveModel != MODEL_NOISE) )
if (MaxIterations > 0) and (NumWaves > 1):
for ii in range(1,MaxIterations):
for m in range(0,mm+1):
logging.debug("Refining estimates of model {0}/{1}".format(m+1,mm+1))
tmpSm_bw = Sm_bw - sum(Sm_fw ,3) + Sm_fw[:,:,:,m]
if WaveModel[m] == MODEL_NOISE:
continue
elif WaveModel[m] == MODEL_VERTICAL:
X = WaveX_ML[m]
                            (BIC_V, Sm_fw_V, Amplitude_V, Phase_V, X_ML_V) = fitVerticalWave(X, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
WaveX_ML[m] = X_ML_V
WaveAmplitude[m] = Amplitude_V
WavePhase[m] = Phase_V
Sm_fw[:,:,:,m] = Sm_fw_V
elif WaveModel[m] == MODEL_LOVE:
X = WaveX_ML[m]
(BIC_L, Sm_fw_L, Amplitude_L, Phase_L, X_ML_L) = fitLoveWave(X, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
WaveX_ML[m] = X_ML_L
WaveAmplitude[m] = Amplitude_L
WavePhase[m] = Phase_L
Sm_fw[:,:,:,m] = Sm_fw_L
elif WaveModel[m] == MODEL_RAYLEIGH:
X = WaveX_ML[m]
(BIC_R, Sm_fw_R, Amplitude_R, Phase_R, X_ML_R) = fitRayleighWave(X, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
WaveX_ML[m] = X_ML_R
WaveAmplitude[m] = Amplitude_R
WavePhase[m] = Phase_R
Sm_fw[:,:,:,m] = Sm_fw_R
else:
logging.warning("Unrecognized wave model {0}".format(WaveModel[m]))
tmpSm_bw = Sm_bw - sum(Sm_fw ,3)
(BIC_N, sigma2_ML) = fitNoise(tmpSm_bw, L, K, Gamma)
histBIC[ii] = BIC_N # TODO is this the right BIC value, multiple waves are they all same?
if abs(histBIC[ii]-histBIC[ii-1])/abs(histBIC[ii-1]) < 0.01:
break
# Compute forward messages and estimated paramters
Sm_fw_all += sum(Sm_fw ,3)
NumWaves = sum( (WaveModel > 0) & (WaveModel != MODEL_NOISE) )
for mm in range(0,NumWaves):
if WaveModel[mm] == MODEL_NOISE:
continue
elif WaveModel[mm] == MODEL_VERTICAL:
Kx = WaveX_ML[mm][0]
Ky = WaveX_ML[mm][1]
Wavenumber = sqrt( Kx**2 + Ky**2)
Azimuth = np.mod(arctan2(Ky, Kx), 2*pi) # Note the role reversal, as from documentation
db.addVerticalWave(conn, WindowId, ff, WaveAmplitude[mm], WavePhase[mm], Wavenumber, Azimuth)
elif WaveModel[mm] == MODEL_LOVE:
Kx = WaveX_ML[mm][0]
Ky = WaveX_ML[mm][1]
Wavenumber = sqrt( Kx**2 + Ky**2)
Azimuth = np.mod(arctan2(Ky, Kx), 2*pi) # Note the role reversal, as from documentation
db.addLoveWave(conn, WindowId, ff, WaveAmplitude[mm], WavePhase[mm], Wavenumber, Azimuth)
elif WaveModel[mm] == MODEL_RAYLEIGH:
Kx = WaveX_ML[mm][0]
Ky = WaveX_ML[mm][1]
EllipticityAngle = np.mod(WaveX_ML[mm][2] + pi/2, pi) -pi/2
Wavenumber = sqrt( Kx**2 + Ky**2)
Azimuth = np.mod(arctan2(Ky, Kx), 2*pi) # Note the role reversal, as from documentation
db.addRayleighWave(conn, WindowId, ff, WaveAmplitude[mm], WavePhase[mm], Wavenumber, Azimuth,EllipticityAngle)
else:
logging.warning("Unrecognized wave model {0}".format(WaveModel[mm]))
# after all freq have been processed
(BIC_N, sigma2_ML) = fitNoise(Sm_bw - Sm_fw_all, L, K, Gamma)
db.addNoise(conn, WindowId, sigma2_ML)
# TODO after estimating noise again, do we want to iterate once more on wave params ?
return
def bwMessages(y, Ts):
"""Compute sufficient statistics, assuming unitary variance
Parameters
----------
y : 2d float array
It is an array of size (K, L). Each column contains the signal at the l-th location.
Ts : float
The sampling time in [s]
Returns
-------
Sm_bw : float array
        It is an array of size (2, L). Each column refers to the signal at the l-th location.
Contains the mean vector of the message S.
Sw_bw : float array
        It is an array of size (2, 2, L). Each page refers to the signal at the l-th location.
Contains the precision matrix of the message S.
Swm_bw : float array
        It is an array of size (2, L). Each column refers to the signal at the l-th location.
Contains the weighted mean vector of the message S.
"""
K = shape(y)[0]
L = shape(y)[1]
#Fvec = fft.fftfreq(K, Ts);
#Fmin=0
#Fmax=0.5/Ts
#Fndx = (Fvec >= Fmin) and (Fvec <= Fmax);
#Fvec=Fvec[Fndx];
Fnum=int(K/2+1); # TODO need to make sure K is even
Y_bw= fft.rfft(y, K, 0);
Swm_bw = zeros((2,L,Fnum));
Sm_bw= zeros((2,L,Fnum));
Sw_bw=zeros((2,2,L,Fnum));
for ff in range(0,Fnum):
for ll in range(0, L):
Swm_bw[0,ll,ff] = real(Y_bw[ff,ll]);
Swm_bw[1,ll,ff] = imag(Y_bw[ff,ll]);
Sw_bw[:,:,ll,ff] = (K/2)*eye(2);
Sm_bw[:,ll,ff] = linalg.solve(Sw_bw[:,:,ll,ff], Swm_bw[:,ll,ff]);
return (Sm_bw, Sw_bw, Swm_bw)
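# Illustrative usage (a sketch with synthetic data; K must be even, as noted in the function):
#   y = zeros((256, 3)); Ts = 0.01
#   Sm_bw, Sw_bw, Swm_bw = bwMessages(y, Ts)   # Sm_bw has shape (2, 3, 129)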
### Vertical wave
def negLL_VerticalWave(X_grid, Sw_bw, Swm_bw, SlnGamma_bw, ArrayInfo):
""" Computes the loglikelihood of a plane wave (vertical component only)
Parameters
----------
X_grid : float array
Array of size (2,N). Description of N points to evaluate likelihoodfunction. First row is the wavenumber along the x axes. Second row the wavenumber along the y axes.
Sw_bw, Swm_bw, SlnGamma_bw :
sufficient statistics
ArrayInfo : float array
An array containing information about sensor location and channels.
It has L rows, where L is the number of channels.
        Each row has the following form:
pos_x, pos_y, pos_z, cmp, Ts
where the first three fields are the position of the sensor in [m].
cmp is the component code.
Ts is the sampling time as read from the SAC file.
Returns
-------
negLogLikelihood : float array
A one dimensional array of N elements with the value of minus the log-likelihood.
"""
if shape(shape(X_grid))[0] == 1: # single point
X_grid = reshape(X_grid, (2,-1))
N_points=shape(X_grid)[1]
Wavenumber_x=X_grid[0,:]
Wavenumber_y=X_grid[1,:]
L=shape(Swm_bw)[1]
negLogLikelihood = zeros((N_points,))
pos_x=ArrayInfo[:,0]
pos_y=ArrayInfo[:,1]
comp=ArrayInfo[:,3]
if False: # TODO check if input matrices are const*identity
for nn in range(0,N_points):
Uw = matrix(zeros((2,2)))
Uwm = matrix(zeros((2,1)))
for ll in range(0,L):
if comp[ll] == UDZ:
phi = -2*pi*(Wavenumber_x[nn]*pos_x[ll] + Wavenumber_y[nn]*pos_y[ll])
H = matrix([[cos(phi), -sin(phi)],[sin(phi), cos(phi)]])
else:
H = matrix([[0, 0],[0, 0]])
Uw = Uw + transpose(H) * matrix(Sw_bw[:,:,ll]) * H
Uwm = Uwm + transpose(H) * transpose(matrix(Swm_bw[:,ll]))
Um = linalg.solve(Uw, Uwm)
negLogLikelihood[nn] = -0.5*transpose(Um) * Uwm +
|
sum(-SlnGamma_bw)
|
numpy.sum
|
import matrices_new_extended as mne
import numpy as np
import sympy as sp
from equality_check import Point
x, y, z = sp.symbols("x y z")
Point.base_point =
|
np.array([x, y, z, 1])
|
numpy.array
|
import sys
import numpy as np,h5py
from keras.layers import *
from keras.models import Model
from keras.callbacks import *
from keras.optimizers import RMSprop
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from os import listdir
tf.python.control_flow_ops = tf
def get_encoder():
S = Input(shape = (64,64,3))
#norm = Lambda(lambda a: a/255.0)(S)
conv1 = Convolution2D(32,8,8,subsample = (4,4),activation = 'relu',weights = [np.random.rand(3,8,8,32),
|
np.ones((32,))
|
numpy.ones
|
import numpy as np
import scipy
import sympy
import scipy.sparse as sps
import math
import porepy as pp
###
np.random.seed(42)
base = 4
domain = np.array([1, 1])
basedim = np.array([base, base])
num_refs = 7
ref_rate=2
### End of parameter definitions
xr = domain[0]
x0 = xr/2
yt = domain[1]
y0 = yt/2
# Permeability tensor
kd = 1
kc = 0.1
# Analytical solution
x, y = sympy.symbols('x y')
# Discontinuity line: rx + sy = d
r = 0.7
s = 1 - r
# passing to the central point
d = r * x0 + s * y0
theta = math.pi/2 - math.atan(-r/s)
# jump in permeability (1 if no jump)
l = 1
f = r * x + s * y
g1 = 10#sympy.sin(x) * sympy.cos(y)
g2 = 1# * x - 3 * y
n = math.sqrt(r*r+s*s)
d1 = - (f - d) / n
d2 = (f - d) / n
u0 = 10
test = 1
if test == 1:
a1 = 1
a2 = 0
if test == 2:
a1 = 0
a2 = 1
if test == 3:
a1 = 1
a2 = 1
if test == 4:
a1 = 1
a2 = 100
u = u0 + a1 * sympy.Piecewise((g1 * d1, ((f>=0)&(f<d))), ((- g2 * d2, ((f>=d)&(f<=1))))) + a2 * sympy.sin(x) * sympy.cos(y)
gx = sympy.diff(u, x)
gy = sympy.diff(u, y)
k = sympy.Piecewise((1, ((f>=0)&(f<d))), (l, ((f>=d)&(f<=1))))
perm_xx = kd * k
perm_yy = kd * k
perm_xy = kc * k
perm_xx_f = sympy.lambdify((x, y), perm_xx, 'numpy')
perm_yy_f = sympy.lambdify((x, y), perm_yy, 'numpy')
perm_xy_f = sympy.lambdify((x, y), perm_xy, 'numpy')
u_f = sympy.lambdify((x, y), u, 'numpy')
gx_f = sympy.lambdify((x, y), gx, 'numpy')
gy_f = sympy.lambdify((x, y), gy, 'numpy')
dux = sympy.diff(u, x)
duy = sympy.diff(u, y)
dux_f = sympy.lambdify((x, y), dux, 'numpy')
duy_f = sympy.lambdify((x, y), duy, 'numpy')
rhs = - (sympy.diff(perm_xx * dux + perm_xy * duy, x) + sympy.diff(perm_xy * dux + perm_yy * duy, y))
rhs += (sympy.diff(perm_xx * gx + perm_xy * gy, x) + sympy.diff(perm_xy * gx + perm_yy * gy, y))
rhs_f = sympy.lambdify((x, y), rhs, 'numpy')
bctype = 'neu'
eps = 1.0e-8
deviation_from_plane_tol=1e-5
def invert_tensor_2d(perm):
k = np.zeros_like(perm)
term = perm[0,0,:] * perm[1,1,:] - perm[0,1,:]*perm[1,0,:]
k[0,0,:] = perm[1,1,:] / term
k[1,1,:] = perm[0,0,:] / term
k[1,0,:] = k[0,1,:] = - perm[0,1,:] / term
return k
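# Quick sanity check (a sketch): a stack of 2x2 identity tensors is its own inverse,
# since each determinant is 1 and the off-diagonal entries are zero.
#   ident = np.zeros((2, 2, 4)); ident[0, 0, :] = ident[1, 1, :] = 1
#   assert np.allclose(invert_tensor_2d(ident), ident)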
def standard_discr(g, k, gforce):
if g.dim == 2:
# Rotate the grid into the xy plane and delete third dimension. First
# make a copy to avoid alterations to the input grid
g = g.copy()
cell_centers, face_normals, face_centers, R, _, nodes = pp.map_geometry.map_grid(
g, deviation_from_plane_tol
)
g.cell_centers = cell_centers
g.face_normals = face_normals
g.face_centers = face_centers
g.nodes = nodes
# Rotate the permeability tensor and delete last dimension
k = k.copy()
k.values = np.tensordot(R.T, np.tensordot(R, k.values, (1, 0)), (0, 1))
k.values = np.delete(k.values, (2), axis=0)
k.values = np.delete(k.values, (2), axis=1)
# Step 1
# take harmonic mean of cell permeability tensors
fi, ci, sgn = sps.find(g.cell_faces)
perm = k.values[::, ::, ci]
# invert cell-centers permeability tensor
iperm = invert_tensor_2d(perm)
# Distance from face center to cell center
fc_cc = g.face_centers[::, fi] - g.cell_centers[::, ci]
dist_face_cell = np.linalg.norm(fc_cc, 2, axis=0)
# take harmonic mean of permeability k_12 = ((d1 * K1^-1 + d2 * K2^-1)/(d1+d2))^-1
hperm = np.zeros((2,2,g.num_faces))
den = np.bincount(fi, weights=dist_face_cell)
hperm[0,0,:] = np.bincount(fi, weights=dist_face_cell*iperm[0,0,:]) / den
hperm[1,1,:] = np.bincount(fi, weights=dist_face_cell*iperm[1,1,:]) / den
hperm[1,0,:] = hperm[0,1,:] = np.bincount(fi, weights=dist_face_cell*iperm[0,1,:]) / den
hperm = invert_tensor_2d(hperm)
nk_x = np.sum(g.face_normals[:2]*hperm[0,:,:], axis = 0)
nk_y = np.sum(g.face_normals[:2]*hperm[1,:,:], axis = 0)
div_g = np.vstack((nk_x, nk_y))
# Step 2
# take arithmetic mean of cell center gravities
gforce = np.reshape(gforce, (g.num_cells,2))
gforce = gforce[ci,:]
fgx = np.bincount(fi, weights=dist_face_cell*gforce[:,0]) / den
fgy = np.bincount(fi, weights=dist_face_cell*gforce[:,1]) / den
fg = np.vstack((fgx, fgy))
flux_g = np.sum(fg[:2]*div_g, axis=0)
return flux_g
def run_convergence(grid_type, gravity):
u_err = []
flux_err = []
hs = []
for iter1 in range(num_refs):
dim = basedim.shape[0]
grid_dims = basedim * ref_rate ** iter1
g = pp.CartGrid(grid_dims, domain)
g.compute_geometry()
if r != 1.:
g = rotate(g, theta, domain, grid_dims)
xc = g.cell_centers
# Permeability tensor
k = pp.SecondOrderTensor(np.ones(g.num_cells))
k_xx = np.zeros(g.num_cells)
k_xy = np.zeros(g.num_cells)
k_yy = np.zeros(g.num_cells)
k_xx[:] = perm_xx_f(xc[0], xc[1])
k_yy[:] = perm_yy_f(xc[0], xc[1])
k_xy[:] = perm_xy_f(xc[0], xc[1])
k = pp.SecondOrderTensor(k_xx, kyy=k_yy, kxy=k_xy)
# Gravity
gforce = np.zeros((2, g.num_cells))
gforce[0,:] = gx_f(xc[0], xc[1])
gforce[1,:] = gy_f(xc[0], xc[1])
gforce = gforce.ravel('F')
# Set type of boundary conditions
xf = g.face_centers
u_bound = np.zeros(g.num_faces)
if bctype == 'dir':
dir_faces = g.get_all_boundary_faces()
else:
# Dir left and right
left_faces = np.ravel(np.argwhere(g.face_centers[0] < 1e-10))
right_faces = np.ravel(np.argwhere(g.face_centers[0] > domain[0] - 1e-10))
# Neu bottom and top
bot_faces = np.ravel(np.argwhere(g.face_centers[1] < 1e-10))
top_faces = np.ravel(np.argwhere(g.face_centers[1] > domain[1] - 1e-10))
dir_faces = np.concatenate((left_faces, right_faces))
neu_faces = np.concatenate((bot_faces, top_faces))
bound_cond = pp.BoundaryCondition(g, dir_faces, ['dir'] * dir_faces.size)
# set value of boundary condition
u_bound[dir_faces] = u_f(xf[0, dir_faces], xf[1, dir_faces])
# Exact solution
u_ex = u_f(xc[0], xc[1])
kgradpx = perm_xx_f(xf[0], xf[1])*dux_f(xf[0], xf[1])+perm_xy_f(xf[0], xf[1])*duy_f(xf[0], xf[1])
kgradpy = perm_xy_f(xf[0], xf[1])*dux_f(xf[0], xf[1])+perm_yy_f(xf[0], xf[1])*duy_f(xf[0], xf[1])
du_ex_faces = np.vstack((kgradpx, kgradpy))
kgx = perm_xx_f(xf[0], xf[1])*gx_f(xf[0], xf[1]) + perm_xy_f(xf[0], xf[1]) * gy_f(xf[0], xf[1])
kgy = perm_xy_f(xf[0], xf[1])*gx_f(xf[0], xf[1]) + perm_yy_f(xf[0], xf[1]) * gy_f(xf[0], xf[1])
g_ex_faces = np.vstack((kgx, kgy))
flux_ex_du = -np.sum(g.face_normals[:2] * du_ex_faces, axis=0)
flux_ex_g = np.sum(g.face_normals[:2] * g_ex_faces, axis=0)
flux_ex = flux_ex_du + flux_ex_g
# MPFA discretization, and system matrix
if gravity:
flux, bound_flux, _, _, div_g = pp.Mpfa("flow").mpfa(
g, k, bound_cond, vector_source=gravity, inverter="python"
)
flux_g = div_g * gforce
else:
flux, bound_flux, _, _ = pp.Mpfa("flow").mpfa(
g, k, bound_cond, inverter="python"
)
flux_g = standard_discr(g, k, gforce)
div = pp.fvutils.scalar_divergence(g)
a = div * flux
if bctype == 'neu':
if gravity == False:
u_bound[neu_faces] = flux_ex[neu_faces] - flux_ex_g[neu_faces]
flux_g[neu_faces] = flux_ex_g[neu_faces]
if grid_type == 'cart':
u_bound[bot_faces] *= -1
else:
u_bound[neu_faces] = flux_ex[neu_faces]
u_bound[neu_faces] = 0
# Right hand side - contribution from the solution and the boundary conditions
xc = g.cell_centers
rhs = rhs_f(xc[0], xc[1]) * g.cell_volumes
b = rhs - div * bound_flux * u_bound - div * flux_g
# Solve system, derive fluxes
u_num = scipy.sparse.linalg.spsolve(a, b)
#pp.plot_grid(g, u_num, figsize=(15, 12))
#save = pp.Exporter(g, 'thetagrid', folder='plots')
#cell_id = np.arange(g.num_cells)
#save.write_vtk({"pressure": u_num})
flux_num_du = flux * u_num + bound_flux * u_bound
flux_num = flux_num_du + flux_g
assert np.all(abs(flux_num[neu_faces]) < 1.0e-12)
# Calculate errors
u_diff = u_num - u_ex
flux_diff = flux_num - flux_ex
hs.append(g.num_cells**(-1/g.dim))
u_err.append(np.sqrt(np.sum(g.cell_volumes * u_diff**2))/np.sqrt(np.sum(g.cell_volumes * u_ex**2)))
den = np.sqrt(np.sum((g.face_areas ** g.dim) * flux_ex**2))
num = np.sqrt(np.sum((g.face_areas ** g.dim) * flux_diff**2))
if den != 0:
flux_err.append(num/den)
else:
flux_err.append(num)
return u_err, flux_err, hs
def perturb(g, rate, dx):
rand = np.vstack((np.random.rand(g.dim, g.num_nodes), np.repeat(0., g.num_nodes)))
r1 = np.ravel(np.argwhere((g.nodes[0] > 1e-10) & (g.nodes[1] > 1e-10) & (g.nodes[1] < 1.0 - 1e-10) & (r*g.nodes[0]+s*g.nodes[1] < -1e-10)))
r2 = np.ravel(np.argwhere((g.nodes[0] < 1 - 1e-10) & (g.nodes[1] > 1e-10) & (g.nodes[1] < 1.0 - 1e-10) & (r*g.nodes[0]+s*g.nodes[1] > 1e-10)))
pert_nodes = np.concatenate((r1, r2))  # api: numpy.concatenate
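For reference, a minimal standalone sketch of the np.concatenate call this row completes (the index arrays below are hypothetical):
import numpy as np
r1 = np.array([0, 2, 5])                 # hypothetical node indices from one region
r2 = np.array([7, 9])                    # hypothetical node indices from the other region
pert_nodes = np.concatenate((r1, r2))    # -> array([0, 2, 5, 7, 9])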
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import sys
import random
# from contact_point_dataset_torch_multi_label import MyDataset
from hang_dataset import MyDataset
import os
import time
import argparse
from torch.utils.data import DataLoader
import torch
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
import datetime
random.seed(2)
torch.manual_seed(2)
np.random.seed(2)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
from data_helper import *
from coord_helper import *
from train_helper import *
from rotation_lib import *
import s1_model_multi_label as s1_model
import s2a_model as my_model
from s2_utils import *
def restore_model_s1(epoch, save_top_dir, sess):
ckpt_path = os.path.join(save_top_dir,str(epoch)+'model.ckpt')
variables = slim.get_variables_to_restore()
variables_to_restore = [v for v in variables if 's2a_' not in v.name and ('s1_' in v.name)]
# for v in variables_to_restore:
# print(v.name)
saver = tf.train.Saver(variables_to_restore)
print("restoring from %s" % ckpt_path)
saver.restore(sess, ckpt_path)
def train(args, train_loader, test_loader, writer, result_folder, file_name):
model_folder = os.path.join(result_folder, 'models')
can_write = not (writer is None)
# stage 1
pc_o_pl, pc_h_pl, z_pl, gt_transl_pl, gt_aa_pl, pose_mult_pl = s1_model.placeholder_inputs(args.batch_size, 4096, args)
pred_transl_tf, pred_aa_tf, end_points = s1_model.get_model(pc_o_pl, pc_h_pl, z_pl)
loss_transl_tf, loss_aa_tf, min_pose_idx_tf = s1_model.get_loss(pred_transl_tf, pred_aa_tf, gt_transl_pl, gt_aa_pl, pose_mult_pl, float(args.loss_transl_const), end_points)
loss_s1_tf = float(args.loss_transl_const) * loss_transl_tf + loss_aa_tf
# stage 2
pc_combined_pl, gt_cp_score_o_pl, gt_cp_score_h_pl, non_nan_mask_pl = my_model.placeholder_inputs(args.batch_size, 4096, args)
pred_cp_score_o_tf, pred_cp_score_h_tf, end_points = my_model.get_model(pc_combined_pl, 4096, no_softmax=args.no_softmax)
loss_o_tf, loss_h_tf = my_model.get_loss(pred_cp_score_o_tf, pred_cp_score_h_tf, gt_cp_score_o_pl, gt_cp_score_h_pl, end_points)
loss_tf = loss_o_tf + loss_h_tf
loss_tf = tf.boolean_mask(loss_tf, non_nan_mask_pl)
loss_tf = tf.reduce_mean(loss_tf)
print('loss tf', loss_tf)
train_op = tf.train.AdamOptimizer(learning_rate=args.learning_rate).minimize(loss_tf)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(init_op)
saver = tf.train.Saver(max_to_keep=1000)
loss_tracker = LossTracker()
loss_tracker_test = LossTracker()
epoch_init = 0
if not train_loader is None:
epoch_iter = len(train_loader)
if args.pretrain_s1:
pretrain_s1_folder = os.path.abspath(os.path.join(result_folder, '..', '..', args.pretrain_s1_exp_name, args.pretrain_s1_model_name, 'models'))
restore_model_s1(args.pretrain_s1_epoch, pretrain_s1_folder, sess)
if args.restore_model_epoch != -1:
epoch_init = args.restore_model_epoch
restore_model_folder = os.path.abspath(os.path.join(result_folder, '..', args.restore_model_name, 'models'))
restore_model_generic(epoch_init, restore_model_folder, saver, sess)
total_ct = 0
for epoch_i in range(args.max_epochs):
loss_tracker.reset()
for i, batch_dict in enumerate(train_loader):
if args.run_test:
break
total_ct += 1
log_it = ((total_ct % args.log_freq ) == 0) and can_write
pc_o = batch_dict['input1']
pc_h = batch_dict['input2']
gt_pose = batch_dict['output4']
n_pose = batch_dict['n_pose']
b_size = gt_pose.shape[0]
pc_o_idx = batch_dict['pc_o_idx']
pc_h_idx = batch_dict['pc_h_idx']
cp_map_o_dir = batch_dict['cp_map_o_dir']
cp_map_h_dir = batch_dict['cp_map_h_dir']
pose_mult = np.ones((b_size, gt_pose.shape[1]))
for ii in range(b_size):
pose_mult[ii][n_pose[ii]:] = np.inf
feed_dict_s1 = {
pc_o_pl: pc_o[:, :, :3],
pc_h_pl: pc_h[:, :, :3],
gt_transl_pl: gt_pose[:, :, :3],
gt_aa_pl: gt_pose[:, :, 3:],
z_pl: np.random.normal(size=(pc_o.shape[0],1,32)),
pose_mult_pl: pose_mult,
}
pred_transl, pred_aa, loss_transl_val, loss_aa_val, min_pose_idx = sess.run([
pred_transl_tf, pred_aa_tf, loss_transl_tf, loss_aa_tf, min_pose_idx_tf
], feed_dict=feed_dict_s1)
gt_min_pose = np.squeeze(np.take_along_axis(gt_pose, np.expand_dims(min_pose_idx[:, 1:], axis=1), axis=1), axis=1)
angle_diff = np.mean(angle_diff_batch(gt_min_pose[:, 3:], pred_aa, aa=True, degree=True))
# stage 2
pc_combined = create_pc_combined_batch(pc_o, pc_h, pred_transl, pred_aa, aa=True)
min_pose_idx_s2 = np.ones((b_size, 2), dtype=np.int32) * -1
min_pose_idx_s2 = min_pose_idx
gt_min_pose_s2 = np.squeeze(np.take_along_axis(gt_pose, np.expand_dims(min_pose_idx_s2[:, 1:], axis=1), axis=1), axis=1)
angle_diff_min_pose_s2 = np.mean(angle_diff_batch(gt_min_pose_s2[:, 3:], pred_aa, aa=True, degree=True))
# for ii in range(b_size):
# # pos = pred_transl
# # quat = pred_aa
# # print('bsize', b_size)
# # tmp1 = transform_pc_batch(pc_o, pos, quat, aa=True)[0]
# # tmp2 = transform_pc(pc_o[0], pos[0], quat[0], aa=True)
# # assert np.allclose(tmp1[:, :3], tmp2[:, :3]), np.max(np.abs(tmp1 - tmp2))
# # assert np.allclose(tmp1[:, 3:], tmp2[:, 3:])
# pc_combined_tmp = create_pc_combined(pc_o[ii], pc_h[ii], pred_transl[ii], pred_aa[ii], aa=True)
# # print(np.max(np.abs(pc_combined[ii] - pc_combined_tmp)))
gt_cp_score_o, gt_cp_score_h, non_nan_idx = create_gt_cp_map(pc_o_idx, pc_h_idx, cp_map_o_dir, cp_map_h_dir, min_pose_idx_s2)
non_nan_mask = np.zeros((b_size), dtype=np.bool)
non_nan_mask[non_nan_idx] = True
if len(non_nan_idx) == 0:
continue
feed_dict = {
pc_combined_pl: pc_combined,
gt_cp_score_o_pl: gt_cp_score_o,
gt_cp_score_h_pl: gt_cp_score_h,
non_nan_mask_pl: non_nan_mask
}
pred_cp_score_o, pred_cp_score_h, loss_o_val, loss_h_val, _ = sess.run([
pred_cp_score_o_tf, pred_cp_score_h_tf, loss_o_tf, loss_h_tf, train_op
], feed_dict=feed_dict)
# if np.sum(np.isnan(loss_h_val)) > 0:
# print('pred o', np.sum(np.isnan(pred_cp_score_o)))
# print('gt o', np.sum(np.isnan(gt_cp_score_o)))
# print('pred h', np.sum(np.isnan(pred_cp_score_h)))
# print('gt h', np.sum(np.isnan(gt_cp_score_h)))
# assert np.sum(np.isnan(loss_h_val)) == 0
loss_dict = {
'loss_o': np.mean(loss_o_val[non_nan_idx]),
'loss_o_dist': np.mean(np.sqrt(2 * loss_o_val[non_nan_idx])),
'loss_h': np.mean(loss_h_val[non_nan_idx]),
'loss_h_dist': np.mean(np.sqrt(2 * loss_h_val[non_nan_idx])),
'loss': np.mean((loss_o_val + loss_h_val)[non_nan_idx])
}
loss_dict_s1 = {
'loss_transl': np.mean(loss_transl_val),
'loss_transl_sqrt': np.sqrt(np.mean(loss_transl_val)),
'loss_aa': np.mean(loss_aa_val),
'loss_aa_sqrt': np.sqrt(np.mean(loss_aa_val)),
'angle_diff': angle_diff,
'angle_diff_min_pose_s2': angle_diff_min_pose_s2,
}
loss_tracker.add_dict(loss_dict)
if log_it:
write_tb(loss_dict, writer, 'train', total_ct)
write_tb(loss_dict_s1, writer, 'train_s1', total_ct)
# print(loss_dict_to_str(loss_dict_s1))
print('epoch {} iter {}/{} {}'.format(epoch_i, i, epoch_iter, loss_dict_to_str(loss_dict)))
if (total_ct % args.model_save_freq == 0) and not args.no_save:
save_model_generic(epoch_init + total_ct, model_folder, saver, sess)
loss_dict_epoch = loss_tracker.stat()
if can_write:
write_tb(loss_dict_epoch, writer, 'train_epoch', total_ct)
if (not args.no_eval) and (((epoch_i + 1) % args.eval_epoch_freq == 0) or args.run_test):
eval_folder_dir = os.path.join(result_folder, 'eval')
mkdir_if_not(eval_folder_dir)
loss_tracker_test.reset()
for i, batch_dict in enumerate(test_loader):
pc_o = batch_dict['input1']
pc_h = batch_dict['input2']
gt_pose = batch_dict['output4']
n_pose = batch_dict['n_pose']
b_size = gt_pose.shape[0]
pc_o_idx = batch_dict['pc_o_idx']
pc_h_idx = batch_dict['pc_h_idx']
cp_map_o_dir = batch_dict['cp_map_o_dir']
cp_map_h_dir = batch_dict['cp_map_h_dir']
pose_mult = np.ones((b_size, gt_pose.shape[1]))
for ii in range(b_size):
pose_mult[ii][n_pose[ii]:] = np.inf
feed_dict_s1 = {
pc_o_pl: pc_o[:, :, :3],
pc_h_pl: pc_h[:, :, :3],
gt_transl_pl: gt_pose[:, :, :3],
gt_aa_pl: gt_pose[:, :, 3:],
pose_mult_pl: pose_mult,
}
if i == 0:
out_dir = os.path.join(eval_folder_dir, '{}_eval_epoch_{}_ct_{}.json'.format(file_name, str(epoch_i + 1), total_ct))
eval_result_dict = {}
for ii in range(args.eval_sample_n):
z = np.random.normal(size=(pc_o.shape[0],1,args.z_dim))
feed_dict_s1[z_pl] = z
pred_transl, pred_aa, loss_transl_val, loss_aa_val, min_pose_idx = sess.run([
pred_transl_tf, pred_aa_tf, loss_transl_tf, loss_aa_tf, min_pose_idx_tf], feed_dict=feed_dict_s1)
gt_min_pose = np.squeeze(np.take_along_axis(gt_pose, np.expand_dims(min_pose_idx[:, 1:], axis=1), axis=1), axis=1)
# angle_diff = np.mean(angle_diff_batch(gt_min_pose[:, 3:], pred_aa, aa=True, degree=True))
min_pose_idx_s2 = np.ones((b_size, 2), dtype=np.int32)  # api: numpy.ones
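A brief illustrative sketch of the np.ones call above, which builds an integer placeholder for per-sample pose indices (the batch size is assumed):
import numpy as np
b_size = 4                                    # assumed batch size
idx = np.ones((b_size, 2), dtype=np.int32)    # int32 ones of shape (4, 2)
idx *= -1                                     # common "not yet assigned" sentinel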
import numpy as np
from sklearn import preprocessing, neighbors, model_selection, svm
import pandas as pd
import pickle
import serial
import re
import random
from sklearn.metrics import confusion_matrix,plot_confusion_matrix,plot_precision_recall_curve
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
df_A = pd.read_table('sign/a.txt',header=None,sep=',')
A = np.array(df_A)
df_B = pd.read_table('sign/b.txt',header=None,sep=',')
B = np.array(df_B)
df_C = pd.read_table('sign/c.txt',header=None,sep=',')
C = np.array(df_C)
df_D = pd.read_table('sign/d.txt',header=None,sep=',')
D = np.array(df_D)
df_E = pd.read_table('sign/e.txt',header=None,sep=',')
E = np.array(df_E)
df_F = pd.read_table('sign/f.txt',header=None,sep=',')
F = np.array(df_F)
df_H = pd.read_table('sign/h.txt',header=None,sep=',')
df_G= pd.read_table('sign/g.txt',header=None,sep=',')
G = np.array(df_G)
H = np.array(df_H)
df_I = pd.read_table('sign/i.txt',header=None,sep=',')
I = np.array(df_I)
df_J = pd.read_table('sign/j.txt',header=None,sep=',')
J = np.array(df_J)
df_K = pd.read_table('sign/k.txt',header=None,sep=',')
K = np.array(df_K)
df_L = pd.read_table('sign/l.txt',header=None,sep=',')
L = np.array(df_L)
df_M = pd.read_table('sign/m.txt',header=None,sep=',')
M = np.array(df_M)
df_N = pd.read_table('sign/n.txt',header=None,sep=',')
N = np.array(df_N)
df_P= pd.read_table('sign/p.txt',header=None,sep=',')
P = np.array(df_P)
df_V = pd.read_table('sign/v.txt',header=None,sep=',')
V = np.array(df_V)
df_O = pd.read_table('sign/o.txt',header=None,sep=',')
O = np.array(df_O)
df_Q = pd.read_table('sign/q.txt',header=None,sep=',')
Q = np.array(df_Q)
df_R = pd.read_table('sign/r.txt',header=None,sep=',')
R = np.array(df_R)
df_S = pd.read_table('sign/s.txt',header=None,sep=',')
S = np.array(df_S)
df_W = pd.read_table('sign/w.txt',header=None,sep=',')
W = np.array(df_W)
df_Z = pd.read_table('sign/z.txt',header=None,sep=',')
Z = np.array(df_Z)
df_T = pd.read_table('sign/t.txt',header=None,sep=',')
T = np.array(df_T)
df_U = pd.read_table('sign/u.txt',header=None,sep=',')
U = np.array(df_U)  # api: numpy.array
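As an aside, np.array applied to a pandas DataFrame copies its values into an ndarray; a minimal sketch with made-up data:
import numpy as np
import pandas as pd
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]])
arr = np.array(df)          # ndarray of shape (2, 2) copied from the DataFrame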
"""
Defines the AbstractGrid class and child classes
"""
__all__ = [
"AbstractGrid",
"CartesianGrid",
"NonUniformCartesianGrid",
]
import astropy.units as u
import numpy as np
import pandas as pd
import scipy.interpolate as interp
import warnings
import xarray as xr
from abc import ABC
from cached_property import cached_property
from collections import namedtuple
from scipy.spatial import distance
from typing import Union
def _detect_is_uniform_grid(pts0, pts1, pts2, tol=1e-6):
r"""
Determine whether a grid is uniform (uniformly spaced) by computing the
variance of the grid gradients.
"""
variance = np.zeros([3])
dx = np.gradient(pts0, axis=0)
variance[0] = np.std(dx) / np.mean(dx)
dy = np.gradient(pts1, axis=1)
variance[1] = np.std(dy) / np.mean(dy)
dz = np.gradient(pts2, axis=2)
variance[2] = np.std(dz) / np.mean(dz)
return np.allclose(variance, 0.0, atol=tol)
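# Illustrative usage (not part of the original module): for a uniformly spaced
# meshgrid the gradients are constant, so the helper returns True.
#   ax = np.linspace(0, 1, 5)
#   pts0, pts1, pts2 = np.meshgrid(ax, ax, ax, indexing="ij")
#   _detect_is_uniform_grid(pts0, pts1, pts2)   # -> True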
class AbstractGrid(ABC):
r"""
Abstract grid represents a 3D grid of positions. The grid is stored as an
np.ndarray, while the units associated with each dimension are stored
separately.
"""
def __init__(self, *seeds, num=100, **kwargs):
# Initialize some variables
self._interpolator = None
self._is_uniform = None
# If three inputs are given, assume it's a user-provided grid
if len(seeds) == 3:
self._load_grid(seeds[0], seeds[1], seeds[2])
# If two inputs are given, assume they are start and stop arrays
# to create a new grid
elif len(seeds) == 2:
self._make_grid(seeds[0], seeds[1], num=num, **kwargs)
else:
raise TypeError(
f"{self.__class__.__name__} takes 2 or 3 "
f"positional arguments but {len(seeds)} were given"
)
def _validate(self):
r"""
Checks to make sure that the grid parameters are
consistent with the coordinate system and units selected
"""
return True
# A named tuple describing a key recognized by PlasmaPy to correspond to
# a particular physical quantity
RecognizedQuantity = namedtuple(
"RecognizedQuantities", ["key", "description", "unit"]
)
# These standard keys are used to refer to certain
# physical quantities. This dictionary also provides the expected unit.
_recognized_quantities_list = [
RecognizedQuantity("x", "x spatial position", u.m),
RecognizedQuantity("y", "y spatial position", u.m),
RecognizedQuantity("z", "z spatial position", u.m),
RecognizedQuantity("rho", "Mass density", u.kg / u.m ** 3),
RecognizedQuantity("E_x", "Electric field (x component)", u.V / u.m),
RecognizedQuantity("E_y", "Electric field (y component)", u.V / u.m),
RecognizedQuantity("E_z", "Electric field (z component)", u.V / u.m),
RecognizedQuantity("B_x", "Magnetic field (x component)", u.T),
RecognizedQuantity("B_y", "Magnetic field (y component)", u.T),
RecognizedQuantity("B_z", "Magnetic field (z component)", u.T),
RecognizedQuantity("phi", "Electric Scalar Potential", u.V),
]
# Create a dict of recognized quantities for fast access by key
_recognized_quantities = {}
for _rq in _recognized_quantities_list:
_recognized_quantities[_rq.key] = _rq
@property
def recognized_quantities(self):
r"""
A dictionary of standard key names representing particular physical
quantities. Using these keys allows these
quantities to be recognized automatically by other PlasmaPy functions.
Each entry contains a tuple containing a description and the unit
associated with the quantity.
"""
return self._recognized_quantities
def require_quantities(self, req_quantities, replace_with_zeros=False):
r"""
Checks to make sure that a list of required quantities are present.
Optionally, can create missing quantities and fill them with
an array of zeros.
Parameters
----------
req_quantities : list of str
A list of quantity keys that are required
replace_with_zeros : boolean, optional
If true, missing quantities will be replaced with an array
of zeros. If false, an exception will be raised instead.
The default is False.
Raises
------
KeyError
If `replace_with_zeros` is False and a required quantity is missing,
raises a KeyError.
KeyError
If `replace_with_zeros` is True but the quantity is not in the
list of recognized quantities, raises a KeyError. This is because
in this case the units for the quantity are unknown, so an array
of zeros cannot be constructed.
Returns
-------
None.
"""
for rq in req_quantities:
# Error check that grid contains E and B variables required
if rq not in self.quantities:
# If missing, warn user and then replace with an array of zeros
if replace_with_zeros:
warnings.warn(
f"{rq} is not specified for the provided grid."
"This quantity will be assumed to be zero.",
RuntimeWarning,
)
if rq in self.recognized_quantities.keys():
unit = self.recognized_quantities[rq].unit
else:
raise KeyError(
f"{rq} is not a recognized key, and "
"so cannot be automatically assumed "
"to be zero."
)
arg = {rq: np.zeros(self.shape) * unit}
self.add_quantities(**arg)
else:
raise KeyError(
f"{rq} is not specified for the provided "
"grid but is required."
)
# *************************************************************************
# Fundamental properties of the grid
# *************************************************************************
def __repr__(self):
line_sep = "-----------------------------\n"
shape = list(self.shape)
coords = list(self.ds.coords.keys())
ax_units = self.units
ax_dtypes = [self.ds[i].dtype for i in coords]
coord_lbls = [str(i) + ": " + str(j) for i, j in zip(coords, shape)]
s = f"*** Grid Summary ***\n{type(self)}\n"
s += f"Dimensions: ({', '.join(coord_lbls)})\n"
if self.is_uniform:
s += (
"Uniformly Spaced: (dax0, dax1, dax2) = "
f"({self.dax0:.3f}, {self.dax1:.3f}, {self.dax2:.3f})\n"
)
else:
s += "Non-Uniform Spacing\n"
s += line_sep + "Coordinates:\n"
for i in range(len(self.shape)):
s += f"\t-> {coords[i]} ({ax_units[i]}) {ax_dtypes[i]} ({shape[i]},)\n"
keys = self.quantities
rkeys = [k for k in keys if k in list(self.recognized_quantities.keys())]
nrkeys = [k for k in keys if k not in list(self.recognized_quantities.keys())]
s += line_sep + "Recognized Quantities:\n"
if len(rkeys) == 0:
s += "-None-\n"
else:
for key in rkeys:
unit = self.ds[key].attrs["unit"]
dtype = self.ds[key].dtype
shape = self.ds[key].shape
s += f"\t-> {key} ({unit}) {dtype} {shape} \n"
s += line_sep + "Unrecognized Quantities:\n"
if len(nrkeys) == 0:
s += "-None-\n"
else:
for key in nrkeys:
unit = self.ds[key].attrs["unit"]
dtype = self.ds[key].dtype
shape = self.ds[key].shape
s += f"\t-> {key} ({unit}) {dtype} {shape} \n"
return s
def __getitem__(self, key):
"""
Given a key, return the corresponding array as an `astropy.Quantity`
Returning with copy=False means that the array returned is a direct
reference to the underlying DataArray, so changes made will be reflected
in the underlying DataArray.
"""
return u.Quantity(self.ds[key].data, self.ds[key].attrs["unit"], copy=False)
@property
def is_uniform(self) -> bool:
"""
A boolean value reflecting whether or not the grid points are
uniformly spaced.
"""
if self._is_uniform is None: # coverage: ignore
raise ValueError(
"The `is_uniform` attribute is not accessible "
"before a grid has been loaded."
)
return self._is_uniform
@property
def shape(self):
r""" Shape of the grid"""
if self.is_uniform:
return (self.ax0.size, self.ax1.size, self.ax2.size)
else:
return self.ds.coords["ax0"].shape
@property
def grids(self):
r"""
Three grids of vertex positions (in each coordinate), each having
shape (N0, N1, N2)
"""
if self.is_uniform:
pts0, pts1, pts2 = np.meshgrid(self.ax0, self.ax1, self.ax2, indexing="ij")
_grids = (pts0, pts1, pts2)
else:
_grids = (
self.ds["ax0"].data * self.unit0,
self.ds["ax1"].data * self.unit1,
self.ds["ax2"].data * self.unit2,
)
return _grids
@property
def grid(self):
r"""
A single grid of vertex positions of shape (N0, N1, N2, 3)
Only defined for grids for which the `unit` property is defined.
"""
pts0, pts1, pts2 = self.grids
if self.is_uniform:
n0, n1, n2 = pts0.shape
grid = np.zeros([n0, n1, n2, 3])  # api: numpy.zeros
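An illustrative sketch of assembling an (N0, N1, N2, 3) position array from three meshgrids, which is what the completion above starts to do (the shapes are made up):
import numpy as np
pts0, pts1, pts2 = np.meshgrid(np.arange(2), np.arange(3), np.arange(4), indexing="ij")
grid = np.zeros(list(pts0.shape) + [3])
grid[..., 0], grid[..., 1], grid[..., 2] = pts0, pts1, pts2   # shape (2, 3, 4, 3)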
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit and integration tests for creating the :mod:`pennylane` :attr:`QNode.circuit.hash` attribute.
"""
import pytest
import numpy as np
import pennylane as qml
from pennylane.operation import Tensor
from pennylane.circuit_graph import CircuitGraph
from pennylane.qnodes import BaseQNode
from pennylane.variable import Variable
from pennylane.wires import Wires
pytestmark = pytest.mark.usefixtures("tape_mode")
class TestCircuitGraphHash:
"""Test the creation of a hash on a CircuitGraph"""
numeric_queues = [
([
qml.RX(0.3, wires=[0])
],
[],
'RX!0.3![0]|||'
),
([
qml.RX(0.3, wires=[0]),
qml.RX(0.4, wires=[1]),
qml.RX(0.5, wires=[2]),
],
[],
'RX!0.3![0]RX!0.4![1]RX!0.5![2]|||'
)
]
@pytest.mark.parametrize("queue, observable_queue, expected_string", numeric_queues)
def test_serialize_numeric_arguments(self, queue, observable_queue, expected_string):
"""Tests that the same hash is created for two circuitgraphs that have numeric arguments."""
circuit_graph_1 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1, 2]))
circuit_graph_2 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1, 2]))
assert circuit_graph_1.serialize() == circuit_graph_2.serialize()
assert expected_string == circuit_graph_1.serialize()
variable = Variable(1)
symbolic_queue = [
([qml.RX(variable, wires=[0])],
[],
'RX!V1![0]|||'
),
]
@pytest.mark.parametrize("queue, observable_queue, expected_string", symbolic_queue)
def test_serialize_symbolic_argument(self, queue, observable_queue, expected_string):
"""Tests that the same hash is created for two circuitgraphs that have symbolic arguments."""
circuit_graph_1 = CircuitGraph(queue + observable_queue, {}, Wires([0]))
circuit_graph_2 = CircuitGraph(queue + observable_queue, {}, Wires([0]))
assert circuit_graph_1.serialize() == circuit_graph_2.serialize()
assert expected_string == circuit_graph_1.serialize()
variable = Variable(1)
symbolic_queue = [
([
qml.RX(variable, wires=[0]),
qml.RX(0.3, wires=[1]),
qml.RX(variable, wires=[2])
],
[],
'RX!V1![0]RX!0.3![1]RX!V1![2]|||'
),
]
@pytest.mark.parametrize("queue, observable_queue, expected_string", symbolic_queue)
def test_serialize_numeric_and_symbolic_argument(self, queue, observable_queue, expected_string):
"""Tests that the same hash is created for two circuitgraphs that have both numeric and symbolic arguments."""
circuit_graph_1 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1, 2]))
circuit_graph_2 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1, 2]))
assert circuit_graph_1.serialize() == circuit_graph_2.serialize()
assert expected_string == circuit_graph_1.serialize()
variable = Variable(1)
many_symbolic_queue = [
([
qml.RX(variable, wires=[0]),
qml.RX(variable, wires=[1])
],
[],
'RX!V1![0]' +
'RX!V1![1]' +
'|||'
),
]
@pytest.mark.parametrize("queue, observable_queue, expected_string", many_symbolic_queue)
def test_serialize_symbolic_argument_multiple_times(self, queue, observable_queue, expected_string):
"""Tests that the same hash is created for two circuitgraphs that have the same symbolic argument
used multiple times."""
circuit_graph_1 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1]))
circuit_graph_2 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1]))
assert circuit_graph_1.serialize() == circuit_graph_2.serialize()
assert expected_string == circuit_graph_1.serialize()
variable1 = Variable(1)
variable2 = Variable(2)
multiple_symbolic_queue = [
([
qml.RX(variable1, wires=[0]),
qml.RX(variable2, wires=[1])
],
[],
'RX!V1![0]' +
'RX!V2![1]' +
'|||'
),
]
@pytest.mark.parametrize("queue, observable_queue, expected_string", multiple_symbolic_queue)
def test_serialize_multiple_symbolic_arguments(self, queue, observable_queue, expected_string):
"""Tests that the same hash is created for two circuitgraphs that have multiple symbolic arguments."""
circuit_graph_1 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1]))
circuit_graph_2 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1]))
assert circuit_graph_1.serialize() == circuit_graph_2.serialize()
assert expected_string == circuit_graph_1.serialize()
observable1 = qml.PauliZ(0)
observable1.return_type = not None
observable2 = qml.Hermitian(np.array([[1, 0],[0, -1]]), wires=[0])
observable2.return_type = not None
observable3 = Tensor(qml.PauliZ(0) @ qml.PauliZ(1))
observable3.return_type = not None
numeric_observable_queue = [
([],
[observable1],
'|||PauliZ[0]'
),
(
[],
[observable2],
'|||Hermitian![[ 1 0]\n [ 0 -1]]![0]'
),
(
[],
[observable3],
'|||[\'PauliZ\', \'PauliZ\'][0, 1]'
)
]
@pytest.mark.parametrize("queue, observable_queue, expected_string", numeric_observable_queue)
def test_serialize_numeric_arguments_observables(self, queue, observable_queue, expected_string):
"""Tests that the same hash is created for two circuitgraphs that have identical queues and empty variable_deps."""
circuit_graph_1 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1]))
circuit_graph_2 = CircuitGraph(queue + observable_queue, {}, Wires([0, 1]))
assert circuit_graph_1.serialize() == circuit_graph_2.serialize()
assert expected_string == circuit_graph_1.serialize()
class TestQNodeCircuitHashIntegration:
"""Test for the circuit hash that is being created for a QNode during evaluation (inside of _construct)"""
def test_evaluate_circuit_hash_numeric(self):
"""Tests that the circuit hash of identical circuits containing only numeric parameters are equal"""
dev = qml.device("default.qubit", wires=2)
a = 0.3
b = 0.2
def circuit1():
qml.RX(a, wires=[0])
qml.RY(b, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([], {})
circuit_hash_1 = node1.circuit.hash
def circuit2():
qml.RX(a, wires=[0])
qml.RY(b, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 == circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_symbolic(self, x, y):
"""Tests that the circuit hash of identical circuits containing only symbolic parameters are equal"""
dev = qml.device("default.qubit", wires=2)
def circuit1(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 == circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_numeric_and_symbolic(self, x, y):
"""Tests that the circuit hash of identical circuits containing numeric and symbolic parameters are equal"""
dev = qml.device("default.qubit", wires=3)
def circuit1(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 == circuit_hash_2
@pytest.mark.parametrize(
"a,b",
zip(np.linspace(0.1, 2 * np.pi, 3), np.linspace(0, 2 * np.pi, 3)),
)
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 0, 3), np.linspace(-2 * np.pi, 0, 3)),
)
def test_evaluate_circuit_hash_symbolic_assigned_arguments_do_not_matter(self, a, b, x, y):
"""Tests that the circuit hashes of identical circuits where different values are assigned to symbolic parameters are equal"""
dev = qml.device("default.qubit", wires=2)
def circuit1(a, b):
qml.RX(a, wires=[0])
qml.RY(b, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([a, b], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 == circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_numeric_and_symbolic_tensor_return(self, x, y):
"""Tests that the circuit hashes of identical circuits having a tensor product in the return
statement are equal"""
dev = qml.device("default.qubit", wires=3)
def circuit1(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 == circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_same_operation_has_numeric_and_symbolic(self, x, y):
"""Tests that the circuit hashes of identical circuits where one operation has both numeric
and symbolic arguments are equal"""
dev = qml.device("default.qubit", wires=3)
def circuit1(x, y):
qml.Rot(x, y, 0.3, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.Rot(x, y, 0.3, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = dev.circuit_hash
assert circuit_hash_1 == circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_numeric_and_symbolic_return_type_does_not_matter(self, x, y):
"""Tests that the circuit hashes of identical circuits only differing on their return types are equal"""
dev = qml.device("default.qubit", wires=3)
def circuit1(x, y):
qml.Rot(x, y, 0.3, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.Rot(x, y, 0.3, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.var(qml.PauliZ(0) @ qml.PauliX(1))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
def circuit3(x, y):
qml.Rot(x, y, 0.3, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliZ(0) @ qml.PauliX(1))
node3 = BaseQNode(circuit3, dev)
node3.evaluate([x, y], {})
circuit_hash_3 = node3.circuit.hash
assert circuit_hash_1 == circuit_hash_2 == circuit_hash_3
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_hermitian(self, x, y):
"""Tests that the circuit hashes of identical circuits containing a Hermitian observable are equal"""
dev = qml.device("default.qubit", wires=3)
matrix = np.array([[1, 0], [0, 1]])
def circuit1(x, y):
qml.Rot(x, y, 0.3, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.Hermitian(matrix, wires=[0]) @ qml.PauliX(1))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.Rot(x, y, 0.3, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.Hermitian(matrix, wires=[0]) @ qml.PauliX(1))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 == circuit_hash_2
class TestQNodeCircuitHashDifferentHashIntegration:
"""Tests for checking that different circuit graph hashes are being created for different circuits in a QNode during evaluation (inside of _construct)"""
def test_evaluate_circuit_hash_numeric_different(self):
"""Tests that the circuit hashes of identical circuits except for one numeric value are different"""
dev = qml.device("default.qubit", wires=2)
a = 0.3
b = 0.2
def circuit1():
qml.RX(a, wires=[0])
qml.RY(b, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([], {})
circuit_hash_1 = node1.circuit.hash
c = 0.6
def circuit2():
qml.RX(c, wires=[0])
qml.RY(b, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 != circuit_hash_2
def test_evaluate_circuit_hash_numeric_different_operation(self):
"""Tests that the circuit hashes of identical circuits except for one of the operations are different"""
dev = qml.device("default.qubit", wires=2)
a = 0.3
def circuit1():
qml.RX(a, wires=[0])
return qml.expval(qml.PauliZ(0))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([], {})
circuit_hash_1 = node1.circuit.hash
def circuit2():
qml.RY(a, wires=[0])
return qml.expval(qml.PauliZ(0))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 != circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_numeric_and_symbolic_operation_differs(self, x, y):
"""Tests that the circuit hashes of identical circuits that have numeric and symbolic arguments
except for one of the operations are different"""
dev = qml.device("default.qubit", wires=3)
def circuit1(x, y):
qml.RX(x, wires=[0])
qml.RZ(y, wires=[1]) # <-------------------------------------- RZ
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1]) # <-------------------------------------- RY
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 != circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
)
def test_evaluate_circuit_hash_different_return_observable_vs_tensor(self, x, y):
"""Tests that the circuit hashes of identical circuits except for the return statement are different"""
dev = qml.device("default.qubit", wires=3)
def circuit1(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)) # <------------- qml.PauliZ(0)
node1 = BaseQNode(circuit1, dev)
node1.evaluate([x, y], {})
circuit_hash_1 = node1.circuit.hash
def circuit2(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.RZ(0.3, wires=[2])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) # <------------- qml.PauliZ(0) @ qml.PauliX(1)
node2 = BaseQNode(circuit2, dev)
node2.evaluate([x, y], {})
circuit_hash_2 = node2.circuit.hash
assert circuit_hash_1 != circuit_hash_2
@pytest.mark.parametrize(
"x,y",
zip(
np.linspace(-2 * np.pi, 2 * np.pi, 7)  # api: numpy.linspace
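For context, a standalone sketch of the np.linspace call used in the parametrization above:
import numpy as np
xs = np.linspace(-2 * np.pi, 2 * np.pi, 7)   # 7 evenly spaced points, endpoints included
ys = xs ** 2 / 11                            # paired values, as in the test parametrization
pairs = list(zip(xs, ys))                    # 7 (x, y) test cases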
import json
from typing import Any, Text, Dict, List
from bert_serving.client import BertClient
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import numpy as np
def encode_standard_question():
bc = BertClient()
data = json.load(open("./data/nlu/qa.json", "r", encoding="utf-8"))
standard_questions = [each['q'] for each in data]
print("Standard question size", len(standard_questions))
print("Start to calculate encoder....")
standard_questions_encoder = bc.encode(standard_questions)
np.save("./data/standard_questions", standard_questions_encoder)
standard_questions_encoder_len = np.sqrt(
np.sum(standard_questions_encoder * standard_questions_encoder, axis=1)  # api: numpy.sum
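A minimal sketch of the row-wise L2 norm computed above (the encoding matrix is random stand-in data):
import numpy as np
enc = np.random.rand(5, 768)                     # stand-in for BERT sentence encodings
lengths = np.sqrt(np.sum(enc * enc, axis=1))     # one norm per row, shape (5,)
# equivalently: np.linalg.norm(enc, axis=1)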
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module provides the :class:`ImageComplexData` of the :class:`Plot`.
"""
from __future__ import absolute_import
__authors__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__date__ = "14/06/2018"
import logging
import numpy
from silx.third_party import enum
from ...colors import Colormap
from .core import ColormapMixIn, ItemChangedType
from .image import ImageBase
_logger = logging.getLogger(__name__)
# Complex colormap functions
def _phase2rgb(colormap, data):
"""Creates RGBA image with colour-coded phase.
:param Colormap colormap: The colormap to use
:param numpy.ndarray data: The data to convert
:return: Array of RGBA colors
:rtype: numpy.ndarray
"""
if data.size == 0:
return numpy.zeros((0, 0, 4), dtype=numpy.uint8)
phase = numpy.angle(data)
return colormap.applyToData(phase)
def _complex2rgbalog(phaseColormap, data, amin=0., dlogs=2, smax=None):
"""Returns RGBA colors: colour-coded phases and log10(amplitude) in alpha.
:param Colormap phaseColormap: Colormap to use for the phase
:param numpy.ndarray data: the complex data array to convert to RGBA
:param float amin: the minimum value for the alpha channel
:param float dlogs: amplitude range displayed, in log10 units
:param float smax:
if specified, all values above max will be displayed with an alpha=1
"""
if data.size == 0:
return numpy.zeros((0, 0, 4), dtype=numpy.uint8)
rgba = _phase2rgb(phaseColormap, data)
sabs = numpy.absolute(data)
if smax is not None:
sabs[sabs > smax] = smax
a = numpy.log10(sabs + 1e-20)
a -= a.max() - dlogs # display dlogs orders of magnitude
rgba[..., 3] = 255 * (amin + a / dlogs * (1 - amin) * (a > 0))
return rgba
def _complex2rgbalin(phaseColormap, data, gamma=1.0, smax=None):
"""Returns RGBA colors: colour-coded phase and linear amplitude in alpha.
:param Colormap phaseColormap: Colormap to use for the phase
:param numpy.ndarray data:
:param float gamma: Optional exponent gamma applied to the amplitude
:param float smax:
"""
if data.size == 0:
return numpy.zeros((0, 0, 4), dtype=numpy.uint8)
rgba = _phase2rgb(phaseColormap, data)
a = numpy.absolute(data)
if smax is not None:
a[a > smax] = smax
a /= a.max()
rgba[..., 3] = 255 * a**gamma
return rgba
class ImageComplexData(ImageBase, ColormapMixIn):
"""Specific plot item to force colormap when using complex colormap.
This is returning the specific colormap when displaying
colored phase + amplitude.
"""
class Mode(enum.Enum):
"""Identify available display mode for complex"""
ABSOLUTE = 'absolute'
PHASE = 'phase'
REAL = 'real'
IMAGINARY = 'imaginary'
AMPLITUDE_PHASE = 'amplitude_phase'
LOG10_AMPLITUDE_PHASE = 'log10_amplitude_phase'
SQUARE_AMPLITUDE = 'square_amplitude'
def __init__(self):
ImageBase.__init__(self)
ColormapMixIn.__init__(self)
self._data = numpy.zeros((0, 0), dtype=numpy.complex64)
self._dataByModesCache = {}
self._mode = self.Mode.ABSOLUTE
self._amplitudeRangeInfo = None, 2
# Use default from ColormapMixIn
colormap = super(ImageComplexData, self).getColormap()
phaseColormap = Colormap(
name='hsv',
vmin=-numpy.pi,
vmax=numpy.pi)
phaseColormap.setEditable(False)
self._colormaps = { # Default colormaps for all modes
self.Mode.ABSOLUTE: colormap,
self.Mode.PHASE: phaseColormap,
self.Mode.REAL: colormap,
self.Mode.IMAGINARY: colormap,
self.Mode.AMPLITUDE_PHASE: phaseColormap,
self.Mode.LOG10_AMPLITUDE_PHASE: phaseColormap,
self.Mode.SQUARE_AMPLITUDE: colormap,
}
def _addBackendRenderer(self, backend):
"""Update backend renderer"""
plot = self.getPlot()
assert plot is not None
if not self._isPlotLinear(plot):
# Do not render with non linear scales
return None
mode = self.getVisualizationMode()
if mode in (self.Mode.AMPLITUDE_PHASE,
self.Mode.LOG10_AMPLITUDE_PHASE):
# For those modes, compute RGBA image here
colormap = None
data = self.getRgbaImageData(copy=False)
else:
colormap = self.getColormap()
data = self.getData(copy=False)
if data.size == 0:
return None # No data to display
return backend.addImage(data,
legend=self.getLegend(),
origin=self.getOrigin(),
scale=self.getScale(),
z=self.getZValue(),
selectable=self.isSelectable(),
draggable=self.isDraggable(),
colormap=colormap,
alpha=self.getAlpha())
def setVisualizationMode(self, mode):
"""Set the visualization mode to use.
:param Mode mode:
"""
assert isinstance(mode, self.Mode)
assert mode in self._colormaps
if mode != self._mode:
self._mode = mode
self._updated(ItemChangedType.VISUALIZATION_MODE)
# Send data updated as value returned by getData has changed
self._updated(ItemChangedType.DATA)
# Update ColormapMixIn colormap
colormap = self._colormaps[self._mode]
if colormap is not super(ImageComplexData, self).getColormap():
super(ImageComplexData, self).setColormap(colormap)
def getVisualizationMode(self):
"""Returns the visualization mode in use.
:rtype: Mode
"""
return self._mode
def _setAmplitudeRangeInfo(self, max_=None, delta=2):
"""Set the amplitude range to display for 'log10_amplitude_phase' mode.
:param max_: Max of the amplitude range.
If None it autoscales to data max.
:param float delta: Delta range in log10 to display
"""
self._amplitudeRangeInfo = max_, float(delta)
self._updated(ItemChangedType.VISUALIZATION_MODE)
def _getAmplitudeRangeInfo(self):
"""Returns the amplitude range to use for 'log10_amplitude_phase' mode.
:return: (max, delta), if max is None, then it autoscales to data max
:rtype: 2-tuple"""
return self._amplitudeRangeInfo
def setColormap(self, colormap, mode=None):
"""Set the colormap for this specific mode.
:param ~silx.gui.colors.Colormap colormap: The colormap
:param Mode mode:
If specified, set the colormap of this specific mode.
Default: current mode.
"""
if mode is None:
mode = self.getVisualizationMode()
self._colormaps[mode] = colormap
if mode is self.getVisualizationMode():
super(ImageComplexData, self).setColormap(colormap)
else:
self._updated(ItemChangedType.COLORMAP)
def getColormap(self, mode=None):
"""Get the colormap for the (current) mode.
:param Mode mode:
If specified, get the colormap of this specific mode.
Default: current mode.
:rtype: ~silx.gui.colors.Colormap
"""
if mode is None:
mode = self.getVisualizationMode()
return self._colormaps[mode]
def setData(self, data, copy=True):
""""Set the image complex data
:param numpy.ndarray data: 2D array of complex with 2 dimensions (h, w)
:param bool copy: True (Default) to get a copy,
False to use internal representation (do not modify!)
"""
data = numpy.array(data, copy=copy)
assert data.ndim == 2
if not numpy.issubdtype(data.dtype, numpy.complexfloating):
_logger.warning(
'Image is not complex, converting it to complex to plot it.')
data = numpy.array(data, dtype=numpy.complex64)
self._data = data
self._dataByModesCache = {}
# TODO hackish data range implementation
if self.isVisible():
plot = self.getPlot()
if plot is not None:
plot._invalidateDataRange()
self._updated(ItemChangedType.DATA)
def getComplexData(self, copy=True):
"""Returns the image complex data
:param bool copy: True (Default) to get a copy,
False to use internal representation (do not modify!)
:rtype: numpy.ndarray of complex
"""
return numpy.array(self._data, copy=copy)
def getData(self, copy=True, mode=None):
"""Returns the image data corresponding to (current) mode.
The returned data is always floats, to get the complex data, use
:meth:`getComplexData`.
:param bool copy: True (Default) to get a copy,
False to use internal representation (do not modify!)
:param Mode mode:
If specified, get data corresponding to the mode.
Default: Current mode.
:rtype: numpy.ndarray of float
"""
if mode is None:
mode = self.getVisualizationMode()
if mode not in self._dataByModesCache:
# Compute data for mode and store it in cache
complexData = self.getComplexData(copy=False)
if mode is self.Mode.PHASE:
data = numpy.angle(complexData)
elif mode is self.Mode.REAL:
data = numpy.real(complexData)
elif mode is self.Mode.IMAGINARY:
data = numpy.imag(complexData)
elif mode in (self.Mode.ABSOLUTE,
self.Mode.LOG10_AMPLITUDE_PHASE,
self.Mode.AMPLITUDE_PHASE):
data = numpy.absolute(complexData)
elif mode is self.Mode.SQUARE_AMPLITUDE:
data = numpy.absolute(complexData) ** 2
else:
_logger.error(
'Unsupported conversion mode: %s, fallback to absolute',
str(mode))
data = numpy.absolute(complexData)  # api: numpy.absolute
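As a quick reference, numpy.absolute on complex input returns the modulus sqrt(re^2 + im^2); a tiny sketch:
import numpy
z = numpy.array([3 + 4j, 1j], dtype=numpy.complex64)
numpy.absolute(z)          # -> array([5., 1.], dtype=float32)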
import numpy
import cv2
from math import isclose
class PanelDetector:
# Constructor
def __init__(self, index=0, calibrations=10):
self.__debug = True
# initialize video device
self.__capture = cv2.VideoCapture(index)
# define number of calibration
self.__calibrations = calibrations
# epsilon for center of buttons comparison in pixels
self.__distance = 10
# epsilon for rectangle detection in percent
self.__curvy = 0.04
# epsilon for sector detection in pixels
self.__sectorepsilon = 100
# is the sectors found or not
self.__sectors_found = False
# is the largest rectangle found or not
self.__roi_found = False
# largest contour
self.__roi_contour = None
# rectangle of interest where panel will be found
self.__roi = (0, 0, 0, 0)
# homography matrix
self.__homography = numpy.empty(shape=(3, 3))
# corners of the panel
self.__corners = numpy.empty(shape=(4, 2))
# number of the panel's buttons
self.__nbuttons = 12
# centers of the panel's buttons
self.__centroids = numpy.array([], dtype=numpy.uint8)
# corners of the top view of panel
self.__top_view_centroids = numpy.array([], dtype=numpy.uint8)
# kernel matrices for morphological transformation
self.__kernel_square = numpy.ones((5, 5), numpy.uint8)  # api: numpy.ones
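An illustrative sketch of how an all-ones uint8 kernel like the one above is typically used for morphological filtering (the input image here is synthetic):
import cv2
import numpy
kernel = numpy.ones((5, 5), numpy.uint8)
img = numpy.zeros((64, 64), dtype=numpy.uint8)
opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)   # erosion followed by dilation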
# external imports
import unittest
import numpy as np
# internal inputs
from pympc.dynamics.discrete_time_systems import LinearSystem, AffineSystem, PieceWiseAffineSystem, mcais
from pympc.geometry.polyhedron import Polyhedron
class TestLinearSystem(unittest.TestCase):
def test_intialization(self):
np.random.seed(1)
# wrong initializations
A = np.ones((3,3))
B = np.ones((4,1))
self.assertRaises(ValueError, LinearSystem, A, B)
A = np.ones((4,5))
self.assertRaises(ValueError, LinearSystem, A, B)
def test_condense_and_simulate(self):
# random systems
for i in range(10):
n = np.random.randint(1, 10)
m = np.random.randint(1, 10)
N = np.random.randint(10, 50)
x0 = np.random.rand(n, 1)
u = [np.random.rand(m, 1)/10. for j in range(N)]
A = np.random.rand(n,n)/10.
B = np.random.rand(n,m)/10.
S = LinearSystem(A, B)
# simulate vs condense
x = S.simulate(x0, u)
A_bar, B_bar = S.condense(N)
np.testing.assert_array_almost_equal(
np.vstack(x),
A_bar.dot(x0) + B_bar.dot(np.vstack(u))
)
def test_controllable(self):
# double integrator (controllable)
A = np.array([[0., 1.],[0., 0.]])
B = np.array([[0.],[1.]])
h = .1
S = LinearSystem.from_continuous(A, B, h)
self.assertTrue(S.controllable)
# make it uncontrollable
B = np.array([[1.],[0.]])
S = LinearSystem.from_continuous(A, B, h)
self.assertFalse(S.controllable)
def test_solve_dare_and_simulate_closed_loop(self):
np.random.seed(1)
# uncontrollable system
A = np.array([[0., 1.],[0., 0.]])
B = np.array([[1.],[0.]])
h = .1
S = LinearSystem.from_continuous(A, B, h)
self.assertRaises(ValueError, S.solve_dare, np.eye(2), np.eye(1))
# test lqr on random systems
for i in range(100):
n = np.random.randint(5, 10)
m = np.random.randint(1, n -1)
controllable = False
while not controllable:
A = np.random.rand(n,n)
B = np.random.rand(n,m)
S = LinearSystem(A, B)
controllable = S.controllable
Q = np.eye(n)
R = np.eye(m)
P, K = S.solve_dare(Q, R)
self.assertTrue(np.min(np.linalg.eig(P)[0]) > 0.)
# simulate in closed-loop and check that x' P x is a Lyapunov function
N = 10
x0 = np.random.rand(n,1)
x_list = S.simulate_closed_loop(x0, N, K)
V_list = [x.T.dot(P).dot(x)[0,0] for x in x_list]
dV_list = [V_list[i] - V_list[i+1] for i in range(len(V_list)-1)]
self.assertTrue(min(dV_list) > 0.)
# simulate in closed-loop and check that 1/2 x' P x is exactly the cost to go
A_cl = A + B.dot(K)
x0 = np.random.rand(n,1)
infinite_horizon_V = .5*x0.T.dot(P).dot(x0)
finite_horizon_V = 0.
max_iter = 1000
t = 0
while not np.isclose(infinite_horizon_V, finite_horizon_V):
finite_horizon_V += .5*(x0.T.dot(Q).dot(x0) + (K.dot(x0)).T.dot(R).dot(K).dot(x0))
x0 = A_cl.dot(x0)
t += 1
if t == max_iter:
self.assertTrue(False)
def test_mcais(self):
"""
Tests only that the function mcais() is called correctly.
For the tests of mcais() see the class TestMCAIS.
"""
# undamped pendulum linearized around the unstable equilibrium
A = np.array([[0., 1.], [1., 0.]])
B = np.array([[0.], [1.]])
h = .1
S = LinearSystem.from_continuous(A, B, h)
K = S.solve_dare(np.eye(2), np.eye(1))[1]
d_min = - np.ones((3,1))
d_max = - d_min
D = Polyhedron.from_bounds(d_min, d_max)
O_inf = S.mcais(K, D)
self.assertTrue(O_inf.contains(np.zeros((2,1))))
def test_from_continuous(self):
# test from continuous
for i in range(10):
n = np.random.randint(1, 10)
m = np.random.randint(1, 10)
A = np.random.rand(n,n)
B = np.random.rand(n,m)
# reduce discretization step until the two method are almost equivalent
h = .01
convergence = False
while not convergence:
S_ee = LinearSystem.from_continuous(A, B, h, 'explicit_euler')
S_zoh = LinearSystem.from_continuous(A, B, h, 'zero_order_hold')
convergence = np.allclose(S_ee.A, S_zoh.A) and np.allclose(S_ee.B, S_zoh.B)
if not convergence:
h /= 10.
self.assertTrue(convergence)
self.assertRaises(ValueError, LinearSystem.from_continuous, A, B, h, 'gatto')
class TestAffineSystem(unittest.TestCase):
def test_intialization(self):
np.random.seed(1)
# wrong initializations
A = np.ones((3,3))
B = np.ones((4,1))
c = np.ones((4,1))
self.assertRaises(ValueError, AffineSystem, A, B, c)
A = np.ones((4,5))
self.assertRaises(ValueError, AffineSystem, A, B, c)
A = np.ones((4,4))
c = np.ones((5,1))
self.assertRaises(ValueError, AffineSystem, A, B, c)
def test_condense_and_simulate(self):
# random systems
for i in range(10):
n = np.random.randint(1, 10)
m = np.random.randint(1, 10)
N = np.random.randint(10, 50)
x0 = np.random.rand(n, 1)
u = [np.random.rand(m, 1)/10. for j in range(N)]
A = np.random.rand(n,n)/10.
B = np.random.rand(n,m)/10.
c = np.random.rand(n,1)/10.
S = AffineSystem(A, B, c)
# simulate vs condense
x = S.simulate(x0, u)
A_bar, B_bar, c_bar = S.condense(N)
np.testing.assert_array_almost_equal(
np.vstack(x),
A_bar.dot(x0) + B_bar.dot(np.vstack(u)) + c_bar
)
def test_from_continuous(self):
# test from continuous
for i in range(10):
n = np.random.randint(1, 10)
m = np.random.randint(1, 10)
A = np.random.rand(n,n)
B = np.random.rand(n,m)
c = np.random.rand(n,1)
# reduce discretization step until the two method are almost equivalent
h = .01
convergence = False
while not convergence:
S_ee = AffineSystem.from_continuous(A, B, c, h, 'explicit_euler')
S_zoh = AffineSystem.from_continuous(A, B, c, h, 'zero_order_hold')
convergence = np.allclose(S_ee.A, S_zoh.A) and np.allclose(S_ee.B, S_zoh.B) and np.allclose(S_ee.c, S_zoh.c)
if not convergence:
h /= 10.
self.assertTrue(convergence)
self.assertRaises(ValueError, AffineSystem.from_continuous, A, B, c, h, 'gatto')
class TestPieceWiseAffineSystem(unittest.TestCase):
def test_intialization(self):
np.random.seed(1)
# different number of systems and domains
A = np.ones((3,3))
B = np.ones((3,2))
c = np.ones((3,1))
S = AffineSystem(A, B, c)
affine_systems = [S]*5
F = np.ones((9,5))
g = np.ones((9,1))
D = Polyhedron(F, g)
domains = [D]*4
self.assertRaises(ValueError, PieceWiseAffineSystem, affine_systems, domains)
# incompatible number of states in affine systems
domains += [D, D]
A = np.ones((2,2))
B = np.ones((2,2))
c = np.ones((2,1))
affine_systems.append(AffineSystem(A, B, c))
self.assertRaises(ValueError, PieceWiseAffineSystem, affine_systems, domains)
# incompatible number of inputs in affine systems
del affine_systems[-1]
A = np.ones((3,3))
B = np.ones((3,1))
c = np.ones((3,1))
affine_systems.append(AffineSystem(A, B, c))
self.assertRaises(ValueError, PieceWiseAffineSystem, affine_systems, domains)
        # different dimensionality of the domains and the systems
del affine_systems[-1]
affine_systems += [S, S]
F = np.ones((9,4))
g =
|
np.ones((9,1))
|
numpy.ones
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 13:57:38 2019
@author: deborahkhider
Calculate flood severity index based on volumetric flow threshold.
Returns one file for every year.
"""
import matplotlib
matplotlib.use('Agg')
import xarray as xr
import numpy as np
import glob as glob
from datetime import date
import sys
import ast
import pandas as pd
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import os
import imageio
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
def openDataset(data, thresholds, year, bounding_box):
''' Open the thresholds and GloFAS concatenated dataset for the appropriate year
Args:
data (str): path to GloFAS in netcdf format. All years in one file
thresholds (str): Name of the netcdf file containing the thresholds data
year (list): year to consider
bounding_box (list): min_lon, max_lon, min_lat, max_lat
Returns:
val (numpy array): Q values cut to the bounding box
Q2 (numpy array): Threshold Q for a 2-yr flood
        Q5 (numpy array): Threshold Q for a 5-yr flood
Q20 (numpy array): Threshold Q for a 20-yr flood
lat (numpy array): latitude vector
lon (numpy array): longitude vector
time (numpy array): time vector
'''
data = xr.open_dataset(data)
min_year =
|
np.min(year)
|
numpy.min
|
"""
Modified from:
https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/data_utils/ModelNetDataLoader.py
"""
import os
import numpy as np
import warnings
import pickle
from tqdm import tqdm
from torch.utils.data import Dataset
from scipy.spatial.transform import Rotation as R
warnings.filterwarnings('ignore')
def get_random_rigid():
# Rotation Euler Angle: [0, 45]
rotation = np.random.randint(0, 45, size=(3,))
r = R.from_euler('zyx', rotation, degrees=True)
# Translation: [-0.5, 0.5)
t = np.random.random((3,)) - 0.5
return r, t
def farthest_point_sample(point, npoint):
N, D = point.shape
xyz = point[:, :3]
centroids = np.zeros((npoint,))
distance = np.ones((N,)) * 1e10
farthest = np.random.randint(0, N)
for i in range(npoint):
centroids[i] = farthest
centroid = xyz[farthest, :]
dist = np.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = np.argmax(distance, -1)
point = point[centroids.astype(np.int32)]
return point
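# Minimal usage sketch (added illustration; the random cloud below is made up and not
# part of ModelNet): farthest point sampling keeps npoint well-spread points by greedily
# adding the point farthest from the ones already selected.
if __name__ == '__main__':
    _demo_cloud = np.random.rand(1024, 6)               # xyz + normal per row
    _demo_subset = farthest_point_sample(_demo_cloud, 128)
    print(_demo_subset.shape)                           # -> (128, 6)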
class ModelNetDataLoader(Dataset):
def __init__(self, root, args, split='train', process_data=False):
self.root = root
self.npoints = args['num_point']
self.process_data = process_data
self.uniform = args['use_uniform_sample']
self.use_normals = args['use_normals']
self.num_category = args['num_category']
if self.num_category == 40:
self.catfile = os.path.join(self.root, 'modelnet40_shape_names.txt')
else:
self.catfile = os.path.join(self.root, 'modelnet10_shape_names.txt')
self.cat = [line.rstrip() for line in open(self.catfile)]
self.classes = dict(zip(self.cat, range(len(self.cat))))
shape_ids = {}
if self.num_category == 40:
shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))]
shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))]
else:
shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_train.txt'))]
shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_test.txt'))]
assert (split == 'train' or split == 'test')
shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]]
self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i]) + '.txt') for i in range(len(shape_ids[split]))]
print('The size of %s data is %d' % (split, len(self.datapath)))
if self.uniform:
self.save_path = os.path.join(root, 'modelnet%d_%s_%dpts_fps.dat' % (self.num_category, split, self.npoints))
else:
self.save_path = os.path.join(root, 'modelnet%d_%s_%dpts.dat' % (self.num_category, split, self.npoints))
if self.process_data:
if not os.path.exists(self.save_path):
                print('Processing data %s (only runs the first time)...' % self.save_path)
self.list_of_points = [None] * len(self.datapath)
self.list_of_labels = [None] * len(self.datapath)
for index in tqdm(range(len(self.datapath)), total=len(self.datapath)):
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
cls = np.array([cls]).astype(np.int32)
point_set = np.loadtxt(fn[1], delimiter=',').astype(np.float32)
if self.uniform:
point_set = farthest_point_sample(point_set, self.npoints)
else:
point_set = point_set[:self.npoints, :]
self.list_of_points[index] = point_set
self.list_of_labels[index] = cls
with open(self.save_path, 'wb') as f:
pickle.dump([self.list_of_points, self.list_of_labels], f)
else:
            print('Loading processed data from %s...' % self.save_path)
with open(self.save_path, 'rb') as f:
self.list_of_points, self.list_of_labels = pickle.load(f)
def __len__(self):
return len(self.datapath)
def _get_item(self, index):
if self.process_data:
point_set, label = self.list_of_points[index], self.list_of_labels[index]
else:
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
label = np.array([cls]).astype(np.int32)
point_set =
|
np.loadtxt(fn[1], delimiter=',')
|
numpy.loadtxt
|
#
# Code taken from https://github.com/JiaxuanYou/graph-generation/
#
import numpy as np
import networkx as nx
import os
import pickle as pkl
import time
import subprocess
import src.eval.mmd as mmd
import concurrent.futures
from datetime import datetime
from functools import partial
from tqdm import tqdm
PRINT_TIME = False
def degree_worker(G):
return np.array(nx.degree_histogram(G))
def add_tensor(x,y):
support_size = max(len(x), len(y))
if len(x) < len(y):
x = np.hstack((x, [0.0] * (support_size - len(x))))
elif len(y) < len(x):
y = np.hstack((y, [0.0] * (support_size - len(y))))
return x+y
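# Added illustration: add_tensor zero-pads the shorter histogram so that degree
# histograms of different lengths can be accumulated, e.g.
# add_tensor(np.array([1., 2.]), np.array([1., 1., 1.])) -> array([2., 3., 1.])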
def degree_stats(graph_ref_list, graph_pred_list, is_parallel=False):
''' Compute the distance between the degree distributions of two unordered sets of graphs.
Args:
        graph_ref_list, graph_pred_list: two lists of networkx graphs to be evaluated
'''
sample_ref = []
sample_pred = []
# in case an empty graph is generated
graph_pred_list_remove_empty = [G for G in graph_pred_list if not G.number_of_nodes() == 0]
prev = datetime.now()
if is_parallel:
with concurrent.futures.ProcessPoolExecutor() as executor:
for deg_hist in executor.map(degree_worker, graph_ref_list):
sample_ref.append(deg_hist)
with concurrent.futures.ProcessPoolExecutor() as executor:
for deg_hist in executor.map(degree_worker, graph_pred_list_remove_empty):
sample_pred.append(deg_hist)
else:
for i in range(len(graph_ref_list)):
degree_temp = np.array(nx.degree_histogram(graph_ref_list[i]))
sample_ref.append(degree_temp)
for i in range(len(graph_pred_list_remove_empty)):
degree_temp = np.array(nx.degree_histogram(graph_pred_list_remove_empty[i]))
sample_pred.append(degree_temp)
mmd_dist = mmd.compute_mmd(sample_ref, sample_pred, kernel=mmd.gaussian_emd)
elapsed = datetime.now() - prev
if PRINT_TIME:
print('Time computing degree mmd: ', elapsed)
return mmd_dist
def clustering_worker(param):
G, bins = param
clustering_coeffs_list = list(nx.clustering(G).values())
hist, _ = np.histogram(
clustering_coeffs_list, bins=bins, range=(0.0, 1.0), density=False)
return hist
def cluster_stats(graph_ref_list, graph_pred_list, bins=100, is_parallel=False):
sample_ref = []
sample_pred = []
graph_pred_list_remove_empty = [G for G in graph_pred_list if not G.number_of_nodes() == 0]
prev = datetime.now()
if is_parallel:
with concurrent.futures.ProcessPoolExecutor() as executor:
for clustering_hist in executor.map(clustering_worker,
[(G, bins) for G in graph_ref_list]):
sample_ref.append(clustering_hist)
with concurrent.futures.ProcessPoolExecutor() as executor:
for clustering_hist in executor.map(clustering_worker,
[(G, bins) for G in graph_pred_list_remove_empty]):
sample_pred.append(clustering_hist)
else:
for i in range(len(graph_ref_list)):
clustering_coeffs_list = list(nx.clustering(graph_ref_list[i]).values())
hist, _ = np.histogram(
clustering_coeffs_list, bins=bins, range=(0.0, 1.0), density=False)
sample_ref.append(hist)
for i in range(len(graph_pred_list_remove_empty)):
clustering_coeffs_list = list(nx.clustering(graph_pred_list_remove_empty[i]).values())
hist, _ = np.histogram(
clustering_coeffs_list, bins=bins, range=(0.0, 1.0), density=False)
sample_pred.append(hist)
mmd_dist = mmd.compute_mmd(sample_ref, sample_pred, kernel=mmd.gaussian_emd,
sigma=1.0/10, distance_scaling=bins)
elapsed = datetime.now() - prev
if PRINT_TIME:
print('Time computing clustering mmd: ', elapsed)
return mmd_dist
# maps motif/orbit name string to its corresponding list of indices from orca output
motif_to_indices = {
'3path' : [1, 2],
'4cycle' : [8],
}
COUNT_START_STR = 'orbit counts: \n'
def edge_list_reindexed(G):
idx = 0
id2idx = dict()
for u in G.nodes():
id2idx[str(u)] = idx
idx += 1
edges = []
for (u, v) in G.edges():
edges.append((id2idx[str(u)], id2idx[str(v)]))
return edges
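# Added note: the orca executable expects a header line "<n_nodes> <n_edges>" followed
# by an edge list over consecutive 0-based node ids, so edge_list_reindexed remaps the
# arbitrary networkx node labels before orca() writes the temporary input file.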
def orca(graph):
tmp_fname = f'./src/eval/orca/tmp_{int(time.time() * 10)}.txt'
f = open(tmp_fname, 'w')
f.write(str(graph.number_of_nodes()) + ' ' + str(graph.number_of_edges()) + '\n')
for (u, v) in edge_list_reindexed(graph):
f.write(str(u) + ' ' + str(v) + '\n')
f.close()
output = subprocess.check_output([
'./src/eval/orca/orca', 'node', '4', tmp_fname, 'std'])
output = output.decode('utf8').strip()
idx = output.find(COUNT_START_STR) + len(COUNT_START_STR)
output = output[idx:]
node_orbit_counts = np.array([list(map(int, node_cnts.strip().split(' ')))
for node_cnts in output.strip('\n').split('\n')])
try:
os.remove(tmp_fname)
except OSError:
pass
return node_orbit_counts
def motif_stats(graph_ref_list, graph_pred_list, motif_type='4cycle', ground_truth_match=None, bins=100):
# graph motif counts (int for each graph)
# normalized by graph size
total_counts_ref = []
total_counts_pred = []
num_matches_ref = []
num_matches_pred = []
graph_pred_list_remove_empty = [G for G in graph_pred_list if not G.number_of_nodes() == 0]
indices = motif_to_indices[motif_type]
for G in graph_ref_list:
orbit_counts = orca(G)
motif_counts = np.sum(orbit_counts[:, indices], axis=1)
if ground_truth_match is not None:
match_cnt = 0
for elem in motif_counts:
if elem == ground_truth_match:
match_cnt += 1
num_matches_ref.append(match_cnt / G.number_of_nodes())
#hist, _ = np.histogram(
# motif_counts, bins=bins, density=False)
motif_temp = np.sum(motif_counts) / G.number_of_nodes()
total_counts_ref.append(motif_temp)
for G in graph_pred_list_remove_empty:
orbit_counts = orca(G)
motif_counts = np.sum(orbit_counts[:, indices], axis=1)
if ground_truth_match is not None:
match_cnt = 0
for elem in motif_counts:
if elem == ground_truth_match:
match_cnt += 1
num_matches_pred.append(match_cnt / G.number_of_nodes())
motif_temp = np.sum(motif_counts) / G.number_of_nodes()
total_counts_pred.append(motif_temp)
mmd_dist = mmd.compute_mmd(total_counts_ref, total_counts_pred, kernel=mmd.gaussian,
is_hist=False)
return mmd_dist
def orbit_stats(graph_ref_list, graph_pred_list):
total_counts_ref = []
total_counts_pred = []
graph_pred_list_remove_empty = [G for G in graph_pred_list if not G.number_of_nodes() == 0]
for G in graph_ref_list:
orbit_counts = orca(G)
orbit_counts_graph = np.sum(orbit_counts, axis=0) / G.number_of_nodes()
total_counts_ref.append(orbit_counts_graph)
    for G in graph_pred_list_remove_empty:
orbit_counts = orca(G)
orbit_counts_graph = np.sum(orbit_counts, axis=0) / G.number_of_nodes()
total_counts_pred.append(orbit_counts_graph)
total_counts_ref =
|
np.array(total_counts_ref)
|
numpy.array
|
import numpy as np
from astropy import table
from astropy.io import ascii
import matplotlib.pyplot as plt
import glob
import os
import sys
import scipy.stats as stats
sys.path.insert(1,'/home/idoi/Dropbox/superfit/')
from get_metadata import *
from matplotlib import rcParams
from superfit.error_routines import savitzky_golay
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
## for Palatino and other serif fonts use:
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Palatino"],
})
from tqdm import tqdm
path='/home/idoi/Dropbox/superfit/results_rcf/'
out_path='/home/idoi/Dropbox/superfit/results_2018_all_exact_z.txt'
sample_path='/home/idoi/Dropbox/superfit/2018_sample/all/'
file_list=glob.glob(path+'*.csv')
sample=ascii.read('/home/idoi/Dropbox/Objects/RCF/2018_test_metadata.ascii')
snid_sample=ascii.read('/home/idoi/Dropbox/Objects/RCF/ML_sample_snid_2018.csv')
# flag and path to copy png files to a folder according to type
copy_plots=0
path_fold='/home/idoi/Dropbox/superfit/analysis/'
#write results to txt file?
write_sample_to_txt=1
# Create dictionaries to assign classifications to SF/SNID output
SF_class_dic={'Ia-norm':'Ia'
,'IIb-flash':'IIb'
,'II-flash':'II'
,'SLSN-IIn':'IIn'
,'IIn':'IIn'
,'Ia-CSM-(ambigious)':'Ia-CSM/IIn'
,'SLSN-Ib':'SLSN-I'
,'SLSN-IIb':'IIb'
,'\"super chandra\"':'Ia'
,'Ia 91T-like':'Ia'
,'Ia 91bg-like':'Ia'
,'Ia-02cx like':'Ia'
,'Ia 99aa-like':'Ia'
,'Ia-pec':'Ia'
,'Ia-rapid':'Ia'
,'Ia 02es-like':'Ia'
,'Ca-Ia':'Ia'
,'Ic-pec':'Ic'}
SNID_class_dic={'II-norm':'II'
,'Ia-norm':'Ia'
,'Ia-csm':'Ia-CSM'
,'Ia-03fg':'Ia'
,'Ib-norm':'Ib'
,'Ic-norm':'Ic'
,'Ia-91T':'Ia'
,'Ia-91bg':'Ia'
,'Ia-02cx':'Ia'
,'Ia-SC':'Ia'
,'Ic-SLSN':'SLSN-I'}
# define classification columns to appear on barplot.
type_list=['Ia','Ib','Ic','Ic-BL','II','IIb','Ibn','IIn','Ia-CSM','SLSN-I']
exact_list=[True]*len(type_list)
key_list=['Ia - all','Ib','Ic','Ic-BL','II-norm','IIb','Ibn','IIn','Ia-CSM','SLSN-I']
# define classes to appear in conf. matrix
classes_conf=np.array(['Ia', 'Ib', 'Ib/c', 'Ic', 'Ic-BL','Ibn','II', 'IIb', 'IIn','Ia-CSM', 'SLSN-I', 'SLSN-II','Other'])
def compute_SNR(spec_path,type='mean',percentile=0.32):
spec=np.genfromtxt(spec_path)
error_spec=savitzky_golay(spec)[:,1]
SNR_spec=np.abs(spec[:,1]/error_spec/np.nanmean(spec[:,1]))
if type=='mean':
SNR = np.nanmean(SNR_spec)
if type=='med':
SNR = np.nanmedian(SNR_spec)
elif type=='min':
SNR = np.nanmin(SNR_spec)
elif type=='percentile':
SNR = np.percentile(SNR_spec,percentile)
elif type=='iqr':
SNR = np.median(SNR_spec)-stats.iqr(SNR_spec)
elif type == 'sigma':
SNR = np.nanmean(SNR_spec)-np.nanstd(SNR_spec)
return SNR
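# Added note: np.percentile expects a percentage in [0, 100], so the default
# percentile=0.32 in compute_SNR selects the 0.32th percentile of the per-pixel SNR,
# not the 32nd; pass percentile=32 if the latter is intended.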
def get_SN(SN_name,table=sample):
cond=[SN_name in x for x in sample['ZTFname']]
return table[cond]
def get_accuracy(sample,SN_type, exact=True, quality_cut='None', col='SF_fit_1'):
if quality_cut=='None':
quality_cut=np.array([True]*len(sample))
if bool(exact)==True:
real_true=sample['classification']==SN_type
if col!='all':
class_true=sample[col]==SN_type
elif col=='all':
class_true=(sample['SF_fit_1']==SN_type)&(sample['c_snid']==SN_type)
else:
real_true=np.array([SN_type in x for x in sample['classification']])
if col!='all':
class_true=np.array([SN_type in x for x in sample[col]])
elif col=='all':
class_true=np.array([SN_type in x for x in sample['SF_fit_1']]) & np.array([SN_type in x for x in sample['c_snid']])
TP = ((class_true ) & (real_true)) & (quality_cut)
FP = ((class_true ) & (~real_true)) & (quality_cut)
FN = real_true & ~TP
TN = ~real_true & (~class_true|(class_true&~quality_cut))
P= TP|FN
N= FP|TN
TPR = np.sum(TP)/np.sum(P)
TNR = np.sum(TN)/np.sum(N)
FPR = np.sum(FP)/np.sum(N)
FNR = np.sum(FN)/
|
np.sum(P)
|
numpy.sum
|
import numpy as np
import h5py
import matplotlib.pyplot as plt
import os
import sys
sys.path.append('F:\Linux')
import json
import illustris_python as il
def HistValAndBin(nums, bins, more=0, mask=0):
if mask == 1:
reMask = []
val = []
tmp = nums[nums < bins[1]]
if mask == 1:
reMask.append(nums < bins[1])
val.append(len(tmp))
for i in range(1,len(bins)-1):
tmp = nums[(nums > bins[i]) & (nums <= bins[i+1])]
val.append(len(tmp))
if mask == 1:
reMask.append((nums > bins[i]) & (nums <= bins[i+1]))
if more == 1:
tmp = nums[nums > bins[-1]]
val.append(len(tmp))
if mask == 1:
reMask.append(nums > bins[-1])
if mask == 0:
return np.array(val)
else:
return np.array(val), np.array(reMask)
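# Added note: HistValAndBin is a hand-rolled histogram. The first bin counts everything
# below bins[1] (including values below bins[0]), the remaining bins are right-inclusive
# intervals (bins[i], bins[i+1]], more=1 appends an overflow bin for values above
# bins[-1], and mask=1 additionally returns the per-bin boolean masks.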
def LoadMergHist(simu, subhaloID):
'''
return subhalo's main progenitor and merger history with snapshot
'''
if simu == 'TNG':
ldir = 'f:/Linux/localRUN/tng_DiskMerTree/%d.json' % subhaloID
else:
ldir = 'f:/Linux/localRUN/il1_DiskMerTree/%d.json' % subhaloID
with open(ldir) as f:
data = json.load(f)
Main = np.array(data['Main'])
return dict(zip(Main[:, 0], Main[:, 1])), np.array(data['Mergers'])
def ErrorBarMedian(data):
#return 25%, 50%, 75%
if len(data) == 0:
return 0, 0, 0
elif len(data) < 3:
return 0, np.median(data), 0
else:
data.sort()
return data[int(len(data) / 4)], np.median(data), data[int(len(data) * 0.75)]
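# Added note: ErrorBarMedian sorts its argument in place, so pass a copy if the caller's
# ordering matters; e.g. ErrorBarMedian(list(range(1, 9))) returns (3, 4.5, 7).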
#TNG data
barID = np.load('f:/Linux/localRUN/barredID_4WP_TNG.npy')
diskID = np.load('f:/Linux/localRUN/diskID_4WP.npy')
#Gas Fraction Gf
mas = il.func.loadSubhalos('TNG', 99, 'SubhaloMassInRadType')
Gf = mas[:, 0] / (mas[:, 4] + mas[:, 0])
Gf[np.isnan(Gf)] = 0
#Stellar Particles
SP = il.func.loadSubhalos('TNG', 99, 'SubhaloLenType')[:, 4]
#Stellar Mass
sMass = il.func.loadSubhalos('TNG', 99, 'SubhaloMassType')[:, 4] / 0.6774
sMass = np.log10(sMass * 10 ** 10)
sMass[np.isinf(sMass)] = 0
#Illsutris-1 data
il1_barID = np.load('f:/Linux/localRUN/barredID_il1.npy')
il1_diskID = np.load('f:/Linux/localRUN/diskID_il1.npy')
il1_mas = il.func.loadSubhalos('il1', 135, 'SubhaloMassInRadType')
#Gas Fraction
il1_gf = il1_mas[:, 0] / (il1_mas[:, 4] + il1_mas[:, 0])
il1_gf[np.isnan(il1_gf)] = 0
#Stellar Particles
il1_SP = il.func.loadSubhalos('il1', 135, 'SubhaloLenType')[:, 4]
#Stellar Mass
il1_sMass = il.func.loadSubhalos('il1', 135, 'SubhaloMassType')[:, 4] / 0.704
il1_sMass = np.log10(il1_sMass * 10 ** 10)
il1_sMass[np.isinf(il1_sMass)] = 0
def plot_fig_1():
#Fig : 'TNG-4WP_GasFraction.png'
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel('GasFraction')
ax1.set_ylabel('N')
ax1.set_title('TNG Gas Fraction & Bar Fraction')
ax2 = ax1.twinx()
ax2.set_ylabel('Bar Fraction')
ax2.set_ylim(0, 0.8)
bins = np.linspace(0, 0.45, 9)
n1 = HistValAndBin(Gf[diskID], bins)
n2 = HistValAndBin(Gf[barID], bins)
ax1.bar(bins[:-1], n1, width=(bins[1] - bins[0])*0.9,align = 'edge', label='TNG disk galaxies')
ax1.bar(bins[:-1], n2, width=(bins[1] - bins[0])*0.9,align = 'edge', label='TNG barred galaxies')
ax2.plot(bins[:-1] + 0.025, n2 / n1, marker='o', color='r')
ax1.legend()
plt.savefig('f:/Linux/local_result/tng-4WP_GasFraction.png', dpi = 300)
def plot_fig_2():
#Fig : 'TNG_Illustris-1_GF_Z=0.png'
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel('GasFraction')
ax1.set_ylabel('N')
ax1.set_title('TNG & Illustris-1 Gas Fraction & Bar Fraction')
ax1.set_ylim(0, 1000)
ax2 = ax1.twinx()
ax2.set_ylabel('Total Bar Fraction')
ax2.set_ylim(0, 1.1)
bins = np.linspace(0, 0.6, 12)
disk_tng_n = HistValAndBin(Gf[diskID], bins)
bar_tng_n = HistValAndBin(Gf[barID], bins)
disk_il1_n = HistValAndBin(il1_gf[il1_diskID], bins)
bar_il1_n = HistValAndBin(il1_gf[il1_barID], bins)
ax1.bar(bins[:-1], disk_tng_n, width=(bins[1] - bins[0]) * 0.35,align = 'edge', label='TNG disk galaxies')
ax1.bar(bins[:-1], bar_tng_n, width=(bins[1] - bins[0]) * 0.35, align='edge', label='TNG barred galaxies')
ax1.bar(bins[:-1] + 0.02, disk_il1_n, width=(bins[1] - bins[0]) * 0.35, align = 'edge', label='Illustris-1 disk galaxies', color='c')
ax1.bar(bins[:-1] + 0.02, bar_il1_n, width=(bins[1] - bins[0]) * 0.35, align='edge', label='Illustris-1 barred galaxies', color='r')
frac = bar_tng_n / disk_tng_n
frac[-3:] = 0
ax2.plot(bins[:-1] + 0.021, frac, marker='o', label='TNG', color='b')
    ax2.plot(bins[:-1] + 0.021, bar_il1_n / disk_il1_n, marker='o', label='Illustris-1', color='k')
ax1.legend()
ax2.legend(loc=2)
plt.savefig('f:/Linux/local_result/TNG_Illustris-1_GF_Z=0.png', dpi=300)
def plot_GasF_TNGAndil1():
#Fig : 'TNG_Illustris-1_GF_Z=0.png'
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel('GasFraction')
ax1.set_ylabel('N')
ax1.set_title('TNG & Illustris-1 Gas Fraction & Bar Fraction at Z=2')
ax1.set_ylim(0, 350)
ax2 = ax1.twinx()
ax2.set_ylabel('Total Bar Fraction')
ax2.set_ylim(0, 0.9)
bins = np.linspace(0, 1, 15)
disk_tng_n = HistValAndBin(Gf[diskID], bins)
bar_tng_n = HistValAndBin(Gf[barID], bins)
disk_il1_n = HistValAndBin(il1_gf[il1_diskID], bins)
bar_il1_n = HistValAndBin(il1_gf[il1_barID], bins)
ax1.bar(bins[:-1], disk_tng_n, width=(bins[1] - bins[0]) * 0.35,align = 'edge', label='TNG disk galaxies')
ax1.bar(bins[:-1], bar_tng_n, width=(bins[1] - bins[0]) * 0.35, align='edge', label='TNG barred galaxies', color='c')
ax1.bar(bins[:-1] + 0.02, disk_il1_n, width=(bins[1] - bins[0]) * 0.35, align = 'edge', label='Illustris-1 disk galaxies')
ax1.bar(bins[:-1] + 0.02, bar_il1_n, width=(bins[1] - bins[0]) * 0.35, align='edge', label='Illustris-1 barred galaxies', color='r')
frac = bar_tng_n / disk_tng_n
frac[-3:] = 0
ax2.plot(bins[:-1] + 0.021, frac, marker='o', label='TNG bar fraction', color='b')
ax2.plot(bins[:-1] + 0.021, bar_il1_n / disk_il1_n, marker='o', label='Illustris-1 bar fraction', color='k')
ax1.legend()
ax2.legend(loc=2)
plt.savefig('f:/Linux/local_result/TNG_Illustris-1_GF_Z=0.png', dpi=300)
'''
#Galaxies with more than 100k stellar particles
diskID_10WP = []
barID_10WP = []
for haloID in diskID:
if SP[haloID] >= 100000:
diskID_10WP.append(haloID)
if haloID in barID:
barID_10WP.append(haloID)
il1_diskID_10WP = []
il1_barID_10WP = []
for haloID in il1_diskID:
if il1_SP[haloID] >= 100000:
il1_diskID_10WP.append(haloID)
if haloID in il1_barID:
il1_barID_10WP.append(haloID)
diskID_10WP = np.array(diskID_10WP)
barID_10WP = np.array(barID_10WP)
il1_diskID_10WP = np.array(il1_diskID_10WP)
il1_barID_10WP = np.array(il1_barID_10WP)
'''
def plot_sMass():
    #Compare the TNG and Illustris-1 (il1) galaxy distributions in stellar-mass bins
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel('Stellar Mass')
ax1.set_ylabel('N')
    ax1.set_title('Stellar Mass distribution between TNG and Illustris-1')
ax1.set_xlim(10.4, 12)
ax1.set_ylim(0, 550)
ax2 = ax1.twinx()
ax2.set_ylabel('Times')
ax2.set_ylim(0, 2.5)
y = np.ones(100)
x = np.linspace(10, 15, 100)
ax2.plot(x, y, c='b', linestyle='-.')
bins = np.linspace(10.2, 11.6, 20)
n1 = HistValAndBin(sMass[diskID], bins, more=1)
n2 = HistValAndBin(il1_sMass[il1_diskID], bins, more=1)
ax1.bar(bins, n1+n2, width=(bins[1] - bins[0])*0.9,align = 'edge', label='TNG disk galaxies')
ax1.bar(bins, n2, width=(bins[1] - bins[0])*0.9,align = 'edge', label='il1 disk galaxies')
ax2.plot(bins + (bins[1]-bins[0])/2, n1 / n2, marker='o', color='r', label='Number TNG / Il-1')
ax1.legend()
ax2.legend(loc=2)
plt.savefig('f:/Linux/local_result/TNG_Illustris-1_MassDistribution.png', dpi = 300)
def Plot_TNG_sMass_BarFraction():
#
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel('Stellar Mass')
ax1.set_ylabel('N')
ax1.set_title('TNG Bar Fraction with Stellar Mass bin')
ax1.set_xlim(10.4, 12)
ax1.set_ylim(0, 400)
ax2 = ax1.twinx()
ax2.set_ylabel('Fraction')
ax2.set_ylim(0, 1)
bins =
|
np.linspace(10.2, 11.7, 20)
|
numpy.linspace
|
"""
Some heuristic methods to clean up after clustering:
* auto-split
* auto-merge
"""
import numpy as np
import os
import time
import sklearn
import sklearn.cluster
import sklearn.mixture
import sklearn.metrics
import sklearn.decomposition
from joblib import Parallel, delayed
import joblib
from .dip import diptest
from .waveformtools import compute_shared_channel_mask, equal_template_with_distrib_overlap, equal_template_with_distance
import hdbscan
debug_plot = False
#~ debug_plot = True
def _get_sparse_waveforms_flatten(cc, dense_mode, label, channel_adjacency):
peak_index, = np.nonzero(cc.all_peaks['cluster_label'] == label)
if dense_mode:
waveforms = cc.get_some_waveforms(peak_index, channel_indexes=None)
extremum_channel = 0
centroid = np.median(waveforms, axis=0)
else:
waveforms = cc.get_some_waveforms(peak_index, channel_indexes=None)
centroid = np.median(waveforms, axis=0)
peak_sign = cc.info['peak_detector']['peak_sign']
n_left = cc.info['extract_waveforms']['n_left']
if peak_sign == '-':
extremum_channel = np.argmin(centroid[-n_left,:], axis=0)
elif peak_sign == '+':
extremum_channel = np.argmax(centroid[-n_left,:], axis=0)
        # TODO: by sparsity level threshold and not radius
adjacency = channel_adjacency[extremum_channel]
waveforms = waveforms.take(adjacency, axis=2)
wf_flat = waveforms.swapaxes(1,2).reshape(waveforms.shape[0], -1)
return waveforms, wf_flat, peak_index
def _compute_one_dip_test(cc, dirname, chan_grp, label, n_components_local_pca, adjacency_radius_um):
# compute dip test to try to over split
from .dataio import DataIO
from .catalogueconstructor import CatalogueConstructor
if cc is None:
        # reload because of parallel jobs
dataio = DataIO(dirname)
cc = CatalogueConstructor(dataio=dataio, chan_grp=chan_grp)
peak_sign = cc.info['peak_detector']['peak_sign']
dense_mode = cc.info['mode'] == 'dense'
n_left = cc.info['extract_waveforms']['n_left']
n_right = cc.info['extract_waveforms']['n_right']
peak_width = n_right - n_left
nb_channel = cc.nb_channel
if dense_mode:
channel_adjacency = {c: np.arange(nb_channel) for c in range(nb_channel)}
else:
channel_adjacency = {}
for c in range(nb_channel):
nearest, = np.nonzero(cc.channel_distances[c, :] < adjacency_radius_um)
channel_adjacency[c] = nearest
#~ waveforms, wf_flat, peak_index = _get_sparse_waveforms_flatten(cc, dense_mode, label, channel_adjacency)
waveforms = cc.get_cached_waveforms(label)
centroid = cc.get_one_centroid(label)
#~ print('label', label, waveforms.shape, centroid.shape)
if not dense_mode:
        # TODO: by sparsity level threshold and not radius
if peak_sign == '-':
extremum_channel = np.argmin(centroid[-n_left,:], axis=0)
elif peak_sign == '+':
extremum_channel = np.argmax(centroid[-n_left,:], axis=0)
adjacency = channel_adjacency[extremum_channel]
waveforms = waveforms.take(adjacency, axis=2)
wf_flat = waveforms.swapaxes(1,2).reshape(waveforms.shape[0], -1)
#~ pca = sklearn.decomposition.IncrementalPCA(n_components=n_components_local_pca, whiten=True)
n_components = min(wf_flat.shape[1]-1, n_components_local_pca)
pca = sklearn.decomposition.TruncatedSVD(n_components=n_components)
try:
feats = pca.fit_transform(wf_flat)
except ValueError:
        print('Error in diptest TruncatedSVD for label {}'.format(label))
return None
pval = diptest(np.sort(feats[:, 0]), numt=200)
return pval
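# Added note: the returned value is the p-value of Hartigan's dip test for unimodality,
# computed on the first SVD component of the flattened waveforms; auto_split below
# presumably treats a small p-value (under pval_thresh) as evidence that the cluster is
# multimodal and should be split further.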
def auto_split(catalogueconstructor,
n_spike_for_centroid=None,
adjacency_radius_um = 30,
n_components_local_pca=3,
pval_thresh=0.1,
min_cluster_size=20,
maximum_shift=2,
n_jobs=-1,
#~ n_jobs=1,
joblib_backend='loky',
):
cc = catalogueconstructor
assert cc.some_waveforms is not None, 'run cc.cache_some_waveforms() first'
peak_sign = cc.info['peak_detector']['peak_sign']
dense_mode = cc.info['mode'] == 'dense'
n_left = cc.info['extract_waveforms']['n_left']
n_right = cc.info['extract_waveforms']['n_right']
peak_width = n_right - n_left
nb_channel = cc.nb_channel
if dense_mode:
channel_adjacency = {c: np.arange(nb_channel) for c in range(nb_channel)}
else:
channel_adjacency = {}
for c in range(nb_channel):
nearest, = np.nonzero(cc.channel_distances[c, :] < adjacency_radius_um)
channel_adjacency[c] = nearest
if len(cc.positive_cluster_labels) ==0:
return
m =
|
np.max(cc.positive_cluster_labels)
|
numpy.max
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.ticker as ticker
import numpy as np
from numpy.fft import fftn, fftshift
matplotlib.use("Qt5Agg")
helptext = """
calculation of the diffraction pattern using FFTs with both conventions and kinematic
sum, to show the relationship between the phase and the displacement.
The object is a Ge-core / Si-shell nanowire.
"""
savedir = "C:/Users/carnis/Work Folders/Documents/data/CH4760_Pt/S2227/simu/Figures/phasing_kin_FFT/new/"
colorbar_range = [-7, 4] # [0, 9.5] # [vmin, vmax] log scale in photon counts
comment = "_GeSi_NW_scale" + str(colorbar_range) # should start with _
tick_spacing = 25 # for plots in real space, in nm
tick_length = 5 # in plots
tick_width = 2 # in plots
save_colorbar = 1 # to save the colorbar
phase_range = np.pi / 30 # in radians, for plots
# parameters for plotting
params = {
"backend": "ps",
"axes.labelsize": 20,
"text.fontsize": 20,
"legend.fontsize": 20,
"title.fontsize": 20,
"xtick.labelsize": 20,
"ytick.labelsize": 20,
"text.usetex": False,
"figure.figsize": (11, 9),
}
# define a colormap
cdict = {
"red": (
(0.0, 1.0, 1.0),
(0.11, 0.0, 0.0),
(0.36, 0.0, 0.0),
(0.62, 1.0, 1.0),
(0.87, 1.0, 1.0),
(1.0, 0.0, 0.0),
),
"green": (
(0.0, 1.0, 1.0),
(0.11, 0.0, 0.0),
(0.36, 1.0, 1.0),
(0.62, 1.0, 1.0),
(0.87, 0.0, 0.0),
(1.0, 0.0, 0.0),
),
"blue": (
(0.0, 1.0, 1.0),
(0.11, 1.0, 1.0),
(0.36, 1.0, 1.0),
(0.62, 0.0, 0.0),
(0.87, 0.0, 0.0),
(1.0, 0.0, 0.0),
),
}
my_cmap = matplotlib.colors.LinearSegmentedColormap("my_colormap", cdict, 256)
plt.ion()
##################
# Create the shape of the object
##################
half_window = 256 # half number of pixels in x (horizontal axis) and y (vertical axis)
aSi = 0.54309 # lattice spacing of Si in nm
aGe = 0.5658 # lattice spacing of Ge in nm
d400_Ge = aGe / 4 # the diffraction is calculated at Ge 400 peak
misfit = (aSi - aGe) / aGe # dimensionless
Zc = 32 # atomic number of Germanium core
Zs = 14 # atomic number of Silicon shell
voxel_size = aGe # in nm
radius_core = 20 * voxel_size
radius_NW = 40 * voxel_size
alpha = np.arccos(1 / np.sqrt(3))
tmp = np.mgrid[-half_window:half_window, -half_window:half_window]
ygrid, xgrid = tmp[0] * voxel_size, tmp[1] * voxel_size
area_nanowire = np.where(
(ygrid < radius_NW)
& (ygrid > -radius_NW)
& (ygrid < -np.tan(alpha - 10 * np.pi / 180) * (-xgrid - radius_NW))
& (ygrid > -np.tan(alpha) * (-xgrid + radius_NW))
& (ygrid > np.tan(alpha - 0 * np.pi / 180) * (xgrid - radius_NW)) #
& (ygrid < -np.tan(alpha + 30 * np.pi / 180) * (xgrid - radius_NW))
& (ygrid < np.tan(alpha) * (xgrid + radius_NW))
& (ygrid > -np.tan(alpha) * (xgrid + radius_NW)),
1,
0,
)
area_core = np.where(
(ygrid < radius_core)
& (ygrid > -radius_core)
& (ygrid < -np.tan(alpha - 10 * np.pi / 180) * (-xgrid - radius_core))
& (ygrid > -np.tan(alpha) * (-xgrid + radius_core))
& (ygrid > np.tan(alpha - 0 * np.pi / 180) * (xgrid - radius_core))
& (ygrid < -np.tan(alpha + 30 * np.pi / 180) * (xgrid - radius_core))
& (ygrid < np.tan(alpha) * (xgrid + radius_core))
& (ygrid > -np.tan(alpha) * (xgrid + radius_core)),
1,
0,
)
nanowire = area_core * abs(Zc) + (area_nanowire - area_core) * abs(Zs)
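# Added note: the scattering contrast is approximated by the atomic number, so core
# pixels carry Zc = 32 (Ge), shell pixels (nanowire minus core) carry Zs = 14 (Si),
# and the surrounding vacuum stays at 0.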
np.savez_compressed(savedir + "GeSi_NW_support.npz", obj=nanowire)
pixel_spacing = tick_spacing / voxel_size
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
nanowire[
half_window - 100 : half_window + 100, half_window - 100 : half_window + 100
],
cmap=my_cmap,
vmin=0,
vmax=35,
)
ax0.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.tick_params(
labelbottom="off",
labelleft="off",
direction="in",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "density.png", bbox_inches="tight")
if save_colorbar == 1:
plt.colorbar(plt0, ax=ax0)
plt.xlabel("X")
plt.ylabel("Y")
ax0.tick_params(
labelbottom="on",
labelleft="on",
labelsize=12,
direction="in",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "density_colorbar.png", bbox_inches="tight")
##################
# displacement
##################
nu = 0.27
theta = np.arctan2(xgrid, ygrid)
r = np.sqrt(ygrid * ygrid + xgrid * xgrid) # in nm
alpha = (
-radius_core
* radius_core
* misfit
* (1 + nu)
/ (2 * radius_NW * radius_NW * (1 - nu))
) # dimensionless
beta = alpha * radius_NW * radius_NW # nm2
epsilonR = alpha - beta / (r * r) # dimensionless
epsilonT = alpha + beta / (r * r) # dimensionless
epsilonXX = misfit + epsilonR * np.cos(theta) ** 2 + epsilonT * np.sin(theta) ** 2
epsilonXX_Si = epsilonR * np.cos(theta) ** 2 + epsilonT * np.sin(theta) ** 2
epsilon_xx = np.zeros((2 * half_window, 2 * half_window))
# based on the analytical elastic strain in the radial and
# transverse directions for a core-shell SiGe nanowire
# reference:
displacement = ((area_core - area_nanowire) * epsilonXX_Si + area_core * epsilon_xx) * 2
# plt.figure()
# plt.imshow(disp)
displacement[np.isnan(displacement)] = 0 # for central pixel which is not defined
ux = np.copy(displacement)
displacement[nanowire == 0] = np.nan # for plots
# no displacement along y
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
displacement[
half_window - 100 : half_window + 100, half_window - 100 : half_window + 100
],
cmap=my_cmap,
vmin=-phase_range,
vmax=phase_range,
)
ax0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "ux.png", bbox_inches="tight")
if save_colorbar == 1:
plt.colorbar(plt0, ax=ax0)
plt.xlabel("X")
plt.ylabel("Y")
plt.pause(0.5)
plt.savefig(savedir + "ux_colorbar.png", bbox_inches="tight")
##################
# diffraction on Ge 400 peak
##################
q400_Ge = 2 * np.pi / d400_Ge # inverse nm
avg_q = np.matrix([q400_Ge, 0])
dq = 2 * np.pi / (2 * half_window * aGe) # inverse nm
qx = q400_Ge + np.arange(-dq * half_window, dq * half_window, dq)
qy = np.arange(-dq * half_window, dq * half_window, dq)
########################
# FFT with displacement field and symmetric
# normalization for comparison with mathematica
########################
complex_object = nanowire * np.exp(1j * (ux * avg_q[0, 0] + 0))
np.save(savedir + "GeSi_NW_complex_object.npy", complex_object)
print("Min(abs(object)", abs(complex_object).min())
print("Max(abs(object)", abs(complex_object).max())
amplitude = fftshift(fftn(nanowire * np.exp(1j * (ux * avg_q[0, 0] + 0)), norm="ortho"))
print("Min(abs(amplitude)", abs(amplitude).min()) # should be same as mathematica
print("Max(abs(amplitude)", abs(amplitude).max()) # should be same as mathematica
intensity = abs(amplitude) ** 2
print(
"Min(log10(intensity)", np.log10(intensity).min()
) # should be same as mathematica
print(
"Max(log10(intensity)", np.log10(intensity).max()
) # should be same as mathematica
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap="jet",
vmin=-7,
vmax=4,
)
ax0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_positive" + comment + "_ortho_jet.png", bbox_inches="tight")
if save_colorbar == 1:
plt.colorbar(plt0, ax=ax0)
plt.xlabel("Qx")
plt.ylabel("Qy")
ax0.tick_params(
labelbottom="on",
labelleft="on",
labelsize=12,
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(
savedir + "FFT_positive" + comment + "_ortho_colorbar_jet.png",
bbox_inches="tight",
)
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap="jet",
vmin=-7,
vmax=4,
)
ax0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(
savedir + "FFT_positive" + comment + "_ortho_zoom_jet.png", bbox_inches="tight"
)
########################
# FFT with displacement field of opposite sign and
# symmetric normalization for comparison with mathematica
########################
amplitude = fftshift(
fftn(nanowire * np.exp(1j * (-ux * avg_q[0, 0] + 0)), norm="ortho")
)
print("Min(abs(amplitude)", abs(amplitude).min()) # should be same as mathematica
print("Max(abs(amplitude)", abs(amplitude).max()) # should be same as mathematica
intensity = abs(amplitude) ** 2
print(
"Min(log10(intensity)", np.log10(intensity).min()
) # should be same as mathematica
print(
"Max(log10(intensity)", np.log10(intensity).max()
) # should be same as mathematica
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap="jet",
vmin=-7,
vmax=4,
)
ax0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_negative" + comment + "_ortho_jet.png", bbox_inches="tight")
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap="jet",
vmin=-7,
vmax=4,
)
ax0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(
savedir + "FFT_negative" + comment + "_ortho_zoom_jet.png", bbox_inches="tight"
)
########################
# FFT with displacement field and default normalization
########################
intensity = abs(fftshift(fftn(nanowire * np.exp(1j * (ux * avg_q[0, 0] + 0))))) ** 2
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off",
labelleft="off",
top="on",
right="on",
labelsize=12,
direction="out",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_positive" + comment + ".png", bbox_inches="tight")
np.savez_compressed(savedir + "GeSi_NW_FFT_positive.npz", obj=intensity)
if save_colorbar == 1:
plt.colorbar(plt0, ax=ax0)
plt.xlabel("Qx")
plt.ylabel("Qy")
ax0.tick_params(
labelbottom="on",
labelleft="on",
labelsize=12,
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(
savedir + "FFT_positive" + comment + "_colorbar.png", bbox_inches="tight"
)
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_positive" + comment + "_zoom.png", bbox_inches="tight")
########################
# FFT with displacement field of opposite sign and default normalization
########################
intensity = abs(fftshift(fftn(nanowire * np.exp(1j * (-ux * avg_q[0, 0] + 0))))) ** 2
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off",
labelleft="off",
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_negative" + comment + ".png", bbox_inches="tight")
np.savez_compressed(savedir + "GeSi_NW_FFT_negative.npz", obj=intensity)
fig, x0 = plt.subplots(1, 1)
plt0 = x0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
x0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_negative" + comment + "_zoom.png", bbox_inches="tight")
#######################
# kinematic sums
#######################
nanowire_zoom = nanowire[
half_window - 50 : half_window + 50, half_window - 50 : half_window + 50
]
plt.figure()
plt.imshow(nanowire_zoom)
qx = q400_Ge + np.arange(-dq * half_window, dq * half_window, dq)
qy = np.arange(-dq * half_window, dq * half_window, dq)
grid_x = xgrid + ux
grid_y = ygrid
grid_x = grid_x[
half_window - 50 : half_window + 50, half_window - 50 : half_window + 50
]
grid_y = grid_y[
half_window - 50 : half_window + 50, half_window - 50 : half_window + 50
]
qx1 = np.repeat(qx[np.newaxis, :], len(qy), axis=0)
qy1 = np.repeat(qy[:, np.newaxis], len(qx), axis=1)
##############################
# calculate the centered kinematic sum with the +1j sign convention and the +ux displacement field
##############################
Fhk1 = np.zeros((len(qy), len(qx))).astype(np.complex64)
for ii in range(len(qy)):
for jj in range(len(qx)):
Fhk1[ii, jj] = (
Fhk1[ii, jj]
+ (
nanowire_zoom
*
|
np.exp(+1j * (qx1[ii, jj] * grid_x + qy1[ii, jj] * grid_y))
|
numpy.exp
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for environments.gym_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing.absltest import mock
import gym
import gym.spaces
import numpy as np
from tf_agents.environments import gym_wrapper
from tf_agents.utils import test_utils
class GymWrapperSpecTest(test_utils.TestCase):
def test_spec_from_gym_space_discrete(self):
discrete_space = gym.spaces.Discrete(3)
spec = gym_wrapper._spec_from_gym_space(discrete_space)
self.assertEqual((), spec.shape)
self.assertEqual(np.int64, spec.dtype)
self.assertEqual(0, spec.minimum)
self.assertEqual(2, spec.maximum)
def test_spec_from_gym_space_multi_discrete(self):
multi_discrete_space = gym.spaces.MultiDiscrete([1, 2, 3, 4])
spec = gym_wrapper._spec_from_gym_space(multi_discrete_space)
self.assertEqual((4,), spec.shape)
self.assertEqual(np.int32, spec.dtype)
np.testing.assert_array_equal(np.array([0], dtype=np.int), spec.minimum)
np.testing.assert_array_equal(
np.array([0, 1, 2, 3], dtype=np.int), spec.maximum)
def test_spec_from_gym_space_multi_binary(self):
multi_binary_space = gym.spaces.MultiBinary(4)
spec = gym_wrapper._spec_from_gym_space(multi_binary_space)
self.assertEqual((4,), spec.shape)
self.assertEqual(np.int8, spec.dtype)
np.testing.assert_array_equal(np.array([0], dtype=np.int), spec.minimum)
np.testing.assert_array_equal(np.array([1], dtype=np.int), spec.maximum)
def test_spec_from_gym_space_box_scalars(self):
for dtype in (np.float32, np.float64):
box_space = gym.spaces.Box(-1.0, 1.0, (3, 4), dtype=dtype)
spec = gym_wrapper._spec_from_gym_space(box_space)
self.assertEqual((3, 4), spec.shape)
self.assertEqual(dtype, spec.dtype)
np.testing.assert_array_equal(-np.ones((3, 4)), spec.minimum)
np.testing.assert_array_equal(np.ones((3, 4)), spec.maximum)
def test_spec_from_gym_space_box_scalars_simplify_bounds(self):
box_space = gym.spaces.Box(-1.0, 1.0, (3, 4))
spec = gym_wrapper._spec_from_gym_space(box_space, simplify_box_bounds=True)
self.assertEqual((3, 4), spec.shape)
self.assertEqual(np.float32, spec.dtype)
np.testing.assert_array_equal(
|
np.array([-1], dtype=np.int)
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
- <NAME> <<EMAIL>>
- <NAME> <<EMAIL>>
DESCRIPTION
usage: cwt_analysis_synthesis.py [-h] [-v] [-M MODE] [-m MEAN_F0] [-o OUTPUT]
[-P]
input_file
Tool for CWT analysis/synthesis of the F0
positional arguments:
input_file Input signal or F0 file
optional arguments:
-h, --help show this help message and exit
-v, --verbosity increase output verbosity
-M MODE, --mode MODE script mode: 0=analysis, 1=synthesis, 2=analysis/synthesis
-m MEAN_F0, --mean_f0 MEAN_F0
                        Mean f0 needed for synthesis (unused for analysis modes)
-o OUTPUT, --output OUTPUT
output directory for analysis or filename for synthesis.
(Default: input_file directory [Analysis] or <input_file>.f0 [Synthesis])
-P, --plot Plot the results
LICENSE
See https://github.com/asuni/wavelet_prosody_toolkit/blob/master/LICENSE.txt
"""
import sys
import os
import traceback
import argparse
import time
import logging
import yaml
# Collections
from collections import defaultdict
import warnings
# Plotting
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# Wavelet import
from wavelet_prosody_toolkit.prosody_tools import misc
from wavelet_prosody_toolkit.prosody_tools import cwt_utils
from wavelet_prosody_toolkit.prosody_tools import f0_processing
import numpy as np
# List of logging levels used to setup everything using verbose option
LEVEL = [logging.WARNING, logging.INFO, logging.DEBUG]
# FIXME: be more specific!
warnings.simplefilter("ignore", np.ComplexWarning) # Plotting can't deal with complex, but we don't care
###############################################################################
# Functions
###############################################################################
def apply_configuration(current_configuration, updating_part):
"""Utils to update the current configuration using the updating part
Parameters
----------
current_configuration: dict
The current state of the configuration
updating_part: dict
The information to add to the current configuration
Returns
-------
dict
the updated configuration
"""
if not isinstance(current_configuration, dict):
return updating_part
if current_configuration is None:
return updating_part
if updating_part is None:
return current_configuration
for k in updating_part:
if k not in current_configuration:
current_configuration[k] = updating_part[k]
else:
current_configuration[k] = apply_configuration(current_configuration[k], updating_part[k])
return current_configuration
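# Added illustration (hypothetical values, not a real toolkit configuration): keys in
# updating_part are merged recursively into current_configuration; at the leaves the
# updating value wins, so configuration files applied later override earlier ones.
_demo_cfg = apply_configuration({"f0": {"min_f0": 50}},
                                {"f0": {"min_f0": 30, "max_f0": 400}})
# _demo_cfg is now {"f0": {"min_f0": 30, "max_f0": 400}}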
def load_f0(input_file, binary_mode=False, configuration=None):
"""Load the f0 from a text file or extract it from a wav file
Parameters
----------
input_file: string
The input file name.
Returns
-------
1D arraylike
the raw f0 values
"""
if input_file.lower().endswith(".csv"):
if binary_mode:
raise Exception("cannot have a csv file in binary mode")
else:
raw_f0 = np.loadtxt(input_file)
if input_file.lower().endswith(".f0"):
if binary_mode:
raw_f0 = np.fromfile(input_file, dtype=np.float32)
else:
raw_f0 = np.loadtxt(input_file)
elif input_file.lower().endswith(".lf0"):
if binary_mode:
raw_f0 = np.fromfile(input_file, dtype=np.float32)
else:
raw_f0 = np.loadtxt(input_file)
raw_f0 = np.exp(raw_f0)
elif input_file.lower().endswith(".wav"):
logging.info("Extracting the F0 from the signal")
(fs, wav_form) = misc.read_wav(input_file)
raw_f0 = f0_processing.extract_f0(wav_form, fs,
configuration["f0"]["min_f0"],
configuration["f0"]["max_f0"])
return raw_f0
###############################################################################
# Main function
###############################################################################
def run():
"""Main entry function
This function contains the code needed to achieve the analysis and/or the synthesis
"""
global args
warnings.simplefilter("ignore", FutureWarning) # Plotting can't deal with complex, but we don't care
# Loading default configuration
configuration = defaultdict()
with open(os.path.dirname(os.path.realpath(__file__)) + "/configs/default.yaml", 'r') as f:
configuration = apply_configuration(configuration, defaultdict(lambda: False, yaml.load(f)))
logging.debug("default configuration")
logging.debug(configuration)
# Loading dedicated analysis.synthesis configuration
with open(os.path.dirname(os.path.realpath(__file__)) + "/configs/synthesis.yaml", 'r') as f:
configuration = apply_configuration(configuration, defaultdict(lambda: False, yaml.load(f)))
logging.debug("configuration filled with synthesis part")
logging.debug(configuration)
# Loading user configuration
if args.configuration_file:
try:
with open(args.configuration_file, 'r') as f:
configuration = apply_configuration(configuration, defaultdict(lambda: False, yaml.load(f)))
logging.debug("configuration filled with user part")
logging.debug(configuration)
except IOError as ex:
logging.error("configuration file " + args.config + " could not be loaded:")
logging.error(ex.msg)
sys.exit(1)
# Analysis Mode
if args.mode == 0:
raw_f0 = load_f0(args.input_file, args.binary_mode, configuration)
logging.info("Processing f0")
f0 = f0_processing.process(raw_f0)
# FIXME: reintegrated
if args.plot:
plt.title("F0 preprocessing and interpolation")
plt.plot(f0, color="red", alpha=0.5, linewidth=3)
plt.plot(raw_f0, color="gray", alpha=0.5)
plt.show()
# # FIXME: read this?
# logging.info("writing interpolated lf0\t" + output_file + ".interp")
# np.savetxt(output_file + ".interp", f0.astype('float'),
# fmt="%f", delimiter="\n")
        # Perform continuous wavelet transform of the mean-subtracted f0 with 12 scales, one octave apart
logging.info("Starting analysis with (num_scale=%d, scale_distance=%f, mother_name=%s)" %
(configuration["wavelet"]["num_scales"], configuration["wavelet"]["scale_distance"], configuration["wavelet"]["mother_wavelet"]))
full_scales, widths, _ = cwt_utils.cwt_analysis(f0 - np.mean(f0),
mother_name=configuration["wavelet"]["mother_wavelet"],
period=configuration["wavelet"]["period"],
num_scales=configuration["wavelet"]["num_scales"],
scale_distance=configuration["wavelet"]["scale_distance"],
apply_coi=False)
full_scales = np.real(full_scales)
# SSW parameterization, adjacent scales combined (with extra scales to handle long utterances)
scales = cwt_utils.combine_scales(np.real(full_scales), configuration["wavelet"]["combined_scales"])
for i in range(0, len(scales)):
logging.debug("Mean scale[%d]: %s" % (i, str(np.mean(scales[i]))))
# Saving matrix
logging.info("writing wavelet matrix in \"%s\"" % args.output_file)
if args.binary_mode:
with open(args.output_file, "wb") as f_out:
scales.T.astype(np.float32).tofile(f_out)
else:
np.savetxt(args.output_file, scales.T.astype('float'), fmt="%f", delimiter=",")
# Synthesis mode
if args.mode == 1:
if args.binary_mode:
scales = np.fromfile(args.input_file, dtype=np.float32)
scales = scales.reshape(-1, len(configuration["wavelet"]["combined_scales"])).T
else:
scales = np.loadtxt(args.input_file, delimiter=",").T # FIXME: hardcoded
rec = cwt_utils.cwt_synthesis(scales, args.mean_f0)
logging.info("Save reconstructed f0 in %s" % args.output_file)
if args.binary_mode:
with open(args.output_file, "wb") as f_out:
rec.astype(np.float32).tofile(f_out)
else:
np.savetxt(args.output_file, rec, fmt="%f")
# Debugging /plotting part
if args.plot:
nb_sub = 2
if args.mode == 0:
nb_sub = 3
ax = plt.subplot(nb_sub, 1, 1)
# pylab.title("CWT decomposition to % scales and reconstructed signal" % len(configuration["wavelet"]["combined_scales"]))
if args.mode == 0:
plt.plot(f0, linewidth=1, color="red")
rec = cwt_utils.cwt_synthesis(scales, np.mean(f0))
plt.plot(rec, color="blue", alpha=0.3)
plt.subplot(nb_sub, 1, 2, sharex=ax)
for i in range(0, len(scales)):
plt.plot(scales[i] + max(rec)*1.5 + i*75,
color="blue", alpha=0.5)
#plt.plot(scales[len(scales)-i-1] + max(rec)*1.5 + i*75,
if args.mode == 0:
plt.subplot(nb_sub, 1, 3, sharex=ax)
plt.contourf(np.real(full_scales), 100,
norm=colors.SymLogNorm(linthresh=0.2, linscale=0.05,
vmin=
|
np.min(full_scales)
|
numpy.min
|
from typing import Tuple
import scipy.spatial
import numpy as np
def compute_distance_matrix(embeddings: np.array) -> np.array:
condensed = scipy.spatial.distance.pdist(embeddings, "euclidean")
matrix = scipy.spatial.distance.squareform(condensed)
return matrix
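# Added note: for n embeddings the result is a symmetric (n, n) matrix of pairwise
# euclidean distances with zeros on the diagonal, e.g.
# compute_distance_matrix(np.random.rand(5, 128)).shape -> (5, 5)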
def compute_knn_confusions(distance_matrix: np.array, domains: np.array, num_neighbors: int) -> Tuple[np.array, Tuple[np.array, np.array]]:
knn_distances, knn_indices = compute_knn(distance_matrix, num_neighbors)
knn_domains = np.take(domains, knn_indices)
confusions, confusions_if_scan, confusions_if_cad = compute_domain_confusion(knn_domains, domains)
return confusions, (confusions_if_scan, confusions_if_cad)
def compute_knn(distance_matrix: np.array, k: int = 100) -> Tuple[np.array, np.array]:
k += 1 # k nearest neighbors + the element itself
k_i = distance_matrix.argpartition(k, axis=0)
k_d = np.take_along_axis(distance_matrix, k_i, axis=0)
sorted_indices = k_d.argsort(axis=0)
k_i_sorted = np.take_along_axis(k_i, sorted_indices, axis=0)[1:k]
k_d_sorted =
|
np.take_along_axis(distance_matrix, k_i_sorted, axis=0)
|
numpy.take_along_axis
|
# Copyright (c) 2018, NVIDIA CORPORATION.
from __future__ import division, print_function
import functools
import pickle
import numpy as np
import pandas as pd
import pyarrow as pa
import nvstrings
import rmm
import cudf
from cudf.core.buffer import Buffer
from cudf.core.column import (
CategoricalColumn,
ColumnBase,
DatetimeColumn,
NumericalColumn,
StringColumn,
column,
)
from cudf.core.frame import Frame
from cudf.utils import cudautils, ioutils, utils
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import is_categorical_dtype, is_scalar, min_signed_type
from cudf.utils.utils import cached_property
def _to_frame(this_index, index=True, name=None):
"""Create a DataFrame with a column containing this Index
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index
name : str, default None
Name to be used for the column
Returns
-------
DataFrame
cudf DataFrame
"""
from cudf import DataFrame
if name is not None:
col_name = name
elif this_index.name is None:
col_name = 0
else:
col_name = this_index.name
return DataFrame(
{col_name: this_index._values}, index=this_index if index else None
)
class Index(Frame):
"""The root interface for all Series indexes.
"""
def serialize(self):
"""Serialize into pickle format suitable for file storage or network
transmission.
"""
header = {}
header["index_column"] = {}
# store metadata values of index separately
# Indexes: Numerical/DateTime/String are often GPU backed
header["index_column"], frames = self._values.serialize()
header["name"] = pickle.dumps(self.name)
header["dtype"] = pickle.dumps(self.dtype)
header["type"] = pickle.dumps(type(self))
header["frame_count"] = len(frames)
return header, frames
def __contains__(self, item):
return item in self._values
@classmethod
def deserialize(cls, header, frames):
"""
"""
h = header["index_column"]
idx_typ = pickle.loads(header["type"])
name = pickle.loads(header["name"])
col_typ = pickle.loads(h["type"])
index = col_typ.deserialize(h, frames[: header["frame_count"]])
return idx_typ(index, name=name)
@property
def name(self):
return next(iter(self._data.keys()))
@name.setter
def name(self, value):
col = self._data.pop(self.name)
self._data[value] = col
def dropna(self):
"""
Return a Series with null values removed.
"""
return super().dropna(subset=[self.name])
def take(self, indices):
"""Gather only the specific subset of indices
Parameters
        ----------
indices: An array-like that maps to values contained in this Index.
"""
return self[indices]
def argsort(self, ascending=True):
indices = self._values.argsort(ascending=ascending)
indices.name = self.name
return indices
@property
def values(self):
return np.asarray([i for i in self._values])
def to_pandas(self):
return pd.Index(self._values.to_pandas(), name=self.name)
def to_arrow(self):
return self._values.to_arrow()
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
import cudf.io.dlpack as dlpack
return dlpack.to_dlpack(self)
@property
def gpu_values(self):
return self._values.data_array_view
def min(self):
return self._values.min()
def max(self):
return self._values.max()
def sum(self):
return self._values.sum()
def find_segments(self):
"""Return the beginning index for segments
Returns
-------
result : NumericalColumn
"""
segments, _ = self._find_segments()
return segments
def _find_segments(self):
seg, markers = cudautils.find_segments(self.gpu_values)
return (
column.build_column(data=Buffer(seg), dtype=seg.dtype),
markers,
)
@classmethod
def _concat(cls, objs):
data = ColumnBase._concat([o._values for o in objs])
names = {obj.name for obj in objs}
if len(names) == 1:
[name] = names
else:
name = None
result = as_index(data)
result.name = name
return result
def _apply_op(self, fn, other=None):
from cudf.core.series import Series
idx_series = Series(self, name=self.name)
op = getattr(idx_series, fn)
if other is not None:
return as_index(op(other))
else:
return as_index(op())
def unique(self):
return as_index(self._values.unique())
def __add__(self, other):
return self._apply_op("__add__", other)
def __radd__(self, other):
return self._apply_op("__radd__", other)
def __sub__(self, other):
return self._apply_op("__sub__", other)
def __rsub__(self, other):
return self._apply_op("__rsub__", other)
def __mul__(self, other):
return self._apply_op("__mul__", other)
def __rmul__(self, other):
return self._apply_op("__rmul__", other)
def __mod__(self, other):
return self._apply_op("__mod__", other)
def __rmod__(self, other):
return self._apply_op("__rmod__", other)
def __pow__(self, other):
return self._apply_op("__pow__", other)
def __floordiv__(self, other):
return self._apply_op("__floordiv__", other)
def __rfloordiv__(self, other):
return self._apply_op("__rfloordiv__", other)
def __truediv__(self, other):
return self._apply_op("__truediv__", other)
def __rtruediv__(self, other):
return self._apply_op("__rtruediv__", other)
__div__ = __truediv__
def __and__(self, other):
return self._apply_op("__and__", other)
def __or__(self, other):
return self._apply_op("__or__", other)
def __xor__(self, other):
return self._apply_op("__xor__", other)
def __eq__(self, other):
return self._apply_op("__eq__", other)
def __ne__(self, other):
return self._apply_op("__ne__", other)
def __lt__(self, other):
return self._apply_op("__lt__", other)
def __le__(self, other):
return self._apply_op("__le__", other)
def __gt__(self, other):
return self._apply_op("__gt__", other)
def __ge__(self, other):
return self._apply_op("__ge__", other)
def equals(self, other):
if self is other:
return True
if len(self) != len(other):
return False
elif len(self) == 1:
val = self[0] == other[0]
            # when self is a MultiIndex we need to check all values
if isinstance(val, np.ndarray):
return val.all()
return bool(val)
else:
result = self == other
if isinstance(result, bool):
return result
else:
return result._values.all()
def join(self, other, method, how="left", return_indexers=False):
column_join_res = self._values.join(
other._values,
how=how,
return_indexers=return_indexers,
method=method,
)
if return_indexers:
joined_col, indexers = column_join_res
joined_index = as_index(joined_col)
return joined_index, indexers
else:
return column_join_res
def rename(self, name, inplace=False):
"""
        Alter the Index name.
        Defaults to returning a new index.
Parameters
----------
name : label
Name(s) to set.
Returns
-------
Index
"""
if inplace is True:
self.name = name
return None
else:
out = self.copy(deep=False)
out.name = name
return out.copy(deep=True)
def astype(self, dtype):
"""Convert to the given ``dtype``.
Returns
-------
        If the dtype changed, a new ``Index`` is returned by casting each
        value to the given dtype.
If the dtype is not changed, ``self`` is returned.
"""
if dtype == self.dtype:
return self
return as_index(self._values.astype(dtype), name=self.name)
def to_array(self, fillna=None):
"""Get a dense numpy array for the data.
Parameters
----------
fillna : str or None
Defaults to None, which will skip null values.
If it equals "pandas", null values are filled with NaNs.
            Non-integer dtypes are promoted to np.float64.
Notes
-----
        If ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
return self._values.to_array(fillna=fillna)
def to_series(self):
from cudf.core.series import Series
return Series(self._values)
def isnull(self):
"""Identify missing values in an Index.
"""
return as_index(self._values.isnull(), name=self.name)
def isna(self):
"""Identify missing values in an Index. Alias for isnull.
"""
return self.isnull()
def notna(self):
"""Identify non-missing values in an Index.
"""
return as_index(self._values.notna(), name=self.name)
def notnull(self):
"""Identify non-missing values in an Index. Alias for notna.
"""
return self.notna()
    @property
    def is_unique(self):
        raise NotImplementedError
    @property
    def is_monotonic(self):
        return self.is_monotonic_increasing
    @property
    def is_monotonic_increasing(self):
        raise NotImplementedError
    @property
    def is_monotonic_decreasing(self):
        raise NotImplementedError
    def get_slice_bound(self, label, side, kind):
        raise NotImplementedError
def __array_function__(self, func, types, args, kwargs):
from cudf.core.series import Series
# check if the function is implemented for the current type
cudf_index_module = type(self)
for submodule in func.__module__.split(".")[1:]:
# point cudf_index_module to the correct submodule
if hasattr(cudf_index_module, submodule):
cudf_index_module = getattr(cudf_index_module, submodule)
else:
return NotImplemented
fname = func.__name__
handled_types = [Index, Series]
# check if we don't handle any of the types (including sub-class)
for t in types:
if not any(
issubclass(t, handled_type) for handled_type in handled_types
):
return NotImplemented
if hasattr(cudf_index_module, fname):
cudf_func = getattr(cudf_index_module, fname)
# Handle case if cudf_func is same as numpy function
if cudf_func is func:
return NotImplemented
else:
return cudf_func(*args, **kwargs)
else:
return NotImplemented
def isin(self, values):
return self.to_series().isin(values)
@property
def __cuda_array_interface__(self):
        raise NotImplementedError
def repeat(self, repeats, axis=None):
assert axis in (None, 0)
return as_index(self._values.repeat(repeats))
def memory_usage(self, deep=False):
return self._values._memory_usage(deep=deep)
@classmethod
def from_pandas(cls, index):
if not isinstance(index, pd.Index):
raise TypeError("not a pandas.Index")
ind = as_index(pa.Array.from_pandas(index))
ind.name = index.name
return ind
@classmethod
def _from_table(cls, table):
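        # a single-column table maps to a plain Index; more columns become a MultiIndex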
if table._num_columns == 0:
raise ValueError("Cannot construct Index from any empty Table")
if table._num_columns == 1:
return as_index(
next(iter(table._data.values())),
name=next(iter(table._data.keys())),
)
else:
return cudf.MultiIndex._from_table(table)
class RangeIndex(Index):
"""An iterable integer index defined by a starting value and ending value.
Can be sliced and indexed arbitrarily without allocating memory for the
complete structure.
Properties
    ----------
    _start: The first value
    _stop: The end value (exclusive)
name: Name of the index
"""
def __init__(self, start, stop=None, name=None):
"""RangeIndex(size), RangeIndex(start, stop)
Parameters
----------
start, stop: int
name: string
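        Examples
        --------
        >>> RangeIndex(5)            # same as RangeIndex(0, 5)
        RangeIndex(start=0, stop=5)
        >>> RangeIndex(1, 10, name="a")
        RangeIndex(start=1, stop=10, name='a')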
"""
if isinstance(start, range):
therange = start
start = therange.start
stop = therange.stop
if stop is None:
start, stop = 0, start
self._start = int(start)
self._stop = int(stop)
self._cached_values = None
self._index = None
self._name = name
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@cached_property
def _values(self):
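        # materialize the range as a GPU column on first access; @cached_property caches it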
if len(self) > 0:
vals = cudautils.arange(self._start, self._stop, dtype=self.dtype)
return column.as_column(vals)
else:
return column.column_empty(0, masked=False, dtype=self.dtype)
@property
def _data(self):
from cudf.utils.utils import OrderedColumnDict
return OrderedColumnDict({self.name: self._values})
def __contains__(self, item):
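        # only integral numeric values can belong to a RangeIndex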
if not isinstance(
item, tuple(np.sctypes["int"] + np.sctypes["float"] + [int, float])
):
return False
if not item % 1 == 0:
return False
if self._start <= item < self._stop:
return True
else:
return False
def copy(self, deep=True):
return RangeIndex(start=self._start, stop=self._stop, name=self.name)
def __repr__(self):
return (
"{}(start={}, stop={}".format(
self.__class__.__name__, self._start, self._stop
)
+ (
", name='{}'".format(str(self.name))
if self.name is not None
else ""
)
+ ")"
)
def __len__(self):
return max(0, self._stop - self._start)
def __getitem__(self, index):
from numbers import Number
if isinstance(index, slice):
start, stop, step = index.indices(len(self))
sln = (stop - start) // step
sln = max(0, sln)
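            # shift the normalized slice bounds into this RangeIndex's own start/stop space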
start += self._start
stop += self._start
if sln == 0:
return RangeIndex(0, None, self.name)
elif step == 1:
return RangeIndex(start, stop, self.name)
else:
return index_from_range(start, stop, step)
elif isinstance(index, Number):
index = utils.normalize_index(index, len(self))
index += self._start
return index
elif isinstance(index, (list, np.ndarray)):
index = np.asarray(index)
index = rmm.to_device(index)
else:
if is_scalar(index):
index = min_signed_type(index)(index)
index = column.as_column(index)
return as_index(self._values[index], name=self.name)
def __eq__(self, other):
return super(type(self), self).__eq__(other)
def __reduce__(self):
return (RangeIndex, (self._start, self._stop, self.name))
def equals(self, other):
if self is other:
return True
if len(self) != len(other):
return False
if isinstance(other, cudf.core.index.RangeIndex):
return self._start == other._start and self._stop == other._stop
else:
return (self == other)._values.all()
def serialize(self):
"""Serialize Index file storage or network transmission.
"""
header = {}
header["index_column"] = {}
# store metadata values of index separately
# We don't need to store the GPU buffer for RangeIndexes
# cuDF only needs to store start/stop and rehydrate
# during de-serialization
header["index_column"]["start"] = self._start
header["index_column"]["stop"] = self._stop
frames = []
header["name"] = pickle.dumps(self.name)
header["dtype"] = pickle.dumps(self.dtype)
header["type"] = pickle.dumps(type(self))
header["frame_count"] = 0
return header, frames
@classmethod
def deserialize(cls, header, frames):
"""
"""
h = header["index_column"]
name = pickle.loads(header["name"])
start = h["start"]
stop = h["stop"]
return RangeIndex(start=start, stop=stop, name=name)
@property
def dtype(self):
return np.dtype(np.int64)
@property
def is_contiguous(self):
return True
@property
def size(self):
return max(0, self._stop - self._start)
def find_label_range(self, first, last):
# clip first to range
if first is None or first < self._start:
begin = self._start
elif first < self._stop:
begin = first
else:
begin = self._stop
# clip last to range
if last is None:
end = self._stop
elif last < self._start:
end = begin
elif last < self._stop:
end = last + 1
else:
end = self._stop
# shift to index
return begin - self._start, end - self._start
@copy_docstring(_to_frame)
def to_frame(self, index=True, name=None):
return _to_frame(self, index, name)
def to_gpu_array(self):
return self._values.to_gpu_array()
def to_pandas(self):
return pd.RangeIndex(
start=self._start,
stop=self._stop,
dtype=self.dtype,
name=self.name,
)
@property
def is_unique(self):
return True
@property
def is_monotonic_increasing(self):
return self._start <= self._stop
@property
def is_monotonic_decreasing(self):
return self._start >= self._stop
def get_slice_bound(self, label, side, kind):
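        # labels outside the range clamp to 0 or len(self); inside, the bound is the
        # offset from _start (plus one for the exclusive right side)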
if label < self._start:
return 0
elif label >= self._stop:
return len(self)
else:
if side == "left":
return label - self._start
elif side == "right":
return (label - self._start) + 1
@property
def __cuda_array_interface__(self):
return self._values.__cuda_array_interface__
def memory_usage(self, **kwargs):
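        # a RangeIndex holds no device buffers, so it reports zero memory usage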
return 0
def unique(self):
# RangeIndex always has unique values
return self
def index_from_range(start, stop=None, step=None):
vals = cudautils.arange(start, stop, step, dtype=np.int64)
return as_index(vals)
class GenericIndex(Index):
"""An array of orderable values that represent the indices of another Column
Attributes
    ----------
_values: A Column object
name: A string
"""
def __init__(self, values, **kwargs):
"""
Parameters
----------
values : Column
The Column of values for this index
        name : str, optional
The name of the Index. If not provided, the Index adopts the value
Column's name. Otherwise if this name is different from the value
Column's, the values Column will be cloned to adopt this name.
"""
from cudf.core.series import Series
kwargs = _setdefault_name(values, kwargs)
# normalize the input
if isinstance(values, Series):
values = values._column
elif isinstance(values, column.ColumnBase):
values = values
else:
if isinstance(values, (list, tuple)):
if len(values) == 0:
values = np.asarray([], dtype="int64")
else:
                    values =
|
np.asarray(values)
|
numpy.asarray