| prompt | completion | api |
|---|---|---|
| stringlengths 19…879k | stringlengths 3…53.8k | stringlengths 8…59 |
"""
Generates plots from an execution stats trace and saves them into a /Plot/ folder.
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
sys.path.insert(1, "../")
from Utils.Stats import unpack_stats
# Path to the execution folder
path = "../temp/NNSGA_4f0/"
# Indicates whether the plots should be generated for every subdirectory of the 'path' folder
sub_directories = False
def plot_raw(key, dic, destination):
"""Plot raw stats as they were saved, ex. an objective for every single agent."""
for tup in dic[key]:
extended_ord = np.empty(tup[0][-1] + 1)  # +1 so the largest recorded index fits
extended_ord[:] = np.nan
for i in range(len(tup[0])):
extended_ord[tup[0][i]] = tup[1][i]
plt.plot(extended_ord)
plt.savefig(f"{destination}/Raw_{key}")
def plot(value, key, destination):
extensions = list()
max_range = 0
for tup in value:
if tup[0][-1] + 1 > max_range:
max_range = tup[0][-1] + 1
extended_ord = np.empty((tup[0][-1]+1))
extended_ord[:] = np.nan
for i in range(len(tup[0])):
extended_ord[tup[0][i]] = tup[1][i]
extensions.append(extended_ord)
plt.plot(extended_ord) # Raw stats
plt.title(f"{key} raw data")
plt.savefig(f"{destination}/Raw_{key}")
plt.clf()
# Min and Max
max_ext = np.empty(max_range)
max_ext[:] = np.nan
min_ext = np.empty(max_range)
min_ext[:] = np.nan
cat = [list() for i in range(max_range)]
for ext in extensions:
for i in range(len(ext)):
if not np.isnan(ext[i]):  # 'ext[i] != np.nan' is always True; NaN must be tested with isnan
cat[i].append(ext[i])
med_ext = np.empty(max_range)
med_ext[:] = np.nan
quart1 = np.empty(max_range)
quart1[:] = np.nan
quart3 = np.empty(max_range)
quart3[:] = np.nan
for i in range(len(cat)):
if len(cat[i]) != 0:
val = np.array(cat[i])
med_ext[i] = np.quantile(val, 0.5)
quart1[i] = np.quantile(val, 0.25)
quart3[i] = np.quantile(val, 0.75)
max_ext[i] = np.quantile(val, 1.0)
min_ext[i] = np.quantile(val, 0.0)
plt.fill_between(np.arange(max_range)  # api: numpy.arange
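# A hedged, hypothetical sketch of how this quartile band would typically be
# completed with the arrays computed above (the exact call in the original
# source is cut off at the dataset row boundary):
#
#   plt.fill_between(np.arange(max_range), quart1, quart3, alpha=0.3)
#   plt.plot(med_ext)
#   plt.title(f"{key} median and interquartile range")
#   plt.savefig(f"{destination}/Stats_{key}")  # hypothetical filename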
"""
Operation representation classes for the `densitymx_slow` evolution type.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import itertools as _itertools
import numpy as _np
import scipy.sparse as _sps
from scipy.sparse.linalg import LinearOperator
from .statereps import StateRep as _StateRep
from .. import basereps as _basereps
from pygsti.baseobjs.statespace import StateSpace as _StateSpace
from ...tools import basistools as _bt
from ...tools import internalgates as _itgs
from ...tools import lindbladtools as _lbt
from ...tools import matrixtools as _mt
from ...tools import optools as _ot
class OpRep(_basereps.OpRep):
def __init__(self, state_space):
self.state_space = state_space
@property
def dim(self):
return self.state_space.dim
def acton(self, state):
raise NotImplementedError()
def adjoint_acton(self, state):
raise NotImplementedError()
def aslinearoperator(self):
def mv(v):
if v.ndim == 2 and v.shape[1] == 1: v = v[:, 0]
in_state = _StateRep(_np.ascontiguousarray(v, 'd'), self.state_space)
return self.acton(in_state).to_dense(on_space='HilbertSchmidt')
def rmv(v):
if v.ndim == 2 and v.shape[1] == 1: v = v[:, 0]
in_state = _StateRep(_np.ascontiguousarray(v, 'd'), self.state_space)
return self.adjoint_acton(in_state).to_dense(on_space='HilbertSchmidt')
return LinearOperator((self.dim, self.dim), matvec=mv, rmatvec=rmv) # transpose, adjoint, dot, matmat?
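# Hedged usage sketch: once a concrete subclass implements acton/adjoint_acton,
# this LinearOperator view plugs directly into SciPy's iterative solvers, e.g.
#
#   rep = OpRepDenseSuperop(superop_matrix, state_space)  # defined below
#   L = rep.aslinearoperator()
#   y = L.matvec(v)   # forward action on a dense Hilbert-Schmidt vector
#   z = L.rmatvec(y)  # adjoint action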
class OpRepDenseSuperop(OpRep):
def __init__(self, mx, state_space):
state_space = _StateSpace.cast(state_space)
if mx is None:
mx = _np.identity(state_space.dim, 'd')  # api: numpy.identity
# Copyright (c) 2017, <NAME>
# See LICENSE file for details: <https://github.com/moble/quaternion/blob/master/LICENSE>
from __future__ import print_function, division, absolute_import
import numpy as np
import quaternion
from quaternion.numba_wrapper import njit
def slerp(R1, R2, t1, t2, t_out):
"""Spherical linear interpolation of rotors
This function uses a simpler interface than the more fundamental
`slerp_evaluate` and `slerp_vectorized` functions. The latter
are fast, being implemented at the C level, but take input `tau`
instead of time. This function adjusts the time accordingly.
Parameters
----------
R1: quaternion
Quaternion at beginning of interpolation
R2: quaternion
Quaternion at end of interpolation
t1: float
Time corresponding to R1
t2: float
Time corresponding to R2
t_out: float or array of floats
Times to which the rotors should be interpolated
"""
tau = (t_out-t1)/(t2-t1)
return np.slerp_vectorized(R1, R2, tau)  # api: numpy.slerp_vectorized
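# A minimal usage sketch for the slerp wrapper above (assumes the numpy-quaternion
# package is installed, which provides np.quaternion and np.slerp_vectorized):
q1 = np.quaternion(1, 0, 0, 0)                        # identity rotor at t1
q2 = np.quaternion(np.cos(0.25), 0, 0, np.sin(0.25))  # small rotation about z at t2
times = np.linspace(0.0, 1.0, 5)
interpolated = slerp(q1, q2, t1=0.0, t2=1.0, t_out=times)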
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Overview:
Registration and mapping cells onto CUBIC-Atlas
Usage:
AtlasMapping.py registration PARAM_FILE [-p NUM_CPUS]
AtlasMapping.py annotation PARAM_FILE [-p NUM_CPUS]
AtlasMapping.py full PARAM_FILE [-p NUM_CPUS]
Options:
-h --help Show this screen.
--version Show version.
-p NUM_CPUS Number of CPUs used for ANTs. [default: 20]
"""
import json, os.path, os, re, time
import tifffile
import joblib
from docopt import docopt
import subprocess as sp
import pandas as pd
import numpy as np
import nibabel as nib
import scipy.spatial
from HalfBrainCells import dt_local
from HDoG_classifier import get_X_3d
dt_annotated = np.dtype([
('mapped_x', 'f4'), ('mapped_y', 'f4'), ('mapped_z', 'f4'),
('atlas_id', 'u2')
])
def run_antsRegistration(prefix_ants, atlas_file, moving_file, dst_dir, threads):
cmd = "ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={THREADS} && "
cmd += "{EXECUTABLE} -d 3 "
cmd += "--initial-moving-transform [{ATLAS_FILE},{MOVING_FILE},1] "
cmd += "--interpolation Linear "
cmd += "--use-histogram-matching 0 "
cmd += "--float 0 "
cmd += "--output [{DST_PREFIX},{WARPED_FILE},{INVWARPED_FILE}] "
cmd += "--transform Affine[0.1] --metric MI[{ATLAS_FILE},{MOVING_FILE},1,128,Regular,0.5] --convergence [10000x10000x10000,1e-5,15] --shrink-factors 4x2x1 --smoothing-sigmas 2x1x0vox "
cmd += "--transform SyN[0.1,3.0,0.0] --metric CC[{ATLAS_FILE},{MOVING_FILE},1,5] --convergence [300x100x30,1e-6,10] --shrink-factors 4x2x1 --smoothing-sigmas 2x1x0vox"
cmd = cmd.format(
THREADS = threads,
EXECUTABLE = os.path.join(prefix_ants, "antsRegistration"),
DST_PREFIX = os.path.join(dst_dir, "F2M_"),
WARPED_FILE = os.path.join(dst_dir, "F2M_Warped.nii.gz"),
INVWARPED_FILE = os.path.join(dst_dir, "F2M_InvWarped.nii.gz"),
ATLAS_FILE = atlas_file,
MOVING_FILE = moving_file,
)
print("[*] Executing : {}".format(cmd))
sp.call(cmd, shell=True)
return
def run_antsApplyTransformsToPoints(prefix_ants, src_csv, dst_csv, dst_dir):
cmd = "{EXECUTABLE} "
cmd += "-d 3 "
cmd += "-i {SRC_CSV} "
cmd += "-o {DST_CSV} "
cmd += "-t [{AFFINE_MAT},1] "
cmd += "-t {INVWARP_NII}"
cmd = cmd.format(
EXECUTABLE = os.path.join(prefix_ants, "antsApplyTransformsToPoints"),
AFFINE_MAT = os.path.join(dst_dir, "F2M_0GenericAffine.mat"),
INVWARP_NII = os.path.join(dst_dir, "F2M_1InverseWarp.nii.gz"),
SRC_CSV = src_csv,
DST_CSV = dst_csv,
)
#print("[*] Executing : {}".format(cmd))
# supress output
with open(os.devnull, 'w') as devnull:
sp.check_call(cmd, shell=True, stdout=devnull)
return
def register(atlas_basedir, merging_basedir, mapping_basedir,
prefix_ANTs, atlas_voxel_unit, num_cpus=36,
atlas_basename="iso_80um",
):
atlas_tif_path = os.path.join(atlas_basedir, "{}.tif".format(atlas_basename))
atlas_nii_path = os.path.join(atlas_basedir, "{}.nii.gz".format(atlas_basename))
moving_tif_path = os.path.join(merging_basedir, "whole.tif")
moving_nii_path = os.path.join(mapping_basedir, "whole.nii.gz")
# prepare nifti image for atlas
print("[*] Preparing nifti image for atlas...")
img_atlas = tifffile.imread(atlas_tif_path)
if not os.path.exists(atlas_nii_path):
nii_atlas = nib.Nifti1Image(np.swapaxes(img_atlas,0,2), affine=None)
aff = np.diag([-atlas_voxel_unit,-atlas_voxel_unit,atlas_voxel_unit,1])
nii_atlas.header.set_qform(aff, code=2)
nii_atlas.to_filename(atlas_nii_path)
# prepare nifti image for moving
print("[*] Preparing nifti image for moving...")
img_moving = tifffile.imread(moving_tif_path)
nii_moving = nib.Nifti1Image(np.swapaxes(img_moving,0,2), affine=None)
aff = np.diag([-atlas_voxel_unit,-atlas_voxel_unit,atlas_voxel_unit,1])
nii_moving.header.set_qform(aff, code=2)
nii_moving.to_filename(moving_nii_path)
# run registration
run_antsRegistration(prefix_ants = prefix_ANTs,
atlas_file = atlas_nii_path,
moving_file = moving_nii_path,
dst_dir = mapping_basedir,
threads = num_cpus)
return
def map_and_annotate_cellstacks(list_src_pkl_path, list_annotated_pkl_path, total_num_cells,
prefix_ANTs, mapping_basedir, atlas_points_path,
downscale_unit, HDoG_basedir, clf, max_distance):
# apply transforms and annotate cells in stacks
# initialize
print("[{}] Loading point atlas and constructing KD Tree...".format(os.getpid()))
if atlas_points_path.endswith("pkl"):
df_atlas = joblib.load(atlas_points_path)
elif atlas_points_path.endswith("csv") or atlas_points_path.endswith("csv.gz"):
df_atlas = pd.read_csv(atlas_points_path, skiprows=1, header=None,
names=["X(um)","Y(um)","Z(um)","atlasID"],
dtype={"X(um)":np.float32, "Y(um)":np.float32, "Z(um)":np.float32, "atlasID":np.uint16})
tree = scipy.spatial.cKDTree( np.array([
df_atlas["X(um)"].values,
df_atlas["Y(um)"].values,
df_atlas["Z(um)"].values
]).T )
print("[{}] KD Tree Construction completed.".format(os.getpid()))
pat = re.compile(os.path.join(r'(?P<FWRV>FW|RV)', r'(?P<XYNAME>\d+_\d+)\.pkl$'))
count = 0
for src_pkl_path,annotated_pkl_path in zip(list_src_pkl_path, list_annotated_pkl_path):
start = time.time()
print("[{}]({:.2f}%| {:.0f}s) Loading scalemerged data ({})...".format(
os.getpid(), float(count)/total_num_cells*100,
time.time()-start, src_pkl_path))
data_scalemerged = joblib.load(src_pkl_path)
if data_scalemerged.shape[0] == 0:
print("[{}]({:.2f}%| {:.0f}s) No data points. skipping".format(
os.getpid(), float(count)/total_num_cells*100, time.time()-start))
continue
# use predicted cells if classifier is specified
if clf is not None:
m = pat.search(src_pkl_path)
if not m: raise ValueError
HDoG_bin_path = os.path.join(HDoG_basedir[m.group("FWRV")], m.group("XYNAME")+".bin")
print("[{}]({:.2f}%| {:.0f}s) Loading HDoG local data ({})...".format(
os.getpid(), float(count)/total_num_cells*100,
time.time()-start, HDoG_bin_path))
data_local = np.fromfile(HDoG_bin_path, dtype=dt_local)
X = get_X_3d(data_local)
pred = clf.predict(X)
is_valid = np.bitwise_and(pred, data_scalemerged["is_valid"])
else:
is_valid = data_scalemerged["is_valid"]
print("[{}]({:.2f}%| {:.0f}s) {:.1f} % valid data points.".format(
os.getpid(), float(count)/total_num_cells*100, time.time()-start,
float(np.count_nonzero(is_valid))/is_valid.shape[0]*100))
data_scalemerged_valid = data_scalemerged[is_valid]
if data_scalemerged_valid.shape[0] == 0:
print("[{}]({:.2f}%| {:.0f}s) No valid data points. skipping".format(
os.getpid(), float(count)/total_num_cells*100, time.time()-start))
continue
# write out coordinates as csv file for transformation
print("[{}]({:.2f}%| {:.0f}s) Transforming points...".format(
os.getpid(), float(count)/total_num_cells*100,
time.time()-start))
df = pd.DataFrame({
"X(um)":pd.Series(data_scalemerged_valid["scaled_x"]*downscale_unit, dtype=np.float32),
"Y(um)":pd.Series(data_scalemerged_valid["scaled_y"]*downscale_unit, dtype=np.float32),
"Z(um)":pd.Series(data_scalemerged_valid["scaled_z"]*downscale_unit, dtype=np.float32)
})
FWRV = os.path.basename(os.path.dirname(src_pkl_path))
basename = os.path.basename(src_pkl_path).replace(".pkl", ".csv")
tmp_csv_path = "/tmp/AtlasMapping-moving-{}-{}".format(FWRV, basename)
df.to_csv(tmp_csv_path, index=False, header=True, chunksize=50000,
columns=["X(um)","Y(um)","Z(um)"], float_format="%.3f")
transformed_csv_path = annotated_pkl_path.replace(".pkl", ".csv")
run_antsApplyTransformsToPoints(
prefix_ants = prefix_ANTs,
src_csv = tmp_csv_path,
dst_csv = transformed_csv_path,
dst_dir = mapping_basedir)
os.remove(tmp_csv_path)
print("[{}]({:.2f}%| {:.0f}s) Loading transformed csv({})...".format(
os.getpid(), float(count)/total_num_cells*100,
time.time()-start, transformed_csv_path))
df_transformed = pd.read_csv(
transformed_csv_path,
dtype={"X(um)":np.float32, "Y(um)":np.float32, "Z(um)":np.float32}
)
# start annotating
print("[{}]({:.2f}%| {:.0f}s) Starting annotation...".format(
os.getpid(), float(count)/total_num_cells*100, time.time()-start))
dist, idx = tree.query( np.array([
df_transformed["X(um)"].values,
df_transformed["Y(um)"].values,
df_transformed["Z(um)"].values,
]).T, k=1, eps=0, p=2, distance_upper_bound=max_distance)
os.remove(transformed_csv_path)
print("[{}]({:.2f}%| {:.0f}s) Finished annotation...".format(
os.getpid(), float(count)/total_num_cells*100, time.time()-start))
# save result
print("[{}]({:.2f}%| {:.0f}s) Saving annotated result to {}...".format(
os.getpid(), float(count)/total_num_cells*100,
time.time()-start, annotated_pkl_path))
print("[{}]({:.2f}%| {:.0f}s) There are {} orphan points.".format(
os.getpid(), float(count)/total_num_cells*100, time.time()-start,
np.count_nonzero(idx == tree.n)))
atlas_ID = np.zeros(idx.shape)  # api: numpy.zeros
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c), <NAME> and <NAME>'. #
# All rights reserved. #
# #
# This file is part of the Quantas code. #
# #
# For further information on the license, see the LICENSE file #
##############################################################################
import numpy as np
from scipy.integrate import quad
from scipy.constants import Avogadro as NA
from scipy.constants import Planck as h
from scipy.constants import Boltzmann as kb
class Kieffer(object):
def __init__(self, frequencies, cutoff=1.e-10):
""" Constructor method for the Kieffer's calculator.
Parameters
----------
frequencies: ndarray
Array of acoustic frequencies (in Hz).
"""
self.cutoff = cutoff
self.acofreq = frequencies
return
@property
def acofreq(self):
""" Acoustic frequencies stored in the class.
Returns
-------
ndarray(dtype=float, ndim=1)
Array containing the acoustic frequency values (in Hz).
"""
return self._acofreq
@acofreq.setter
def acofreq(self, frequencies):
""" Acoustic frequencies stored in the class.
Parameters
----------
frequencies: ndarray(dtype=float, ndim=1)
Array containing the acoustic frequency values (in Hz).
"""
self._acofreq = np.asarray(frequencies)
return
@property
def acofreq_exp(self):
""" Read-only property.
Returns
-------
ndarray(dtype=float, ndim=1)
Array of the exponents without temperature.
"""
return self._acofreq * h / kb
def helmholtz(self, temperature):
""" Calculate the acoustic contribution to the Helmholtz free energy
according to the Kieffer's model.
Parameters
----------
temperature: float
Temperature value at which the contribution is calculated.
Return
------
value: float
Acoustic contribution to the Helmholtz free energy.
"""
value = 0.
for acofreq in self.acofreq_exp:
value += self._helmholtz_integral(temperature, acofreq)
return value
def _helmholtz_integral(self, temperature, xnti):
"""
"""
def helmholtz_function(x, temperature, xnti):
xi = xnti / temperature
num = np.power((np.arcsin(x/xi)),2)*np.log(1-np.exp(-x))
den = np.power((np.power(xi, 2.) - np.power(x, 2.)), 0.5)
value = num / den
return value
if temperature == 0.:
return 0.
wmin = 1.e-6
wmax = xnti/temperature
function = lambda x: helmholtz_function(x, temperature, xnti)
integral, err = quad(function, wmin, wmax, epsrel=self.cutoff)
factor = 3 * temperature * NA * kb * np.power(2./np.pi, 3.)
return integral * factor
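# For reference, the quantity computed above is (per acoustic branch i, with
# X_i = h*nu_i/(k_B*T), i.e. acofreq_exp / T):
#
#   F_i(T) = 3 N_A k_B T (2/pi)^3 *
#            Integral_0^{X_i} arcsin(x/X_i)^2 * ln(1 - exp(-x)) / sqrt(X_i^2 - x^2) dx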
def heat_capacity(self, temperature):
""" Calculate the acoustic contribution to the isochoric (constant
volume) heat capacity according to the Kieffer's model.
Parameters
----------
temperature: float
Temperature value at which the contribution is calculated.
Return
------
value: float
Acoustic contribution to the isochoric heat capacity.
"""
value = 0.
for acofreq in self.acofreq_exp:
value += self._heat_capacity_integral(temperature, acofreq)
return value
def _heat_capacity_integral(self, temperature, xnti):
"""
"""
def heat_capacity_function(x, temperature, xnti):
""" Function for the acoustic contribution to the heat
capacity according to the Kieffer's model.
Parameters
----------
x: float
Current value of the term hv_i/(k_B T).
temperature: float
Temperature value (in K).
xnti: float
Maximum value of the term hv_i/(k_B T).
Return
------
value: float
Heat capacity value.
"""
xi = xnti / temperature
num = np.power(np.arcsin(x/xi), 2.) * np.power(x, 2.)
num *= np.exp(x)
den = np.power((np.power(xi, 2.) - np.power(x, 2.)), 0.5)
den *= np.power(np.exp(x) - 1., 2.)
value = num / den
return value
if temperature == 0.:
return 0.
wmin = 1.e-6
wmax = xnti/temperature
function = lambda x: heat_capacity_function(x, temperature, xnti)
integral, err = quad(function, wmin, wmax, epsrel=self.cutoff)
return integral * 3 * NA * kb * np.power(2./np.pi, 3.)
def entropy(self, temperature):
""" Calculate the acoustic contribution to entropy according to
the Kieffer's model.
Parameters
----------
temperature: float
Temperature value at which the contribution is calculated.
Return
------
value: float
Acoustic contribution to entropy.
"""
value = 0.
for acofreq in self.acofreq_exp:
value += self._entropy_integral(temperature, acofreq)
return value
def _entropy_integral(self, temperature, xnti):
"""
"""
def entropy_function(x, temperature, xnti):
""" Function for the acoustic contribution to the entropy
according to the Kieffer's model.
Parameters
----------
x: float
Current value of the term hv_i/(k_B T).
temperature: float
Temperature value (in K).
xnti: float
Maximum value of the term hv_i/(k_B T).
Return
------
value: float
Entropy value.
"""
xi = xnti / temperature
# Calculate the first addendum
num = np.power(np.arcsin(x/xi), 2.) *x
den = np.power((np.power(xi, 2.) - np.power(x, 2.)), 0.5)
den *= np.power(np.exp(x)  # api: numpy.exp
#!/usr/bin/env python3
import gym
import torch
import numpy as np
import multiprocessing as mp
import os
import pickle
import sys
import time
import logging
import cma
import argparse
from torchmodel import StandardFCNet
def _makedir(name):
if not os.path.exists(name):
os.makedirs(name)
def get_logger():
_makedir('log')
_makedir('data')
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
logger = logging.getLogger('MAIN')
logger.setLevel(logging.DEBUG)
return logger
class Task:
def __init__(self, envname, hidden_size, max_steps, target, pop_size, reps, test_reps, weight_decay, noise_std, sigma):
self.task = envname
self.env_fn = lambda: gym.make(self.task)
self.repetitions = reps
self.test_repetitions = test_reps
env = self.env_fn()
self.action_dim = env.action_space.shape[0]
self.state_dim = env.observation_space.shape[0]
self.reward_to_fitness = lambda r: r
self.max_steps = max_steps
self.pop_size = pop_size
self.num_workers = mp.cpu_count()
self.action_clip = lambda a: np.clip(a, -1, 1)
self.target = target
self.hidden_size = hidden_size
self.model_fn = lambda: StandardFCNet(self.state_dim, self.action_dim, self.hidden_size)
model = self.model_fn()
self.initial_weight = model.get_weight()
self.weight_decay = weight_decay
self.action_noise_std = noise_std
self.sigma = sigma
self.tag = 'CMA-%d' % (hidden_size)
class BaseModel:
def get_weight(self):
weight = []
for param in self.parameters():
weight.append(param.data.numpy().flatten())
weight = np.concatenate(weight, 0)
return weight
def set_weight(self, solution):
offset = 0
for param in self.parameters():
param_shape = param.data.numpy().shape
param_size = np.prod(param_shape)  # api: numpy.prod
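# Hedged sketch of how get_weight/set_weight would round-trip with CMA-ES
# (assumes StandardFCNet mixes in BaseModel, as Task.initial_weight above implies):
#
#   model = task.model_fn()                                   # StandardFCNet instance
#   es = cma.CMAEvolutionStrategy(model.get_weight(), task.sigma)
#   for candidate in es.ask():
#       model.set_weight(candidate)                           # load flat vector back into the parameters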
import numpy as np
from openmdao.components.meta_model import MetaModel
from openmdao.core.component import _NotSet
# generate variable names taking into account fidelity level
def _get_name_fi(name, fi_index):
if fi_index>0:
return "%s_fi%d" % (name, fi_index+1)
else:
return name
class MultiFiMetaModel(MetaModel):
""" Class that generalizes the MetaModel class to be able to train surrogates
with multi-fidelity training inputs. For a given number of levels of fidelity
**nfi** (given at initialization) the corresponding training input variables
*train:<invar>_fi<2..nfi>* and *train:<outvar>_fi<2..nfi>* are automatically created
besides the given *train:<invar>* and *train:<outvar>* variables.
Note that the index starts at 2; index 1 is omitted because the plain name
*<var>* is equivalent to *<var>_fi1*, which is intended to be the data of
highest fidelity.
The surrogate models are trained with a list of (m samples, n dim)
ndarrays built from the various training input data. By convention,
the fidelities are intended to be ordered from highest to lowest fidelity.
Obviously for a given level of fidelity corresponding lists
*train:<var>_fi<n>* have to be of the same size.
Thus given the initialization::
>>> mm = MultiFiMetaModel(nfi=2)
>>> mm.add_param('x1', 0.)
>>> mm.add_param('x2', 0.)
>>> mm.add_output('y1', 0.)
>>> mm.add_output('y2', 0.)
the following supplementary training input variables
``train:x1_fi2`` and ``train:x2_fi2`` are created together with the classic
ones ``train:x1`` and ``train:x2`` and the output variables ``train:y1_fi2``
and ``train:y2_fi2`` are created as well.
The embedded surrogate for y1 will be trained with a couple (X, Y).
Where X is the list [X_fi1, X_fi2] where X_fi1 is an (m1, 2) ndarray
filled with the m1 samples [x1 value, x2 value], X_fi2 is an (m2, 2) ndarray
filled with the m2 samples [x1_fi2 value, x2_fi2 value]
Where Y is a list [Y1_fi1, Y1_fi2] where Y1_fi1 is a (m1, 1) ndarray of
y1 values and Y1_fi2 a (m2, 1) ndarray y1_fi2 values.
.. note:: when *nfi* ==1 a :class:`MultiFiMetaModel` object behaves as
a :class:`MetaModel` object.
Options
-------
deriv_options['type'] : str('user')
Derivative calculation type ('user', 'fd', 'cs')
Default is 'user', where derivative is calculated from
user-supplied derivatives. Set to 'fd' to finite difference
this system. Set to 'cs' to perform the complex step
if your components support it.
deriv_options['form'] : str('forward')
Finite difference mode. (forward, backward, central)
deriv_options['step_size'] : float(1e-06)
Default finite difference stepsize
deriv_options['step_calc'] : str('absolute')
Set to absolute, relative
deriv_options['check_type'] : str('fd')
Type of derivative check for check_partial_derivatives. Set
to 'fd' to finite difference this system. Set to
'cs' to perform the complex step method if
your components support it.
deriv_options['check_form'] : str('forward')
Finite difference mode: ("forward", "backward", "central")
During check_partial_derivatives, the difference form that is used
for the check.
deriv_options['check_step_calc'] : str('absolute',)
Set to 'absolute' or 'relative'. Default finite difference
step calculation for the finite difference check in check_partial_derivatives.
deriv_options['check_step_size'] : float(1e-06)
Default finite difference stepsize for the finite difference check
in check_partial_derivatives"
deriv_options['linearize'] : bool(False)
Set to True if you want linearize to be called even though you are using FD.
"""
def __init__(self, nfi=1):
super(MultiFiMetaModel, self).__init__()
self._nfi = nfi
# generalize MetaModel training inputs to a list of training inputs
self._training_input = nfi*[np.zeros(0)]
self._input_sizes = nfi*[0]
def add_param(self, name, val=_NotSet, **kwargs):
super(MultiFiMetaModel, self).add_param(name, val, **kwargs)
self._input_sizes[0]=self._input_size
# Add train:<invar>_fi<n>
for fi in range(self._nfi):
if fi > 0:
name_with_fi = 'train:'+_get_name_fi(name, fi)
super(MetaModel, self).add_param(name_with_fi, val=[], pass_by_obj=True)
self._input_sizes[fi]+=self._init_params_dict[name]['size']
def add_output(self, name, val=_NotSet, **kwargs):
super(MultiFiMetaModel, self).add_output(name, val, **kwargs)
self._training_output[name]=self._nfi*[np.zeros(0)]
# Add train:<outvar>_fi<n>
for fi in range(self._nfi):
if fi > 0:
name_with_fi = 'train:'+_get_name_fi(name, fi)
super(MetaModel, self).add_param(name_with_fi, val=[], pass_by_obj=True)
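# Recap of the training variables created by the docstring example above (nfi=2);
# within each fidelity level, all train:* lists must have the same length:
#
#   train:x1,     train:x2,     train:y1,     train:y2        -> highest fidelity (m1 points)
#   train:x1_fi2, train:x2_fi2, train:y1_fi2, train:y2_fi2    -> lower fidelity   (m2 points)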
def _train(self):
"""Override MetaModel _train method to take into account multi-fidelity
input data. Basically, one (inputs, outputs) training set is assembled per fidelity level.
"""
if self._nfi==1:
# shortcut: fallback to base class behaviour immediatly
super(MultiFiMetaModel, self)._train()
return
num_sample = self._nfi*[None]
for name, sz in self._surrogate_param_names:
for fi in range(self._nfi):
name = _get_name_fi(name, fi)
val = self.params['train:' + name]
if num_sample[fi] is None:
num_sample[fi] = len(val)
elif len(val) != num_sample[fi]:
msg = "MetaModel: Each variable must have the same number"\
" of training points. Expected {0} but found {1} "\
"points for '{2}'."\
.format(num_sample[fi], len(val), name)
raise RuntimeError(msg)
for name, shape in self._surrogate_output_names:
for fi in range(self._nfi):
name = _get_name_fi(name, fi)
val = self.params['train:' + name]
if len(val) != num_sample[fi]:
msg = "MetaModel: Each variable must have the same number" \
" of training points. Expected {0} but found {1} " \
"points for '{2}'." \
.format(num_sample[fi], len(val), name)
raise RuntimeError(msg)
if self.warm_restart:
inputs = []
new_inputs = self._nfi*[None]
num_old_pts = self._nfi*[0]
for fi in range(self._nfi):
num_old_pts[fi] = self._training_input[fi].shape[0]
inputs.append(np.zeros((num_sample[fi] + num_old_pts[fi],
self._input_sizes[fi])))
if num_old_pts[fi] > 0:
inputs[fi][:num_old_pts[fi], :] = self._training_input[fi]
new_inputs[fi] = inputs[fi][num_old_pts[fi]:, :]
else:
inputs = [np.zeros((num_sample[fi], self._input_sizes[fi]))
for fi in range(self._nfi)]
new_inputs = inputs
self._training_input = inputs
# add training data for each input
idx = self._nfi*[0]
for name, sz in self._surrogate_param_names:
for fi in range(self._nfi):
if num_sample[fi] > 0:
name = _get_name_fi(name, fi)
val = self.params['train:' + name]
if isinstance(val[0], float):
new_inputs[fi][:, idx[fi]] = val
idx[fi] += 1
else:
for row_idx, v in enumerate(val):
if not isinstance(v, np.ndarray):
v = np.array(v)
new_inputs[fi][row_idx, idx[fi]:idx[fi]+sz] = v.flat
# add training data for each output
outputs=self._nfi*[None]
new_outputs=self._nfi*[None]
for name, shape in self._surrogate_output_names:
for fi in range(self._nfi):
name_fi = _get_name_fi(name, fi)
if num_sample[fi] > 0:
output_size = np.prod(shape)  # api: numpy.prod
import os
import tempfile
from StringIO import StringIO
import numpy as np
import pandas as pd
import pytest
import yaml
from pandas.util import testing as pdt
from .. import yamlio
@pytest.fixture
def test_cfg():
return {
'name': 'test',
'ytransform': 'xyz',
'unordered': 'abc'
}
@pytest.fixture
def expected_yaml():
return (
'name: test{linesep}{linesep}'
'ytransform: xyz{linesep}{linesep}'
'unordered: abc{linesep}').format(linesep=os.linesep)
@pytest.fixture
def test_file(request):
name = tempfile.NamedTemporaryFile(suffix='.yaml').name
def cleanup():
if os.path.exists(name):
os.remove(name)
request.addfinalizer(cleanup)
return name
def test_ordered_yaml(test_cfg, expected_yaml):
test_yaml = yamlio.ordered_yaml(test_cfg)
assert test_yaml == expected_yaml
def test_convert_to_yaml_str(test_cfg, expected_yaml):
test_yaml = yamlio.convert_to_yaml(test_cfg, str_or_buffer=None)
assert test_yaml == expected_yaml
def test_convert_to_yaml_file(test_cfg, expected_yaml, test_file):
yamlio.convert_to_yaml(test_cfg, test_file)
with open(test_file) as f:
assert f.read() == expected_yaml
def test_convert_to_yaml_buffer(test_cfg, expected_yaml):
test_buffer = StringIO()
yamlio.convert_to_yaml(test_cfg, test_buffer)
assert test_buffer.getvalue() == expected_yaml
class Test_yaml_to_dict(object):
@classmethod
def setup_class(cls):
cls.yaml_str = """
a:
x: 1
y: 2
z: 3
b:
x: 3
y: 4
z: 5
"""
cls.expect_dict = {
'a': {'x': 1, 'y': 2, 'z': 3},
'b': {'x': 3, 'y': 4, 'z': 5}}
def test_str(self):
assert yamlio.yaml_to_dict(yaml_str=self.yaml_str) == self.expect_dict
def test_file(self, test_file):
with open(test_file, 'w') as f:
f.write(self.yaml_str)
assert yamlio.yaml_to_dict(str_or_buffer=test_file) == self.expect_dict
def test_buffer(self):
buff = StringIO(self.yaml_str)
buff.seek(0)
assert yamlio.yaml_to_dict(str_or_buffer=buff) == self.expect_dict
def test_raises(self):
with pytest.raises(ValueError):
yamlio.yaml_to_dict()
def test_series_to_yaml_safe_int_index():
s = pd.Series(np.arange(100, 103)  # api: numpy.arange
""" Unit tests for visibility scatter gather
"""
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from processing_components.simulation.configurations import create_named_configuration
from processing_components.visibility.gather_scatter import visibility_gather_time, visibility_gather_w, \
visibility_scatter_time, visibility_scatter_w, visibility_scatter_channel, \
visibility_gather_channel
from processing_components.visibility.iterators import vis_wslices, vis_timeslices
from processing_components.visibility.base import create_visibility, create_blockvisibility
import logging
log = logging.getLogger(__name__)
class TestVisibilityGatherScatter(unittest.TestCase):
def setUp(self):
self.lowcore = create_named_configuration('LOWBD2-CORE')
self.times = numpy.linspace(-300.0, 300.0, 11) * numpy.pi / 43200.0
self.frequency = numpy.linspace(1e8, 1.5e9, 7)
self.channel_bandwidth = numpy.array(7 * [self.frequency[1] - self.frequency[0]])
self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
def actualSetUp(self, times=None):
if times is not None:
self.times = times
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0)
self.vis.data['vis'][:, 0] = self.vis.time
self.blockvis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0)
self.blockvis.data['vis'][...] = 1.0
def test_vis_scatter_gather_wstack(self):
self.actualSetUp()
vis_slices = vis_wslices(self.vis, 10.0)
vis_list = visibility_scatter_w(self.vis, vis_slices)
newvis = visibility_gather_w(vis_list, self.vis, vis_slices)
assert self.vis.nvis == newvis.nvis
assert numpy.max(numpy.abs(newvis.vis)) > 0.0
def test_vis_scatter_gather_timeslice(self):
self.actualSetUp()
vis_slices = vis_timeslices(self.vis, 'auto')
vis_list = visibility_scatter_time(self.vis, vis_slices)
newvis = visibility_gather_time(vis_list, self.vis, vis_slices)
assert self.vis.nvis == newvis.nvis
assert numpy.max(numpy.abs(newvis.vis)) > 0.0
def test_vis_scatter_gather_channel(self):
self.actualSetUp()
nchan = len(self.blockvis.frequency)
vis_list = visibility_scatter_channel(self.blockvis)
assert len(vis_list) == nchan
assert vis_list[0].vis.shape[-2] == 1
assert numpy.max(numpy.abs(vis_list[0].vis)) > 0.0
newvis = visibility_gather_channel(vis_list, self.blockvis)
assert len(newvis.frequency) == len(self.blockvis.frequency)
assert self.blockvis.nvis == newvis.nvis
assert numpy.max(numpy.abs(newvis.vis)) > 0.0
def test_vis_scatter_gather_channel_None(self):
self.actualSetUp()
vis_list = visibility_scatter_channel(self.blockvis)
assert len(vis_list) == len(self.blockvis.frequency)
assert vis_list[0].vis.shape[-2] == 1
assert numpy.max(numpy.abs(vis_list[0].vis)  # api: numpy.abs
import numpy as np
from sklearn.metrics import accuracy_score
import multiprocessing as mp
import sys, os
sys.path.append(os.getcwd())
try:
from quadboost.weak_learner import _WeakLearnerBase
from quadboost.utils import split_int, timed, ComparableMixin
from quadboost.utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes
except ModuleNotFoundError:
from weak_learner import _WeakLearnerBase
from utils import split_int, timed, ComparableMixin
from utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes
class MulticlassDecisionStump(_WeakLearnerBase):
"""
Decision stump classifier with innate multiclass algorithm.
It finds a stump to partition examples into 2 parts which minimizes the quadratic multiclass risk.
It assigns a confidence rate (scalar) to each class for each partition.
Parallelization is implemented for the 'fit' method.
"""
def fit(self, X, Y, W=None, n_jobs=1, sorted_X=None, sorted_X_idx=None):
"""
Fits the model by finding the best decision stump using the algorithm implemented in the StumpFinder class.
Args:
X (Array of shape (n_examples, ...)): Examples
Y (Array of shape (n_examples,) or (n_examples, n_classes)): Labels for the examples. If an encoder was provided at construction, Y should be a vector to be encoded.
W (Array of shape (n_examples, n_classes)): Weights of each examples according to their class. Should be None if Y is not encoded.
n_jobs (int, optional, default=1): Number of processes to execute in parallel to find the stump.
sorted_X (Array of shape (n_examples, ...), optional, default=None): Sorted examples along axis 0. If None, 'X' will be sorted, else it will not.
sorted_X_idx (Array of shape (n_examples, ...), optional, default=None): Indices of the sorted examples along axis 0 (corresponds to argsort). If None, 'X' will be argsorted, else it will not.
Returns self
"""
if self.encoder is not None:
Y, W = self.encoder.encode_labels(Y)
if sorted_X is None or sorted_X_idx is None:
sorted_X, sorted_X_idx = self.sort_data(X)
stump = self.find_stump(sorted_X, sorted_X_idx, Y, W, n_jobs)
for attr in ['feature', 'confidence_rates', 'stump', 'stump_idx', 'risks', 'risk']:
setattr(self, attr, getattr(stump, attr))
return self
def find_stump(self, sorted_X, sorted_X_idx, Y, W, n_jobs):
stump_finder = StumpFinder(sorted_X, sorted_X_idx, Y, W)
stumps_queue = SafeQueue()
if n_jobs > 1: # Need parallelization
n_features = sorted_X.shape[1]
args_iter = ((stumps_queue, sub_idx) for sub_idx in split_int(n_features, n_jobs))
parallel_processes(stump_finder.safe_find_stump, args_iter)
else: # No parallelization
stump_finder.find_stump(stumps_queue)
return min(stump for stump in stumps_queue)
def predict(self, X):
n_partitions, n_classes = self.confidence_rates.shape
n_examples = X.shape[0]
Y_pred = np.zeros((n_examples, n_classes))
for i, partition in enumerate(self.partition_generator(X)):
Y_pred[i] = self.confidence_rates[partition]
return Y_pred
def partition_generator(self, X):
"""
Partition examples into 2 sets denoted by 0 and 1 in a lazy iterator fashion.
"""
n_examples = X.shape[0]
for x in X.reshape((n_examples, -1)):
yield int(x[self.feature] > self.stump)
def partition(self, X, dtype=bool):
return np.array([p for p in self.partition_generator(X)], dtype=dtype)
@staticmethod
def sort_data(X):
"""
Necessary sorting operations on the data to find the optimal stump. It is useful to sort the data prior to boosting to speed up the algorithm, since the sorting step will then not be repeated at each round.
'sorted_X' and 'sorted_X_idx' should be passed as keyword arguments to the 'fit' method to speed up the algorithm.
"""
X = X.reshape((X.shape[0],-1))
n_examples, n_features = X.shape
sorted_X_idx = np.argsort(X, axis=0)
sorted_X = X[sorted_X_idx, range(n_features)]
return sorted_X, sorted_X_idx
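# Hedged usage sketch: pre-sort once, then reuse the result across boosting
# rounds via the fit() keyword arguments documented above (X, Y, W hypothetical):
#
#   sorted_X, sorted_X_idx = MulticlassDecisionStump.sort_data(X)
#   stump.fit(X, Y, W, sorted_X=sorted_X, sorted_X_idx=sorted_X_idx)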
class StumpFinder:
"""
Implements the algorithm to find the stump. It is separated from the class MulticlassDecisionStump so that it can be pickled when parallelized with 'multiprocessing' (which uses pickle).
"""
def __init__(self, sorted_X, sorted_X_idx, Y, W):
# multiprocessing Arrays are shared between processes to alleviate pickling
self.sorted_X = np.ctypeslib.as_array(mp.RawArray('d', sorted_X.size)).reshape(sorted_X.shape)
self.sorted_X[:] = sorted_X
self.sorted_X_idx = np.ctypeslib.as_array(mp.RawArray('i', sorted_X_idx.size)).reshape(sorted_X_idx.shape)
self.sorted_X_idx[:] = sorted_X_idx
self.zeroth_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.zeroth_moments[:] = W
self.first_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.first_moments[:] = W*Y
self.second_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.second_moments[:] = self.first_moments*Y
# # multiprocessing Arrays are shared between processed to alleviate pickling
# self.X_shape = sorted_X.shape
# self.X_idx_shape = sorted_X_idx.shape
# self.moments_shape = W.shape
# self.sorted_X = mp.Array('d', sorted_X.reshape(-1))
# self.sorted_X_idx = mp.Array('i', sorted_X_idx.reshape(-1))
# self.zeroth_moments = mp.Array('d', W.reshape(-1))
# self.first_moments = mp.Array('d', (W*Y).reshape(-1))
# self.second_moments = mp.Array('d', (W*Y*Y).reshape(-1))
def safe_find_stump(self, stumps_queue, sub_idx=(None,)):
"""
Handles exceptions raised in a subprocess so the script will not hang indefinitely.
This is basically a decorator for find_stump, but parallelizing requires pickling, and decorators cannot be pickled.
"""
with stumps_queue: # Context manager handles exceptions
self.find_stump(stumps_queue, sub_idx)
def find_stump(self, stumps_queue, sub_idx=(None,)):
"""
Algorithm to find the best stump within the sub-array of X specified by the bounds 'sub_idx'.
"""
X = self.sorted_X[:,slice(*sub_idx)]
X_idx = self.sorted_X_idx[:,slice(*sub_idx)]
_, n_classes = self.zeroth_moments.shape
n_examples, n_features = X.shape
n_partitions = 2
n_moments = 3
moments = np.zeros((n_moments, n_partitions, n_features, n_classes))
# At first, all examples are in partition 1
# Moments are not normalized so they can be computed cumulatively
moments[0,1] = np.sum(self.zeroth_moments[X_idx[:,0]], axis=0)
moments[1,1] = np.sum(self.first_moments[X_idx[:,0]], axis=0)
moments[2,1] = np.sum(self.second_moments[X_idx[:,0]], axis=0)
risks = self.compute_risks(moments) # Shape (n_partitions, n_features)
best_stump = Stump(risks, moments)
for i, row in enumerate(X_idx[:-1]):
self.update_moments(moments, row)
possible_stumps = ~np.isclose(X[i+1] - X[i], 0)
if possible_stumps.any():
risk = self.compute_risks(moments[:,:,possible_stumps,:])
best_stump.update(risk, moments, possible_stumps, stump_idx=i+1)
best_stump.compute_stump_value(X)
best_stump.feature += sub_idx[0] if sub_idx[0] is not None else 0
stumps_queue.append(best_stump)
def update_moments(self, moments, row_idx):
moments_update = np.array([self.zeroth_moments[row_idx],
self.first_moments[row_idx],
self.second_moments[row_idx]])
moments[:,0] += moments_update
moments[:,1] -= moments_update
def compute_risks(self, moments):
"""
Computes the risks for each partitions for every features.
"""
moments[np.isclose(moments,0)  # api: numpy.isclose
# Copyright © Simphony Project Contributors
# Licensed under the terms of the MIT License
# (see simphony/__init__.py for details)
import numpy as np
import pytest
from simphony.libraries import siepic
from simphony.simulation import Detector, DifferentialDetector, Laser, Simulation
from simphony.tools import wl2freq
@pytest.fixture
def mzi():
gc_input = siepic.GratingCoupler()
y_splitter = siepic.YBranch()
wg_long = siepic.Waveguide(length=150e-6)
wg_short = siepic.Waveguide(length=50e-6)
y_recombiner = siepic.YBranch()
gc_output = siepic.GratingCoupler()
y_splitter.multiconnect(gc_input, wg_long, wg_short)
y_recombiner.multiconnect(gc_output, wg_short, wg_long)
return (gc_input, gc_output)
@pytest.fixture
def oh():
x1 = siepic.GratingCoupler(name="x1")
s = siepic.GratingCoupler(name="s")
p1 = siepic.GratingCoupler(name="p1")
p2 = siepic.GratingCoupler(name="p2")
lo = siepic.GratingCoupler(name="lo")
x2 = siepic.GratingCoupler(name="x2")
xdc = siepic.BidirectionalCoupler()
lodc = siepic.BidirectionalCoupler()
pdc = siepic.BidirectionalCoupler()
x1_xdc = siepic.Waveguide(length=514e-6)
x2_xdc = siepic.Waveguide(length=514e-6)
s_y = siepic.Waveguide(length=208e-6)
lo_lodc = siepic.Waveguide(length=208e-6)
p1_pdc = siepic.Waveguide(length=81e-6)
p2_pdc = siepic.Waveguide(length=81e-6)
y_xdc = siepic.Waveguide(length=12e-6)
y_pdc = siepic.Waveguide(length=12e-6)
pdc_lodc = siepic.Waveguide(length=12e-6)
xdc_lodc = siepic.Waveguide(length=12e-6)
y = siepic.YBranch()
terminator = siepic.Terminator()
xdc.multiconnect(y_xdc, xdc_lodc, x1_xdc, x2_xdc)
lodc.multiconnect(lo_lodc, terminator, pdc_lodc, xdc_lodc)
pdc.multiconnect(p1_pdc, p2_pdc, y_pdc, pdc_lodc)
y.multiconnect(s_y, y_xdc, y_pdc)
x1.connect(x1_xdc)
s.connect(s_y)
p1.connect(p1_pdc)
p2.connect(p2_pdc)
lo.connect(lo_lodc)
x2.connect(x2_xdc)
return (x1, s, p1, p2, lo, x2)
class TestSimulation:
seed117 = [
0.00017481,
0.01219353,
-0.01773873,
0.02061959,
-0.00290609,
-0.0066712,
0.00846216,
0.00488167,
-0.01002604,
0.00672506,
-0.01299871,
0.0126199,
0.0007396,
0.00115915,
-0.00602,
0.00979,
-0.00520642,
-0.01741927,
-0.0240019,
0.03115938,
-0.00537727,
-0.00066326,
-0.00495342,
0.0002517,
-0.01819794,
-0.00936641,
0.00736962,
-0.01756158,
0.01517604,
0.00298318,
0.00553522,
-0.00281899,
0.01784163,
0.00610215,
-0.00944377,
-0.00967335,
0.03266932,
-0.00754913,
-0.00785714,
0.03044863,
-0.00879942,
0.02543895,
-0.00322589,
-0.00785712,
0.00815186,
-0.01540587,
0.00631346,
0.01470638,
-0.0051735,
0.00150219,
0.01991704,
-0.00193712,
0.01432663,
0.00699449,
0.00281496,
-0.0075551,
0.00341335,
0.01141054,
-0.00696104,
0.00628623,
-0.00156238,
0.00271096,
-0.00631849,
0.00724422,
0.00808875,
0.00742942,
-0.02009245,
0.0071186,
-0.00098557,
-0.01329963,
-0.00692713,
0.01484593,
0.01073398,
0.01623651,
-0.00623136,
-0.01092318,
-0.00766223,
-0.00344117,
0.01897063,
0.01066724,
-0.00842774,
-0.01002413,
0.01600654,
-0.00786538,
0.01610357,
0.01215284,
0.0039726,
0.0194278,
-0.00150813,
-0.00359058,
-0.00125099,
0.01863215,
-0.01533298,
-0.00367189,
0.005698,
-0.00949113,
0.00948224,
-0.00325547,
0.01019897,
0.00419238,
-0.00354101,
]
def test_context(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim1:
assert sim1.circuit is None
l1 = Laser().connect(gc_input)
d1 = Detector().connect(gc_output)
assert l1.circuit == gc_input.circuit == sim1.circuit
assert d1.circuit == gc_output.circuit == sim1.circuit
with Simulation() as _:
assert sim1.circuit is None
assert l1.circuit != gc_input.circuit
assert d1.circuit != gc_output.circuit
def test_sampling(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim1:
Laser().connect(gc_input)
Detector().connect(gc_output)
assert len(sim1.sample(100)[0][0]) == 100
assert len(sim1.sample(101)[0][0]) == 101
def test_seed(self, mzi):
gc_input, gc_output = mzi
with Simulation(seed=117) as sim1:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim1.sample(101)
assert np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
with Simulation(seed=118) as sim2:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim2.sample(101)
assert not np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
with Simulation() as sim3:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim3.sample(101)
assert not np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
with Simulation(seed=117) as sim4:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim4.sample(101)
assert np.allclose(data[0][0], self.seed117, rtol=0, atol=1e-8)
def test_sampling_frequency(self, mzi):
gc_input, gc_output = mzi
data1 = None
with Simulation(fs=10e9, seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data1 = sim.sample(1001)
data2 = None
with Simulation(fs=10e9, seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data2 = sim.sample(1001)
assert np.allclose(data1[0][0], data2[0][0], rtol=0, atol=1e-11)
class TestSingleDetector:
result = 0.00017544
results = [
1.80576404e-04,
1.08063217e-02,
-1.84591717e-02,
2.11631266e-02,
-4.24527434e-03,
-5.53885990e-03,
8.67396297e-03,
5.28644276e-03,
-1.02520694e-02,
8.05882087e-03,
-1.25512983e-02,
1.18939574e-02,
-3.92095769e-06,
3.61245566e-03,
-6.60295137e-03,
9.18355753e-03,
-2.92043587e-03,
-1.80968121e-02,
-2.20941667e-02,
3.09025569e-02,
-5.98374595e-03,
-6.09039074e-05,
-6.12987780e-03,
]
def test_single_sample(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector().connect(gc_output)
data = sim.sample()
assert np.allclose(data[0][0], [self.result], rtol=0, atol=1e-8)
def test_conversion_gain(self, mzi):
gc_input, gc_output = mzi
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector(conversion_gain=7).connect(gc_output)
data = sim.sample()
assert np.allclose(data[0][0], [self.result * 7], rtol=0, atol=1e-7)
def test_noise(self, mzi):
gc_input, gc_output = mzi
with Simulation(seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(gc_input)
Detector(noise=1e-3).connect(gc_output)
data = sim.sample(23)
assert np.allclose(data[0][0], self.results, rtol=0, atol=1e-8)
class TestDifferentialDetector:
cmrr_x = [
0.00185109,
0.00438826,
-0.00162186,
0.0004025,
0.00595868,
-0.00064218,
0.00427251,
0.00205762,
0.00079248,
-0.00233544,
0.00097263,
0.00339304,
-0.00215764,
0.00184303,
-0.00052858,
0.00076543,
-0.001965,
0.0021287,
0.00238189,
0.00219444,
-0.00612456,
0.00209419,
-0.00035425,
]
cmrr_p = [
-0.00470416,
0.00939843,
0.00632678,
0.00376477,
-0.00135284,
-0.00634382,
-0.00374078,
0.00145949,
-0.0010054,
0.00687253,
-0.00553449,
0.00346154,
-0.00358327,
-0.00438276,
-0.0039282,
-0.00549966,
0.00577782,
-0.00183013,
-0.00431677,
0.00059047,
0.00173069,
0.00035287,
0.00030604,
]
result = [
6.820933398426216e-05,
-2.51779027237116e-06,
7.072712425663332e-05,
7.528059784829445e-05,
-2.2353588319872576e-06,
7.751595668028171e-05,
]
x1results = [
7.25832807e-05,
6.45213384e-03,
-1.16782238e-02,
1.32502362e-02,
-2.92429196e-03,
-3.29487901e-03,
5.40473883e-03,
3.32631865e-03,
-6.47341674e-03,
5.21927531e-03,
-7.78813016e-03,
7.24665505e-03,
-1.74786835e-04,
2.64408006e-03,
-4.26117438e-03,
5.57803566e-03,
-1.45885813e-03,
-1.14445296e-02,
-1.34812942e-02,
1.91818955e-02,
-3.87934796e-03,
2.70018878e-05,
-4.07081299e-03,
]
xresults = [
0.00185101,
0.00642166,
-0.0046529,
0.00386158,
0.00543731,
-0.00180061,
0.0056746,
0.00285391,
-0.00093357,
-0.00122723,
-0.00125639,
0.00549862,
-0.00206215,
0.00200947,
-0.0015768,
0.00239221,
-0.00287553,
-0.0008483,
-0.00170886,
0.00743684,
-0.00706408,
0.00195226,
-0.00122203,
]
x2results = [
7.51125467e-05,
6.59171046e-03,
-1.18798537e-02,
1.34858866e-02,
-2.95698094e-03,
-3.37021160e-03,
5.50204769e-03,
3.38258002e-03,
-6.58734386e-03,
5.29622841e-03,
-7.93593533e-03,
7.39107533e-03,
-1.65775053e-04,
2.65791212e-03,
-4.32937195e-03,
5.69003709e-03,
-1.51750747e-03,
-1.16429994e-02,
-1.37543005e-02,
1.95380050e-02,
-3.94001069e-03,
1.98397377e-05,
-4.12678903e-03,
]
p1results = [
-0.00012833,
0.00777554,
-0.01088095,
0.01399814,
-0.00186632,
-0.00567741,
0.00499696,
0.00325294,
-0.00684533,
0.00448832,
-0.0088845,
0.00729312,
0.00244817,
-0.00056501,
-0.00356172,
0.00697164,
-0.00434339,
-0.01221006,
-0.0154076,
0.02053649,
-0.00297821,
-0.00016577,
-0.00318841,
]
presults = [
-0.00470424,
0.01131443,
0.00347076,
0.00702412,
-0.00184411,
-0.00743536,
-0.00241965,
0.0022098,
-0.00263178,
0.00791674,
-0.0076348,
0.00544554,
-0.00349329,
-0.00422592,
-0.0049159,
-0.00396682,
0.00491986,
-0.00463523,
-0.00817131,
0.00553016,
0.00084541,
0.00021914,
-0.00051163,
]
p2results = [
-0.00012596,
0.00789399,
-0.01105161,
0.01419768,
-0.00189359,
-0.00574133,
0.0050792,
0.00330085,
-0.00694156,
0.00455373,
-0.00900964,
0.00741552,
0.00245592,
-0.00055348,
-0.00361925,
0.00706657,
-0.00439297,
-0.0123776,
-0.01563877,
0.02083776,
-0.00302955,
-0.00017171,
-0.00323547,
]
def test_single_sample(self, oh):
x1, s, p1, p2, lo, x2 = oh
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(lo)
DifferentialDetector().multiconnect(x1, x2)
DifferentialDetector().multiconnect(p1, p2)
x1, x, x2, p1, p, p2 = sim.sample()
assert np.allclose(
[
x1[0][0][0],
x[0][0][0],
x2[0][0][0],
p1[0][0][0],
p[0][0][0],
p2[0][0][0],
],
self.result,
)
def test_conversion_gain(self, oh):
x1, s, p1, p2, lo, x2 = oh
with Simulation() as sim:
Laser(power=1e-3, wl=1550e-9).connect(lo)
DifferentialDetector(
monitor_conversion_gain=7, rf_conversion_gain=7
).multiconnect(x1, x2)
DifferentialDetector(
monitor_conversion_gain=7, rf_conversion_gain=7
).multiconnect(p1, p2)
x1, x, x2, p1, p, p2 = sim.sample()
assert np.allclose(
[
x1[0][0][0],
x[0][0][0],
x2[0][0][0],
p1[0][0][0],
p[0][0][0],
p2[0][0][0],
],
np.array(self.result) * 7,
)
def test_noise(self, oh):
x1, s, p1, p2, lo, x2 = oh
with Simulation(seed=117) as sim:
Laser(power=1e-3, wl=1550e-9).connect(lo)
DifferentialDetector(monitor_noise=800e-6, rf_noise=4e-3).multiconnect(
x1, x2
)
DifferentialDetector(monitor_noise=800e-6, rf_noise=4e-3).multiconnect(
p1, p2
)
x1, x, x2, p1, p, p2 = sim.sample(23)
assert np.allclose(x1[0][0], self.x1results, rtol=0, atol=1e-7)
assert np.allclose(x[0][0], self.xresults, rtol=0, atol=1e-7)
assert np.allclose(x2[0][0], self.x2results, rtol=0, atol=1e-7)
assert np.allclose(p1[0][0], self.p1results, rtol=0, atol=1e-7)
assert np.allclose(p[0][0], self.presults, rtol=0, atol=1e-7)  # api: numpy.allclose
import numpy as np
import pandas as pd
import pytest
from ..viewers import spectrum
from scipy.spatial import distance
from ..cooking_machine.models.base_model import BaseModel
# to run all test
@pytest.fixture(scope="function")
def experiment_enviroment(request):
"""
Creates environment for experiment.
"""
problem_size_x1 = 10
problem_size_y = 2
matrix_left = np.random.rand(problem_size_x1, problem_size_y)
matrix = distance.squareform(distance.pdist(matrix_left, 'jensenshannon'))
np.fill_diagonal(matrix, 10 * np.max(matrix))
return matrix
def test_triplet_generator():
""" """
left_answer = list(spectrum.generate_all_segments(6))
right_answer = [[0, 2, 4]]
np.testing.assert_array_equal(left_answer, right_answer)
def test_random_generator_len():
""" """
left_answer = len(spectrum.generate_index_candidates(10))
right_answer = 3
assert left_answer == right_answer
def test_random_generator_sort():
""" """
left_answer = spectrum.generate_index_candidates(10)
assert np.all(np.diff(left_answer) > 0)
def test_swap_all_unique(experiment_enviroment):
"""
Checks if swap works.
"""
matrix = experiment_enviroment
init = list(np.append(np.arange(10), [0]))
seq = [0, 4, 8]
tour = spectrum.make_three_opt_swap(init, matrix, seq)[0]
assert set(range(10)) == set(tour)
def test_swap_same_len(experiment_enviroment):
""" """
matrix = experiment_enviroment
init = list(np.append(np.arange(10), [0]))
seq = [0, 4, 8]
tour = spectrum.make_three_opt_swap(init, matrix, seq)[0]
assert len(init) == len(tour)
def test_solve_tsp():
""" """
matrix = np.array([
[0.0, 0.0],
[0.0, 1],
[0.0, -1],
[5, 0.0],
[-5, 0.0],
[0.5, 0.5],
])
distance_m = distance.squareform(distance.pdist(matrix, 'euclidean'))
np.fill_diagonal(distance_m, 10 * np.max(distance_m))
init = list(np.append(np.arange(6)  # api: numpy.arange
import unittest
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from clusterz.algs.kzmeans import (
DistributedKZMeans, BELDistributedKMeans, k_means_my, kz_means, KZMeans, KMeansWrapped
)
class MyTestCase(unittest.TestCase):
def setUp(self):
cluster = np.random.uniform(-1, 1, size=(40, 2))
self.centers_ = np.array([
[0, 30], [0, -30]
])
self.outliers_ = np.array([
[80, 0], [-80, 0]
])
# data set on a single machine
self.X_without_outliers_ = np.vstack(
[self.centers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[0] + np.array([-5, 0]),
cluster + self.centers_[1] + np.array([5, 0]),
cluster + self.centers_[1] + np.array([-5, 0])])
self.X_with_outliers_ = np.vstack(
[self.centers_,
self.outliers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0])  # api: numpy.array
import numpy as np
import matplotlib.pyplot as plt; plt.ioff()
import copy
from .class_utils import *
from .utils import *
from astropy.cosmology import Planck15
import astropy.constants as co
c = co.c.value # speed of light, in m/s
G = co.G.value # gravitational constant in SI units
Msun = co.M_sun.value # solar mass, in kg
Mpc = 1e6*co.pc.value # 1 Mpc, in m
arcsec2rad = np.pi/(180.*3600.)
rad2arcsec =3600.*180./np.pi
deg2rad = np.pi/180.
rad2deg = 180./np.pi
__all__ = ['LensRayTrace','GenerateLensingGrid','thetaE','get_caustics','CausticsSIE']
def LensRayTrace(xim,yim,lens,Dd,Ds,Dds):
"""
Wrapper to pass off lensing calculations to any number of functions
defined below, accumulating lensing offsets from multiple lenses
and shear as we go.
"""
# Ensure lens is a list, for convenience
lens = list(np.array([lens]).flatten())
ximage = xim.copy()
yimage = yim.copy()
for i,ilens in enumerate(lens):
if ilens.__class__.__name__ == 'SIELens': ilens.deflect(xim,yim,Dd,Ds,Dds)
elif ilens.__class__.__name__ == 'ExternalShear': ilens.deflect(xim,yim,lens[0])
ximage += ilens.deflected_x; yimage += ilens.deflected_y
return ximage,yimage
def GenerateLensingGrid(data=None,xmax=None,emissionbox=[-5,5,-5,5],fieldres=None,emitres=None):
"""
Routine to generate two grids for lensing. The first will be a lower-resolution
grid with resolution determined by fieldres and size determined
by xmax. The second is a much higher resolution grid which will be used for
the lensing itself, with resolution determined by emitres and size
determined from emissionbox - i.e., emissionbox should contain the coordinates
which conservatively encompass the real emission, so we only have to lens that part
of the field at high resolution.
Since we're going to be FFT'ing with these coordinates, the resolution isn't
directly set-able. For the low-res full-field map, it instead is set to the next-higher
power of 2 from what would be expected from having ~4 resolution elements across
the synthesized beam.
Inputs:
data:
A Visdata object, used to determine the resolutions of
the two grids (based on the image size or maximum uvdistance in the dataset)
xmax:
Field size for the low-resolution grid in arcsec, which will extend from
(-xmax,-xmax) to (+xmax,+xmax), e.g. (-30,-30) to (+30,+30)arcsec. Should be
at least a bit bigger than the primary beam. Not needed for images.
emissionbox:
A 1x4 list of [xmin,xmax,ymin,ymax] defining a box (in arcsec) which contains
the source emission. Coordinates should be given in arcsec relative to the
pointing/image center.
fieldres,emitres:
Resolutions of the coarse, full-field and fine (lensed) field, in arcsec.
If not given, suitable values will be calculated from the visibilities.
fieldres is unnecessary for images.
Returns:
If there are any Visdata objects in the datasets, returns:
xmapfield,ymapfield:
2xN matrices containing x and y coordinates for the full-field, lower-resolution
grid, in arcsec.
xmapemission,ymapemission:
2xN matrices containing x and y coordinates for the smaller, very high resolution
grid, in arcsec.
indices:
A [4x1] array containing the indices of xmapfield,ymapfield which overlap with
the high resolution grid.
"""
# Factors higher-resolution than (1/2*max(uvdist)) to make the field and emission grids
Nover_field = 4.
Nover_emission = 8.
# Allow multiple visdata objects to be passed, pick the highest resolution point of all
uvmax = 0.
try:
for vis in data:
uvmax = max(uvmax,vis.uvdist.max())
except TypeError:
uvmax = data.uvdist.max()
# Calculate resolutions of the grids
if fieldres is None: fieldres = (2*Nover_field*uvmax)**-1.
else: fieldres *= arcsec2rad
if emitres is None: emitres = (2*Nover_emission*uvmax)**-1.
else: emitres *= arcsec2rad
# Calculate the field grid size as a power of 2.
Nfield = 2**np.ceil(np.log2(2*np.abs(xmax)*arcsec2rad/fieldres))
# Calculate the grid coordinates for the larger field.
fieldcoords = np.linspace(-np.abs(xmax),np.abs(xmax),int(Nfield))
xmapfield,ymapfield = np.meshgrid(fieldcoords,fieldcoords)
# Calculate the indices where the high-resolution lensing grid meets the larger field grid
indices = np.round(np.interp(np.asarray(emissionbox),fieldcoords,np.arange(Nfield)))
indices = [int(ind) for ind in indices] # cast to int's for newer numpy versions
# Calculate the grid coordinates for the high-res lensing grid; grids meet at indices. Some pixel-shifting reqd.
Nemx = 1 + np.abs(indices[1]-indices[0])*np.ceil((fieldcoords[1]-fieldcoords[0])/(2*emitres*rad2arcsec))
Nemy = 1 + np.abs(indices[3]-indices[2])*np.ceil((fieldcoords[1]-fieldcoords[0])/(2*emitres*rad2arcsec))
xemcoords = np.linspace(fieldcoords[indices[0]],fieldcoords[indices[1]],int(Nemx))
yemcoords = np.linspace(fieldcoords[indices[2]],fieldcoords[indices[3]],int(Nemy))
xmapemission,ymapemission = np.meshgrid(xemcoords,yemcoords)
xmapemission -= (xmapemission[0,1]-xmapemission[0,0])
ymapemission -= abs((ymapemission[1,0]-ymapemission[0,0]))
return xmapfield,ymapfield,xmapemission,ymapemission,indices
def thetaE(ML,zL,zS,cosmo=Planck15):
"""
Calculate the Einstein radius in arcsec of a lens of mass ML,
assuming redshifts zL and zS. If cosmo is None, Planck15
is assumed. ML is in solar masses.
"""
Dd = cosmo.angular_diameter_distance(zL).value # in Mpc
Ds = cosmo.angular_diameter_distance(zS).value
Dds= cosmo.angular_diameter_distance_z1z2(zL,zS).value
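    # Point-mass Einstein radius: theta_E = sqrt(4 G M_L D_ds / (c^2 D_d D_s)); the Mpc factor
    # converts the angular diameter distances to meters, rad2arcsec converts the result to arcsec.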
thE = np.sqrt((4*G*ML*Msun*Dds) / (c**2 * Dd*Ds*Mpc)) * rad2arcsec
return thE
def get_caustics(lens,Dd,Ds,Dds,highresbox=[-2.,2.,-2.,2.],numres=0.01):
"""
Routine to calculate the locations for the lensing caustics.
If lens is either a single SIELens or a [SIELens,ExternalShear],
we calculate the caustics analytically, otherwise it has to be
a numerical calculation.
Inputs:
lens:
Either a single SIELens object, or a list of lens/shear
objects. If a single SIELens or one lens and shear, we
calculate the caustic locations analytically. If there's
more than one lens, we calculate the caustics numerically.
In this case you may want to play with the highresbox
and numres parameters, which affect how precisely the
caustics are calculated.
Dd,Ds,Dds:
Angular diameter distances to the lens, source, and lens-source,
respectively.
highresbox:
List of four coordinates, [xmin, xmax, ymin, ymax], within
which the caustics lie. You want this box to fully contain
the caustics. A good rule of thumb would be to pad the
positions of your lenses with the Einstein radius of the most
massive lens.
numres:
Resolution (in arcsec) of the highresbox above. A smaller
number here will make the caustics look nicer, because there
are more points to connect to make the caustics. This matters
most for the outer (ellipse-shaped, usually) caustic.
Returns:
2xN list:
Arrays containing the x and y coordinates for the caustics that
exist, with x and y in arcsec under the same convention as the
rest of the code (+y = north, +x = east). You can plot them with,
e.g.,
Standard matplotlib axis object:
for caustic in caustics:
ax.plot(caustic[:,0],caustic[:,1],ls='-',marker='',lw=1)
aplpy FITSFigure for fits image plotting:
ax = aplpy.FITSFigure('myimage.fits')
myfitshead = astropy.fits.open('myimage.fits')[0].header
ref_ra, ref_dec = myfitshead['CRVAL1'], myfitshead['CRVAL2']
for i in range(caustics.shape[0]):
ax.show_lines([np.vstack([caustics[:,0]+ref_ra,caustics[:,1]+ref_dec])],color='k',lw=1)
"""
# Figure out if we can do this analytically
lens = list(np.array([lens]).flatten())
for l in lens: l._altered = True
lens = [copy.deepcopy(l) for l in lens]
whichlens = [isinstance(l,SIELens) for l in lens]
whichshear = [isinstance(l,ExternalShear) for l in lens]
if sum(whichlens) == 1:
if sum(whichshear) == 1:
return CausticsSIE(lens[0],Dd,Ds,Dds,lens[1])
else:
return CausticsSIE(lens[0],Dd,Ds,Dds,Shear=None)
else: # we calculate them numerically
# first try to figure out where the caustics are going to be
# based on position & einstein radii
cximage = np.arange(highresbox[0],highresbox[1],numres)
cyimage = np.arange(highresbox[2],highresbox[3],numres)
cximage, cyimage = np.meshgrid(cximage,cyimage)
xsource,ysource = LensRayTrace(cximage,cyimage,lens,Dd,Ds,Dds)
jxy, jxx = np.gradient(xsource); jyy, jyx = np.gradient(ysource)
A = jxx*jyy - jxy*jyx
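        # A approximates the Jacobian determinant of the image-plane -> source-plane mapping;
        # the critical curves map to the caustics, i.e. the contours where A == 0.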
# it's pretty dumb that we have to do this...
tmpfig = plt.figure(); dummyax = tmpfig.add_subplot(111)
cset = dummyax.contour(xsource,ysource,A,levels=[0.])
plt.close(tmpfig)
contours = cset.collections[0].get_paths()
caustics = []
for contour in contours:
xcon,ycon = contour.vertices[:,0], contour.vertices[:,1]
caustic = np.vstack([xcon,ycon]).T
caustics.append(caustic)
for l in lens: l._altered = True
return caustics
def CausticsSIE(SIELens,Dd,Ds,Dds,Shear=None):
"""
Routine to calculate and return the analytical solutions for the caustics
of an SIE Lens, following Kormann+94.
Inputs:
SIELens:
An SIELens object for which to calculate the caustics.
Dd,Ds,Dds:
Angular diameter distances to the lens, source, and lens-source, respectively.
Shear:
An ExternalShear object describing the shear of the lens.
Returns:
2xN list:
Arrays containing the x and y coordinates for the caustics that exist (i.e.,
will have [[xr,yr]] for the radial caustic only if lens ellipticity==0, otherwise
will have [[xr,yr],[xt,yt]] for radial+diamond caustics)
"""
# Following Kormann+ 1994 for the lensing. Easier to work with axis ratio than ellipticity
f = 1. - SIELens.e['value']
fprime = np.sqrt(1. - f**2.)
# K+94 parameterize lens in terms of LOS velocity dispersion; calculate here in m/s
sigma_lens = ((SIELens.M['value']*Ds*G*Msun*c**2.)/(4.*np.pi**2. * Dd*Dds*Mpc))**(1./4.)
# Einstein radius, for normalizing the size of the caustics, b in notation of Keeton+00
b = 4 * np.pi * (sigma_lens/c)**2. * (Dds/Ds) * rad2arcsec
# Caustics calculated over a full 0,2pi angle range
phi = np.linspace(0,2*np.pi,2000)
# K+94, eq 21c; needed for diamond caustic
Delta = np.sqrt(np.cos(phi)**2. + f**2. * np.sin(phi)**2.)
if ((Shear is None) or (np.isclose(Shear.shear['value'],0.))):
# Need to account for when ellipticity=0, as caustic equations have cancelling infinities
# In that case, Delta==1 and there's only one (radial and circular) caustic
if np.isclose(f,1.):
xr,yr = -b*np.cos(phi)+SIELens.x['value'], -b*np.sin(phi)+SIELens.y['value']
caustic = np.atleast_3d([xr,yr])
return caustic.reshape(caustic.shape[2],caustic.shape[0],caustic.shape[1])
else:
# Calculate the radial caustic coordinates
xr = (b*np.sqrt(f)/fprime)*np.arcsinh(np.cos(phi)*fprime/f)
yr = (-b*np.sqrt(f)/fprime)*np.arcsin(np.sin(phi)*fprime)
# Now rotate & shift the caustic to match the PA & loc of the lens
r,th = cart2pol(xr,yr)
xr,yr = pol2cart(r,th+SIELens.PA['value']*deg2rad)
xr += SIELens.x['value']
yr += SIELens.y['value']
# Calculate the tangential caustic coordinates
            xt = b*(((np.sqrt(f)/Delta) * np.cos(phi)) - ((np.sqrt(f)/fprime)*np.arcsinh(fprime/f * np.cos(phi))))
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 14:26:54 2019
@author: Mnemosyne
Functions to compute the features of the song
"""
import os
import shutil
import glob
import sys
import random
import re
import numpy as np
import scipy as sp
import scipy.io.wavfile as wav
from scipy.fftpack import fft, rfft
from scipy.optimize import curve_fit
import scipy.signal as signal
from scipy.stats.mstats import gmean
from sklearn.cluster import KMeans
from pydub import AudioSegment
from pydub import silence
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import axes3d
from threading import Thread
import librosa.feature
import librosa.effects
from songbird_data_analysis import Song_functions
# Onset and offset of the syllables: to compute duration of syllables and gaps
def cut(rawsong, sr, threshold, min_syl_dur, min_silent_dur, f_cut_min, f_cut_max):
"""
This function is meant to be used on a single recording, create an external loop
to apply on several recordings (see function distribution).
VARIABLES:
- rawsong: the wav file a song
- sr: sampling rate
OUTPUT:
- onset and offset of each syllable of the song
So for syllable 1 of a song, its onset is onsets[0] and its offset is offsets[0].
To get that segment of the spectrogram, you'd take spect[:,onsets[0]:offsets[0]]
"""
    # parameters that might be adjusted depending on the bird
rawsong = rawsong.astype(float)
rawsong = rawsong.flatten()
amp = Song_functions.smooth_data(rawsong,sr,freq_cutoffs=(f_cut_min, f_cut_max))
(onsets, offsets) = Song_functions.segment_song(amp,segment_params={'threshold': threshold, 'min_syl_dur': min_syl_dur, 'min_silent_dur': min_silent_dur},samp_freq=sr) # Detects syllables according to the threshold you set
return amp, onsets, offsets
def test_features(songfile, args):
"""
A function to tune the parameter depending on the dataset and test the feature extraction
INPUT:
One recording.
OUTPUT
- plot of the spectrogram, onset & offset and amplitude of the selected syllables
- plot the pitches
- plot the coupling of the features two by two
"""
# read the data
sr, samples = wav.read(songfile[0])
y, sr = librosa.load(songfile[0], sr=16000)
# determine onset and offset of the syllables for this song
amp, onsets, offsets = cut(samples, sr, args.threshold, args.min_syl_dur, args.min_silent_dur, args.f_cut_min, args.f_cut_max)
# Make output directory
aux_output_dir = os.path.join(args.data_dir,args.output_dir)
if not os.path.isdir(aux_output_dir):
os.makedirs(aux_output_dir)
os.chdir(aux_output_dir)
# Spectrogram with librosa
X = librosa.stft(y, n_fft=args.N, hop_length=args.H, win_length=args.N, window='hann', pad_mode='constant', center=True)
Y = np.log(1 + 100 * np.abs(X) ** 2)
T_coef = np.arange(X.shape[1]) * args.H / sr
K = args.N // 2
F_coef = np.arange(K + 1) * sr / args.N
# Plot
noverlap = args.nperseg - args.overlap
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)
# Plots spectrogram
#(f,t,spect)=sp.signal.spectrogram(samples, sr, args.window, args.nperseg, noverlap, mode='complex')
#ax1.imshow(10*np.log10(np.square(abs(spect))), origin="lower", aspect="auto", interpolation="none", extent=[0, max(t)*1000, min(f), max(f)], cmap = 'inferno')
extent = [T_coef[0], T_coef[-1], F_coef[0], 8000]
ax1.imshow(Y, cmap=args.color, aspect='auto', origin='lower', extent=extent, norm=colors.PowerNorm(gamma=0.2))
ax1.set_ylabel('Frequency (Hz)')
# Plots song signal amplitude
x_amp=np.arange(len(amp))
ax2.plot(x_amp/sr*1000,samples,color='grey')
for i in range(0,len(onsets)):
ax2.axvline(x=onsets[i]/sr*1000,color='olivedrab',linestyle='dashed')
ax2.axvline(x=offsets[i]/sr*1000,color='darkslategrey',linestyle='dashed')
ax2.set_ylabel('Amplitude (V)')
# Plot smoothed amplitude of the song as per spectrogram index
ax3.plot(x_amp/sr*1000, amp,color='grey')
for i in range(0,len(onsets)):
ax3.axvline(x=onsets[i]/sr*1000,color='olivedrab',linestyle='dashed')
ax3.axvline(x=offsets[i]/sr*1000,color='darkslategrey',linestyle='dashed')
ax3.axhline(y=args.threshold,color='black',label='Threshold')
ax3.legend()
ax3.set_ylabel('Amplitude (V)')
ax3.set_xlabel('Time (ms)')
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.spines['bottom'].set_visible(False)
ax1.tick_params(axis='x', labelbottom=False, bottom=False)
ax2.tick_params(axis='x', labelbottom=False, bottom=False)
ax3.tick_params(axis='x', labelbottom=True, bottom=True)
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'Test_selection_0' + '.' + args.format)
# Duration, spectral flatness, mean pitch
dur_syll = np.zeros((np.size(onsets),))
dur_gap = np.zeros((np.size(onsets),))
wiener = np.zeros((np.size(onsets),))
mean_pitch = np.zeros((np.size(onsets),))
max_pitch = np.zeros((np.size(onsets),))
min_pitch = np.zeros((np.size(onsets),))
direction_pitch = np.zeros((np.size(onsets),))
for j in range(0,np.size(onsets)):
# Syllable duration
dur_syll[j] = offsets[j] - onsets[j]
if j<(np.size(onsets)-1):
dur_gap[j] = onsets[j+1] - offsets[j]
# Spectral flatness/wiener entropy
        # Spectral flatness/wiener entropy
        wiener[j] = np.mean(librosa.feature.spectral_flatness(samples[onsets[j]:offsets[j]].astype(float)))
        # Pitch detection, max and min frequency
        pitches, magnitudes = librosa.core.piptrack(samples[onsets[j]:offsets[j]].astype(float), sr=sr, n_fft=256, fmin=1000, fmax=8000) #, win_length=100)
pitches_all = 0
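        # piptrack returns per-frame pitch candidates; keep, for each frame, the candidate with the largest magnitude.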
for interval in range(0,magnitudes.shape[1]):
index = magnitudes[:,interval].argmax()
pitches_all = np.append(pitches_all,pitches[index,interval])
pitches_all = pitches_all[np.nonzero(pitches_all)]
mean_pitch[j] = np.mean(pitches_all)
max_pitch[j] = np.max(pitches_all)
min_pitch[j] = np.min(pitches_all)
if pitches_all[0]<pitches_all[-1]:
direction_pitch[j] = 1
else:
direction_pitch[j] = -1
np.save('pitches_syll_' + str(j) + '.npy', pitches_all)
# Plot all the pitches
colors_list = list(colors._colors_full_map.values())[0::5]
fig, ax = plt.subplots()
#(f, t, spect) = sp.signal.spectrogram(samples, sr, args.window, args.nperseg, noverlap, mode='complex')
#ax.imshow(10 * np.log10(np.square(abs(spect))), origin="lower", aspect="auto", interpolation="none", extent=[0, max(t) * 1000, min(f), max(f)], cmap='inferno')
extent = [T_coef[0], T_coef[-1], F_coef[0], 8000]
ax.imshow(Y, cmap=args.color, aspect='auto', origin='lower', extent=extent, norm=colors.PowerNorm(gamma=0.2))
for j in range(0, np.size(onsets)):
pitches_all= np.load('pitches_syll_' + str(j) + '.npy')
x = np.linspace(onsets[j]/sr*1000, offsets[j]/sr*1000 - 25, np.size(pitches_all))
ax.plot(x, pitches_all, 'o', c=colors_list[j])
plt.ylabel('Frequency(Hz)')
plt.xlabel('Time(ms)')
plt.title('Pitch')
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'syll_pitch.' + args.format)
# Mean pitch over the interval of computation
x_axis_pitches = np.zeros((np.size(samples),))
for j in range(0, np.size(onsets)):
x_axis_pitches[int(np.mean([onsets[j], offsets[j]]) / sr * 1000)] = mean_pitch[j]
x_axis_pitches_max = np.zeros((np.size(samples),))
for j in range(0, np.size(onsets)):
x_axis_pitches_max[int(np.mean([onsets[j], offsets[j]]) / sr * 1000)] = max_pitch[j]
x_axis_pitches_min = np.zeros((np.size(samples),))
for j in range(0, np.size(onsets)):
x_axis_pitches_min[int(np.mean([onsets[j], offsets[j]]) / sr * 1000)] = min_pitch[j]
# Plot the mean, min and max pitches
fig, ax = plt.subplots()
#ax.imshow(10 * np.log10(np.square(abs(spect))), origin="lower", aspect="auto", interpolation="none", extent=[0, max(t) * np.size(samples), min(f), max(f)], cmap='inferno')
extent = [T_coef[0], T_coef[-1], F_coef[0], 8000]
ax.imshow(Y, cmap=args.color, aspect='auto', origin='lower', extent=extent, norm=colors.PowerNorm(gamma=0.2))
ax.plot(x_axis_pitches, color='black', linewidth=0, markersize=4, marker='X', label='mean')
ax.plot(x_axis_pitches_max, color='red', linewidth=0, markersize=4, marker='*', label='max')
ax.plot(x_axis_pitches_min, color='red', linewidth=0, markersize=4, marker='*', label='min')
ax.legend()
ax.set_xlim([0, T_coef[-1]])
plt.ylabel('Frequency(Hz)')
plt.xlabel('Time(ms)')
plt.title('Pitch')
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'Pitch.' + args.format)
# Cumulative plot
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(15,9))
# Plot the spectrogram
#ax3.imshow(10*np.log10(np.square(abs(spect))), origin="lower", aspect="auto", interpolation="none", extent=[0, max(t)*1000, min(f), max(f)], cmap = 'inferno')
ax1.imshow(Y, cmap=args.color, aspect='auto', origin='lower', extent=extent, norm=colors.PowerNorm(gamma=0.2))
ax1.set_ylabel('Frequency(Hz)', fontsize=15)
ax1.set_xticks([])
ax1.title.set_text('Spectrogram')
# Plots song signal amplitude
x_amp = np.arange(len(amp))
ax2.plot(x_amp / sr * 1000, amp, color='grey', label='_Hidden label')
for i in range(0, len(onsets)-2):
ax2.axvline(x=onsets[i] / sr * 1000, color='olivedrab', linestyle='dashed')
ax2.axvline(x=offsets[i] / sr * 1000, color='darkslategrey', linestyle='dashed')
ax2.axvline(x=onsets[len(onsets)-1] / sr * 1000, color='olivedrab', linestyle='dashed', label='Onset')
ax2.axvline(x=offsets[len(onsets)-1] / sr * 1000, color='darkslategrey', linestyle='dashed', label='Offset')
ax2.legend()
ax2.set_ylabel('Amplitude (V)', fontsize=15)
ax2.title.set_text('Selection')
plt.xlabel('Time(ms)', fontsize=15)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'Summary.' + args.format)
# Cumulative plot 2
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(15, 15))
# Plots song signal amplitude
x_amp = np.arange(len(amp))
ax1.plot(x_amp / sr * 1000, amp, color='grey')
for i in range(0, len(onsets)):
ax1.axvline(x=onsets[i] / sr * 1000, color='olivedrab', linestyle='dashed')
ax1.axvline(x=offsets[i] / sr * 1000, color='darkslategrey', linestyle='dashed')
ax1.set_ylabel('Amplitude (V)')
ax1.title.set_text('Syllable selection')
# Plot all the pitches
#(f, t, spect) = sp.signal.spectrogram(samples, sr, args.window, args.nperseg, noverlap, mode='complex')
#ax2.imshow(10 * np.log10(np.square(abs(spect))), origin="lower", aspect="auto", interpolation="none", extent=[0, max(t) * 1000, min(f), max(f)], cmap='inferno')
extent = [T_coef[0], T_coef[-1], F_coef[0], 8000]
ax2.imshow(Y, cmap=args.color, aspect='auto', origin='lower', extent=extent, norm=colors.PowerNorm(gamma=0.2))
for j in range(0, np.size(onsets)):
pitches_all = np.load('pitches_syll_' + str(j) + '.npy')
x = np.linspace(onsets[j] / sr * 1000, offsets[j] / sr * 1000 - 25, np.size(pitches_all))
ax2.plot(x, pitches_all, 'o', c=colors_list[j])
ax2.set_ylabel('Frequency(Hz)')
ax2.title.set_text('Pitch trajectory')
plt.xlabel('Time(ms)')
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'Summary_solo.' + args.format)
# Clustering the syllables depending on the features
# Duration VS pitch
plt.subplots()
plt.plot(np.round(dur_syll)/16,mean_pitch, '*')
plt.xlabel('Duration(ms)')
plt.ylabel('Pitch(Hz)')
plt.title('Duration VS pitch')
    plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'durationVSpitch.' + args.format)
# Duration VS wiener
plt.subplots()
plt.plot(np.round(dur_syll)/16,wiener, '*')
plt.xlabel('Duration(ms)')
plt.ylabel('Wiener entropy(dB)')
plt.title('Duration VS Wiener entropy')
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'durationVSwiener.' + args.format)
# Wiener VS pitch
plt.subplots()
plt.plot(wiener,mean_pitch, '*')
plt.xlabel('Wiener entropy(dB)')
plt.ylabel('Pitch(Hz)')
plt.title('Wiener entropy VS pitch')
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'wienerVSpitch.' + args.format)
print('Done')
def repertoire(songfile, classes, args):
"""
:param songfile: list of wav files (one per element of the repertoire)
:param classes: list of the names of each element of the repertoire
:return: a figure with one example per element of the repertoire
"""
samples_repertoire = []
T_coef_all = []
for s in range(0, np.size(songfile)):
y, sr = librosa.load(songfile[s], sr=16000)
# cut the silence
X = librosa.stft(y, n_fft=args.N, hop_length=args.H, win_length=args.N, window='hann', pad_mode='constant', center=True)
Y = np.log(1 + 100 * np.abs(X) ** 2)
T_coef = np.arange(X.shape[1]) * args.H / sr * 1000
K = args.N // 2
F_coef = np.arange(K + 1) * sr / args.N
samples_repertoire.append(Y)
T_coef_all.append(T_coef[-1])
# Plots spectrogram
#plt.style.use('dark_background')
fig, axs = plt.subplots(nrows=4, ncols=4, figsize=(10, 14), sharey=True)
for i in range(0, 4):
for j in range(0,4):
extent = [0, T_coef_all[4*i + j], 0, 8000]
axs[i, j].imshow(samples_repertoire[4*i + j], cmap=args.color, extent = extent, aspect='auto', origin='lower', norm=colors.PowerNorm(gamma=0.2))
axs[i, j].set_title(classes[4*i + j], fontsize=12)
axs[i, j].set_xlim(0, 350)
axs[i, j].spines['top'].set_color('none')
axs[i, j].spines['right'].set_color('none')
axs[0, j].set_xlabel('Time (ms)', fontsize=15)
axs[i, 3].set_ylabel('Frequency (Hz)', fontsize=15)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + 'Repertoire.' + args.format)
print('Done')
def single_syllable_features_from_song(songfile, args):
"""
VARIABLES:
- songfile: list of recordings
OUTPUT:
- .npy file containing the features stored, one single file for the whole directory
"""
# inizialization of variables
dur_syll = [0]
dur_gap = [0]
wiener = [0]
pitch = [0]
for i in range(0,np.size(songfile)):
sr, samples = wav.read(songfile[i])
# determine onset and offset of the syllables for this song
        amp, onsets, offsets = cut(samples, sr, args.threshold, args.min_syl_dur, args.min_silent_dur, args.f_cut_min, args.f_cut_max)
dur_syll_aux = np.zeros((np.size(onsets),))
dur_gap_aux = np.zeros((np.size(onsets),))
wiener_aux = np.zeros((np.size(onsets),))
pitch_aux = np.zeros((np.size(onsets),))
for j in range(0,np.size(onsets)):
# syllable duration
dur_syll_aux[j] = offsets[j] - onsets[j]
if j<(np.size(onsets)-1):
dur_gap_aux[j] = onsets[j+1] - offsets[j]
# spectral flatness/wiener entropy
            # spectral flatness/wiener entropy
            wiener_aux[j] = np.mean(librosa.feature.spectral_flatness(samples[onsets[j]:offsets[j]].astype(float)))
            # pitch detection
            pitches, magnitudes = librosa.core.piptrack(samples[onsets[j]:offsets[j]].astype(float), sr=sr, n_fft=100, fmin=500, fmax=8000)
pitches_all = 0
for interval in range(0,magnitudes.shape[1]):
index = magnitudes[:,interval].argmax()
pitches_all = np.append(pitches_all,pitches[index,interval])
pitch_aux[j] = np.mean(pitches_all[1::])
dur_syll = np.append(dur_syll, dur_syll_aux) #collect syllable duration for all the i-th recordings
dur_gap = np.append(dur_gap, dur_gap_aux) #collect gap duration for all the i-th recordings
wiener = np.append(wiener, wiener_aux) #collect wiener entropy value for all the i-th recordings
pitch = np.append(pitch, pitch_aux) #collect pitch for all the i-th recordings
# save the data
    data = {'File_name': songfile[0::], 'How_many': np.size(songfile)}
import numpy as np
from sco_py.expr import BoundExpr, QuadExpr, AffExpr
from pma import backtrack_ll_solver_gurobi as backtrack_ll_solver
from pma import backtrack_ll_solver_OSQP as backtrack_ll_solver_OSQP
from core.util_classes.namo_grip_predicates import (
RETREAT_DIST,
dsafe,
opposite_angle,
gripdist,
ColObjPred,
BoxObjPred,
)
class NAMOSolverGurobi(backtrack_ll_solver.BacktrackLLSolverGurobi):
def get_resample_param(self, a):
return a.params[0] # Experiment with avoiding robot pose symbols
if a.name == "moveto":
## find possible values for the final pose
rs_param = None # a.params[2]
elif a.name == "movetoholding":
## find possible values for the final pose
rs_param = None # a.params[2]
elif a.name.find("grasp") >= 0:
## sample the grasp/grasp_pose
rs_param = a.params[4]
elif a.name == "putdown":
## sample the end pose
rs_param = None # a.params[4]
elif a.name.find("place") >= 0:
rs_param = a.params[2]
elif a.name.find("moveto") >= 0:
rs_param = a.params[4]
# rs_param = None
elif a.name.find("place_at") >= 0:
# rs_param = None
rs_param = a.params[2]
elif a.name == "short_grasp":
rs_param = a.params[4]
# rs_param = None
elif a.name == "short_movetograsp":
rs_param = a.params[4]
# rs_param = None
elif a.name == "short_place_at":
# rs_param = None
rs_param = a.params[2]
else:
raise NotImplementedError
return rs_param
def freeze_rs_param(self, act):
return False
def obj_pose_suggester(self, plan, anum, resample_size=1, st=0):
robot_pose = []
assert anum + 1 <= len(plan.actions)
if anum + 1 < len(plan.actions):
act, next_act = plan.actions[anum], plan.actions[anum + 1]
else:
act, next_act = plan.actions[anum], None
robot = plan.params["pr2"]
robot_body = robot.openrave_body
start_ts, end_ts = act.active_timesteps
start_ts = max(st, start_ts)
old_pose = robot.pose[:, start_ts].reshape((2, 1))
robot_body.set_pose(old_pose[:, 0])
oldx, oldy = old_pose.flatten()
old_rot = robot.theta[0, start_ts]
for i in range(resample_size):
if next_act != None and (
next_act.name == "grasp" or next_act.name == "putdown"
):
target = next_act.params[2]
target_pos = target.value - [[0], [0.0]]
robot_pose.append(
{
"value": target_pos,
"gripper": np.array([[-1.0]])
if next_act.name == "putdown"
else np.array([[1.0]]),
}
)
elif (
act.name == "moveto"
or act.name == "new_quick_movetograsp"
or act.name == "quick_moveto"
):
target = act.params[2]
grasp = act.params[5]
target_rot = -np.arctan2(target.value[0,0] - oldx, target.value[1,0] - oldy)
if target.value[1] > 1.7:
target_rot = max(min(target_rot, np.pi/4), -np.pi/4)
elif target.value[1] < -7.7 and np.abs(target_rot) < 3*np.pi/4:
target_rot = np.sign(target_rot) * 3*np.pi/4
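                # Wrap target_rot to the 2*pi-equivalent angle closest to the robot's current heading,
                # so the resulting pose does not require an unnecessary full spin.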
while target_rot < old_rot:
target_rot += 2 * np.pi
while target_rot > old_rot:
target_rot -= 2*np.pi
if np.abs(target_rot-old_rot) > np.abs(target_rot-old_rot+2*np.pi): target_rot += 2*np.pi
dist = gripdist + dsafe
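                # Stand off from the target by the gripper reach plus the safety margin, along the approach direction.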
target_pos = target.value - [[-dist*np.sin(target_rot)], [dist*np.cos(target_rot)]]
robot_pose.append({'pose': target_pos, 'gripper': np.array([[0.1]]), 'theta': np.array([[target_rot]])})
# robot_pose.append({'pose': target_pos + grasp.value, 'gripper': np.array([[-1.]])})
elif act.name == "transfer" or act.name == "new_quick_place_at":
target = act.params[4]
grasp = act.params[5]
target_rot = -np.arctan2(target.value[0,0] - oldx, target.value[1,0] - oldy)
if target.value[1] > 1.7:
target_rot = max(min(target_rot, np.pi/4), -np.pi/4)
                elif target.value[1] < -7.7 and np.abs(target_rot) < 3*np.pi/4:
                    target_rot = np.sign(target_rot) * 3*np.pi/4
#!/usr/bin/env python
# coding: utf-8
from LV_model import loguniform_prior, simulator, lotka_volterra, newData
from bnn_model import LVClassifier, sample_local, sample_local_2D, inBin
import numpy as np
import pandas as pd
import time
def run_bnn(max_rounds=10,max_gen=10,Ndata=1000,seed=0, multi_dim=False, num_bins=10, thresh=0.0, verbose=False):
"""
Run the BNN for multiple rounds and multiple generations
*param*
max_rounds: the number of rounds to run, i.e., new seeds.
max_gen: the number of sequential, adaptive, generations.
Ndata: max number of model simulations per generation.
seed: random number seed
multi_dim: solve marginal or cross-terms
num_bins: binning of data
    thresh: cut-off when resampling.
"""
np.random.seed(seed)
use_small = True # use smaller network arch.
samplePrior = loguniform_prior
target_ts = np.load('target_ts.npy')
res_per_round = {'theta': [], 'theta_corrected': [], 'time': []}
for round_ in range(max_rounds):
print(f'round {round_+1} out of {max_rounds}')
theta = []
theta_corrected = []
theta.append(samplePrior(Ndata, True))
time_ticks = []
for i in range(max_gen):
print(f'gen {i+1} out of {max_gen}')
time_begin = time.time()
# of the previous gen dataset, which ones can we re-use? Goal is to maximize
# the number of datapoints available.
if i > 0:
data_ts_, data_thetas_ = inBin(data_ts, data_thetas, theta[i])
# generate new data
data_ts, data_thetas = newData(theta[i], toexp=True)
if i > 0:
data_ts = np.append(data_ts, data_ts_, axis=0)
data_thetas = np.append(data_thetas, data_thetas_, axis=0)
# saving not only the full parameter arrays THETA but also the ones that are
# removed because of timeout signal.
theta_corrected.append(data_thetas)
# Classify new data
lv_c = LVClassifier(name_id=f'lv_{i}', seed=0)
lv_c.train_thetas = data_thetas
lv_c.train_ts = lv_c.reshape_data(data_ts)
#if multi_dim:
# num_bins = 5
#else:
# num_bins = 10
lv_c.run(target=target_ts, num_bins=num_bins, batch_size=128, split=True, toload=False,
verbose=verbose, use_small=use_small, multi_dim=multi_dim)
# save model for evaluation
#lv_c.model1.save(f'lv_gen{i}_model1')
#lv_c.model2.save(f'lv_gen{i}_model2')
#lv_c.model3.save(f'lv_gen{i}_model3')
# resample
if multi_dim:
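            # Resample new parameter proposals from the (2D) bins to which the classifier assigns
            # high probability; probabilities below the threshold are ignored.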
new_rate1, new_bins1 = sample_local_2D(lv_c.probs1, lv_c.multidim_bins_rate12, num_samples=Ndata, use_thresh=True, thresh=thresh)
new_rate2, new_bins2 = sample_local_2D(lv_c.probs2, lv_c.multidim_bins_rate13, num_samples=Ndata, use_thresh=True, thresh=thresh)
new_rate3, new_bins3 = sample_local_2D(lv_c.probs3, lv_c.multidim_bins_rate23, num_samples=Ndata, use_thresh=True, thresh=thresh)
                rate1 = np.hstack([new_rate1[:,0], new_rate2[:,0]])
import numpy as np
from scipy import stats
from typing import List, Dict, Any
from .bandits import Bandit
class BanditPolicy(object):
"""
Base Class for Multi-armed Bandit solving Policy
:param bandit: The Bandit to solve
    :param requires_init_run: Indicates whether initialisation of Q values is required
:type bandit: Bandit type object
"""
def __init__(self, bandit: Bandit, requires_init_run: bool = False):
self._bandit = bandit
self._regret = 0.0
self._action_hist = []
self._regret_hist = []
self._reward_hist = []
self._counts = np.zeros(self._bandit.arms)
self._requires_init_run = requires_init_run
@property
def action_hist(self) -> List[int]:
"""
Get the history of actions taken
:returns: List of actions
:rtype: list
"""
return self._action_hist
@property
def regret_hist(self) -> List[float]:
"""
Get the history of regrets computed for each step
:returns: List of regrets
:rtype: list
"""
return self._regret_hist
@property
def regret(self) -> float:
"""
Get the current regret
:returns: The current regret
:rtype: float
"""
return self._regret
@property
def reward_hist(self) -> List[float]:
"""
Get the history of rewards received for each step
:returns: List of rewards
:rtype: list
"""
return self._reward_hist
@property
def counts(self) -> np.ndarray:
"""
Get the number of times each action has been taken
:returns: Numpy array with count for each action
:rtype: numpy.ndarray
"""
return self._counts
def select_action(self, t: int) -> int:
"""
Select an action
This method needs to be implemented in the specific policy.
:param t: timestep to choose action for
:type t: int
:returns: Selected action
:rtype: int
"""
raise NotImplementedError
def update_params(self, action: int, reward: float) -> None:
"""
        Update parameters for the policy
This method needs to be implemented in the specific policy.
:param action: action taken for the step
:param reward: reward obtained for the step
:type action: int
:type reward: float
"""
raise NotImplementedError
def learn(self, n_timesteps: int = 1000) -> None:
"""
Learn to solve the environment over given number of timesteps
Selects action, takes a step in the bandit and then updates
the parameters according to the reward received. If policy
requires an initial run, it takes each action once before starting
        :param n_timesteps: number of steps to learn for
        :type n_timesteps: int
"""
if self._requires_init_run:
for action in range(self._bandit.arms):
reward = self._bandit.step(action)
self.update_params(action, reward)
n_timesteps -= self._bandit.arms
for t in range(n_timesteps):
action = self.select_action(t)
reward = self._bandit.step(action)
self.update_params(action, reward)
class EpsGreedyPolicy(BanditPolicy):
"""
Multi-Armed Bandit Solver with Epsilon Greedy Action Selection Strategy.
Refer to Section 2.3 of Reinforcement Learning: An Introduction.
:param bandit: The Bandit to solve
:param eps: Probability with which a random action is to be selected.
:type bandit: Bandit type object
:type eps: float
"""
def __init__(self, bandit: Bandit, eps: float = 0.05):
super(EpsGreedyPolicy, self).__init__(bandit)
self._eps = eps
self._Q = np.zeros(bandit.arms)
@property
def eps(self) -> float:
"""
        Get the associated epsilon for the policy
:returns: Probability with which a random action is to be selected
:rtype: float
"""
return self._eps
@property
def Q(self) -> np.ndarray:
"""
Get the q values assigned by the policy to all actions
:returns: Numpy array of q values for all actions
:rtype: numpy.ndarray
"""
return self._Q
def select_action(self, t: int) -> int:
"""
Select an action according to epsilon greedy startegy
        A random action is selected with epsilon probability over
the optimal action according to the current Q values to
encourage exploration of the policy.
:param t: timestep to choose action for
:type t: int
:returns: Selected action
:rtype: int
"""
        if np.random.random() < self._eps:
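            # Assumed completion of the truncated method: explore a uniformly random
            # arm with probability eps, otherwise exploit the current greedy arm.
            action = np.random.randint(self._bandit.arms)
        else:
            action = np.argmax(self._Q)
        return action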
"""
<EMAIL>
"""
import numpy as np
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtOpenGL
from PyQt5.QtGui import QOpenGLShaderProgram, QOpenGLShader
import OpenGL.GL as GL
from spimagine.gui.gui_utils import fillTexture2d
class MyWidget(QtOpenGL.QGLWidget):
def __init__(self, parent = None):
super(MyWidget, self).__init__(parent)
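        # The two arrays below define two triangles covering the full viewport, plus the matching
        # texture coordinates, used to draw a screen-aligned textured quad.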
self.quadCoord = np.array([[-1., -1., 0.],
[1., -1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[-1., 1., 0.],
[-1., -1., 0.]])
self.quadCoordTex = np.array([[0, 0],
[1., 0.],
[1., 1.],
[1., 1.],
[0, 1.],
[0, 0]])
def initializeGL(self):
GL.glClearColor(1.0, 0.0, 0.0, 1.0)
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc (GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
print("OpenGL.GL: " + str(GL.glGetString(GL.GL_VERSION)))
print("GL.GLSL: " + str(GL.glGetString(GL.GL_SHADING_LANGUAGE_VERSION)))
print("OpenGL ATTRIBUTES:\n",", ".join(d for d in dir(GL) if d.startswith("GL_")))
self.program = QOpenGLShaderProgram()
self.program.addShaderFromSourceCode(QOpenGLShader.Vertex, """#version 120
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 mytexcoord;
void main() {
gl_Position = vec4(position, 0., 1.0);
mytexcoord = texcoord;
}""")
self.program.addShaderFromSourceCode(QOpenGLShader.Fragment, """#version 120
uniform sampler2D texture;
varying vec2 mytexcoord;
void main() {
gl_FragColor = texture2D(texture,mytexcoord);
}""")
print(self.program.log())
self.program.link()
        self.texture = fillTexture2d(np.outer(np.linspace(0, 1, 128), np.ones(128)))
# Import folder where sorting algorithms
import sys
import unittest
import numpy as np
# For importing from different folders
# OBS: This is supposed to be done with automated testing,
# hence relative to folder we want to import from
sys.path.append("ML/algorithms/linearregression")
# If run from local:
# sys.path.append('../../ML/algorithms/linearregression')
from linear_regression_gradient_descent import LinearRegression
class TestLinearRegression_GradientDescent(unittest.TestCase):
def setUp(self):
# test cases we want to run
self.linearReg = LinearRegression()
self.X1 = np.array([[0, 1, 2]])
self.y1 = np.array([[1, 2, 3]])
self.W1_correct = np.array([[1, 1]]).T
self.X2 = np.array([[0, 1]])
self.y2 = np.array([[1, 0]])
self.W2_correct = np.array([[1, -1]]).T
self.X3 = np.array([[1, 2, 3], [1, 2, 4]])
self.y3 = np.array([[5, 10, 18]])
self.W3_correct = np.array([[0, 2, 3]]).T
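        # X3/y3 encode y = 2*x1 + 3*x2 with zero bias; e.g. the third sample (x1, x2) = (3, 4) gives 2*3 + 3*4 = 18.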
self.X4 = np.array([[0, 0]])
self.y4 = np.array([[0, 0]])
self.W4_correct = np.array([[0, 0]]).T
self.X5 = np.array([[0, 1, 2, 3, 4, 5]])
self.y5 = np.array([[0, 0.99, 2.01, 2.99, 4.01, 4.99]])
self.W5_correct = np.array([[0, 1]]).T
def test_perfectpositiveslope(self):
W = self.linearReg.main(self.X1, self.y1)
boolean_array = np.isclose(W, self.W1_correct, atol=0.1)
self.assertTrue(boolean_array.all())
def test_perfectnegativeslope(self):
W = self.linearReg.main(self.X2, self.y2)
boolean_array = np.isclose(W, self.W2_correct, atol=0.1)
self.assertTrue(boolean_array.all())
def test_multipledimension(self):
W = self.linearReg.main(self.X3, self.y3)
        boolean_array = np.isclose(W, self.W3_correct, atol=0.1)
        self.assertTrue(boolean_array.all())
import astropy.units as u
from astropy.table import QTable
import numpy as np
def test_psf():
from pyirf.irf import psf_table
from pyirf.utils import cone_solid_angle
np.random.seed(0)
N = 1000
TRUE_SIGMA_1 = 0.2
TRUE_SIGMA_2 = 0.1
    TRUE_SIGMA = np.append(np.full(N, TRUE_SIGMA_1), np.full(N, TRUE_SIGMA_2))
import matplotlib.pyplot as plt
import numpy as np
from .base import BaseProcessor
from ..plotter import OneDimPlotter, TwoDimPlotter, cdf_pdf
class ImageProcessor(BaseProcessor):
""" Process the information related to image, get several statistical distribution.
Args:
data (dict): Data to be processed.
Examples:
>>> import numpy as np
>>> data = dict(
>>> shapes=np.array([np.array([100,300]), np.array([150, 1000])]),
>>> labels = np.array([np.array([0, 1]), np.array([1])]),
>>> )
>>> self = ImageProcessor(data)
>>> self.default_plot()
>>> # export
>>> self.export('./result', save_mode='folder')
>>> # what statistical data processed
>>> print(self.processor)
"""
def __init__(self, data):
super(ImageProcessor, self).__init__(data)
self.processor = ['hw', 'ratio', 'scale', 'ratio_log2', 'instances_per_image']
if self.data.get('shapes', None) is None:
print("Image size distribution, ratio distribution, scale distribution"
" and log2(ratio) is related to 'shapes'. "
"But got no 'shapes' in input data.")
self.processor = ['instances_per_image']
if self.data.get('labels', None) is None:
print("Instances per image is related to 'labels'. "
"But got no 'labels' in input data.")
self.processor.remove('instances_per_image')
@property
def hw(self):
"""Height and width distribution of image."""
if self.data.get('shapes', None) is None:
return None
w, h = self.data['shapes'][:, 0], self.data['shapes'][:, 1]
return TwoDimPlotter([w, h], 'image hw distribution', plt.scatter,
axis_label=['width', 'height'],
marker='.', alpha=0.1)
@property
def ratio(self):
"""Ratio (height/width) distribution of image."""
if self.data.get('shapes', None) is None:
return None
w, h = self.data['shapes'][:, 0], self.data['shapes'][:, 1]
hw_ratio = h / w
return OneDimPlotter(hw_ratio, r'image h/w ratio',
cdf_pdf,
axis_label=['ratio: h/w', 'normalized number'],
bins=20)
@property
def ratio_log2(self):
"""Ratio (log2(height/width)) distribution of image."""
if self.data.get('shapes', None) is None:
return None
w, h = self.data['shapes'][:, 0], self.data['shapes'][:, 1]
hw_ratio = h / w
log_ratio = np.log2(hw_ratio)
return OneDimPlotter(log_ratio, r'image h/w ratio (log2)',
cdf_pdf,
axis_label=['ratio: log2(h/2)', 'normalized number'],
bins=20)
@property
def scale(self):
"""Scale (sqrt(width*height)) distribution of image."""
if self.data.get('shapes', None) is None:
return None
w, h = self.data['shapes'][:, 0], self.data['shapes'][:, 1]
sqrt_hw = np.sqrt(h * w)
        range_ = (np.min(sqrt_hw), np.max(sqrt_hw))
from scipy import sparse
import numpy as np
import xarray as xr
from sklearn.utils.validation import check_is_fitted
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing._data import _handle_zeros_in_scale
class MinMaxScaler(TransformerMixin, BaseEstimator):
"""Transform data to a given range.
This estimator scales and translates the data distribution such
that it is in the given range on the training set, e.g. between
    zero and one. If NaN values are present they will be replaced by a given
value, e.g. minus one.
The transformation is given by::
X_std = (X - X.min(axis)) / (X.max(axis) - X.min(axis))
X_scaled = X_std * (max - min) + min
where min, max = value_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
value_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
axis : int, tuple of int or None, default=None
Axis or axes along which the minimum and maximum will be computed (via
``np.nanmin`` and ``np.nanmax`` functions). If None then the new range
is computed from the whole dataset (all dimensions/axes).
    fillnanto : float or int, default=-1
Value to be used when filling in NaN values.
Notes
-----
NaNs are disregarded in fit when transforming to the new value range, and
then replaced according to ``fillnanto`` in transform.
"""
def __init__(self, value_range=(0, 1), copy=True, axis=None, fillnanto=-1):
self.value_range = value_range
self.copy = copy
self.fillnanto = fillnanto
self.axis = axis
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.min_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Computation of min and max on X for later scaling.
"""
value_range = self.value_range
if value_range[0] >= value_range[1]:
raise ValueError(
"Minimum of desired value_range must be smaller than maximum. Got %s."
                % str(value_range)
)
if sparse.issparse(X):
raise TypeError("MinMaxScaler does not support sparse input.")
### creating a nan mask
if np.any(np.isnan(X)):
            self.nan_mask = np.isnan(X)
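        # Assumed completion of the truncated method, following sklearn's MinMaxScaler and the
        # attributes cleared in _reset(): per-axis min/max ignoring NaNs, then scale_/min_
        # mapping the observed data range onto the requested value_range.
        data_min = np.nanmin(X, axis=self.axis)
        data_max = np.nanmax(X, axis=self.axis)
        data_range = data_max - data_min
        self.scale_ = (value_range[1] - value_range[0]) / _handle_zeros_in_scale(data_range)
        self.min_ = value_range[0] - data_min * self.scale_
        self.data_min_ = data_min
        self.data_max_ = data_max
        self.data_range_ = data_range
        return self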
#!/usr/bin/env python3
import pytest
import os
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
from torch.utils.data import DataLoader
from neuralprophet import (
NeuralProphet,
df_utils,
time_dataset,
configure,
)
log = logging.getLogger("NP.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "tests", "test-data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")
NROWS = 512
EPOCHS = 3
LR = 1.0
PLOT = False
def test_impute_missing():
"""Debugging data preprocessing"""
log.info("testing: Impute Missing")
allow_missing_dates = False
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
name = "test"
df[name] = df["y"].values
if not allow_missing_dates:
df_na, _ = df_utils.add_missing_dates_nan(df.copy(deep=True), freq="D")
else:
df_na = df.copy(deep=True)
to_fill = pd.isna(df_na["y"])
# TODO fix debugging printout error
log.debug("sum(to_fill): {}".format(sum(to_fill.values)))
# df_filled, remaining_na = df_utils.fill_small_linear_large_trend(
# df.copy(deep=True),
# column=name,
# allow_missing_dates=allow_missing_dates
# )
df_filled = df.copy(deep=True)
df_filled.loc[:, name], remaining_na = df_utils.fill_linear_then_rolling_avg(
df_filled[name], limit_linear=5, rolling=20
)
# TODO fix debugging printout error
log.debug("sum(pd.isna(df_filled[name])): {}".format(sum(pd.isna(df_filled[name]).values)))
if PLOT:
if not allow_missing_dates:
df, _ = df_utils.add_missing_dates_nan(df, freq="D")
df = df.loc[200:250]
fig1 = plt.plot(df["ds"], df[name], "b-")
fig1 = plt.plot(df["ds"], df[name], "b.")
df_filled = df_filled.loc[200:250]
# fig3 = plt.plot(df_filled['ds'], df_filled[name], 'kx')
fig4 = plt.plot(df_filled["ds"][to_fill], df_filled[name][to_fill], "kx")
plt.show()
def test_time_dataset():
# manually load any file that stores a time series, for example:
df_in = pd.read_csv(AIR_FILE, index_col=False, nrows=NROWS)
log.debug("Infile shape: {}".format(df_in.shape))
n_lags = 3
n_forecasts = 1
valid_p = 0.2
df_train, df_val = df_utils.split_df(df_in, n_lags, n_forecasts, valid_p)
# create a tabularized dataset from time series
df = df_utils.check_dataframe(df_train)
df_dict, _ = df_utils.prep_copy_df_dict(df)
local_data_params, global_data_params = df_utils.init_data_params(df_dict=df_dict, normalize="minmax")
df = df_utils.normalize(df, global_data_params)
inputs, targets = time_dataset.tabularize_univariate_datetime(
df,
n_lags=n_lags,
n_forecasts=n_forecasts,
)
log.debug(
"tabularized inputs: {}".format(
"; ".join(["{}: {}".format(inp, values.shape) for inp, values in inputs.items()])
)
)
def test_normalize():
length = 100
days = pd.date_range(start="2017-01-01", periods=length)
y = np.ones(length)
y[1] = 0
y[2] = 2
y[3] = 3.3
df = pd.DataFrame({"ds": days, "y": y})
m = NeuralProphet(
normalize="soft",
learning_rate=LR,
)
# with config
m.config_normalization.init_data_params(
df_utils.prep_copy_df_dict(df)[0], m.config_covar, m.regressors_config, m.events_config
)
df_norm = m._normalize(df_utils.prep_copy_df_dict(df)[0])
m.config_normalization.unknown_data_normalization = True
df_norm = m._normalize(df_utils.prep_copy_df_dict(df)[0])
m.config_normalization.unknown_data_normalization = False
# using config for utils
df_norm = df_utils.normalize(df.copy(deep=True), m.config_normalization.global_data_params)
df_norm = df_utils.normalize(
df_utils.prep_copy_df_dict(df)[0]["__df__"], m.config_normalization.local_data_params["__df__"]
)
# with utils
local_data_params, global_data_params = df_utils.init_data_params(
df_dict=df_utils.prep_copy_df_dict(df)[0],
normalize=m.config_normalization.normalize,
covariates_config=m.config_covar,
regressor_config=m.regressors_config,
events_config=m.events_config,
global_normalization=m.config_normalization.global_normalization,
global_time_normalization=m.config_normalization.global_time_normalization,
)
df_norm = df_utils.normalize(df.copy(deep=True), global_data_params)
df_norm = df_utils.normalize(df_utils.prep_copy_df_dict(df)[0]["__df__"], local_data_params["__df__"])
def test_add_lagged_regressors():
NROWS = 512
EPOCHS = 3
BATCH_SIZE = 32
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(15, min_periods=1).mean()
df["C"] = df["y"].rolling(30, min_periods=1).mean()
col_dict = {
"1": "A",
"2": ["B"],
"3": ["A", "B", "C"],
}
for key, value in col_dict.items():
log.debug(value)
if isinstance(value, list):
feats = np.array(["ds", "y"] + value)
else:
feats = np.array(["ds", "y", value])
df1 = pd.DataFrame(df, columns=feats)
cols = [col for col in df1.columns if col not in ["ds", "y"]]
m = NeuralProphet(
n_forecasts=1,
n_lags=3,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_lagged_regressor(names=cols)
metrics_df = m.fit(df1, freq="D", validation_df=df1[-100:])
future = m.make_future_dataframe(df1, n_historic_predictions=365)
## Check if the future dataframe contains all the lagged regressors
check = any(item in future.columns for item in cols)
forecast = m.predict(future)
log.debug(check)
def test_auto_batch_epoch():
# for epochs = int(2 ** (2.3 * np.log10(100 + n_data)) / (n_data / 1000.0))
check_medium = {
"1": (1, 1000),
"10": (10, 1000),
"100": (16, 391),
"1000": (32, 127),
"10000": (64, 59),
"100000": (128, 28),
"1000000": (256, 14),
"10000000": (512, 10),
}
# for epochs = int(2 ** (2.5 * np.log10(100 + n_data)) / (n_data / 1000.0))
check = {
"1": (1, 1000),
"10": (10, 1000),
"100": (16, 539),
"1000": (32, 194),
"10000": (64, 103),
"100000": (128, 57),
"1000000": (256, 32),
"10000000": (512, 18),
}
observe = {}
for n_data, (batch_size, epochs) in check.items():
n_data = int(n_data)
c = configure.Train(
learning_rate=None,
epochs=None,
batch_size=None,
loss_func="mse",
optimizer="SGD",
)
c.set_auto_batch_epoch(n_data=n_data)
observe["{}".format(n_data)] = (c.batch_size, c.epochs)
log.debug("[config] n_data: {}, batch: {}, epoch: {}".format(n_data, c.batch_size, c.epochs))
log.debug("[should] n_data: {}, batch: {}, epoch: {}".format(n_data, batch_size, epochs))
assert c.batch_size == batch_size
assert c.epochs == epochs
# print("\n")
# print(check)
# print(observe)
def test_split_impute():
def check_split(df_in, df_len_expected, n_lags, n_forecasts, freq, p=0.1):
m = NeuralProphet(
n_lags=n_lags,
n_forecasts=n_forecasts,
learning_rate=LR,
)
df_in = df_utils.check_dataframe(df_in, check_y=False)
df_in = m._handle_missing_data(df_in, freq=freq, predicting=False)
assert df_len_expected == len(df_in)
total_samples = len(df_in) - n_lags - 2 * n_forecasts + 2
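        # Each window needs n_lags history points plus n_forecasts targets; since the train and
        # validation splits share n_lags rows of context, only n_lags + 2*(n_forecasts - 1) rows
        # in total cannot start a window.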
df_train, df_test = m.split_df(df_in, freq=freq, valid_p=0.1)
n_train = len(df_train) - n_lags - n_forecasts + 1
n_test = len(df_test) - n_lags - n_forecasts + 1
assert total_samples == n_train + n_test
n_test_expected = max(1, int(total_samples * p))
n_train_expected = total_samples - n_test_expected
assert n_train == n_train_expected
assert n_test == n_test_expected
log.info("testing: SPLIT: daily data")
df = pd.read_csv(PEYTON_FILE)
check_split(df_in=df, df_len_expected=len(df) + 59, freq="D", n_lags=10, n_forecasts=3)
log.info("testing: SPLIT: monthly data")
df = pd.read_csv(AIR_FILE, nrows=NROWS)
check_split(df_in=df, df_len_expected=len(df), freq="MS", n_lags=10, n_forecasts=3)
log.info("testing: SPLIT: 5min data")
df = pd.read_csv(YOS_FILE, nrows=NROWS)
check_split(df_in=df, df_len_expected=len(df), freq="5min", n_lags=10, n_forecasts=3)
# redo with no lags
log.info("testing: SPLIT: daily data")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
check_split(df_in=df, df_len_expected=len(df), freq="D", n_lags=0, n_forecasts=1)
log.info("testing: SPLIT: monthly data")
df = pd.read_csv(AIR_FILE, nrows=NROWS)
check_split(df_in=df, df_len_expected=len(df), freq="MS", n_lags=0, n_forecasts=1)
log.info("testing: SPLIT: 5min data")
df = pd.read_csv(YOS_FILE)
check_split(df_in=df, df_len_expected=len(df) - 12, freq="5min", n_lags=0, n_forecasts=1)
def test_cv():
def check_folds(df, n_lags, n_forecasts, valid_fold_num, valid_fold_pct, fold_overlap_pct):
folds = df_utils.crossvalidation_split_df(
df, n_lags, n_forecasts, valid_fold_num, valid_fold_pct, fold_overlap_pct
)
train_folds_len = []
val_folds_len = []
for (f_train, f_val) in folds:
train_folds_len.append(len(f_train))
val_folds_len.append(len(f_val))
train_folds_samples = [x - n_lags - n_forecasts + 1 for x in train_folds_len]
val_folds_samples = [x - n_lags - n_forecasts + 1 for x in val_folds_len]
total_samples = len(df) - n_lags - (2 * n_forecasts) + 2
val_fold_each = max(1, int(total_samples * valid_fold_pct))
overlap_each = int(fold_overlap_pct * val_fold_each)
assert all([x == val_fold_each for x in val_folds_samples])
train_folds_should = [
total_samples - val_fold_each - (valid_fold_num - i - 1) * (val_fold_each - overlap_each)
for i in range(valid_fold_num)
]
assert all([x == y for (x, y) in zip(train_folds_samples, train_folds_should)])
log.debug("total_samples: {}".format(total_samples))
log.debug("val_fold_each: {}".format(val_fold_each))
log.debug("overlap_each: {}".format(overlap_each))
log.debug("val_folds_len: {}".format(val_folds_len))
log.debug("val_folds_samples: {}".format(val_folds_samples))
log.debug("train_folds_len: {}".format(train_folds_len))
log.debug("train_folds_samples: {}".format(train_folds_samples))
log.debug("train_folds_should: {}".format(train_folds_should))
len_df = 100
check_folds(
df=pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=len_df), "y": np.arange(len_df)}),
n_lags=0,
n_forecasts=1,
valid_fold_num=3,
valid_fold_pct=0.1,
fold_overlap_pct=0.0,
)
len_df = 1000
check_folds(
df=pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=len_df), "y": np.arange(len_df)}),
n_lags=50,
n_forecasts=10,
valid_fold_num=10,
valid_fold_pct=0.1,
fold_overlap_pct=0.5,
)
def test_reg_delay():
df = pd.read_csv(PEYTON_FILE, nrows=102)[:100]
m = NeuralProphet(
epochs=10,
learning_rate=LR,
)
m.fit(df, freq="D")
c = m.config_train
for w, e, i in [
(0, 0, 1),
(0, 3, 0),
(0, 5, 0),
# (0.002739052315863355, 5, 0.1),
(0.5, 6, 0.5),
# (0.9972609476841366, 7, 0.9),
(1, 7, 1),
(1, 8, 0),
]:
weight = c.get_reg_delay_weight(e, i, reg_start_pct=0.5, reg_full_pct=0.8)
log.debug("e {}, i {}, expected w {}, got w {}".format(e, i, w, weight))
assert weight == w
def test_double_crossvalidation():
len_df = 100
folds_val, folds_test = df_utils.double_crossvalidation_split_df(
df=pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=len_df), "y": np.arange(len_df)}),
n_lags=0,
n_forecasts=1,
k=3,
valid_pct=0.3,
test_pct=0.15,
)
train_folds_len1 = []
val_folds_len1 = []
for (f_train, f_val) in folds_val:
train_folds_len1.append(len(f_train))
val_folds_len1.append(len(f_val))
train_folds_len2 = []
val_folds_len2 = []
for (f_train, f_val) in folds_test:
train_folds_len2.append(len(f_train))
val_folds_len2.append(len(f_val))
assert train_folds_len1[-1] == 75
assert train_folds_len2[0] == 85
assert val_folds_len1[0] == 10
assert val_folds_len2[0] == 5
log.debug("train_folds_len1: {}".format(train_folds_len1))
log.debug("val_folds_len1: {}".format(val_folds_len1))
log.debug("train_folds_len2: {}".format(train_folds_len2))
log.debug("val_folds_len2: {}".format(val_folds_len2))
def test_check_duplicate_ds():
# Check whether a ValueError is thrown in case there
# are duplicate dates in the ds column of dataframe
df = pd.read_csv(PEYTON_FILE, nrows=102)[:50]
# introduce duplicates in dataframe
df = pd.concat([df, df[8:9]]).reset_index()
# Check if error thrown on duplicates
m = NeuralProphet(
n_lags=24,
ar_reg=0.5,
learning_rate=LR,
)
with pytest.raises(ValueError):
m.fit(df, freq="D")
def test_infer_frequency():
df = pd.read_csv(PEYTON_FILE, nrows=102)[:50]
m = NeuralProphet(
epochs=EPOCHS,
learning_rate=LR,
)
# Check if freq is set automatically
df_train, df_test = m.split_df(df)
log.debug("freq automatically set")
# Check if freq is set automatically
df_train, df_test = m.split_df(df, freq=None)
log.debug("freq automatically set even if set to None")
# Check if freq is set when equal to the original
df_train, df_test = m.split_df(df, freq="D")
log.debug("freq is equal to ideal")
# Check if freq is set in different freq
df_train, df_test = m.split_df(df, freq="5D")
log.debug("freq is set even though is different than the ideal")
# Assert for data unevenly spaced
index = np.unique(np.geomspace(1, 40, 20, dtype=int))
df_uneven = df.iloc[index, :]
with pytest.raises(ValueError):
m.split_df(df_uneven)
# Check if freq is set even in a df with multiple freqs
df_train, df_test = m.split_df(df_uneven, freq="H")
log.debug("freq is set even with not definable freq")
# Check if freq is set for list
df_dict = {"df1": df, "df2": df}
m = NeuralProphet(
learning_rate=LR,
)
m.fit(df_dict)
log.debug("freq is set for list of dataframes")
# Check if freq is set for list with different freq for n_lags=0
df1 = df.copy(deep=True)
time_range = pd.date_range(start="1994-12-01", periods=df.shape[0], freq="M")
df1["ds"] = time_range
df_dict = {"df1": df, "df2": df1}
m = NeuralProphet(
n_lags=0,
epochs=5,
learning_rate=LR,
)
m.fit(df_dict)
log.debug("freq is set for list of dataframes(n_lags=0)")
# Assert for automatic frequency in list with different freq
m = NeuralProphet(
n_lags=2,
learning_rate=LR,
)
with pytest.raises(ValueError):
m.fit(df_dict)
# Exceptions
frequencies = ["M", "MS", "Y", "YS", "Q", "QS", "B", "BH"]
df = df.iloc[:200, :]
for freq in frequencies:
df1 = df.copy(deep=True)
time_range = pd.date_range(start="1994-12-01", periods=df.shape[0], freq=freq)
df1["ds"] = time_range
df_train, df_test = m.split_df(df1)
log.debug("freq is set for all the exceptions")
def test_globaltimedataset():
df = pd.read_csv(PEYTON_FILE, nrows=100)
df1 = df[:50]
df2 = df[50:]
m1 = NeuralProphet(
yearly_seasonality=True,
weekly_seasonality=True,
daily_seasonality=True,
learning_rate=LR,
)
m2 = NeuralProphet(
n_lags=3,
n_forecasts=2,
learning_rate=LR,
)
m3 = NeuralProphet(learning_rate=LR)
# TODO m3.add_country_holidays("US")
config_normalization = configure.Normalization("auto", False, True, False)
for m in [m1, m2, m3]:
df_dict = {"df1": df1.copy(), "df2": df2.copy()}
config_normalization.init_data_params(df_dict, m.config_covar, m.regressors_config, m.events_config)
m.config_normalization = config_normalization
df_dict = m._normalize(df_dict)
dataset = m._create_dataset(df_dict, predict_mode=False)
dataset = m._create_dataset(df_dict, predict_mode=True)
# lagged_regressors, future_regressors
df4 = df.copy()
df4["A"] = np.arange(len(df4))
df4["B"] = np.arange(len(df4)) * 0.1
m4 = NeuralProphet(
n_lags=2,
learning_rate=LR,
)
m4.add_future_regressor("A")
m4.add_lagged_regressor("B")
config_normalization = configure.Normalization("auto", False, True, False)
for m in [m4]:
df_dict = {"df4": df4.copy()}
config_normalization.init_data_params(df_dict, m.config_covar, m.regressors_config, m.events_config)
m.config_normalization = config_normalization
df_dict = m._normalize(df_dict)
dataset = m._create_dataset(df_dict, predict_mode=False)
dataset = m._create_dataset(df_dict, predict_mode=True)
def test_loader():
df = pd.read_csv(PEYTON_FILE, nrows=100)
df["A"] = np.arange(len(df))
df["B"] = np.arange(len(df)) * 0.1
df1 = df[:50]
df2 = df[50:]
m = NeuralProphet(
yearly_seasonality=True,
weekly_seasonality=True,
daily_seasonality=True,
n_lags=3,
n_forecasts=2,
learning_rate=LR,
)
m.add_future_regressor("A")
m.add_lagged_regressor("B")
config_normalization = configure.Normalization("auto", False, True, False)
df_dict = {"df1": df1.copy(), "df2": df2.copy()}
config_normalization.init_data_params(df_dict, m.config_covar, m.regressors_config, m.events_config)
m.config_normalization = config_normalization
df_dict = m._normalize(df_dict)
dataset = m._create_dataset(df_dict, predict_mode=False)
loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=True, drop_last=False)
for inputs, targets, meta in loader:
assert set(meta["df_name"]) == set(df_dict.keys())
break
def test_newer_sample_weight():
dates = pd.date_range(start="2020-01-01", periods=100, freq="D")
a = [0, 1] * 50
    y = -1 * np.array(a[:50])
import numpy
import pytest
from matchms import Fragments
def test_fragments_init():
mz = numpy.array([10, 20, 30], dtype="float")
intensities = numpy.array([100, 20, 300], dtype="float")
peaks = Fragments(mz=mz, intensities=intensities)
assert peaks is not None
assert numpy.allclose(mz, peaks.mz)
assert numpy.allclose(intensities, peaks.intensities)
def test_fragments_mz_wrong_numpy_dtype():
mz = numpy.array([10, 20, 30], dtype="int")
intensities = numpy.array([100, 20, 300], dtype="float")
with pytest.raises(AssertionError) as msg:
_ = Fragments(mz=mz, intensities=intensities)
assert str(msg.value) == "Input argument 'mz' should be an array of type float."
def test_fragments_intensities_wrong_numpy_dtype():
mz = numpy.array([10, 20, 30], dtype="float")
intensities = numpy.array([100, 20, 300], dtype="int")
with pytest.raises(AssertionError) as msg:
_ = Fragments(mz=mz, intensities=intensities)
assert str(msg.value) == "Input argument 'intensities' should be an array of type float."
def test_fragments_same_shape():
mz = numpy.array([10, 20, 30, 40], dtype="float")
intensities = | numpy.array([100, 20, 300], dtype="float") | numpy.array |
import random
import numpy as np
from skimage.draw import disk
from typing import Tuple
def sample_position(img: np.ndarray) -> Tuple[int, int]:
"""Sample a random position in the brain
:param img: The image to sample the position on
:return: A tuple of the x,y position
"""
obj_inds = np.where(img > 0)
location_idx = random.randint(0, len(obj_inds[0]) - 1)
position = (obj_inds[0][location_idx], obj_inds[1][location_idx])
return position
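# Illustrative usage (hedged sketch, not part of the original module): on a synthetic
# image whose non-zero pixels mark the "brain", the sampled position always lands
# inside that foreground region.
#   >>> img = np.zeros((64, 64)); img[16:48, 16:48] = 0.5
#   >>> pos = sample_position(img)
#   >>> img[pos] > 0
#   True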
def disk_anomaly(img: np.ndarray, position: Tuple[int, int], radius: int,
intensity: float) -> np.ndarray:
"""Draw a disk on a grayscale image.
Args:
img (np.ndarray): Grayscale image
position (Tuple[int, int]): Position of disk
radius (int): Radius of disk
intensity (float): Intensity of pixels inside the disk
Returns:
disk_img (np.ndarray): img with the disk drawn on it
label (np.ndarray): target segmentation mask
"""
assert img.ndim == 2, f"Invalid shape {img.shape}. Use a grayscale image"
# Create disk
rr, cc = disk(position, radius)
rr = rr.clip(0, img.shape[0] - 1)
cc = cc.clip(0, img.shape[1] - 1)
# Draw disk on image
disk_img = img.copy()
disk_img[rr, cc] = intensity
# Create label
label = np.zeros(img.shape, dtype=np.uint8)
label[rr, cc] = 1
# Remove anomaly at background pixels
mask = img > 0
disk_img *= mask
label *= mask
return disk_img, label
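# Illustrative usage (hedged sketch, not part of the original module): draw a bright
# disk of radius 5 at the image centre; the label marks only the altered foreground pixels.
#   >>> img = np.zeros((64, 64)); img[16:48, 16:48] = 0.5
#   >>> anomaly_img, label = disk_anomaly(img, (32, 32), radius=5, intensity=1.0)
#   >>> anomaly_img.shape == img.shape and label.dtype == np.uint8
#   True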
def source_deformation_anomaly(img: np.ndarray, position: Tuple[int, int],
radius: int):
"""Pixels are shifted away from the center of the sphere.
Args:
img (np.ndarray): Image to be augmented, shape [h, w]
position (Tuple[int, int]): Center pixel of the mask
radius (int): Radius
Returns:
img_deformed (np.ndarray): img with source deformation
label (np.ndarray): target segmentation mask
"""
# Create label mask
rr, cc = disk(position, radius)
rr = rr.clip(0, img.shape[0] - 1)
cc = cc.clip(0, img.shape[1] - 1)
label = np.zeros(img.shape, dtype=np.uint8)
label[rr, cc] = 1
# Remove anomaly at background pixels
mask = img > 0
label *= mask
# Center voxel of deformation
C = np.array(position)
# Create copy of image for reference
img_deformed = img.copy()
copy = img.copy()
# Iterate over indices of all voxels in mask
inds = np.where(label > 0)
for x, y in zip(*inds[-2:]):
# Voxel at current location
I = np.array([x, y])
# Source pixel shift
s = np.square(np.linalg.norm(I - C, ord=2) / radius)
V = np.round(C + s * (I - C)).astype(int)
x_, y_ = V
# Assure that x_ and y_ are valid indices (x indexes axis -2, y indexes axis -1)
x_ = max(min(x_, img.shape[-2] - 1), 0)
y_ = max(min(y_, img.shape[-1] - 1), 0)
if img_deformed[..., x, y] > 0:
img_deformed[..., x, y] = copy[..., x_, y_]
return img_deformed, label
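# Worked example of the source shift above (hedged, for illustration only): with centre
# C = (32, 32), radius = 8 and current pixel I = (36, 32), the normalised distance is
# 4/8, so s = (4/8)**2 = 0.25 and the sampled source pixel is V = C + 0.25*(I - C) = (33, 32),
# i.e. intensities near the centre are spread outward.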
def sink_deformation_anomaly(img: np.ndarray, position: Tuple[int, int],
radius: int):
"""Pixels are shifted toward from the center of the sphere.
Args:
img (np.ndarray): Image to be augmented, shape [h, w]
position (Tuple[int, int]): Center pixel of the mask
radius (int): Radius
Returns:
img_deformed (np.ndarray): img with sink deformation
label (np.ndarray): target segmentation mask
"""
# Create label mask
rr, cc = disk(position, radius)
rr = rr.clip(0, img.shape[0] - 1)
cc = cc.clip(0, img.shape[1] - 1)
label = np.zeros(img.shape, dtype=np.uint8)
label[rr, cc] = 1
# Remove anomaly at background pixels
mask = img > 0
label *= mask
# Center voxel of deformation
C = np.array(position)
# Create copy of image for reference
img_deformed = img.copy()
copy = img.copy()
# Iterate over indices of all voxels in mask
inds = np.where(label > 0)
for x, y in zip(*inds[-2:]):
# Voxel at current location
I = np.array([x, y])
# Sink pixel shift
s = np.square(np.linalg.norm(I - C, ord=2) / radius)
V = np.round(I + (1 - s) * (I - C)).astype(int)
x_, y_ = V
# Assure that x_ and y_ are valid indices
x_ = max(min(x_, img.shape[-2] - 1), 0)
y_ = max(min(y_, img.shape[-1] - 1), 0)
if img_deformed[..., x, y] > 0:
img_deformed[..., x, y] = copy[..., x_, y_]
return img_deformed, label
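# Worked example of the sink shift above (hedged, for illustration only): with centre
# C = (32, 32), radius = 8 and current pixel I = (36, 32), s = (4/8)**2 = 0.25 and the
# sampled source pixel is V = I + (1 - s)*(I - C) = (39, 32), i.e. intensities are
# pulled inward toward the centre.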
def pixel_shuffle_anomaly(img: np.ndarray, position: Tuple[int, int],
radius: int):
"""Pixels in the label mask are randomly shuffled
Args:
img (np.ndarray): Image to be augmented, shape [h, w]
position (Tuple[int, int]): Center pixel of the mask
radius (int): Radius
Returns:
img_deformed (np.ndarray): img with shuffled pixels
label (np.ndarray): target segmentation mask
"""
# Create label mask
rr, cc = disk(position, radius)
rr = rr.clip(0, img.shape[0] - 1)
cc = cc.clip(0, img.shape[1] - 1)
label = | np.zeros(img.shape, dtype=np.uint8) | numpy.zeros |
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import keras
from keras import Sequential, layers, activations
from keras.models import Model, load_model
from images.image import *
from skimage.metrics import peak_signal_noise_ratio as psnr, structural_similarity as ssim
from typing import List, Dict, Tuple
from copy import deepcopy
def show(img):
plt.figure(figsize=(12,8))
plt.imshow(img, cmap='gray', interpolation='nearest')
def reconstroi_projecao_a_partir_dos_patches(patches: np.ndarray, dimensao_patch=(40,40), stride=40, dimensao_imagem=(650, 1650)) -> np.ndarray:
imagem_predita = np.zeros(shape=dimensao_imagem)
i = 0
for row in range(0, (dimensao_imagem[0] - dimensao_patch[0] +1), stride):
for col in range(0, (dimensao_imagem[1] - dimensao_patch[1] + 1), stride):
imagem_predita[row: row + dimensao_patch[0], col: col + dimensao_patch[1]] = patches[i].reshape(dimensao_patch[0],dimensao_patch[1])
i += 1
return imagem_predita
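# Hedged note (not part of the original module): with non-overlapping 40x40 patches
# (stride=40) on a 650x1650 projection, the loops above consume
# len(range(0, 611, 40)) * len(range(0, 1611, 40)) = 16 * 41 = 656 patches, placed row by row.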
def verifica_se_todos_os_pixels_sao_iguais(imagem: np.ndarray) -> bool:
"""
Check whether a numpy array of shape (m, n) has all of its pixels equal.
"""
assert len(imagem.shape) == 2, 'A imagem deve ter apenas 2 dimensões.'
(w, h) = imagem.shape
return imagem.max() == imagem.min()
def pega_os_patches(projecoes:np.ndarray, dimensao:Tuple[int,int], stride:int, remover_patches_pretos = False) -> np.ndarray:
"""
Return the patches of the projections `projecoes` with size dimensao[0] x dimensao[1], extracted with stride `stride`.
The `remover_patches_pretos` parameter controls whether patches containing only black pixels are included.
"""
(largura, altura) = dimensao
projecoes = remove_a_dimensao_das_cores(projecoes)
patches = []
for p in range(projecoes.shape[0]): # iterate over the projections
patches_numpy = crop_image(projecoes[p], height=altura, width=largura, stride=stride)
for i in range(patches_numpy.shape[0]): # iterate over the patches extracted from the projection
if remover_patches_pretos:
if verifica_se_todos_os_pixels_sao_iguais(patches_numpy[i]) == False:
patches.append(patches_numpy[i])
else:
patches.append(patches_numpy[i])
# convert the patches to a numpy array.
patches = np.array(patches)
return patches
def remove_a_dimensao_das_cores(array_numpy: np.ndarray) -> np.ndarray:
"""
Receive a numpy array with shape (QUANTITY, WIDTH, HEIGHT, COLOR) and
return a new numpy array with shape (QUANTITY, WIDTH, HEIGHT).
"""
assert len(array_numpy.shape) == 4, 'O array deve ter shape = (QUANTIDADE, LARGURA, ALTURA, COR).'
(qtde, w, h, _) = array_numpy.shape
array_numpy = array_numpy.reshape(qtde, w, h)
return array_numpy
def adiciona_a_dimensao_das_cores(array:np.ndarray) -> np.ndarray:
"""
Add the color dimension to the numpy array, assuming the image is grayscale.
"""
return array.reshape( array.shape + (1,) )
def mostrar_lado_a_lado(imagens: List[np.ndarray], titulos: List[str], figsize: Dict[int, int] = (12,8)):
"""
Display the images in the list with the given titles.
"""
assert len(imagens) == len(titulos), 'imagens e titulos devem ter o mesmo tamanho.'
assert len(imagens[0].shape) == 2, 'As imagens deve ter apenas 2 dimensões.'
quantidade = len(imagens)
fig, ax = plt.subplots(1, quantidade, figsize=figsize)
for i in range(quantidade):
ax[i].axis('off')
ax[i].set_title(titulos[i])
ax[i].imshow(imagens[i], cmap='gray', interpolation='nearest')
def compara_imagens_em_relacao_ao_psnr_e_ssim(imagem_ground_truth: np.ndarray, imagem: np.ndarray, data_range: int):
"""
Print the PSNR and SSIM between imagem_ground_truth and imagem.
"""
print('PSNR: %.2f dB, e SSIM: %.2f' % (psnr(imagem, imagem_ground_truth, data_range=data_range), ssim(imagem, imagem_ground_truth, data_range=data_range)))
def compara_datasets_em_relacao_ao_psnr_e_ssim_medio(imagens_ground_truth: np.ndarray, imagens_filtradas: np.ndarray, data_range: int = 256):
"""
Print the average PSNR and SSIM between the ground-truth dataset and the filtered images.
"""
assert imagens_ground_truth.shape == imagens_filtradas.shape, 'Os datasets devem ter as mesmas dimensões.'
assert len(imagens_ground_truth.shape) == 3, 'Os datasets não devem ter a dimensão de cor.'
psnr_acumulado = []
ssim_acumulado = []
for i in range(imagens_ground_truth.shape[0]):
psnr_acumulado.append(psnr(imagens_ground_truth[i], imagens_filtradas[i], data_range=data_range))
ssim_acumulado.append(ssim(imagens_ground_truth[i], imagens_filtradas[i], data_range=data_range))
psnr_acumulado = np.array(psnr_acumulado)
ssim_acumulado = np.array(ssim_acumulado)
print('PSNR médio: %.2f dB e SSIM médio: %.2f' % (psnr_acumulado.mean(), ssim_acumulado.mean()))
def dividir_dataset_em_treinamento_e_teste(dataset: np.ndarray, divisao=(80,20)):
"""
divisao is the percentage split between the training set and the test set.
E.g.: (80,20) means 80% for training and 20% for testing.
"""
assert len(divisao) == 2, 'Divisão deve ser: % de conj. de treinamento e % de conj. de teste.'
n_treino, n_teste = divisao
assert n_treino + n_teste == 100, 'A soma da divisão deve ser igual a 100.'
total = dataset.shape[0]
porcentagem_treino = n_treino/100 #0.8
porcentagem_teste = n_teste/100 #0.2
return dataset[:int(porcentagem_treino*total)], dataset[int(porcentagem_treino*total):]
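# Illustrative usage (hedged sketch, not part of the original module): an 80/20 split
# of a 100-sample array yields 80 training samples and 20 test samples.
#   >>> treino, teste = dividir_dataset_em_treinamento_e_teste(np.arange(100), divisao=(80, 20))
#   >>> len(treino), len(teste)
#   (80, 20)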
def carrega_dataset(divisao: Tuple[int, int], embaralhar=True):
DIRETORIO_DATASETS = 'dataset/patch-50x50-cada-projecao-200'
x = np.load(os.path.join(DIRETORIO_DATASETS, 'noisy.npy'))
y = np.load(os.path.join(DIRETORIO_DATASETS, 'original.npy'))
if embaralhar:
| np.random.seed(42) | numpy.random.seed |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import pairwise_distances
from sklearn.cluster import KMeans
def cluster_score(data, cluster_assigns):
""" Evaluate the score of the cluster assignments for n clusters using the
pairwise distance of each point in the data to its assigned cluster
center
"""
total_distance = 0
for c in np.unique(cluster_assigns):
distance = pairwise_distances(data[cluster_assigns == c])
distance = distance.reshape(-1,1)[distance.reshape(-1,1) != 0]
# Check if there is only one data point in the cluster
if distance.size > 0:
total_distance += np.mean(distance.reshape(-1,1)[
distance.reshape(-1,1) != 0])
return total_distance
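# Illustrative usage (hedged sketch, not part of the original module): two tight,
# well-separated blobs of identical points give a total intra-cluster distance of zero.
#   >>> data = np.vstack([np.zeros((5, 2)), np.ones((5, 2)) * 10])
#   >>> assigns = np.array([0] * 5 + [1] * 5)
#   >>> cluster_score(data, assigns) == 0
#   True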
def find_n_clusters(data, n, clustering=KMeans()):
""" Compute the optimal cluster assignments of the input data for n
clusters. Compute the same assignments for randomly generated data to
utilize the Gap Statistic for evaluating n clusters
"""
# Generate Reference Data
reference = np.random.rand(*data.shape)
# Cluster and score reference
trial_scores = []
for _ in range(0, 2):
clustering.n_clusters = n
c_assign = clustering.fit_predict(reference)
trial_scores.append(cluster_score(reference, c_assign))
ref_score = np.mean(trial_scores)
# Cluster and score data
clustering.n_clusters = n
d_assign = clustering.fit_predict(data)
data_score = cluster_score(data, d_assign)
gap = np.log(ref_score) - | np.log(data_score) | numpy.log |
from plyfile import PlyData, PlyElement, make2d
import numpy as np
def ReadPLY(filename):
plydata = PlyData.read(filename)
nVerts = plydata['vertex'].count
verts = np.zeros((nVerts,3))
verts[:,0] = | np.array(plydata['vertex'].data['x']) | numpy.array |
import numpy as np
import scipy_psdm as psdm
def test_unit_diagonal():
mat = [
[1.000, -0.948, 0.099, -0.129],
[-0.948, 1.000, -0.591, 0.239],
[0.099, -0.591, 1.000, 0.058],
[-0.129, 0.239, 0.058, 1.000],
]
mat = np.array(mat)
rho = psdm.approximate_correlation_matrix(mat)
np.testing.assert_allclose( | np.diag(rho) | numpy.diag |
# -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.MeshMat import MeshMat
from pyleecan.Classes.CellMat import CellMat
from pyleecan.Classes.MeshSolution import MeshSolution
from pyleecan.Classes.PointMat import PointMat
import numpy as np
DELTA = 1e-10
@pytest.mark.METHODS
@pytest.mark.MeshSol
# @pytest.mark.DEV
def test_MeshMat_1group():
"""unittest for 1 group"""
mesh = MeshMat()
mesh.cell["triangle"] = CellMat(nb_pt_per_cell=3)
mesh.point = PointMat()
mesh.point.add_point(np.array([0, 0]))
mesh.point.add_point(np.array([1, 0]))
mesh.point.add_point(np.array([1, 2]))
mesh.point.add_point(np.array([2, 3]))
mesh.point.add_point(np.array([3, 3]))
mesh.add_cell( | np.array([0, 1, 2]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 23:56:16 2019
@author: kirichoi
"""
import os, sys
import tellurium as te
import roadrunner
import numpy as np
import antimony
import scipy.optimize
import networkGenerator as ng
import time
import copy
def f1(k_list, *args):
global counts
global countf
args[0].reset()
args[0].setValues(args[0].getGlobalParameterIds(), k_list)
try:
args[0].steadyStateApproximate()
objCCC = args[0].getScaledConcentrationControlCoefficientMatrix()
objCCC[np.abs(objCCC) < 1e-12] = 0 # Set small values to zero
if np.isnan(objCCC).any():
dist_obj = 10000
else:
if args[3]:
objFlux = args[0].getReactionRates()
objFlux[np.abs(objFlux) < 1e-12] = 0 # Set small values to zero
# objFCC = args[0].getScaledFluxControlCoefficientMatrix()
# objFCC[np.abs(objFCC) < 1e-12] = 0 # Set small values to zero
objCCC_row = objCCC.rownames
objCCC_col = objCCC.colnames
objCCC = objCCC[np.argsort(objCCC_row)]
objCCC = objCCC[:,np.argsort(objCCC_col)]
if args[3]:
objFlux = objFlux[np.argsort(objCCC_col)]
dist_obj = (((np.linalg.norm(args[1] - objCCC)) + (np.linalg.norm(args[2] - objFlux))) *
((1 + np.sum(np.equal(np.sign(np.array(args[1])), np.sign(np.array(objCCC))))) +
(1 + np.sum(np.equal(np.sign(np.array(args[2])), np.sign(np.array(objFlux)))))))
else:
dist_obj = ((np.linalg.norm(args[1] - objCCC))*(1 +
np.sum(np.not_equal(np.sign(np.array(args[1])),
np.sign(np.array(objCCC))))))
except:
countf += 1
dist_obj = 10000
counts += 1
return dist_obj
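# Hedged note on the objective above: f1 compares the scaled concentration control
# coefficients (and, when args[3] is set, the steady-state fluxes) of the candidate model
# against the target values, scaling the norm of the difference by a term built from
# element-wise sign comparisons (np.not_equal in the concentration-only branch,
# np.equal in the flux branch), and returns 10000 whenever the steady state cannot be computed.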
def callbackF(X, convergence=0.):
global counts
global countf
print(str(counts) + ", " + str(countf))
return False
def initialize(Parameters):
global countf
global counts
numBadModels = 0
numGoodModels = 0
numIter = 0
ens_dist = np.empty(Parameters.ens_size)
ens_model = np.empty(Parameters.ens_size, dtype='object')
ens_rl = np.empty(Parameters.ens_size, dtype='object')
rl_track = []
rl_track.append(Parameters.knownReactionList)
# Initial Random generation
while (numGoodModels < Parameters.ens_size):
# Ensure no redundant model
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
while rl in rl_track:
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
antStr = ng.generateAntimony(Parameters.realFloatingIds, Parameters.realBoundaryIds, stt[1],
stt[2], rl, boundary_init=Parameters.realBoundaryVal)
try:
r = te.loada(antStr)
counts = 0
countf = 0
r.steadyStateApproximate()
p_bound = ng.generateParameterBoundary(r.getGlobalParameterIds())
res = scipy.optimize.differential_evolution(f1,
args=(r, Parameters.realConcCC, Parameters.realFlux, Parameters.FLUX),
bounds=p_bound,
maxiter=Parameters.optiMaxIter,
tol=Parameters.optiTol,
polish=Parameters.optiPolish,
seed=Parameters.r_seed)
if not res.success:
numBadModels += 1
else:
# TODO: Might be able to cut the bottom part by simply using
# the obj func value from optimizer
r = te.loada(antStr)
r.setValues(r.getGlobalParameterIds(), res.x)
r.steadyStateApproximate()
SS_i = r.getFloatingSpeciesConcentrations()
r.steadyStateApproximate()
if | np.any(SS_i < 1e-5) | numpy.any |
from scipy import stats
import stumpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import calmap # not working with latest pandas
import calplot
import joypy
import sys
import time
import datetime as dt
class ALDI():
def __init__(self, df_meters, df_metadata, m=24, col_id='building_id', site_id='', meter_id='', verbose=False):
"""
Args:
df_meters: sorted NxM dataframe with M buildings and N rows with hourly
timestamp as indices
df_metadata: dataframe with metadata regarding the buildings
m: hourly window size, one day = 24
col_id: string name of the column with building ids in df_meters and df_metadata
site_id: id of the current portfolio being analyzed
meter_id: id of the current sensor reading being analyzed
verbose: boolean value to enable debugging printing
"""
self.df_meters = df_meters.copy()
self.df_metadata = df_metadata.copy()
self.base_timestamps = df_meters.copy().index
self.m = m
self.col_id = col_id
self.site_id = site_id
self.meter_id = meter_id
self.verbose = verbose
# auxiliary variables needed
self.name_list = df_meters.columns
##### execute ALDI
self.mp_adj, self.mp_ind = self.get_mp() # get matrix profile and indices
# merge information to one single dataframe
self.df_result, self.num_days, self.num_buildings = self.data_reconstruction()
self.df_result_meta = self.add_metadata()
# calculate k-test
self.df_ks_test = self.k_test()
self.df_ks_test_det = None # placeholder
def zero_coun(self): # TODO: implement
pass
def get_mp(self):
"""
Calculates matrix profile and matrix profile indices for a time-stamp
sorted dataframe where the columns are buildings from the same site
and rows are meter readings.
Returns:
mp_adj: dataframe with the matrix profile values
mp_ind: dataframe with the matrix profile indices
"""
mp_adj = pd.DataFrame(columns=self.name_list)
mp_ind = pd.DataFrame(columns=self.name_list)
for col in self.name_list:
bldg = self.df_meters[col]
mp = stumpy.stump(bldg, m=self.m)
# append np.nan to matrix profile to allow plotting against raw data
madj = np.append(mp[:,0], np.zeros(self.m-1) + np.nan)
mind = np.append(mp[:,1], np.zeros(self.m-1) + np.nan)
# save mp information
mp_adj[col] = madj
mp_ind[col] = mind
return mp_adj, mp_ind
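# Hedged note: stumpy.stump returns an (n - m + 1) x 4 array whose column 0 is the matrix
# profile (z-normalised distance from each length-m subsequence to its nearest neighbour)
# and column 1 is that neighbour's index, which is why only mp[:, 0] and mp[:, 1] are kept
# and padded with m-1 NaNs to align with the raw readings.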
def midnight_mp(self):
"""
Picks daily matrix profile at midnight
"""
# use only available timestamps
df_e = self.df_meters.copy()
df_mp = self.mp_adj.set_index(df_e.index)
df_mpind = self.mp_ind.set_index(df_e.index)
df_e_0 = df_e[df_e.index.hour==0]
df_mp_0 = df_mp[df_mp.index.hour==0]
df_mpind_0 = df_mpind[df_mpind.index.hour==0]
if self.verbose:
print(f'Midnight MP values:\n{df_e_0}')
return df_e_0, df_mp_0, df_mpind_0
def data_reconstruction(self):
"""
Puts together calculated values into one single dataframe
"""
df_result = pd.DataFrame(columns=['raw','mp','mp_ind'])
df_e_0, df_mp_0, df_mpind_0 = self.midnight_mp()
num_days = df_e_0.shape[0]
num_buildings = df_e_0.shape[1]
print(f'num of days: {num_days}') # debug
# combining the matrix profile and indices values
df_result['raw'] = df_e_0.values.reshape(num_days * num_buildings)
df_result['mp'] = df_mp_0.values.reshape(num_days * num_buildings)
df_result['mp_ind'] = df_mpind_0.values.reshape(num_days * num_buildings)
if self.verbose:
print(f'Combining raw and calculated values:\n{df_result}')
df_names=[]
df_dates=[]
days=[]
self.year = df_e_0.index[0].year
self.month = df_e_0.index[0].month
self.day = df_e_0.index[0].day
# combining the building names and dates
for i in range(num_days):
df_names = np.append(df_names, np.array(self.name_list))
days = np.append(days, np.ones(len(self.name_list))*i)
for i in range(len(days)):
df_dates = df_dates + \
[dt.datetime(year=self.year,month=self.month,day=self.day) + \
dt.timedelta(days=days[i])]
df_result[self.col_id] = df_names
df_result['date'] = df_dates
if self.verbose:
print(f'Updating the combined values with building names and full dates:\n{df_result}')
# combining the breakdown of the dates
df_month=[]
df_daytype=[]
df_day=[]
for i in range(len(df_result)):
df_month = np.append(df_month, df_result.date[i].strftime('%b'))
df_daytype = np.append(df_daytype, df_result.date[i].strftime('%a'))
df_day = np.append(df_day, df_result.date[i].strftime('%d'))
df_result['month'] = df_month
df_result['daytype'] = df_daytype
df_result['day'] = df_day
if self.verbose:
print(f'Updating the combined values with broken down dates:\n{df_result}')
return df_result, num_days, num_buildings
def add_metadata(self):
"""
Combines the processed dataframe with matrix profile calculation
alongside the metadata file
"""
df_result_meta = self.df_result.merge(self.df_metadata, on=self.col_id)
if self.verbose:
print(f'Merging available metadata:\n{df_result_meta.head()}')
return df_result_meta
def daytype_dist(self):
"""Computes daytype distributions"""
daytype_dist = {}
daytype_dist['mon'] = self.df_result.mp[self.df_result.daytype == 'Mon']
daytype_dist['tue'] = self.df_result.mp[self.df_result.daytype == 'Tue']
daytype_dist['wed'] = self.df_result.mp[self.df_result.daytype == 'Wed']
daytype_dist['thu'] = self.df_result.mp[self.df_result.daytype == 'Thu']
daytype_dist['fri'] = self.df_result.mp[self.df_result.daytype == 'Fri']
daytype_dist['sat'] = self.df_result.mp[self.df_result.daytype == 'Sat']
daytype_dist['sun'] = self.df_result.mp[self.df_result.daytype == 'Sun']
return daytype_dist
def k_test(self):
"""Computes k-s test for each daily distribution"""
daytype_dist = self.daytype_dist() # compute daily distributions
ks_test = pd.DataFrame(columns=['D','p'],
index=pd.date_range(dt.datetime(year=self.year, month=self.month, day=self.day),
periods=self.num_days))
for i in pd.date_range(dt.datetime(year=self.year, month=self.month, day=self.day), periods=self.num_days):
events = self.df_result.mp[self.df_result.date == i]
day_keys = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
test = stats.ks_2samp(events, daytype_dist[day_keys[i.weekday()]])
ks_test.D[i] = test.statistic
ks_test.p[i] = test.pvalue
if self.verbose:
print(f'K-S test dataframe:\n{ks_test}')
return ks_test
def get_rejected_days(self):
"""
Calculates the rejected days at commonly used p-values
Returns:
p_nr: dataframe with the total number of rejected days at
the given p-value(s)
"""
ks_test = self.df_ks_test.copy()
p_nr = pd.DataFrame(columns=['p','nr'])
# by default compute commonly used p-values
p_nr.p = [0.01, 0.05, 0.1, 0.15, 0.2]
p_nr.nr = np.zeros(len(p_nr.p))
for i in range(len(p_nr)):
ks_test['det_aux'] = np.where(ks_test['p'] < p_nr.p[i], 1, 0)
temp = pd.Series(ks_test.det_aux)
p_nr.nr[i] = np.sum(temp)
return p_nr
def get_discords(self, pvalue=0.01):
"""Calculates the discords at a given p-value"""
# filter based on pvalue
ks_test = self.df_ks_test.copy()
ks_test['det'] = np.where(ks_test['p'] < pvalue, 1, 0)
discord = ks_test[ks_test['det'] == 1]
# plot
sns.set(context='notebook', style='whitegrid', palette='deep', font='sans-serif', font_scale=1.8)
plt.figure(figsize=[3, 5])
sns.boxplot(data=discord['D'], orient='vertical')
plt.ylim(0,1)
plt.xlabel(f'Site {self.col_id}')
plt.ylabel('D')
plt.savefig(f'img/discords_{pvalue}-{self.site_id}-{self.meter_id}.png', bbox_inches='tight', format='PNG')
plt.close()
# sort the dataframe and calculate quantiles
discord_sort = discord.sort_values(by='D')
discord_q = self.get_discords_quantiles(discord_sort)
self.df_ks_test_det = ks_test
return discord_sort, discord_q
def get_result_df(self, p_value=0.01):
"""Calculates the discords at a given p-value"""
# prepare index and column for resulting dataframes
hourly_timestamps = self.base_timestamps.copy()
all_bdg = self.name_list.copy()
columns = [f'is_discord_{x}' for x in all_bdg]
# filter based on p_value
df_daily_is_discord = self.df_ks_test.copy()
df_daily_is_discord['is_discord'] = np.where(
df_daily_is_discord['p'] < p_value, 1, 0)
# hand waving specialization (caution) of discords for all bdgs
for col in columns:
df_daily_is_discord[col] = df_daily_is_discord['is_discord']
df_daily_is_discord = df_daily_is_discord.drop(['p', 'D', 'is_discord'], axis=1)
df_hourly_is_discord = pd.DataFrame(index = hourly_timestamps)
# copy daily dataframe to hourly dataframe
df_hourly_is_discord['day'] = df_hourly_is_discord.index.date
df_daily_is_discord.index = df_daily_is_discord.index.date
df_hourly_is_discord = df_hourly_is_discord.join(df_daily_is_discord,
on='day', how='left')
df_hourly_is_discord = df_hourly_is_discord.drop(['day'], axis=1)
df_hourly_is_discord = df_hourly_is_discord.astype('int8')
return df_hourly_is_discord
def get_discords_quantiles(self, discord_sorted):
"""Calculates the IQR discords"""
df_e = self.df_meters.copy()
df_e_z = pd.DataFrame(stats.zscore(df_e, axis=0, nan_policy='omit'),index=df_e.index)
for i in discord_sorted.index[-3:]: # why 3?
discord_temp = df_e_z[i:i + dt.timedelta(hours=self.m-1)] # 23 for daily
# print(i, self.df_ks_test.D[i], self.df_ks_test.p[i])
discord_q = pd.DataFrame(columns=['q1','q2','q3'],index=discord_temp.index)
for j in range(len(discord_temp)):
# replaced np.percentile with nanpercentile
discord_q['q1'][j] = | np.nanpercentile(discord_temp.iloc[j,:], 25) | numpy.nanpercentile |
# general imports
import numpy as np
from abc import ABC, abstractmethod
# local module imports
from . import interpolation as ip, global_functions as g_func, \
channel as chl, output_object as oo
class ParallelFlowCircuit(ABC, oo.OutputObject):
def __new__(cls, dict_flow_circuit, manifolds, channels,
n_subchannels=1.0, **kwargs):
circuit_type = dict_flow_circuit.get('type', 'Koh')
if circuit_type == 'Koh':
return super(ParallelFlowCircuit, cls).\
__new__(KohFlowCircuit)
elif circuit_type == 'ModifiedKoh':
return super(ParallelFlowCircuit, cls).\
__new__(ModifiedKohFlowCircuit)
elif circuit_type == 'UpdatedKoh':
return super(ParallelFlowCircuit, cls).\
__new__(UpdatedKohFlowCircuit)
elif circuit_type == 'Wang':
return super(ParallelFlowCircuit, cls).\
__new__(WangFlowCircuit)
else:
raise NotImplementedError
def __init__(self, dict_flow_circuit, manifolds, channels,
n_subchannels=1.0, **kwargs):
name = dict_flow_circuit['name']
super().__init__(name)
assert isinstance(dict_flow_circuit, dict)
assert isinstance(manifolds, (list, tuple))
assert isinstance(channels, (list, tuple))
err_message = 'manifolds must be tuple or list with two objects of ' \
'class Channel'
if len(manifolds) != 2:
raise ValueError(err_message)
elif not isinstance(manifolds[0], chl.Channel):
raise TypeError(err_message)
if not isinstance(channels[0], chl.Channel):
raise TypeError('channels must be tuple or list with objects of class Channel')
self.print_variables = \
{
'names': ['normalized_flow_distribution'],
'units': ['-'],
'sub_names': ['None']
}
self.combine_print_variables(self.print_variables,
kwargs.get('print_variables', None))
self.manifolds = manifolds
self.manifolds[0].name = self.name + ': Inlet Manifold'
self.manifolds[0].fluid.name = self.manifolds[0].name + ': ' \
+ self.manifolds[0].fluid.TYPE_NAME
self.manifolds[1].name = self.name + ': Outlet Manifold'
self.manifolds[1].fluid.name = self.manifolds[1].name + ': ' \
+ self.manifolds[1].fluid.TYPE_NAME
self.channels = channels
self.manifolds[0].flow_direction = 1
self.shape = dict_flow_circuit.get('shape', 'U')
if self.shape not in ('U', 'Z'):
raise ValueError('shape of flow circuit must be either U or Z')
if self.shape == 'U':
self.manifolds[1].flow_direction = -1
else:
self.manifolds[1].flow_direction = 1
if hasattr(self.manifolds[0].fluid, 'mass_fraction'):
self.multi_component = True
else:
self.multi_component = False
self.n_channels = len(self.channels)
self.n_subchannels = n_subchannels
self.tolerance = dict_flow_circuit.get('tolerance', 1e-6)
self.max_iter = dict_flow_circuit.get('max_iter', 20)
self.min_iter = dict_flow_circuit.get('min_iter', 3)
self.calc_distribution = \
dict_flow_circuit.get('calc_distribution', True)
self.mass_flow_in = \
self.manifolds[0].mass_flow_total[self.manifolds[0].id_in]
self.vol_flow_in = 0.0
self.channel_mass_flow = \
np.ones(self.n_channels) * self.mass_flow_in / self.n_channels
self.channel_vol_flow = np.zeros(self.channel_mass_flow.shape)
self.channel_vol_flow_old = np.zeros(self.channel_vol_flow.shape)
self.channel_length = \
np.asarray([channel.length for channel in channels])
self.channel_cross_area = \
np.asarray([channel.cross_area for channel in channels])
self.initialize = True
self.update_channels(update_fluid=True)
self.normalized_flow_distribution = \
np.zeros(self.channel_vol_flow.shape)
self.iteration = 0
self.add_print_variables(self.print_variables)
def update(self, inlet_mass_flow=None, calc_distribution=None,
update_fluid=False, **kwargs):
"""
Update the flow circuit
"""
if inlet_mass_flow is not None:
id_in = self.manifolds[0].id_in
self.vol_flow_in = \
inlet_mass_flow / self.manifolds[0].fluid.density[id_in]
if self.initialize:
# homogeneous distribution
self.channel_mass_flow[:] = inlet_mass_flow / self.n_channels
self.channel_vol_flow[:] = self.vol_flow_in / self.n_channels
else:
# use previous distribution scaled to new mass flow
self.channel_mass_flow[:] *= inlet_mass_flow / self.mass_flow_in
self.channel_vol_flow[:] *= inlet_mass_flow / self.mass_flow_in
# set new mass and volume flows
self.mass_flow_in = inlet_mass_flow
if self.initialize:
self.update_channels(update_fluid=True)
self.channel_vol_flow_old[:] = 1e8
# channel_vol_flow_old = np.zeros(self.channel_vol_flow.shape)
if calc_distribution is None:
calc_distribution = self.calc_distribution
if calc_distribution and self.n_channels > 1:
for i in range(self.max_iter):
# print(self.name + ' Iteration # ', str(i+1))
self.iteration = i
self.single_loop()
if i == 0:
self.initialize = False
error = \
np.sum(
np.divide(self.channel_vol_flow -
self.channel_vol_flow_old[:],
self.channel_vol_flow,
where=self.channel_vol_flow != 0.0) ** 2.0)
# print(channel_vol_flow_old)
# print(self.channel_vol_flow)
self.channel_vol_flow_old[:] = self.channel_vol_flow
# print(error)
if error < self.tolerance and i >= self.min_iter:
break
if i == (self.max_iter - 1):
print('maximum number of iterations n = {} '
'with error = {} in update() of {} '
'reached'.format(self.max_iter, error, self))
else:
self.initialize = False
# final channel updates within flow circuit iteration
self.update_channels(update_fluid=True)
try:
self.normalized_flow_distribution[:] = \
self.channel_mass_flow / np.average(self.channel_mass_flow)
except FloatingPointError:
self.normalized_flow_distribution[:] = 0.0
@abstractmethod
def single_loop(self, inlet_mass_flow=None, update_channels=True):
pass
def update_channels(self, update_fluid=True):
if self.initialize:
channel_mass_flow_in = np.ones(self.n_channels) \
* self.mass_flow_in / self.n_channels
channel_mass_flow_out = channel_mass_flow_in
else:
channel_mass_flow_in = self.channel_mass_flow
# channel_mass_flow_out = \
# np.array([channel.mass_flow_total[channel.id_out]
# for channel in self.channels])
# channel_mass_flow_out *= self.n_subchannels
channel_mass_flow_out = self.channel_mass_flow
if self.multi_component:
mass_fraction = \
np.array([channel.fluid.mass_fraction[:, channel.id_out]
for channel in self.channels]).transpose()
else:
mass_fraction = 1.0
mass_source = channel_mass_flow_out * mass_fraction
# mass_source = self.channel_mass_flow * mass_fraction
channel_enthalpy_out = \
np.asarray([ch.g_fluid[ch.id_out] * ch.temperature[ch.id_out]
for ch in self.channels]) * self.n_subchannels
self.manifolds[1].update(mass_flow_in=0.0, mass_source=mass_source,
update_mass=True, update_flow=True,
update_heat=False, update_fluid=update_fluid,
enthalpy_source=channel_enthalpy_out)
# Channel update
for i, channel in enumerate(self.channels):
channel.p_out = ip.interpolate_1d(self.manifolds[1].pressure)[i]
channel.temperature[channel.id_in] = self.manifolds[0].temp_ele[i]
channel.update(mass_flow_in=
channel_mass_flow_in[i] / self.n_subchannels,
update_mass=True, update_flow=True,
update_heat=False, update_fluid=update_fluid)
# Inlet header update
id_in = self.channels[-1].id_in
self.manifolds[0].p_out = self.channels[-1].pressure[id_in]
if self.multi_component:
mass_fraction = self.manifolds[0].fluid.mass_fraction[:, :-1]
else:
mass_fraction = 1.0
mass_source = -self.channel_mass_flow * mass_fraction
self.manifolds[0].update(mass_flow_in=self.mass_flow_in, # * 1.00000,
mass_source=mass_source,
update_mass=True, update_flow=True,
update_heat=False, update_fluid=update_fluid)
id_in = self.manifolds[0].id_in
self.vol_flow_in = \
self.mass_flow_in / self.manifolds[0].fluid.density[id_in]
class KohFlowCircuit(ParallelFlowCircuit):
def __init__(self, dict_flow_circuit, manifolds, channels,
n_subchannels=1.0):
super().__init__(dict_flow_circuit, manifolds, channels,
n_subchannels)
# Distribution factor
self.alpha = np.ones(self.n_channels)
id_in = self.channels[-1].id_in
id_out = self.channels[-1].id_out
self.dp_ref = \
self.channels[-1].pressure[id_in] \
- self.channels[-1].pressure[id_out]
self.k_perm = np.zeros(self.n_channels)
self.l_by_a = np.array([channel.length / channel.cross_area
for channel in self.channels])
self.visc_channel = np.zeros(self.n_channels)
self.dp_channel = np.zeros(self.n_channels)
def single_loop(self, inlet_mass_flow=None, update_channels=True):
"""
Update the flow circuit
"""
if inlet_mass_flow is not None:
self.mass_flow_in = inlet_mass_flow
if update_channels:
self.update_channels()
self.dp_channel[:] = \
np.array([channel.pressure[channel.id_in]
- channel.pressure[channel.id_out]
for channel in self.channels])
self.channel_vol_flow[:] = \
np.array([np.average(channel.vol_flow)
for channel in self.channels])
# if np.min(np.abs(vol_flow_channel)) > g_par.SMALL:
self.visc_channel[:] = \
np.array([np.average(channel.fluid.viscosity)
for channel in self.channels])
# velocity = np.array([np.average(channel.velocity)
# for channel in self.channels])
p_in = ip.interpolate_1d(self.manifolds[0].pressure)
p_out = ip.interpolate_1d(self.manifolds[1].pressure)
if np.any(self.channel_vol_flow == 0.0):
raise ValueError('zero flow rates detected, '
'check boundary conditions')
if self.initialize:
self.k_perm[:] = self.channel_vol_flow / self.dp_channel \
* self.visc_channel * self.l_by_a
self.dp_ref = np.maximum(self.dp_channel[-1], 1e-3)
self.alpha[:] = (p_in - p_out) / self.dp_ref
self.dp_ref = self.vol_flow_in / np.sum(self.alpha) * self.l_by_a \
* self.visc_channel[-1] / self.k_perm[-1] / self.n_subchannels
p_in += self.dp_ref \
+ self.manifolds[1].pressure[self.manifolds[1].id_out] \
- self.manifolds[0].p_out
self.alpha[:] = (p_in - p_out) / self.dp_ref
self.channel_vol_flow[:] = (p_in - p_out) * self.k_perm \
/ self.l_by_a * self.n_subchannels / self.visc_channel
density = np.array([channel.fluid.density[channel.id_in]
for channel in self.channels])
self.channel_mass_flow[:] = self.channel_vol_flow * density
mass_flow_correction = \
self.mass_flow_in / np.sum(self.channel_mass_flow)
self.channel_mass_flow[:] *= mass_flow_correction
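# Hedged note on the Koh-type model above: each channel is treated as a porous element with
# volume flow Q_i = k_perm_i * (A_i / L_i) * (p_in_i - p_out_i) / visc_i, the distribution
# factor alpha_i = (p_in_i - p_out_i) / dp_ref rescales the reference pressure drop, and the
# final mass_flow_correction renormalises the channel flows so they sum to the inlet flow.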
class ModifiedKohFlowCircuit(KohFlowCircuit):
def __init__(self, dict_flow_circuit, manifolds, channels,
n_subchannels=1.0):
super().__init__(dict_flow_circuit, manifolds, channels,
n_subchannels)
self.urf = dict_flow_circuit.get('underrelaxation_factor', 0.5)
def single_loop(self, inlet_mass_flow=None, update_channels=True):
"""
Update the flow circuit
"""
if inlet_mass_flow is not None:
self.mass_flow_in = inlet_mass_flow
if update_channels:
self.update_channels()
self.dp_channel[:] = \
np.array([channel.pressure[channel.id_in]
- channel.pressure[channel.id_out]
for channel in self.channels])
self.channel_vol_flow[:] = self.n_subchannels \
* np.array([np.average(channel.vol_flow)
for channel in self.channels])
# if np.min(np.abs(vol_flow_channel)) > g_par.SMALL:
self.visc_channel[:] = \
np.array([np.average(channel.fluid.viscosity)
for channel in self.channels])
# velocity = np.array([np.average(channel.velocity)
# for channel in self.channels])
p_in = ip.interpolate_1d(self.manifolds[0].pressure)
p_out = ip.interpolate_1d(self.manifolds[1].pressure)
if np.any(self.channel_vol_flow == 0.0):
raise ValueError('zero flow rates detected, '
'check boundary conditions')
# if self.initialize:
# self.k_perm[:] = self.channel_vol_flow / self.dp_channel \
# * self.visc_channel * self.l_by_a
self.alpha[:] = (p_in - p_out) / self.dp_channel
self.channel_vol_flow[:] *= (self.urf + (1.0 - self.urf) * self.alpha)
density = np.array([channel.fluid.density[channel.id_in]
for channel in self.channels])
self.channel_mass_flow[:] = \
self.channel_vol_flow * density
mass_flow_correction = \
self.mass_flow_in / np.sum(self.channel_mass_flow)
self.channel_mass_flow[:] *= mass_flow_correction
class UpdatedKohFlowCircuit(KohFlowCircuit):
def __init__(self, dict_flow_circuit, manifolds, channels,
n_subchannels=1.0):
super().__init__(dict_flow_circuit, manifolds, channels,
n_subchannels)
self.urf = dict_flow_circuit.get('underrelaxation_factor', 0.5)
self.density_channel = np.zeros(self.visc_channel.shape)
def update_manifolds(self, update_fluid=True):
if self.initialize:
channel_mass_flow_in = np.ones(self.n_channels) \
* self.mass_flow_in / self.n_channels
channel_mass_flow_out = channel_mass_flow_in
else:
channel_mass_flow_in = self.channel_mass_flow
channel_mass_flow_out = \
np.array([channel.mass_flow_total[channel.id_out]
for channel in self.channels])
channel_mass_flow_out *= self.n_subchannels
if self.multi_component:
mass_fraction = \
np.array([channel.fluid.mass_fraction[:, channel.id_out]
for channel in self.channels]).transpose()
else:
mass_fraction = 1.0
mass_source = channel_mass_flow_out * mass_fraction
# mass_source = self.channel_mass_flow * mass_fraction
channel_enthalpy_out = \
np.asarray([ch.g_fluid[ch.id_out] * ch.temperature[ch.id_out]
for ch in self.channels]) * self.n_subchannels
self.manifolds[1].update(mass_flow_in=0.0, mass_source=mass_source,
update_mass=True, update_flow=True,
update_heat=False, update_fluid=update_fluid,
enthalpy_source=channel_enthalpy_out)
# Inlet header update
self.manifolds[0].p_out = \
self.manifolds[1].pressure[-1] + self.dp_ref
if self.multi_component:
mass_fraction = self.manifolds[0].fluid.mass_fraction[:, :-1]
else:
mass_fraction = 1.0
mass_source = -self.channel_mass_flow * mass_fraction
self.manifolds[0].update(mass_flow_in=self.mass_flow_in, # * 1.00000,
mass_source=mass_source,
update_mass=True, update_flow=True,
update_heat=False, update_fluid=update_fluid)
id_in = self.manifolds[0].id_in
self.vol_flow_in = \
self.mass_flow_in / self.manifolds[0].fluid.density[id_in]
def single_loop(self, inlet_mass_flow=None, update_channels=True):
"""
Update the flow circuit
"""
if inlet_mass_flow is not None:
self.mass_flow_in = inlet_mass_flow
if self.iteration == 0:
self.visc_channel[:] = \
np.array([np.average(channel.fluid.viscosity)
for channel in self.channels])
self.density_channel[:] = \
np.array([channel.fluid.density[channel.id_in]
for channel in self.channels])
self.dp_channel = \
np.array([channel.pressure[channel.id_in]
- channel.pressure[channel.id_out]
for channel in self.channels])
self.k_perm[:] = self.channel_vol_flow / self.dp_channel \
* self.visc_channel * self.l_by_a / self.n_subchannels
self.dp_ref = self.vol_flow_in / np.sum(self.alpha) * self.l_by_a[-1] \
* self.visc_channel[-1] / self.k_perm[-1] / self.n_subchannels
self.update_manifolds(update_fluid=True)
p_in = ip.interpolate_1d(self.manifolds[0].pressure)
p_out = ip.interpolate_1d(self.manifolds[1].pressure)
alpha = (p_in - p_out) / self.dp_ref
self.alpha[:] = self.alpha * self.urf + alpha * (1.0 - self.urf)
# self.dp_ref = self.vol_flow_in / np.sum(self.alpha) * self.l_by_a \
# * self.visc_channel[-1] / self.k_perm[-1] / self.n_subchannels
# p_in += self.dp_ref \
# + self.manifolds[1].pressure[self.manifolds[1].id_out] \
# - self.manifolds[0].p_out
# self.alpha[:] = (p_in - p_out) / self.dp_ref
channel_vol_flow = (p_in - p_out) * self.k_perm \
/ self.l_by_a * self.n_subchannels / self.visc_channel
self.channel_vol_flow[:] = self.channel_vol_flow * self.urf \
+ channel_vol_flow * (1.0 - self.urf)
# self.channel_vol_flow[:] = (p_in - p_out) * self.k_perm \
# / self.l_by_a * self.n_subchannels / self.visc_channel
self.channel_mass_flow[:] = self.channel_vol_flow * self.density_channel
mass_flow_correction = \
self.mass_flow_in / np.sum(self.channel_mass_flow)
self.channel_mass_flow[:] *= mass_flow_correction
class WangFlowCircuit(ParallelFlowCircuit):
def __init__(self, dict_flow_circuit, manifolds, channels,
n_subchannels=1.0):
super().__init__(dict_flow_circuit, manifolds, channels,
n_subchannels)
# self.zeta = np.zeros(self.n_channels)
self.xsi = 1.0
self.H = self.manifolds[0].cross_area / self.manifolds[1].cross_area
F_c = np.array([np.average(channel.cross_area)
for channel in self.channels])
# print('F_c: ', F_c)
sum_Fc = g_func.add_source(np.copy(F_c), F_c[1:], direction=-1)
# print('sum_Fc: ', sum_Fc)
self.M = sum_Fc / np.average(self.manifolds[0].cross_area)
# print('self.M: ', self.M)
# self.M = np.sum(F_c) / np.average(self.manifolds[0].cross_area)
self.E = self.manifolds[0].length / self.manifolds[0].d_h
self.D_star = self.manifolds[0].d_h / self.manifolds[1].d_h
self.sqr_M = self.M ** 2.0
self.sqr_H = self.H ** 2.0
# print('M = ', self.M)
# print('E = ', self.E)
def update_channels(self):
super().update_channels()
if self.initialize:
self.f_in = np.copy(self.manifolds[0].friction_factor)
self.f_out = np.copy(self.manifolds[1].friction_factor)
# if self.initialize:
self.zeta = np.array([channel.zeta_bends * channel.n_bends
for channel in self.channels]) \
+ np.array([np.sum(channel.friction_factor * channel.dx /
channel.d_h)
for channel in self.channels])
self.zeta[:] += 1.0 + self.manifolds[0].zeta_other \
+ self.manifolds[1].zeta_other
# self.zeta[:] = 10.0
self.initialize = False
def single_loop(self, inlet_mass_flow=None, update_channels=True):
if inlet_mass_flow is not None:
self.mass_flow_in = inlet_mass_flow
id_in = self.manifolds[0].id_in
self.vol_flow_in = self.mass_flow_in \
/ self.manifolds[0].fluid.density[id_in]
self.update_channels()
mfd_in = self.manifolds[0]
mfd_out = self.manifolds[1]
k_in_0 = 0.6
k_out_0 = 1.0
b_in = 0.01
b_out = 0.01
W_0 = self.vol_flow_in / mfd_in.cross_area
# print('W_0: ', W_0)
# print('Re_0:', W_0 * mfd_in.fluid.density[0] * mfd_in.d_h /
# mfd_in.fluid.viscosity[0])
# print('mfd_in.velocity[:-1]: ', mfd_in.velocity[:-1])
# mfd_in.velocity[0] = W_0
# print('zeta = ', self.zeta)
f_in = mfd_in.friction_factor
f_out = mfd_out.friction_factor
# f_in = self.f_in
# f_out = self.f_out
#print('f_in: ', f_in)
#print('f_out: ', f_out)
# f_in[:] = 0.038
# f_out[:] = 0.038
k_in = k_in_0 + b_in * np.log(mfd_in.velocity[:-1] / W_0)
k_out = k_out_0 + b_out * np.log(mfd_out.velocity[:-1] / W_0)
Q = 2.0 / (3.0 * self.zeta) * (k_in - k_out * self.sqr_H) \
* self.sqr_M
R = - 0.25 * self.E * self.xsi / self.zeta \
* (f_in + f_out * self.D_star * self.sqr_H) * self.sqr_M
avg_R = np.average(R)
avg_Q = np.average(Q)
cube_Q = np.power(Q, 3.0)
condition = np.square(R) + cube_Q
avg_condition = np.square(avg_R) + np.power(avg_Q, 3.0)
condition_0 = np.square(R[0]) + np.power(Q[0], 3.0)
x = mfd_in.x / mfd_in.length
one_third = 1.0 / 3.0
# print('avg_condition: ', avg_condition)
# print('condition: ', condition)
w = 1.0
for i in range(self.n_channels):
# print('w_i: ', w)
# k_in_i = k_in_0 + b_in * np.log(w)
# k_out_i = k_out_0 + b_out * np.log(w * self.H)
# Q_i = 2.0 / (3.0 * self.zeta[i]) * (
# k_in_i - k_out_i * self.sqr_H) * self.sqr_M
# R_i = - 0.25 * self.E * self.xsi / self.zeta[i] \
# * (f_in[i] + f_out[i] * self.D_star * self.sqr_H) * self.sqr_M
# cube_Q_i = np.power(Q_i, 3.0)
# square_R_i = np.square(R_i)
# condition_i = square_R_i + cube_Q_i
# print('cube_Q_i: ', cube_Q_i)
# print('square_R_i: ', square_R_i)
condition_i = condition[i]
R_i = R[i]
Q_i = Q[i]
cube_Q_i = cube_Q[i]
# print('condition: ', condition_i)
if condition_i < 0.0:
theta = np.arccos(R_i/np.sqrt(-cube_Q_i))
sqrt_Q = np.sqrt(-Q_i)
r_1 = 2.0 * sqrt_Q * np.cos(theta * one_third)
r_2 = 2.0 * sqrt_Q * np.cos((theta + 2.0*np.pi) * one_third)
w = (np.exp(r_1 + r_2 * x[i+1]) - np.exp(r_2 + r_1 * x[i+1])) \
/ (np.exp(r_1) - np.exp(r_2))
# print('i :', i, ', condition < 0, w: ', w)
elif condition_i == 0.0:
r = - 0.5 * np.power(R_i, one_third)
w = (1.0 - x[i+1]) * np.exp(r*x[i+1])
# print('i :', i, ', condition == 0, w: ', w)
else:
sqrt_condition = | np.sqrt(condition_i) | numpy.sqrt |
################### Libraries Used ##########################
import os,cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam
####################################################################
#################### INITIAL PARAMETERS ###########################
PASTA_DE_DADOS = 'data'
SENSOR_NUMERO = 'S1'
REDUZIR_IMAGEM_PARA = 128 #Always the same number for height and width
NUMERO_EPOCAS = 3
####################################################################
#################### LOADING THE DATA ###########################
data_path = os.getcwd() + '/' + PASTA_DE_DADOS + '/' + SENSOR_NUMERO
data_dir_list = os.listdir(data_path)
num_channel=1
linhas_img = REDUZIR_IMAGEM_PARA
colunas_img = REDUZIR_IMAGEM_PARA
lista_imgs=[] #List that will hold the feature vectors of all the images
def image_to_feature_vector(image, size=(linhas_img, colunas_img)):
# Resize the image to the specified size, then flatten the image
# into a list of pixel intensities.
return cv2.resize(image, size).flatten()
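# Illustrative usage (hedged sketch, not part of the original script): a 536x678 grayscale
# crop resized to 128x128 and flattened becomes a 16384-element feature vector.
#   >>> image_to_feature_vector(np.zeros((536, 678), dtype=np.uint8)).shape
#   (16384,)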
for classe in data_dir_list:
classe_path = data_path+'/'+ classe
img_list=os.listdir(classe_path)
for img in img_list:
input_img=cv2.imread(data_path + '/'+ classe + '/'+ img ) #Read the image at the given path.
input_img = input_img[49:585, 114:792] #Crop away the white border.
input_img=cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY) #Convert the RGB image to grayscale.
#cv2.imwrite('teste.jpg',input_img)
input_img_flatten=image_to_feature_vector(input_img,(linhas_img,colunas_img)) #Convert the grayscale image into a feature vector (resize and flatten).
lista_imgs.append(input_img_flatten) #Append to the main list that stores the feature vectors.
####################################################################
#
# From this point on, the images are available as feature vectors in the
# "lista_imgs" variable. All images in the subfolders of the chosen sensor
# have already been loaded into the program.
#################### PREPROCESSING ###########################
from sklearn.preprocessing import scale
np_lista_imgs = np.array(lista_imgs) #Convert the list of vectorized images into a numpy array.
np_lista_imgs = np_lista_imgs.astype('float32') #Cast to 'float32'
imgs_padronizadas = scale(np_lista_imgs) #Standardize the dataset, centering the values on a scale with mean 0.
imgs_padronizadas= imgs_padronizadas.reshape(np_lista_imgs.shape[0],num_channel,linhas_img,colunas_img) #Reshape the images to match the expected input format.
np_lista_imgs = imgs_padronizadas
####################################################################
#################### CREATING THE CLASSES ###########################
num_classes = 4
num_amostras = np_lista_imgs.shape[0]
labels = np.ones((num_amostras,),dtype='int64')
labels[0:60]=0
labels[60:120]=1
labels[120:180]=2
labels[180:]=3
names = ['d1','d2','d3','integro']
Y = np_utils.to_categorical(labels, num_classes) #One-hot encoding
####################################################################
################ DEFINING THE CNN MODEL #########################
input_shape=np_lista_imgs[0].shape
model = Sequential()
# 1 Camada #
model.add(Convolution2D(filters = 32,
kernel_size = (3,3),
padding = 'same',
input_shape = input_shape
#,activation='relu'
))
model.add(Activation('relu'))
##########
# 2 Camada #
model.add(Convolution2D(filters = 32,
kernel_size = (3, 3)
))
model.add(Activation(activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Dropout(rate = 0.5))
##########
# 3 Camada #
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
##########
# 4 Camada #
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
##########
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
####################################################################
############### VIEWING THE MODEL CONFIGURATION #######################
'''
model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
np.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable
'''
####################################################################
##################### TRAINING THE MODEL #############################
x,y = shuffle(np_lista_imgs,Y, random_state=2) #Shuffle the list of images
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=2) #Split the images for training ( TRAIN | VALIDATION )
model.fit(x = X_train, #Finally, train the network!!
y = y_train,
batch_size=8,
epochs=NUMERO_EPOCAS,
verbose=1,
validation_data=(X_test, y_test))
####################################################################
################ Accuracy and Loss Plots #######################
'''
train_loss=hist.history['loss']
val_loss=hist.history['val_loss']
train_acc=hist.history['acc']
val_acc=hist.history['val_acc']
xc=range(num_epoch)
plt.figure(1,figsize=(7,5))
plt.plot(train_loss,xc)
plt.plot(val_loss,xc)
plt.xlabel('num of Epochs')
plt.ylabel('loss')
plt.title('train_loss vs val_loss')
plt.grid(True)
plt.legend(['train','val'])
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
plt.figure(2,figsize=(7,5))
plt.plot(train_acc,xc)
plt.plot(val_acc,xc)
plt.xlabel('num of Epochs')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train','val'],loc=4)
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
'''
####################################################################
#################### Evaluating a Single Image ############################
IMAGEM_TESTADA_PATH = 'data2/S2/Dano2_S2/d7.jpg'
''' Evaluate all images in the validation split.
score = model.evaluate(X_test, y_test, True, 0)
#print('Test Loss:', score[0])
#print('Test accuracy:', score[1])
test_image = X_test
model.predict_classes(test_image, batch_size=8, verbose=1)
#print(y_test[0:1])
'''
test_image = cv2.imread(IMAGEM_TESTADA_PATH)
test_image = test_image[49:585, 114:792]
test_image=cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
test_image=cv2.resize(test_image,(linhas_img,colunas_img))
test_image = np.array(test_image)
test_image = test_image.astype('float32')
test_image /= 255
test_image= np.expand_dims(test_image, axis=3)
test_image= np.expand_dims(test_image, axis=0)
print((model.predict(test_image)))
print(model.predict_classes(test_image))
####################################################################
################### Visualizing Layer Feature Maps ##########################
# This block must be run together with the "Evaluating a Single Image" block
# so that the test_image variable is initialized.
NUM_CAMADA=2
def get_featuremaps(model, layer_idx, X_batch):
get_activations = K.function([model.layers[0].input, K.learning_phase()],[model.layers[layer_idx].output,])
activations = get_activations([X_batch,0])
return activations
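# Hedged note: K.function builds a callable from the model input to the output of the requested
# layer; passing [X_batch, 0] evaluates it with learning_phase = 0 (test mode), so dropout is
# disabled while the feature maps are extracted.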
activations = get_featuremaps(model, int(NUM_CAMADA),test_image)
print (np.shape(activations))
feature_maps = activations[0][0]
print (np.shape(feature_maps))
num_of_featuremaps=feature_maps.shape[2]
fig=plt.figure(figsize=(16,16))
plt.title("featuremaps-layer-{}".format(NUM_CAMADA))
subplot_num=int(np.ceil( | np.sqrt(num_of_featuremaps) | numpy.sqrt |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 11:47:15 2018
@author: p.tagade
"""
import os
import numpy as np
import json
from extract_features import *
import platform
# -----------------------------------------------------------------------------
def properties(smiles, lumo=None, homo=None):
# -----------------------------------------------------------------------------
''' Function for predicting orbital energies and redox potentials.
The function accepts smiles of an organic molecule as input and provides
orbital energies and redox potential as output.
Inputs:
smiles - smiles representation of organic molecule
Optional inputs:
lumo - Lowest Unoccupied Molecular Orbital Energy in eV.
homo - Highest Occupied Molecular Orbital Energy in eV.
Output:
output_properties - A dictionary of output properties
'''
# -----------------------------------------------------------------------------
property_limits = {}
property_limits['oxidation'] = [-0.485437, 3.68406]
property_limits['reduction'] = [-4.89283, 0.201063]
property_limits['bandgap'] = [-9.06482453, -1.06047362]
property_limits['lumo'] = [-3.91109452, 0.70178201]
property_limits['homo'] = [-8.495, -4.625]
# Output is sent in a dictionary
output_properties = {}
# -----------------------------------------------------------------------------
functional_groups = create_functional_groups()
# -----------------------------------------------------------------------------
# Extracting features from SMILES representation
# -----------------------------------------------------------------------------
smiles_features = extract_features(smiles, functional_groups)
# -----------------------------------------------------------------------------
# Loading coefficients of the correlation
# -----------------------------------------------------------------------------
filepath = os.path.join(os.path.dirname(__file__), 'coefficients.jsn')
with open(filepath, 'r') as fp:
coefficients = json.load(fp)
# -----------------------------------------------------------------------------
# Predicting lumo energy
# -----------------------------------------------------------------------------
lim_lumo = np.array(property_limits['lumo'])
if lumo == None:
lumo_coefs = coefficients['lumo']
lumo = predict(smiles_features, lumo_coefs)
lumo = lumo*(lim_lumo[1] - lim_lumo[0]) + lim_lumo[0]
output_properties['lumo'] = lumo
smiles_features['lumo'] = -2.0 + ((4.0)/(lim_lumo[1] - lim_lumo[0])) * (lumo - lim_lumo[0]);
# -----------------------------------------------------------------------------
# Predicting homo energy
# -----------------------------------------------------------------------------
lim_bandgap = np.array(property_limits['bandgap'])
if homo == None:
homo_coefs = coefficients['homo']
bandgap = predict(smiles_features, homo_coefs)
bandgap = bandgap*(lim_bandgap[1] - lim_bandgap[0]) + lim_bandgap[0]
homo = bandgap + | np.array(lumo) | numpy.array |
"""
Parsers for several prediction tool outputs.
"""
import numpy as np
max_solvent_acc = {'A': 106.0, 'C': 135.0, 'D': 163.0,
'E': 194.0, 'F': 197.0, 'G': 84.0,
'H': 184.0, 'I': 169.0, 'K': 205.0,
'L': 164.0, 'M': 188.0, 'N': 157.0,
'P': 136.0, 'Q': 198.0, 'R': 248.0,
'S': 130.0, 'T': 142.0, 'V': 142.0,
'W': 227.0, 'Y': 222.0}
def scampi(infile, sequence):
"""Parses the scampi output file.
Parameters
----------
infile : str
Scampi file.
sequence : SeqRecord
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
aa2topo = {
'I': [1, 0, 0, 0],
'M': [0, 1, 0, 0],
'O': [0, 0, 1, 0]
}
result = []
with open(infile, 'r') as fh:
for line in fh:
if not line.startswith('>'):
for aa in line.strip():
result.append(aa2topo[aa])
return np.array([result])
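# Illustrative expectation for the parser above (minimal sketch; the file content
# shown here is hypothetical): a scampi prediction line "IMO" for a 3-residue
# sequence would come back as one one-hot row per residue,
#   array([[[1, 0, 0, 0],
#           [0, 1, 0, 0],
#           [0, 0, 1, 0]]])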
def psipred(infile, sequence):
"""Parses the PSIPRED .horiz output file.
Parameters
----------
infile : str
PSIPRED .horiz file.
sequence : SeqRecord
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
aa2sec = {
'H': [1, 0, 0],
'E': [0, 1, 0],
'C': [0, 0, 1]
}
result = []
with open(infile, 'r') as fh:
for line in fh:
if line.startswith('Pred:'):
spl = line.strip().split(' ')
if len(spl) < 2:
continue
for aa in spl[1]:
result.append(aa2sec[aa])
return np.array([result])
def prof(infile, sequence):
"""Parses the prof .profRdb output file.
Parameters
----------
infile : str
Prof .profRdb file.
sequence : SeqRecord
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
aa2sec = {
'H': [1, 0, 0],
'E': [0, 1, 0],
'L': [0, 0, 1]
}
result = []
with open(infile, 'r') as fh:
for line in fh:
if not line.startswith('#') and not line.startswith('No'):
aa = line.strip().split()[3]
result.append(aa2sec[aa])
return np.array([result])
def spot1d_sec(infile, sequence):
"""
Parses the Spot1d output file.
Args:
infile: Spot1d output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return np.loadtxt(infile, usecols=[14, 13, 12], skiprows=1).reshape((1, -1, 3))
def spot1d_phi(infile, sequence):
"""
Parses the Spot1d output file.
Args:
infile: Spot1d output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return np.loadtxt(infile, usecols=10, skiprows=1).reshape((1, -1, 1))
def spot1d_psi(infile, sequence):
"""
Parses the Spot1d output file.
Args:
infile: Spot1d output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return np.loadtxt(infile, usecols=11, skiprows=1).reshape((1, -1, 1))
def spot1d_rsa(infile, sequence):
"""
Parses the Spot1d output file.
Args:
infile: Spot1d output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
data = np.loadtxt(infile, usecols=4, skiprows=1).reshape((1, -1, 1))
for i in range(len(sequence)):
data[0, i, 0] /= max_solvent_acc[sequence[i].upper()]
return data
def anglor(infile, sequence):
"""
Parses the ANGLOR output file.
Args:
infile: ANGLOR output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return np.loadtxt(infile, usecols=1).clip(min=-180, max=180).reshape((1, -1, 1))
def anglor_phi(infile, sequence):
"""
Parses the ANGLOR (phi) output file.
Args:
infile: ANGLOR output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return anglor(infile, sequence)
def anglor_psi(infile, sequence):
"""
Parses the ANGLOR (psi) output file.
Args:
infile: ANGLOR output file.
sequence: SeqRecord object or any other object whichs __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return anglor(infile, sequence)
def memsat_svm(infile, sequence):
"""
Parses the Memsat SVM output file.
Args:
infile: Memsat SVM output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
with open(infile, "r") as fh:
for line in fh:
if line.startswith("Signal peptide:"):
sp = 0
if not line.strip().endswith("Not detected."):
sp = line.split(":")[1].strip().split("-")[1]
elif line.startswith("Topology"):
tms = [[y[0]-1, y[1]] for y in [list(map(int, x.split("-")))
for x in line.split(":")[1].strip().split(",")]]
elif line.startswith("Re-entrant helices:"):
reh = []
if not line.strip().endswith("Not detected."):
reh = [[y[0]-1, y[1]] for y in [list(map(int, x.split("-")))
for x in line.split(":")[1].strip().split(",")]]
elif line.startswith("N-terminal"):
orient = line.split(":")[1].strip()
if orient == "in":
result = [[1, 0, 0, 0] for _ in range(len(sequence))]
orient = "out"
else:
result = [[0, 0, 1, 0] for _ in range(len(sequence))]
orient = "in"
for tm in tms:
for i in range(*tm):
result[i] = [0, 1, 0, 0]
for i in range(tm[1], len(result)):
if orient == "in":
result[i] = [1, 0, 0, 0]
else:
result[i] = [0, 0, 1, 0]
if orient == "in":
orient = "out"
else:
orient = "in"
for r in reh:
for i in range(*r):
result[i] = [0, 0, 0, 1]
return np.array([result])
def polyphobius(infile, sequence):
"""
Parses the Polyphobius output file.
Args:
infile: Polyphobius output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
tms = []
doms = []
with open(infile, "r") as fh:
for line in fh:
if line.startswith("FT"):
split = line.strip().split()
if split[1] == "TOPO_DOM":
if split[4] == "CYTOPLASMIC.":
doms.append(["cyto", int(split[2]) - 1, int(split[3])])
else:
doms.append(
["noncyto", int(split[2]) - 1, int(split[3])])
elif split[1] == "TRANSMEM":
tms.append([int(split[2]) - 1, int(split[3])])
if doms[0][0] == "cyto":
result = [[1, 0, 0, 0] for _ in range(len(sequence))]
else:
result = [[0, 0, 1, 0] for _ in range(len(sequence))]
for dom in doms:
if dom[0] == "cyto":
for i in range(*dom[1:]):
result[i] = [1, 0, 0, 0]
else:
for i in range(*dom[1:]):
result[i] = [0, 0, 1, 0]
for tm in tms:
for i in range(*tm):
result[i] = [0, 1, 0, 0]
return np.array([result])
def predyflexy(infile, sequence):
"""
Parses the Predyflexy output file.
Args:
infile: Predyflexy output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
result = np.loadtxt(infile, usecols=10, skiprows=1).reshape((1, -1, 1))
result[:, :10, 0] = 0
result[:, -10:, 0] = 0
return result
def profbval_strict(infile, sequence):
"""
Parses the profbval (strict) output file.
Args:
infile: Profbval output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
result = np.zeros((1, len(sequence), 1))
with open(infile, "r") as fh:
it = 0
for line in fh:
if not line.startswith("number"):
pred_str = line.strip().split()[5]
if pred_str == "F":
result[0, it, 0] = 1
it += 1
return result
def profbval_bnorm(infile, sequence):
"""
Parses the profbval (normalized bfactors) output file.
Args:
infile: Profbval output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
result = np.zeros((1, len(sequence), 1))
with open(infile, "r") as fh:
it = 0
for line in fh:
if not line.startswith("number"):
result[0, it, 0] = float(line.strip().split()[3])
it += 1
return result
def spinex_phi(infile, sequence):
"""
Parses the SpineX (phi) output file.
Args:
infile: SpineX output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return np.loadtxt(infile, usecols=3, skiprows=1).reshape((1, -1, 1))
def spinex_psi(infile, sequence):
"""
Parses the SpineX (psi) output file.
Args:
infile: SpineX output file.
sequence: SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns:
NumPy array.
"""
return | np.loadtxt(infile, usecols=4, skiprows=1) | numpy.loadtxt |
from precise.skaters.portfoliostatic.diagportfactory import diagonal_portfolio_factory
from precise.skaters.portfoliostatic.diagalloc import diag_alloc
from pprint import pprint
from functools import partial
from precise.skaters.covarianceutil.covfunctions import schur_complement, \
to_symmetric, multiply_by_inverse, inverse_multiply, is_positive_def, nearest_pos_def
from scipy.optimize import root_scalar
from precise.skaters.portfoliostatic.schurportutil import symmetric_step_up_matrix, even_split
from precise.skaters.covarianceutil.covfunctions import try_invert, cov_distance
from seriate import seriate
import numpy as np
from precise.skaters.portfoliostatic.equalport import equal_long_port
def schur_portfolio_factory(seriator=None, alloc=None, port=None, splitter=None, cov=None, pre=None, n_split=5, gamma=1.0, delta=0.0):
"""
A new top-down method
"""
port_kwargs = {'gamma':gamma, 'delta':delta}
return hierarchical_seriation_portfolio_factory(seriator=seriator, alloc=alloc, port=port, splitter=splitter, cov=cov, pre=pre, n_split=n_split, port_kwargs=port_kwargs)
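# Minimal usage sketch (illustrative only; assumes the package is importable and
# that a plain sample covariance is an acceptable input):
# def _demo_schur_portfolio(n_assets=8, n_obs=200, seed=0):
#     rng = np.random.default_rng(seed)
#     returns = rng.standard_normal((n_obs, n_assets))
#     cov = np.cov(returns, rowvar=False)
#     w = schur_portfolio_factory(cov=cov, n_split=5, gamma=0.5)
#     return w  # weights come back in the caller's original asset ordering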
def hierarchical_seriation_portfolio_factory(seriator=None, alloc=None, port=None, splitter=None, cov=None, pre=None, n_split=5, port_kwargs=None):
"""
A class of algorithms that apply seriation ordering then allocate in top-down fashion
:param alloc: Decides how much capital to split between portfolios [covs] -> [ float ]
:param port: Computes a portfolio cov -> [ float ] (Used on the leaves only)
:param splitter: Splits into two groups cov -> (n1,n2)
:param gamma: (0,1) How far to move towards Schur complement
:return:
"""
# Remark. The port and alloc need not be cut from the same cloth
if alloc is None:
alloc = diag_alloc
if port is None:
port = diagonal_portfolio_factory
if splitter is None:
splitter = partial( even_split, n_split=n_split )
if port_kwargs is None:
port_kwargs = {}
port_kwargs.update({'splitter':splitter,'alloc':alloc,'port':port})
return corr_seriation_portfolio_factory(seriator=seriator, port=hierarchical_seriated_portfolio_factory, port_kwargs=port_kwargs, cov=cov, pre=pre)
def corr_seriation_portfolio_factory(port, port_kwargs:dict=None, seriator=None, cov=None, pre=None)->np.ndarray:
"""
A class of methods whose first step is seriation using correlation
:param seriator: Takes a distance matrix and returns an ordering
:param port: Portfolio generator
:param port_kwargs: Arguments to portfolio generator, other than 'cov' and/or 'pre'
:param cov: Original portfolio in arbitrary order
:param pre: Original precision matrix in arbitrary order
:return: w Portfolio weights in original ordering
"""
if cov is None:
cov = try_invert(pre)
if seriator is None:
seriator = seriate
if port_kwargs is None:
port_kwargs = {}
if any(np.diag(cov)<1e-6):
return equal_long_port(cov=cov)
else:
# Establish ordering using seriator and corr distances
try:
cov_dist = cov_distance(cov)
ndx = seriator(cov_dist)
inv_ndx = np.argsort(ndx)
cov_cols = cov[:,ndx]
cov_back = cov_cols[:,inv_ndx]
assert np.allclose(cov,cov_back)
ordered_cov = cov_cols[ndx,:]
except Exception as e:
print('warning: Seriation failed ')
return equal_long_port(cov=cov)
# Allocate capital to ordered assets
ordered_w = port(cov=ordered_cov, **port_kwargs)
# Return to original ordering
try:
w = ordered_w[inv_ndx]
except TypeError:
print('Warning: '+port.__name__+' returns list not array - should really fix this ')
w = np.array(ordered_w)[inv_ndx]
return np.array(w)
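# The ordering trick used above, in isolation (plain numpy sketch): `ndx` reorders
# the assets for seriation and np.argsort(ndx) undoes it, so weights computed in
# the seriated order map straight back to the caller's ordering.
# ndx = np.array([2, 0, 1])
# inv_ndx = np.argsort(ndx)                # array([1, 2, 0])
# w_ordered = np.array([0.5, 0.3, 0.2])    # weights for assets 2, 0, 1
# w_original = w_ordered[inv_ndx]          # array([0.3, 0.2, 0.5])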
def hierarchical_seriated_portfolio_factory(alloc, cov, port, splitter, gamma:float=0.0, delta:float=0):
"""
Assumes assets have been ordered already
"""
n1, n2 = splitter(cov)
if n1==0 or n2==0:
w = port(cov)
if isinstance(w,list):
print('Warning: '+port.__name__+' returns list not array ')
w = np.array(w)
return w
else:
if abs(gamma)<1e-6:
# Hierarchical risk parity (Lopez de Prado)
w = hierarchical_risk_parity(cov=cov, n1=n1, port=port, alloc=alloc, splitter=splitter)
else:
# Schur complementary portfolio construction (yours truly)
w = hierarchical_schur_complementary_portfolio(cov=cov, n1=n1, port=port, alloc=alloc, splitter=splitter, gamma=gamma, delta=delta)
return w
def hierarchical_schur_complementary_portfolio(cov, n1, port, alloc, splitter, delta=0.0, gamma=1.0):
"""
An experimental way to split allocation
"""
A = cov[:n1, :n1]
D = cov[n1:, n1:]
B = cov[:n1, n1:]
C = cov[n1:, :n1] # = B.T
if delta>0:
# Haven't tried this yet :)
rhoB = np.mean(B,axis=None)
rhoCov_raw = cov - rhoB*np.ones_like(cov)
rhoCov = nearest_pos_def(rhoCov_raw)
return hierarchical_schur_complementary_portfolio(cov=rhoCov, n1=n1, port=port, alloc=alloc, splitter=splitter, gamma=gamma)
else:
if gamma>0.0:
# Augment the cov matrices before passing down
max_gamma = _maximal_gamma(A=A, B=B, C=C, D=D)
augA = pseudo_schur_complement(A=A, B=B, C=C, D=D, gamma=gamma * max_gamma)
augD = pseudo_schur_complement(A=D, B=C, C=B, D=A, gamma=gamma * max_gamma)
augmentation_fail = False
if not is_positive_def(augA):
try:
Ag = nearest_pos_def(augA)
except np.linalg.LinAlgError:
augmentation_fail=True
else:
Ag = augA
if not is_positive_def(augD):
try:
Dg = nearest_pos_def(augD)
except np.linalg.LinAlgError:
augmentation_fail=True
else:
Dg = augD
if augmentation_fail:
print('Warning: augmentation failed')
reductionA = 1.0
reductionD = 1.0
reductionRatioA = 1.0
Ag = A
Dg = D
else:
reductionD = np.linalg.norm(Dg)/np.linalg.norm(D)
reductionA = np.linalg.norm(Ag)/np.linalg.norm(A)
reductionRatioA = reductionA/reductionD
else:
reductionRatioA = 1.0
reductionA = 1.0
reductionD = 1.0
Ag = A
Dg = D
wA = hierarchical_seriated_portfolio_factory(alloc=alloc, cov=Ag, port=port, splitter=splitter, gamma=gamma)
wD = hierarchical_seriated_portfolio_factory(alloc=alloc, cov=Dg, port=port, splitter=splitter, gamma=gamma)
aA, aD = alloc(covs=[Ag, Dg])
aA_original, aD_original = alloc( covs=[A,D])
allocationRatioA = (aA/aA_original)
if False:
info = {'reductionA':reductionA,
'reductionD':reductionD,
'reductionRatioA':reductionRatioA,
'allocationRatioA':allocationRatioA}
pprint(info)
w = np.concatenate([aA * np.array(wA), aD * np.array(wD)])
return np.array(w)
def hierarchical_risk_parity(cov, n1, port, alloc, splitter):
"""
Recursive hierarchical risk parity
"""
# https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2708678
A = cov[:n1, :n1]
D = cov[n1:, n1:]
wA = hierarchical_seriated_portfolio_factory(alloc=alloc, cov=A, port=port, splitter=splitter, gamma=0.0)
wD = hierarchical_seriated_portfolio_factory(alloc=alloc, cov=D, port=port, splitter=splitter, gamma=0.0)
aA, aD = alloc(covs=[A, D])
w = np.concatenate([aA * np.array(wA), aD * np.array(wD)])
return w
def pseudo_schur_complement(A, B, C, D, gamma, warn=False):
"""
Augmented cov matrix for "A" inspired by the Schur complement
"""
try:
Ac_raw = schur_complement(A=A, B=B, C=C, D=D, gamma=gamma)
nA = np.shape(A)[0]
nD = np.shape(D)[0]
Ac = to_symmetric(Ac_raw)
M = symmetric_step_up_matrix(n1=nA, n2=nD)
Mt = np.transpose(M)
BDinv = multiply_by_inverse(B, D, throw=False)
BDinvMt = np.dot(BDinv, Mt)
Ra = np.eye(nA) - gamma * BDinvMt
Ag = inverse_multiply(Ra, Ac, throw=False, warn=False)
except np.linalg.LinAlgError:
if warn:
print('Pseudo-schur failed, falling back to A')
Ag = A
return Ag
def _maximal_gamma(A,B,C,D):
def _gamma_objective(gamma, A, B, C, D):
Ag = pseudo_schur_complement(A=A, B=B, C=C, D=D, gamma=gamma)
Dg = pseudo_schur_complement(A=D, B=C, C=B, D=A, gamma=gamma)
pos_def = is_positive_def(Ag) and is_positive_def(Dg)
return -0.01 if pos_def else 1.0
try:
sol = root_scalar(f=_gamma_objective, args=(A,B,C,D), method='bisect', x0=0.25,
x1=0.5, xtol=0.05, bracket=(0,0.95), maxiter=5)
return min(max(sol.root - 0.1, 0), 1.0)
except ValueError:
return 0.0
if __name__=='__main__':
M = symmetric_step_up_matrix(n1=7, n2=6)
print(np.dot(M, | np.ones(6) | numpy.ones |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 11:59:35 2020
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import data_manager as dm
import scipy.stats
# CASE, RUNS, STEPS[, LEVELS]
def mean_confidence_interval(data, axis, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a,axis=axis), scipy.stats.sem(a,axis=axis)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
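# Illustrative call (minimal sketch): for data shaped (RUNS, STEPS), axis=0 gives
# the mean trajectory and the half-width of a 95% confidence band per step.
# demo = np.random.default_rng(0).normal(size=(30, n_steps))
# m, h = mean_confidence_interval(demo, axis=0)
# plt.fill_between(np.arange(n_steps), m - h, m + h, alpha=0.3)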
#Specify plot to generate
plot_num = 1
# Define constants
n_steps = 100
x_values = | np.arange(n_steps) | numpy.arange |
from .common import Benchmark
import numpy as np
avx_ufuncs = ['sin',
'cos',
'exp',
'log',
'sqrt',
'absolute',
'reciprocal',
'square',
'rint',
'floor',
'ceil' ,
'trunc',
'frexp',
'isnan',
'isfinite',
'isinf',
'signbit']
stride = [1, 2, 4]
dtype = ['f', 'd']
class AVX_UFunc(Benchmark):
params = [avx_ufuncs, stride, dtype]
param_names = ['avx_based_ufunc', 'stride', 'dtype']
timeout = 10
def setup(self, ufuncname, stride, dtype):
np.seterr(all='ignore')
try:
self.f = getattr(np, ufuncname)
except AttributeError:
raise NotImplementedError()
N = 10000
self.arr = np.ones(stride*N, dtype)
def time_ufunc(self, ufuncname, stride, dtype):
self.f(self.arr[::stride])
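# Illustrative manual invocation (these classes are normally driven by a benchmark
# runner such as asv; this sketch only shows the parameter plumbing):
# bench = AVX_UFunc()
# bench.setup('sqrt', 2, 'd')
# bench.time_ufunc('sqrt', 2, 'd')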
class AVX_UFunc_log(Benchmark):
params = [stride, dtype]
param_names = ['stride', 'dtype']
timeout = 10
def setup(self, stride, dtype):
np.seterr(all='ignore')
N = 10000
self.arr = np.array(np.random.random_sample(stride*N), dtype=dtype)
def time_log(self, stride, dtype):
np.log(self.arr[::stride])
avx_bfuncs = ['maximum',
'minimum']
class AVX_BFunc(Benchmark):
params = [avx_bfuncs, dtype, stride]
param_names = ['avx_based_bfunc', 'dtype', 'stride']
timeout = 10
def setup(self, ufuncname, dtype, stride):
np.seterr(all='ignore')
try:
self.f = getattr(np, ufuncname)
except AttributeError:
raise NotImplementedError()
N = 10000
self.arr1 = np.array(np.random.rand(stride*N), dtype=dtype)
self.arr2 = np.array(np.random.rand(stride*N), dtype=dtype)
def time_ufunc(self, ufuncname, dtype, stride):
self.f(self.arr1[::stride], self.arr2[::stride])
class AVX_ldexp(Benchmark):
params = [dtype, stride]
param_names = ['dtype', 'stride']
timeout = 10
def setup(self, dtype, stride):
np.seterr(all='ignore')
self.f = getattr(np, 'ldexp')
N = 10000
self.arr1 = np.array(np.random.rand(stride*N), dtype=dtype)
self.arr2 = np.array(np.random.rand(stride*N), dtype='i')
def time_ufunc(self, dtype, stride):
self.f(self.arr1[::stride], self.arr2[::stride])
cmplx_bfuncs = ['add',
'subtract',
'multiply',
'divide']
cmplxstride = [1, 2, 4]
cmplxdtype = ['F', 'D']
class AVX_cmplx_arithmetic(Benchmark):
params = [cmplx_bfuncs, cmplxstride, cmplxdtype]
param_names = ['bfunc', 'stride', 'dtype']
timeout = 10
def setup(self, bfuncname, stride, dtype):
| np.seterr(all='ignore') | numpy.seterr |
import numpy as np
import matplotlib.pyplot as plt
import EMStatics as EM
plt.close("all")
Charge = 1
Size = np.array([49, 49, 49], dtype = int)
approx_n = 0.1
exact = False
x0 = np.array([-1, -1, -1], dtype = float)
delta_x = np.array([2, 2, 2], dtype = float)
# Create J function
def J(dx, N, x0, c, mu0):
J_Array = np.zeros(tuple(N) + (4,))
J_Array[int(N[0] / 2), int(N[1] / 2), int(N[2] / 2), 0] = Charge / np.prod(dx)
J = EM.to_vector(J_Array, N)
def GetJ(t):
return J
return GetJ
# Create simulation class
Sim = EM.sim(Size, delta_x = delta_x, x0 = x0, approx_n = approx_n, J = J)
# Solve the system
print("Solve time = %.2g s" %(Sim.solve(exact = exact, progress = 1)))
def scale(x):
return np.log(x)
# Get points to sample over
Width = np.array([2, 2], dtype = float)
Center = np.array([0, 0, 0], dtype = float)
Points = EM.sample_points_plane(np.array([1, 0, 0], dtype = float), np.array([0, 1, 0], dtype = float), Center, Width, np.array([1000, 1000]))
Values = Sim.sample_values(Sim.get_V(), Points)
# Plot V in xy plane
EM.plot_scalar(Values, extent = [Center[0] - Width[0] / 2, Center[0] + Width[0] / 2, Center[1] - Width[1] / 2, Center[1] + Width[1] / 2], scale = scale)
# Get points to sample over
Start = np.array([0, 0, 0], dtype = float)
End = np.array([1, 0, 0], dtype = float)
Points2 = EM.sample_points_line(Start, End, 1000)
Values2 = Sim.sample_values(Sim.get_V(), Points2)
# Plot V along x-axis
_, ax2, Plot = EM.plot_1D(Values2, extent = [Start[0], End[0]], label = "Sim")
ax2.plot( | np.linspace(0.01, 1, 1000) | numpy.linspace |
""" Projection of a point on a line.
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import numpy as np
def Projection(point, line):
assert len(point)==1
assert len(line)==2
x = np.array(point[0])
x1 = | np.array(line[0]) | numpy.array |
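# Standalone sketch of the orthogonal projection the truncated Projection()
# presumably computes (an assumption; the helper name below is illustrative):
# def project_point_on_line(p, a, b):
#     p, a, b = map(np.asarray, (p, a, b))
#     d = b - a
#     t = np.dot(p - a, d) / np.dot(d, d)
#     return a + t * d
# project_point_on_line([1.0, 1.0], [0.0, 0.0], [2.0, 0.0])  # -> array([1., 0.])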
from sklearn import preprocessing
from tqdm import tqdm
import time
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score, recall_score
from sklearn.metrics import precision_score, f1_score
from sklearn.metrics import classification_report
from core.utils import AverageMeter
from itertools import cycle
from core.utils import save_checkpoint
from sklearn.metrics import roc_auc_score, roc_curve, auc
# from .grad_cam_log import record_output_gradcam
import matplotlib.pyplot as plt
target_names_dict = {"Non": 0, "Venous": 1, "Aterial": 2, "Others": 3}
map_id_name = {0: "Non Contrast", 1: "Venous", 2: "Aterial", 3: "Others"}
def valid_model(
cfg,
mode,
epoch,
model,
dataloader,
criterion,
writer=None,
save_prediction=True,
best_metric=None,
visual=False
):
"""Evaluate model performance on Validating dataset
Args:
cfg (CfgNode): Config object containing running configuration
mode (str): Model running mode (valid/test)
model (nn.Module): Model that need to have performance evaluated
dataloader (data.DataLoader): Dataloader object to load data batch-wise
criterion: Loss function
writer (Summarywriter): Logger that log validation loss and plot it on Tensorboard
save_prediction (Boolean): Whether to save prediction output or not (for bootstraping)
best_metric (float, optional): Best performance result of loaded model. Defaults to None.
"""
# Declare variables
gpu = cfg.SYSTEM.GPU
output_log_dir = cfg.DIRS.OUTPUTS
model.eval()
losses = AverageMeter()
tbar = tqdm(dataloader)
targets, preds, filenames, study_IDs, seriesNumbers = (
list(),
list(),
list(),
list(),
list(),
)
data = dict()
total_time = 0
all_probs = []
for i, (filename, study_ID, seriesNumber, image, target) in enumerate(tbar):
with torch.no_grad():
image = image.float()
if gpu:
image, target = image.cuda(), target.cuda()
start = time.time()
output = model(image)
end = time.time()
# Output prediction
sigmoid = nn.Sigmoid()
probs = sigmoid(output)
pred = torch.argmax(probs, 1)
probs = probs.cpu().numpy()
all_probs.append(probs)
# print(probs.shape)
# print(pred.shape)
# print("_--------------_")
total_time += end - start
# Compute loss
loss = criterion(output, target)
# Record loss
losses.update(loss.item() * cfg.SOLVER.GD_STEPS, target.size(0))
tbar.set_description("Valid loss: %.9f" % (losses.avg))
# Convert target, prediction to numpy
target = list(target.detach().cpu().numpy())
pred = list(pred.detach().cpu().numpy())
# print(pred)
filename = list(filename)
targets += target
preds += pred
filenames += filename
study_IDs += study_ID
seriesNumbers += list(np.array(seriesNumber))
# print(f"Inference time =", (total_time / len(tbar)) / 100)
all_targets = []
for idx in range(len(targets)):
cur = [0] * 4
cur[targets[idx]] = 1
all_targets.append([cur])
all_probs = np.concatenate(all_probs, axis=0)
all_target = np.concatenate(all_targets, axis=0)
# print(all_target.shape)
# print(all_probs.shape)
np.save("target.npy", all_target)
np.save("probs.npy", all_probs)
# print(type(targets), len(targets))
# print(all_probs.shape)
if visual:
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(4):
fpr[i], tpr[i], _ = roc_curve(all_target[:, i], all_probs[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
colors = cycle(["aqua", "darkorange", "cornflowerblue", "red"])
lw = 2
plt.figure()
for i, color in zip(range(4), colors):
plt.plot(
fpr[i],
tpr[i],
color=color,
lw=lw,
label=f"ROC curve of class {map_id_name[i]} (area = {roc_auc[i]})"
)
plt.plot([0, 1], [0, 1], "k--", lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Some extension of Receiver operating characteristic to multiclass")
plt.legend(loc="lower right")
plt.show()
# Record wrongly predicted sample and save confusion matrix
# record_output(cfg, mode, output_log_dir, study_IDs, seriesNumbers,
# targets, preds, filenames)
# record_output_gradcam(cfg, mode, output_log_dir, targets, preds, filenames, model)
# Calculate Metrics
accuracy = accuracy_score(targets, preds)
recall = recall_score(targets, preds, average="macro")
precision = precision_score(targets, preds, average="macro")
f1 = f1_score(targets, preds, average="macro")
print(
"ACCURACY: %.9f, RECALL: %.9f, PRECISION: %.9f, F1: %.9f"
% (accuracy, recall, precision, f1)
)
if len( | np.unique(preds) | numpy.unique |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, print_function, division
from itertools import product
from .six import integer_types
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from .. import healpy as hp_compat
# NOTE: If healpy is installed, we use it in these tests, but healpy is not a
# formal dependency of astropy-healpix.
hp = pytest.importorskip('healpy')
from hypothesis import given, settings, example
from hypothesis.strategies import integers, floats, booleans
from hypothesis.extra.numpy import arrays
NSIDE_VALUES = [2 ** n for n in range(1, 6)]
@pytest.mark.parametrize(('nside', 'degrees'), product(NSIDE_VALUES, (False, True)))
def test_nside2pixarea(nside, degrees):
actual = hp_compat.nside2pixarea(nside=nside, degrees=degrees)
expected = hp.nside2pixarea(nside=nside, degrees=degrees)
assert_equal(actual, expected)
@pytest.mark.parametrize(('nside', 'arcmin'), product(NSIDE_VALUES, (False, True)))
def test_nside2resol(nside, arcmin):
actual = hp_compat.nside2resol(nside=nside, arcmin=arcmin)
expected = hp.nside2resol(nside=nside, arcmin=arcmin)
assert_equal(actual, expected)
@pytest.mark.parametrize('nside', NSIDE_VALUES)
def test_nside2npix(nside):
actual = hp_compat.nside2npix(nside)
expected = hp.nside2npix(nside)
assert_equal(actual, expected)
@pytest.mark.parametrize('level', [0, 3, 7])
def test_order2nside(level):
actual = hp_compat.order2nside(level)
expected = hp.order2nside(level)
assert_equal(actual, expected)
@pytest.mark.parametrize('npix', [12 * 2 ** (2 * n) for n in range(1, 6)])
def test_npix2nside(npix):
actual = hp_compat.npix2nside(npix)
expected = hp.npix2nside(npix)
assert_equal(actual, expected)
# For the test below, we exclude latitudes that fall exactly on the pole or
# the equator since points that fall at exact boundaries are ambiguous.
@given(nside_pow=integers(0, 29), nest=booleans(), lonlat=booleans(),
lon=floats(0, 360, allow_nan=False, allow_infinity=False).filter(lambda lon: abs(lon) > 1e-10),
lat=floats(-90, 90, allow_nan=False, allow_infinity=False).filter(
lambda lat: abs(lat) < 89.99 and abs(lat) > 1e-10))
@settings(max_examples=2000, derandomize=True)
def test_ang2pix(nside_pow, lon, lat, nest, lonlat):
nside = 2 ** nside_pow
if lonlat:
theta, phi = lon, lat
else:
theta, phi = np.pi / 2. - np.radians(lat), np.radians(lon)
ipix1 = hp_compat.ang2pix(nside, theta, phi, nest=nest, lonlat=lonlat)
ipix2 = hp.ang2pix(nside, theta, phi, nest=nest, lonlat=lonlat)
assert ipix1 == ipix2
def test_ang2pix_shape():
ipix = hp_compat.ang2pix(8, 1., 2.)
assert isinstance(ipix, integer_types)
ipix = hp_compat.ang2pix(8, [[1., 2.], [3., 4.]], [[1., 2.], [3., 4.]])
assert ipix.shape == (2, 2)
def test_pix2ang_shape():
lon, lat = hp_compat.pix2ang(8, 1)
assert isinstance(lon, float)
assert isinstance(lat, float)
lon, lat = hp_compat.pix2ang(8, [[1, 2, 3], [4, 5, 6]])
assert lon.shape == (2, 3)
assert lat.shape == (2, 3)
@given(nside_pow=integers(0, 29), nest=booleans(), lonlat=booleans(),
frac=floats(0, 1, allow_nan=False, allow_infinity=False).filter(lambda x: x < 1))
@settings(max_examples=2000, derandomize=True)
@example(nside_pow=29, frac=0.1666666694606345, nest=False, lonlat=False)
@example(nside_pow=27, frac=2./3., nest=True, lonlat=False)
def test_pix2ang(nside_pow, frac, nest, lonlat):
nside = 2 ** nside_pow
ipix = int(frac * 12 * nside ** 2)
theta1, phi1 = hp_compat.pix2ang(nside, ipix, nest=nest, lonlat=lonlat)
theta2, phi2 = hp.pix2ang(nside, ipix, nest=nest, lonlat=lonlat)
if lonlat:
assert_allclose(phi1, phi2, atol=1e-8)
if abs(phi1) < 90:
assert_allclose(theta1, theta2, atol=1e-10)
else:
assert_allclose(theta1, theta2, atol=1e-8)
if theta1 > 0:
assert_allclose(phi1, phi2, atol=1e-10)
@given(nside_pow=integers(0, 29), nest=booleans(),
x=floats(-1, 1, allow_nan=False, allow_infinity=False).filter(lambda x: abs(x) > 1e-10),
y=floats(-1, 1, allow_nan=False, allow_infinity=False).filter(lambda y: abs(y) > 1e-10),
z=floats(-1, 1, allow_nan=False, allow_infinity=False).filter(lambda z: abs(z) > 1e-10))
@settings(max_examples=2000, derandomize=True)
def test_vec2pix(nside_pow, x, y, z, nest):
nside = 2 ** nside_pow
ipix1 = hp_compat.vec2pix(nside, x, y, z, nest=nest)
ipix2 = hp.vec2pix(nside, x, y, z, nest=nest)
assert ipix1 == ipix2
@given(nside_pow=integers(0, 29), nest=booleans(),
frac=floats(0, 1, allow_nan=False, allow_infinity=False).filter(lambda x: x < 1))
@settings(max_examples=2000, derandomize=True)
@example(nside_pow=29, frac=0.1666666694606345, nest=False)
def test_pix2vec(nside_pow, frac, nest):
nside = 2 ** nside_pow
ipix = int(frac * 12 * nside ** 2)
xyz1 = hp_compat.pix2vec(nside, ipix, nest=nest)
xyz2 = hp.pix2vec(nside, ipix, nest=nest)
assert_allclose(xyz1, xyz2, atol=1e-8)
def test_vec2pix_shape():
ipix = hp_compat.vec2pix(8, 1., 2., 3.)
assert isinstance(ipix, integer_types)
ipix = hp_compat.vec2pix(8, [[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]])
assert ipix.shape == (2, 2)
def test_pix2vec_shape():
x, y, z = hp_compat.pix2vec(8, 1)
assert isinstance(x, float)
assert isinstance(y, float)
assert isinstance(z, float)
x, y, z = hp_compat.pix2vec(8, [[1, 2, 3], [4, 5, 6]])
assert x.shape == (2, 3)
assert y.shape == (2, 3)
assert z.shape == (2, 3)
@given(nside_pow=integers(0, 29),
frac=floats(0, 1, allow_nan=False, allow_infinity=False).filter(lambda x: x < 1))
@settings(max_examples=2000, derandomize=True)
def test_nest2ring(nside_pow, frac):
nside = 2 ** nside_pow
nest = int(frac * 12 * nside ** 2)
ring1 = hp_compat.nest2ring(nside, nest)
ring2 = hp.nest2ring(nside, nest)
assert ring1 == ring2
@given(nside_pow=integers(0, 29),
frac=floats(0, 1, allow_nan=False, allow_infinity=False).filter(lambda x: x < 1))
@settings(max_examples=2000, derandomize=True)
@example(nside_pow=29, frac=0.16666666697710755)
def test_ring2nest(nside_pow, frac):
nside = 2 ** nside_pow
ring = int(frac * 12 * nside ** 2)
nest1 = hp_compat.ring2nest(nside, ring)
nest2 = hp.ring2nest(nside, ring)
assert nest1 == nest2
@given(nside_pow=integers(0, 29), step=integers(1, 10), nest=booleans(),
frac=floats(0, 1, allow_nan=False, allow_infinity=False).filter(lambda x: x < 1))
@settings(max_examples=500, derandomize=True)
def test_boundaries(nside_pow, frac, step, nest):
nside = 2 ** nside_pow
pix = int(frac * 12 * nside ** 2)
b1 = hp_compat.boundaries(nside, pix, step=step, nest=nest)
b2 = hp.boundaries(nside, pix, step=step, nest=nest)
assert_allclose(b1, b2, atol=1e-8)
def test_boundaries_shape():
pix = 1
b1 = hp_compat.boundaries(8, pix, step=4)
b2 = hp.boundaries(8, pix, step=4)
assert b1.shape == b2.shape
pix = [1, 2, 3, 4, 5]
b1 = hp_compat.boundaries(8, pix, step=4)
b2 = hp.boundaries(8, pix, step=4)
assert b1.shape == b2.shape
def not_at_origin(vec):
return np.linalg.norm(vec) > 0
@given(vectors=arrays(float, (3,), elements=floats(-1, 1)).filter(not_at_origin),
lonlat=booleans(), ndim=integers(0, 4))
@settings(max_examples=500, derandomize=True)
def test_vec2ang(vectors, lonlat, ndim):
vectors = np.broadcast_to(vectors, (2,) * ndim + (3,))
theta1, phi1 = hp_compat.vec2ang(vectors, lonlat=lonlat)
theta2, phi2 = hp.vec2ang(vectors, lonlat=lonlat)
# Healpy sometimes returns NaNs for phi (somewhat incorrectly)
phi2 = np.nan_to_num(phi2)
assert_allclose(theta1, theta2, atol=1e-10)
assert_allclose(phi1, phi2, atol=1e-10)
@given(lonlat=booleans(),
lon=floats(0, 360, allow_nan=False, allow_infinity=False).filter(lambda lon: abs(lon) > 1e-10),
lat=floats(-90, 90, allow_nan=False, allow_infinity=False).filter(
lambda lat: abs(lat) < 89.99 and abs(lat) > 1e-10))
@settings(max_examples=2000, derandomize=True)
def test_ang2vec(lon, lat, lonlat):
if lonlat:
theta, phi = lon, lat
else:
theta, phi = np.pi / 2. - np.radians(lat), | np.radians(lon) | numpy.radians |
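# For reference, the (theta, phi) = (colatitude, longitude) convention used in
# these tests maps to a unit vector via standard spherical coordinates
# (minimal sketch):
# def _ang_to_unit_vector(theta, phi):
#     return np.array([np.sin(theta) * np.cos(phi),
#                      np.sin(theta) * np.sin(phi),
#                      np.cos(theta)])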
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import division, print_function
import ast
from copy import copy
from collections import OrderedDict
from math import cos, sin
import numpy as np
from six import string_types
import warnings
from ase.geometry import cellpar_to_cell, complete_cell, get_distances
from matplotlib.colors import rgb2hex
from scipy.interpolate import interp1d
from pyiron.atomistics.structure.atom import Atom
from pyiron.atomistics.structure.sparse_list import SparseArray, SparseList
from pyiron.atomistics.structure.periodic_table import PeriodicTable, ChemicalElement, ElementColorDictionary
from pyiron.base.settings.generic import Settings
from scipy.spatial import cKDTree, Voronoi
try:
import spglib
except ImportError:
try:
import pyspglib as spglib
except ImportError:
raise ImportError("The spglib package needs to be installed")
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
class Atoms(object):
"""
The Atoms class represents all the information required to describe a structure at the atomic scale. This class is
written in such a way that it is compatible with the `ASE atoms class`_. Some of the functions in this module are based
on the corresponding implementation in the ASE package
Args:
elements (list/numpy.ndarray): List of strings containing the elements or a list of
atomistics.structure.periodic_table.ChemicalElement instances
numbers (list/numpy.ndarray): List of atomic numbers of elements
symbols (list/numpy.ndarray): List of chemical symbols
positions (list/numpy.ndarray): List of positions
scaled_positions (list/numpy.ndarray): List of scaled positions (relative coordinates)
pbc (list/numpy.ndarray/boolean): Tells if periodic boundary conditions should be applied on the three axes
cell (list/numpy.ndarray instance): A 3x3 array representing the lattice vectors of the structure
Note: Only one of elements/symbols or numbers should be assigned during initialization
Attributes:
indices (numpy.ndarray): A list of size N which gives the species index of the structure which has N atoms
.. _ASE atoms class: https://wiki.fysik.dtu.dk/ase/ase/atoms.html
"""
def __init__(self, symbols=None, positions=None, numbers=None, tags=None, momenta=None, masses=None,
magmoms=None, charges=None, scaled_positions=None, cell=None, pbc=None, celldisp=None, constraint=None,
calculator=None, info=None, indices=None, elements=None, dimension=None, species=None,
**qwargs):
if symbols is not None:
if elements is None:
elements = symbols
else:
raise ValueError("Only elements OR symbols should be given.")
if tags is not None or momenta is not None or masses is not None or charges is not None \
or celldisp is not None or constraint is not None or calculator is not None or info is not None:
s.logger.debug('Not supported parameter used!')
self._store_elements = dict()
self._species_to_index_dict = None
self.colorLut = ElementColorDictionary().to_lut()
self._is_scaled = False
if cell is not None:
# make it ASE compatible
if np.linalg.matrix_rank(cell) == 1:
cell = np.eye(len(cell)) * cell
else:
cell = np.array(cell)
self._cell = cell
self._species = list()
self.positions= None
self._pse = PeriodicTable()
self._tag_list = SparseArray()
self.indices = np.array([])
self._info = dict()
self.arrays = dict()
self.adsorbate_info = {}
self.bonds = None
self._pbc = False
self.dimension = 3 # Default
self.units = {"length": "A", "mass": "u"}
el_index_lst = list()
element_list = None
if (elements is None) and (numbers is None) and (indices is None):
return
if numbers is not None: # for ASE compatibility
if not (elements is None):
raise AssertionError()
elements = self.numbers_to_elements(numbers)
if elements is not None:
el_object_list = None
if isinstance(elements, str):
element_list = self.convert_formula(elements)
elif isinstance(elements, (list, tuple, np.ndarray)):
if not all([isinstance(el, elements[0].__class__) for el in elements]):
object_list = list()
for el in elements:
if isinstance(el, (str, np.str, np.str_)):
object_list.append(self.convert_element(el))
if isinstance(el, ChemicalElement):
object_list.append(el)
if isinstance(el, Atom):
object_list.append(el.element)
if isinstance(el, (int, np.integer)):
# pse = PeriodicTable()
object_list.append(self._pse.element(el))
el_object_list = object_list
if len(elements) == 0:
element_list = elements
else:
if isinstance(elements[0], (list, tuple, np.ndarray)):
elements = np.array(elements).flatten()
if isinstance(elements[0], string_types):
element_list = elements
elif isinstance(elements[0], ChemicalElement):
el_object_list = elements
elif isinstance(elements[0], Atom):
el_object_list = [el.element for el in elements]
positions = [el.position for el in elements]
elif elements.dtype in [int, np.integer]:
el_object_list = self.numbers_to_elements(elements)
else:
raise ValueError('Unknown static type for element in list: ' + str(type(elements[0])))
if el_object_list is None:
el_object_list = [self.convert_element(el) for el in element_list]
self.set_species(list(set(el_object_list)))
# species_to_index_dict = {el: i for i, el in enumerate(self.species)}
el_index_lst = [self._species_to_index_dict[el] for el in el_object_list]
elif indices is not None:
el_index_lst = indices
self.set_species(species)
if scaled_positions is not None:
if positions is not None:
raise ValueError("either position or scaled_positions can be given")
if cell is None:
raise ValueError('scaled_positions can only be used with a given cell')
positions = np.dot(np.array(cell).T, np.array(scaled_positions).T).T
if positions is None:
self.dimension = 3
if cell is not None:
positions = np.zeros((len(el_index_lst), self.dimension))
self.indices = np.array(el_index_lst)
self.positions = np.array(positions).astype(np.float)
self._tag_list._length = len(positions)
for key, val in qwargs.items():
print('set qwargs (ASE): ', key, val)
setattr(self, key, val)
if len(positions) > 0:
self.dimension = len(positions[0])
else:
self.dimension = 3
if dimension is not None:
self.dimension = dimension
if cell is not None:
if pbc is None:
self.pbc = True # default setting
else:
self.pbc = pbc
self.set_initial_magnetic_moments(magmoms)
@property
def cell(self):
"""
numpy.ndarray: A size 3x3 array which gives the lattice vectors of the cell as [a1, a2, a3]
"""
return self._cell
@cell.setter
def cell(self, value):
if value is None:
self._cell = None
else:
if self._is_scaled:
self.set_cell(value, scale_atoms=True)
else:
self.set_cell(value)
@property
def species(self):
"""
list: A list of atomistics.structure.periodic_table.ChemicalElement instances
"""
return self._species
# @species.setter
def set_species(self, value):
"""
Setting the species list
Args:
value (list): A list atomistics.structure.periodic_table.ChemicalElement instances
"""
if value is None:
return
value = list(value)
self._species_to_index_dict = {el: i for i, el in enumerate(value)}
self._species = value[:]
self._store_elements = {el.Abbreviation: el for el in value}
@property
def info(self):
"""
dict: This dictionary is merely used to be compatible with the ASE Atoms class.
"""
return self._info
@info.setter
def info(self, val):
self._info = val
@property
def pbc(self):
"""
list: A list of boolean values which give the periodic boundary conditions along the three axes.
The default value is [True, True, True]
"""
if not isinstance(self._pbc, np.ndarray):
self.set_pbc(self._pbc)
return self._pbc
@pbc.setter
def pbc(self, val):
self._pbc = val
@property
def elements(self):
"""
numpy.ndarray: A size N list of atomistics.structure.periodic_table.ChemicalElement instances according
to the ordering of the atoms in the instance
"""
return np.array([self.species[el] for el in self.indices])
def new_array(self, name, a, dtype=None, shape=None):
"""
Adding a new array to the instance. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
shape (list/tuple): Shape of the array
"""
if dtype is not None:
a = np.array(a, dtype, order='C')
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
if not a.flags['C_CONTIGUOUS']:
a = np.ascontiguousarray(a)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""
Get an array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the required array
copy (bool): True if a copy of the array is to be returned
Returns:
An array of a copy of the array
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""
Update array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
shape (list/tuple): Shape of the array
"""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def add_tag(self, *args, **qwargs):
"""
Add tags to the atoms object.
Examples:
For selective dynamics::
>>> self.add_tag(selective_dynamics=[False, False, False])
"""
self._tag_list.add_tag(*args, **qwargs)
# @staticmethod
def numbers_to_elements(self, numbers):
"""
Convert atomic numbers in element objects (needed for compatibility with ASE)
Args:
numbers (list): List of Element Numbers (as Integers; default in ASE)
Returns:
list: A list of elements as needed for pyiron
"""
# pse = PeriodicTable() # TODO; extend to internal PSE which can contain additional elements and tags
atom_number_to_element = {}
for i_el in set(numbers):
i_el = int(i_el)
atom_number_to_element[i_el] = self._pse.element(i_el)
return [atom_number_to_element[i_el] for i_el in numbers]
def copy(self):
"""
Returns a copy of the instance
Returns:
pyiron.atomistics.structure.atoms.Atoms: A copy of the instance
"""
return self.__copy__()
def to_hdf(self, hdf, group_name="structure"):
"""
Save the object in a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
group_name (str):
Group name with which the object should be stored. This same name should be used to retrieve the object
"""
# import time
with hdf.open(group_name) as hdf_structure:
# time_start = time.time()
hdf_structure["TYPE"] = str(type(self))
for el in self.species:
if isinstance(el.tags, dict):
with hdf_structure.open("new_species") as hdf_species:
el.to_hdf(hdf_species)
hdf_structure['species'] = [el.Abbreviation for el in self.species]
hdf_structure["indices"] = self.indices
with hdf_structure.open("tags") as hdf_tags:
for tag in self._tag_list.keys():
tag_value = self._tag_list[tag]
if isinstance(tag_value, SparseList):
tag_value.to_hdf(hdf_tags, tag)
hdf_structure["units"] = self.units
hdf_structure["dimension"] = self.dimension
if self.cell is not None:
with hdf_structure.open("cell") as hdf_cell:
hdf_cell["cell"] = self.cell
hdf_cell["pbc"] = self.pbc
# hdf_structure["coordinates"] = self.positions # "Atomic coordinates"
hdf_structure["positions"] = self.positions # "Atomic coordinates"
# potentials with explicit bonds (TIP3P, harmonic, etc.)
if self.bonds is not None:
hdf_structure["explicit_bonds"] = self.bonds
# print ('time in atoms.to_hdf: ', time.time() - time_start)
def from_hdf(self, hdf, group_name="structure"):
"""
Retrieve the object from a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
group_name (str): Group name from which the Atoms object is retrieved.
Returns:
pyiron.atomistics.structure.atoms.Atoms: The retrieved atoms class
"""
if "indices" in hdf[group_name].list_nodes():
with hdf.open(group_name) as hdf_atoms:
if "new_species" in hdf_atoms.list_groups():
with hdf_atoms.open("new_species") as hdf_species:
self._pse.from_hdf(hdf_species)
el_object_list = [self.convert_element(el, self._pse) for el in hdf_atoms["species"]]
self.indices = hdf_atoms["indices"]
self._tag_list._length = len(self)
self.set_species(el_object_list)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
self.units = hdf_atoms["units"]
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
# Backward compatibility
position_tag = "positions"
if position_tag not in hdf_atoms.list_nodes():
position_tag = "coordinates"
if "is_absolute" in hdf_atoms.list_nodes():
if not tr_dict[hdf_atoms["is_absolute"]]:
self.set_scaled_positions(hdf_atoms[position_tag])
else:
self.positions = hdf_atoms[position_tag]
else:
self.positions = hdf_atoms[position_tag]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
else:
return self._from_hdf_old(hdf, group_name)
def _from_hdf_old(self, hdf, group_name="structure"):
"""
This function exists merely for the purpose of backward compatibility
"""
with hdf.open(group_name) as hdf_atoms:
self._pse = PeriodicTable()
if "species" in hdf_atoms.list_groups():
with hdf_atoms.open("species") as hdf_species:
self._pse.from_hdf(hdf_species)
chemical_symbols = np.array(hdf_atoms["elements"], dtype=str)
el_object_list = [self.convert_element(el, self._pse) for el in chemical_symbols]
self.set_species(list(set(el_object_list)))
self.indices = [self._species_to_index_dict[el] for el in el_object_list]
self._tag_list._length = len(self)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
if "is_absolute" in hdf_atoms and not tr_dict[hdf_atoms["is_absolute"]]:
self.positions = hdf_atoms["coordinates"]
else:
self.set_scaled_positions(hdf_atoms["coordinates"])
self.units = hdf_atoms["units"]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
def center(self, vacuum=None, axis=(0, 1, 2)):
"""
Center atoms in unit cell.
Adapted from ASE code (https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.center)
Args:
vacuum (float): If specified adjust the amount of vacuum when centering. If vacuum=10.0 there will thus be
10 Angstrom of vacuum on each side.
axis (tuple/list): List or tuple of integers specifying the axes along which the atoms should be centered
"""
# Find the orientations of the faces of the unit cell
c = self.cell
if c is None:
c = np.identity(self.dimension)
self.cell = c
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.linalg.norm(dirs[i]) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if isinstance(axis, int):
axes = (axis,)
else:
axes = axis
p = self.positions
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
cosphi = np.dot(c[i], dirs[i]) / np.linalg.norm(c[i])
longer[i] = lng / cosphi
shift[i] = shf / cosphi
# Now, do it!
translation = np.zeros(3)
for i in axes:
nowlen = np.sqrt(np.dot(c[i], c[i]))
self.cell[i] *= 1 + longer[i] / nowlen
translation += shift[i] * c[i] / nowlen
self.positions += translation
if self.pbc is None:
self.pbc = self.dimension * [True]
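# Minimal usage sketch for center() (illustrative, not from the original docs):
# structure.center() recenters the atoms in the existing cell, while
# structure.center(vacuum=10.0, axis=2) additionally pads 10 Angstrom of vacuum
# on each side along the third lattice vector.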
def set_positions(self, positions):
"""
Set positions. This function is for compatibility with ASE
Args:
positions (numpy.ndarray/list): Positions in absolute coordinates
"""
self.positions = np.array(positions)
self._tag_list._length = len(self)
def get_positions(self):
"""
Get positions. This function is for compatibility with ASE
Returns:
numpy.ndarray: Positions in absolute coordinates
"""
return self.positions
def select_index(self, el):
"""
Returns the indices of a given element in the structure
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement/list): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
"""
if isinstance(el, str):
return np.where(self.get_chemical_symbols()==el)[0]
elif isinstance(el, ChemicalElement):
return np.where([e==el for e in self.get_chemical_elements()])[0]
if isinstance(el, (list, np.ndarray)):
if isinstance(el[0], str):
return np.where(np.isin(self.get_chemical_symbols(), el))[0]
elif isinstance(el[0], ChemicalElement):
return np.where([e in el for e in self.get_chemical_elements()])[0]
def select_parent_index(self, el):
"""
Returns the indices of a given element in the structure ignoring user defined elements
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
"""
parent_basis = self.get_parent_basis()
return parent_basis.select_index(el)
def get_tags(self):
"""
Returns the keys of the stored tags of the structure
Returns:
dict_keys: Keys of the stored tags
"""
return self._tag_list.keys()
def get_pbc(self):
"""
Returns a boolean array of the periodic boundary conditions along the x, y and z axes respectively
Returns:
numpy.ndarray: Boolean array of length 3
"""
if not isinstance(self._pbc, np.ndarray):
self.set_pbc(self._pbc)
return np.array(self._pbc, bool)
def set_pbc(self, value):
"""
Sets the periodic boundary conditions along all three axes
Args:
value (numpy.ndarray/list): An array of bool type with length 3
"""
if value is None:
self._pbc = None
else:
if isinstance(value, np.ndarray):
self._pbc = value
elif value in (True, False):
value = self.dimension * [value]
if not (np.shape(np.array(value)) == (self.dimension,)):
raise AssertionError()
self._pbc = np.array(value, bool)
def convert_element(self, el, pse=None):
"""
Convert a string or an atom instance into a ChemicalElement instance
Args:
el (str/atomistics.structure.atom.Atom): String or atom instance from which the element should
be generated
pse (atomistics.structure.periodictable.PeriodicTable): PeriodicTable instance from which the element
is generated (optional)
Returns:
atomistics.structure.periodictable.ChemicalElement: The required chemical element
"""
if el in list(self._store_elements.keys()):
return self._store_elements[el]
if isinstance(el, string_types): # as symbol
element = Atom(el, pse=pse).element
elif isinstance(el, Atom):
element = el.element
el = el.element.Abbreviation
elif isinstance(el, ChemicalElement):
element = el
el = el.Abbreviation
else:
raise ValueError('Unknown static type to specify an element')
self._store_elements[el] = element
if hasattr(self, 'species'):
if element not in self.species:
self._species.append(element)
self.set_species(self._species)
return element
def get_chemical_formula(self):
"""
Returns the chemical formula of the structure
Returns:
str: The chemical formula as a string
"""
species = self.get_number_species_atoms()
formula = ""
for string_sym, num in species.items():
if num == 1:
formula += str(string_sym)
else:
formula += str(string_sym) + str(num)
return formula
def get_chemical_indices(self):
"""
Returns the list of chemical indices as ordered in self.species
Returns:
numpy.ndarray: A list of chemical indices
"""
return self.indices
def get_atomic_numbers(self):
"""
Returns the atomic numbers of all the atoms in the structure
Returns:
numpy.ndarray: A list of atomic numbers
"""
el_lst = [el.AtomicNumber for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_chemical_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure
Returns:
numpy.ndarray: A list of chemical symbols
"""
el_lst = [el.Abbreviation for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_parent_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure even for user defined elements
Returns:
numpy.ndarray: A list of chemical symbols
"""
sp_parent_list = list()
for sp in self.species:
if isinstance(sp.Parent, (float, np.float, type(None))):
sp_parent_list.append(sp.Abbreviation)
else:
sp_parent_list.append(sp.Parent)
return np.array([sp_parent_list[i] for i in self.indices])
def get_parent_basis(self):
"""
Returns the basis with all user defined/special elements replaced by their parent elements
Returns:
pyiron.atomistics.structure.atoms.Atoms: Structure without any user defined elements
"""
parent_basis = copy(self)
new_species = np.array(parent_basis.species)
for i, sp in enumerate(new_species):
if not isinstance(sp.Parent, (float, np.float, type(None))):
pse = PeriodicTable()
new_species[i] = pse.element(sp.Parent)
sym_list = [el.Abbreviation for el in new_species]
if len(sym_list) != len(np.unique(sym_list)):
uni, ind, inv_ind = np.unique(sym_list, return_index=True, return_inverse=True)
new_species = new_species[ind].copy()
parent_basis.set_species(list(new_species))
indices_copy = parent_basis.indices.copy()
for i, ind_ind in enumerate(inv_ind):
indices_copy[parent_basis.indices == i] = ind_ind
parent_basis.indices = indices_copy
return parent_basis
parent_basis.set_species(list(new_species))
return parent_basis
def get_chemical_elements(self):
"""
Returns the list of chemical element instances
Returns:
numpy.ndarray: A list of chemical element instances
"""
return self.elements
def get_number_species_atoms(self):
"""
Returns a dictionary with the species in the structure and the corresponding count in the structure
Returns:
collections.OrderedDict: An ordered dictionary with the species and the corresponding count
"""
count = OrderedDict()
# print "sorted: ", sorted(set(self.elements))
for el in sorted(set(self.get_chemical_symbols())):
count[el] = 0
for el in self.get_chemical_symbols():
count[el] += 1
return count
def get_species_symbols(self):
"""
Returns the symbols of the present species
Returns:
numpy.ndarray: List of the symbols of the species
"""
return np.array(sorted([el.Abbreviation for el in self.species]))
def get_species_objects(self):
"""
Returns:
list: The species objects sorted by their chemical symbols
"""
el_set = self.species
el_sym_lst = {el.Abbreviation: i for i, el in enumerate(el_set)}
el_sorted = self.get_species_symbols()
return [el_set[el_sym_lst[el]] for el in el_sorted]
def get_number_of_species(self):
"""
Returns:
int: The number of species in the structure
"""
return len(self.species)
def get_number_of_degrees_of_freedom(self):
"""
Returns:
int: The number of degrees of freedom (number of atoms times the spatial dimension)
"""
return len(self) * self.dimension
def get_center_of_mass(self):
"""
Returns:
com (numpy.ndarray): The center of mass position in Å
"""
masses = self.get_masses()
return np.einsum('i,ij->j', masses, self.positions)/np.sum(masses)
def get_masses(self):
"""
Returns:
list: The atomic masses (in u) of all atoms in the structure
"""
el_lst = [el.AtomicMass for el in self.species]
return [el_lst[el] for el in self.indices]
def get_masses_dof(self):
"""
Returns:
numpy.ndarray: The atomic masses repeated for every spatial degree of freedom
"""
dim = self.dimension
return np.repeat(self.get_masses(), dim)
def get_volume(self, per_atom=False):
"""
Args:
per_atom (bool): True if volume per atom is to be returned
Returns:
volume (float): Volume in A**3
"""
if per_atom:
return np.abs(np.linalg.det(self.cell))/len(self)
else:
return np.abs(np.linalg.det(self.cell))
def get_density(self):
"""
Returns the density in g/cm^3
Returns:
float: Density of the structure
"""
# conv_factor = Ang3_to_cm3/scipi.constants.Avogadro
# with Ang3_to_cm3 = 1e24
conv_factor = 1.660539040427164
return conv_factor * np.sum(self.get_masses()) / self.get_volume()
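# Added worked check (not in the original): masses are in u (g/mol) and the volume in
# Angstrom**3, so the factor is 1e24 Angstrom**3/cm**3 divided by Avogadro's number,
# 1e24 / 6.022140857e23 ~= 1.66054, which matches the hard-coded constant above.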
def get_scaled_positions(self, wrap=True):
"""
Args:
wrap (bool): Wrap the coordinates of periodic directions back into the unit cell
Returns:
numpy.ndarray: The positions in fractional (cell-relative) coordinates
"""
pbc = np.array(self.pbc)
positions = np.einsum('jk,ij->ik', np.linalg.inv(self.cell), self.positions)
if wrap:
positions[:, pbc] = np.mod(positions[:, pbc], 1.)
return positions
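# Added note (equivalent formulation, assuming row-vector positions): the einsum above
# computes positions @ np.linalg.inv(self.cell), i.e. the Cartesian coordinates expressed
# in the basis of the cell vectors, with periodic directions wrapped into [0, 1) when wrap=True.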
def get_number_of_atoms(self):
"""
Returns:
int: The number of atoms in the structure
"""
# assert(len(self) == np.sum(self.get_number_species_atoms().values()))
return len(self)
def set_absolute(self):
if self._is_scaled:
self._is_scaled = False
def set_relative(self):
if not self._is_scaled:
self._is_scaled = True
def center_coordinates_in_unit_cell(self, origin=0, eps=1e-4):
"""
Compact the atomic coordinates in the supercell spanned by a1, a2, a3
Args:
origin: 0 to confine between 0 and 1, -0.5 to confine between -0.5 and 0.5
eps: Small tolerance added before wrapping so that atoms sitting exactly on a cell boundary stay inside
Returns:
"""
self.set_scaled_positions(np.mod(self.get_scaled_positions(wrap=False) + eps, 1) - eps + origin)
return self
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
def set_repeat(self, vec):
self *= vec
def reset_absolute(self, is_absolute):
raise NotImplementedError('This function was removed!')
def analyse_ovito_cna_adaptive(self, mode='total'):
from pyiron.atomistics.structure.ovito import analyse_ovito_cna_adaptive
warnings.filterwarnings("ignore")
return analyse_ovito_cna_adaptive(atoms=self, mode=mode)
def analyse_ovito_centro_symmetry(atoms, num_neighbors=12):
from pyiron.atomistics.structure.ovito import analyse_ovito_centro_symmetry
warnings.filterwarnings("ignore")
return analyse_ovito_centro_symmetry(atoms, num_neighbors=num_neighbors)
def analyse_ovito_voronoi_volume(atoms):
from pyiron.atomistics.structure.ovito import analyse_ovito_voronoi_volume
warnings.filterwarnings("module")
return analyse_ovito_voronoi_volume(atoms)
def analyse_phonopy_equivalent_atoms(atoms):
from pyiron.atomistics.structure.phonopy import analyse_phonopy_equivalent_atoms
#warnings.filterwarnings("ignore")
warnings.warn("analyse_phonopy_equivalent_atoms() is obsolete use get_symmetry()['equivalent_atoms'] instead")
return analyse_phonopy_equivalent_atoms(atoms)
@staticmethod
def _ngl_write_cell(a1, a2, a3, f1=90, f2=90, f3=90):
"""
Writes a PDB-formatted line to represent the simulation cell.
Args:
a1, a2, a3 (float): Lengths of the cell vectors.
f1, f2, f3 (float): Cell angles alpha, beta and gamma between the cell vectors (in degrees), as in a PDB CRYST1 record.
Returns:
(str): The line defining the cell in PDB format.
"""
return 'CRYST1 {:8.3f} {:8.3f} {:8.3f} {:6.2f} {:6.2f} {:6.2f} P 1\n'.format(a1, a2, a3, f1, f2, f3)
@staticmethod
def _ngl_write_atom(num, species, x, y, z, group=None, num2=None, occupancy=1., temperature_factor=0.):
"""
Writes a PDB-formatted line to represent an atom.
Args:
num (int): Atomic index.
species (str): Elemental species.
x, y, z (float): Cartesian coordinates of the atom.
group (str): The PDB residue/group name. (Default is None, repeat elemental species.)
num2 (int): An alternate atom index written to the residue-number field. (Default is None, repeat first number.)
occupancy (float): PDB occupancy parameter. (Default is 1.)
temperature_factor (float): PDB temperature factor parameter. (Default is 0.)
Returns:
(str): The line defining an atom in PDB format
Warnings:
* The [PDB docs](https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html) indicate that
the xyz coordinates might need to be in some sort of orthogonal basis. If you have weird behaviour,
this might be a good place to investigate.
"""
if group is None:
group = species
if num2 is None:
num2 = num
return 'ATOM {:>6} {:>4} {:>4} {:>5} {:10.3f} {:7.3f} {:7.3f} {:5.2f} {:5.2f} {:>11} \n'.format(
num, species, group, num2, x, y, z, occupancy, temperature_factor, species)
def _ngl_write_structure(self, elements, positions, cell):
"""
Turns structure information into a NGLView-readable protein-database-formatted string.
Args:
elements (numpy.ndarray/list): Element symbol for each atom.
positions (numpy.ndarray/list): Vector of Cartesian atom positions.
cell (numpy.ndarray/list): Simulation cell Bravais matrix.
Returns:
(str): The PDB-formatted representation of the structure.
"""
from ase.geometry import cell_to_cellpar, cellpar_to_cell
cellpar = cell_to_cellpar(cell)
exportedcell = cellpar_to_cell(cellpar)
rotation = np.linalg.solve(cell, exportedcell)
pdb_str = self._ngl_write_cell(*cellpar)
pdb_str += 'MODEL 1\n'
if rotation is not None:
positions = np.array(positions).dot(rotation)
for i, p in enumerate(positions):
pdb_str += self._ngl_write_atom(i, elements[i], *p)
pdb_str += 'ENDMDL \n'
return pdb_str
def _atomic_number_to_radius(self, atomic_number, shift=0.2, slope=0.1, scale=1.0):
"""
Give the atomic radius for plotting, which scales like the root of the atomic number.
Args:
atomic_number (int/float): The atomic number.
shift (float): A constant addition to the radius. (Default is 0.2.)
slope (float): A multiplier for the root of the atomic number. (Default is 0.1)
scale (float): How much to rescale the whole thing by.
Returns:
(float): The radius. (Not physical, just for visualization!)
"""
return (shift + slope * np.sqrt(atomic_number)) * scale
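# Added example (hypothetical values): with the defaults, carbon (Z=6) gets a display
# radius of (0.2 + 0.1 * sqrt(6)) * 1.0 ~= 0.445 and gold (Z=79) gets ~= 1.089.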
def _add_colorscheme_spacefill(self, view, elements, atomic_numbers, particle_size, scheme='element'):
"""
Set NGLView spacefill parameters according to a color-scheme.
Args:
view (NGLWidget): The widget to work on.
elements (numpy.ndarray/list): Elemental symbols.
atomic_numbers (numpy.ndarray/list): Integer atomic numbers for determining atomic size.
particle_size (float): A scale factor for the atomic size.
scheme (str): The scheme to use. (Default is "element".)
Possible NGLView color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
(nglview.NGLWidget): The modified widget.
"""
for elem, num in set(list(zip(elements, atomic_numbers))):
view.add_spacefill(selection='#' + elem,
radius_type='vdw',
radius=self._atomic_number_to_radius(num, scale=particle_size),
color_scheme=scheme)
return view
def _add_custom_color_spacefill(self, view, atomic_numbers, particle_size, colors):
"""
Set NGLView spacefill parameters according to per-atom colors.
Args:
view (NGLWidget): The widget to work on.
atomic_numbers (numpy.ndarray/list): Integer atomic numbers for determining atomic size.
particle_size (float): A scale factor for the atomic size.
colors (numpy.ndarray/list): A per-atom list of HTML or hex color codes.
Returns:
(nglview.NGLWidget): The modified widget.
"""
for n, num in enumerate(atomic_numbers):
view.add_spacefill(selection=[n],
radius_type='vdw',
radius=self._atomic_number_to_radius(num, scale=particle_size),
color=colors[n])
return view
@staticmethod
def _scalars_to_hex_colors(scalar_field, start=None, end=None, cmap=None):
"""
Convert scalar values to hex codes using a colormap.
Args:
scalar_field (numpy.ndarray/list): Scalars to convert.
start (float): Scalar value to map to the bottom of the colormap (values below are clipped). (Default is
None, use the minimal scalar value.)
end (float): Scalar value to map to the top of the colormap (values above are clipped). (Default is
None, use the maximal scalar value.)
cmap (matplotlib.cm): The colormap to use. (Default is None, which gives a blue-red divergent map.)
Returns:
(list): The corresponding hex codes for each scalar value passed in.
"""
if start is None:
start = np.amin(scalar_field)
if end is None:
end = np.amax(scalar_field)
interp = interp1d([start, end], [0, 1])
remapped_field = interp(np.clip(scalar_field, start, end)) # Map field onto [0,1]
if cmap is None:
try:
from seaborn import diverging_palette
except ImportError:
print("The package seaborn needs to be installed for the plot3d() function!")
cmap = diverging_palette(245, 15, as_cmap=True) # A nice blue-red palette
return [rgb2hex(cmap(scalar)[:3]) for scalar in remapped_field] # The slice gets RGB but leaves alpha
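# Added usage sketch (illustrative only; relies on scipy's interp1d and matplotlib's
# rgb2hex being imported at module level, as the code above assumes):
# hex_colors = self._scalars_to_hex_colors([0.0, 0.5, 1.0], start=0.0, end=1.0)
# returns one hex string per scalar, which can then be passed to _add_custom_color_spacefill.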
def plot3d(self, show_cell=True, show_axes=True, camera='orthographic', spacefill=True, particle_size=1.0,
select_atoms=None, background='white', color_scheme=None, colors=None,
scalar_field=None, scalar_start=None, scalar_end=None, scalar_cmap=None,
vector_field=None, vector_color=None, custom_array=None, custom_3darray=None):
"""
Plot3d relies on NGLView to visualize atomic structures. Here, we construct a string in the "protein database"
("pdb") format, then turn it into an NGLView "structure". PDB is a white-space sensitive format, so the
string snippets are carefully formatted.
The final widget is returned. If it is assigned to a variable, the visualization is suppressed until that
variable is evaluated, and in the meantime more NGL operations can be applied to it to modify the visualization.
Args:
show_cell (bool): Whether or not to show the frame. (Default is True.)
show_axes (bool): Whether or not to show xyz axes. (Default is True.)
camera (str): 'perspective' or 'orthographic'. (Default is 'orthographic'.)
spacefill (bool): Whether to use a space-filling or ball-and-stick representation. (Default is True, use
space-filling atoms.)
particle_size (float): Size of the particles. (Default is 1.)
select_atoms (numpy.ndarray): Indices of atoms to show, either as integers or a boolean array mask.
(Default is None, show all atoms.)
background (str): Background color. (Default is 'white'.)
color_scheme (str): NGLView color scheme to use. (Default is None, color by element.)
colors (numpy.ndarray): A per-atom array of HTML color names or hex color codes to use for atomic colors.
(Default is None, use coloring scheme.)
scalar_field (numpy.ndarray): Color each atom according to the array value (Default is None, use coloring
scheme.)
scalar_start (float): The scalar value to be mapped onto the low end of the color map (lower values are
clipped). (Default is None, use the minimum value in `scalar_field`.)
scalar_end (float): The scalar value to be mapped onto the high end of the color map (higher values are
clipped). (Default is None, use the maximum value in `scalar_field`.)
scalar_cmap (matplotlib.cm): The colormap to use. (Default is None, giving a blue-red divergent map.)
vector_field (numpy.ndarray): Add vectors (3 values) originating at each atom. (Default is None, no
vectors.)
vector_color (numpy.ndarray): Colors for the vectors (only available with vector_field). (Default is None,
vectors are colored by their direction.)
Possible NGLView color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
(nglview.NGLWidget): The NGLView widget itself, which can be operated on further or viewed as-is.
Warnings:
* Many features only work with space-filling atoms (e.g. coloring by a scalar field).
* The colour interpretation of some hex codes is weird, e.g. 'green'.
"""
try: # If the graphical packages are not available, the GUI will not work.
import nglview
except ImportError:
raise ImportError("The package nglview needs to be installed for the plot3d() function!")
if custom_array is not None:
warnings.warn('custom_array is deprecated. Use scalar_field instead', DeprecationWarning)
scalar_field = custom_array
if custom_3darray is not None:
warnings.warn('custom_3darray is deprecated. Use vector_field instead', DeprecationWarning)
vector_field = custom_3darray
parent_basis = self.get_parent_basis()
elements = parent_basis.get_chemical_symbols()
atomic_numbers = parent_basis.get_atomic_numbers()
positions = self.positions
# If `select_atoms` was given, visualize only a subset of the `parent_basis`
if select_atoms is not None:
select_atoms = np.array(select_atoms, dtype=int)
elements = elements[select_atoms]
atomic_numbers = atomic_numbers[select_atoms]
positions = positions[select_atoms]
if colors is not None:
colors = np.array(colors)
colors = colors[select_atoms]
if scalar_field is not None:
scalar_field = np.array(scalar_field)
scalar_field = scalar_field[select_atoms]
if vector_field is not None:
vector_field = np.array(vector_field)
vector_field = vector_field[select_atoms]
if vector_color is not None:
vector_color = np.array(vector_color)
vector_color = vector_color[select_atoms]
# Write the nglview protein-database-formatted string
struct = nglview.TextStructure(self._ngl_write_structure(elements, positions, self.cell))
# Parse the string into the displayable widget
view = nglview.NGLWidget(struct)
if spacefill:
# Color by scheme
if color_scheme is not None:
if colors is not None:
warnings.warn('`color_scheme` is overriding `colors`')
if scalar_field is not None:
warnings.warn('`color_scheme` is overriding `scalar_field`')
view = self._add_colorscheme_spacefill(view, elements, atomic_numbers, particle_size, color_scheme)
# Color by per-atom colors
elif colors is not None:
if scalar_field is not None:
warnings.warn('`colors` is overriding `scalar_field`')
view = self._add_custom_color_spacefill(view, atomic_numbers, particle_size, colors)
# Color by per-atom scalars
elif scalar_field is not None: # Color by per-atom scalars
colors = self._scalars_to_hex_colors(scalar_field, scalar_start, scalar_end, scalar_cmap)
view = self._add_custom_color_spacefill(view, atomic_numbers, particle_size, colors)
# Color by element
else:
view = self._add_colorscheme_spacefill(view, elements, atomic_numbers, particle_size)
view.remove_ball_and_stick()
else:
view.add_ball_and_stick()
if show_cell:
if parent_basis.cell is not None:
view.add_unitcell()
if vector_color is None and vector_field is not None:
vector_color = 0.5 * vector_field / np.linalg.norm(vector_field, axis=-1)[:, np.newaxis] + 0.5
elif vector_color is not None and vector_field is not None:  # normalize a user-supplied vector_color (behaviour unchanged)
try:
if vector_color.shape != np.ones((len(self), 3)).shape:
vector_color = np.outer(np.ones(len(self)), vector_color / np.linalg.norm(vector_color))
except AttributeError:
vector_color = np.ones((len(self), 3)) * vector_color
if vector_field is not None:
for arr, pos, col in zip(vector_field, positions, vector_color):
view.shape.add_arrow(list(pos), list(pos + arr), list(col), 0.2)
if show_axes: # Add axes
axes_origin = -np.ones(3)
arrow_radius = 0.1
text_size = 1
text_color = [0, 0, 0]
arrow_names = ['x', 'y', 'z']
for n in [0, 1, 2]:
start = list(axes_origin)
shift = np.zeros(3)
shift[n] = 1
end = list(start + shift)
color = list(shift)
# We cast as list to avoid JSON warnings
view.shape.add_arrow(start, end, color, arrow_radius)
view.shape.add_text(end, text_color, text_size, arrow_names[n])
if camera != 'perspective' and camera != 'orthographic':
warnings.warn('Only perspective or orthographic is (likely to be) permitted for camera')
view.camera = camera
view.background = background
return view
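# Added usage sketch (hypothetical names; assumes a Jupyter notebook with nglview
# installed and an Atoms instance called `structure`):
# view = structure.plot3d(particle_size=1.5,
#                         scalar_field=structure.positions[:, 2])  # color atoms by their z coordinate
# view  # evaluating the widget as the last statement of a cell renders it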
def plot3d_ase(self, spacefill=True, show_cell=True, camera='perspective', particle_size=0.5, background='white', color_scheme='element', show_axes=True):
"""
Possible color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
(nglview.NGLWidget): The NGLView widget itself, which can be operated on further or viewed as-is.
"""
try: # If the graphical packages are not available, the GUI will not work.
import nglview
except ImportError:
raise ImportError("The package nglview needs to be installed for the plot3d() function!")
# Always visualize the parent basis
parent_basis = self.get_parent_basis()
view = nglview.show_ase(parent_basis)
if spacefill:
view.add_spacefill(radius_type='vdw', color_scheme=color_scheme, radius=particle_size)
# view.add_spacefill(radius=1.0)
view.remove_ball_and_stick()
else:
view.add_ball_and_stick()
if show_cell:
if parent_basis.cell is not None:
view.add_unitcell()
if show_axes:
view.shape.add_arrow([-2, -2, -2], [2, -2, -2], [1, 0, 0], 0.5)
view.shape.add_arrow([-2, -2, -2], [-2, 2, -2], [0, 1, 0], 0.5)
view.shape.add_arrow([-2, -2, -2], [-2, -2, 2], [0, 0, 1], 0.5)
if camera!='perspective' and camera!='orthographic':
print('Only perspective or orthographic is permitted')
return None
view.camera = camera
view.background = background
return view
def pos_xyz(self):
"""
Returns:
tuple: The x, y and z components of the Cartesian positions as separate arrays
"""
x = self.positions[:, 0]
y = self.positions[:, 1]
z = self.positions[:, 2]
return x, y, z
def scaled_pos_xyz(self):
"""
Returns:
tuple: The x, y and z components of the fractional (scaled) positions as separate arrays
"""
xyz = self.get_scaled_positions(wrap=False)
return xyz[:,0], xyz[:,1], xyz[:,2]
def __select_slice(self, i_dim, i_flag, dist):
"""
Args:
i_dim:
i_flag:
dist:
Returns:
"""
if i_dim + 1 > self.dimension:
return True
if i_flag == 1:
return self.get_scaled_positions(wrap=False)[:, i_dim] < dist
elif i_flag == 0:
return True
elif i_flag == -1:
return self.get_scaled_positions(wrap=False)[:, i_dim] > 1. - dist
def get_boundary_region(self, dist):
"""
get all atoms in the boundary around the supercell which have a distance
to the supercell boundary of less than dist
Args:
dist:
Returns:
"""
rel_coordinates = self.get_scaled_positions(wrap=False)
dim = self.dimension
cell = self.cell.T # to use same definition as ASE
a1 = cell[0]
a2, a3 = 0, 0
min_i, max_i = -1, 2
iyl, iy, izl, iz = 0, 1, 0, 1
if dim > 1:
a2 = cell[1]
iyl, iy = min_i, max_i
if dim > 2:
a3 = cell[2]
izl, iz = min_i, max_i
index = np.arange(len(self))
new_coordinates = np.zeros((1, dim))
# pbcVec = np.zeros((1, dim))
ia_list = np.zeros((1, 1), dtype=int)  # np.int is deprecated in newer numpy
for i0 in range(min_i, max_i):
for i1 in range(iyl, iy):
for i2 in range(izl, iz):
# r_vec_abs = i0 * a1 + i1 * a2 + i2 * a3
r_vec = np.array([i0, i1, i2][:dim])
select = self.__select_slice(0, i0, dist) & self.__select_slice(1, i1, dist) & \
self.__select_slice(2, i2, dist)
if np.linalg.norm(r_vec) > 0:
if len(select) > 0:
sel_coordinates = rel_coordinates[select] + r_vec
new_coordinates = np.append(new_coordinates, sel_coordinates, axis=0)
if len(sel_coordinates) > 0:
# rVecs = np.array(len(sel_coordinates) * [r_vec_abs])
# pbcVec = np.append(pbcVec, rVecs, axis=0)
ia_list = np.append(ia_list, index[select])
# print "rVec: ", i0,i1,i2,rVecs[0],index[select],select
element_list = [self.indices[ia] for ia in ia_list[1:]]
self._ia_bounds = ia_list[1:]
# self._pbcVec = pbcVec[1:]
return Atoms(indices=element_list, scaled_positions=new_coordinates[1:], cell=self.cell,
dimension=len(cell), species=self.species)
def get_neighbors(self,
num_neighbors=12,
t_vec=True,
include_boundary=True,
exclude_self=True,
tolerance=2,
id_list=None,
cutoff_radius=None,
cutoff=None):
"""
Args:
num_neighbors (int): number of neighbors
t_vec (bool): True: compute distance vectors
(pbc are automatically taken into account)
include_boundary (bool): True: search for neighbors assuming periodic boundary conditions
False is needed e.g. in plot routines to avoid showing incorrect bonds
exclude_self (bool): True: exclude the central atom itself (i.e. the entry with distance = 0)
tolerance (int): tolerance (round decimal points) used for computing neighbor shells
id_list:
cutoff (float/None): Deprecated, use cutoff_radius instead. Upper bound of the distance to which the
search must be done - by default search for up to 100 neighbors unless num_neighbors is defined explicitly.
cutoff_radius (float/None): Upper bound of the distance to which the search must be done - by default search
for up to 100 neighbors unless num_neighbors is defined explicitly.
Returns:
pyiron.atomistics.structure.atoms.Neighbors: Neighbors instances with the neighbor indices, distances
and vectors
"""
if cutoff is not None and cutoff_radius is None:
warnings.warn('Please use cutoff_radius, rather than cutoff', DeprecationWarning)
cutoff_radius = cutoff
if cutoff_radius is not None and num_neighbors == 12:
num_neighbors = 100
# eps = 1e-4
i_start = 0
if exclude_self:
i_start = 1
def f_ind(x):
return x < len(self)
num_neighbors += 1
neighbor_obj = Neighbors()
if not include_boundary: # periodic boundaries are NOT included
tree = cKDTree(self.positions)
if cutoff_radius is None:
neighbors = tree.query(self.positions, k=num_neighbors)
else:
neighbors = tree.query(self.positions, k=num_neighbors, distance_upper_bound=cutoff_radius)
d_lst, ind_lst, v_lst = [], [], []
ic = 0
for d_i, ind_i in zip(neighbors[0], neighbors[1]):
ff = (ind_i < len(self)) & (ind_i != ic)
ind_l = ind_i[ff]
ind_lst.append(ind_l)
d_lst.append(d_i[ff])
v_lst.append(self.positions[ind_l] - self.positions[ic])
ic += 1
neighbor_obj.indices = ind_lst
neighbor_obj.distances = d_lst
neighbor_obj.vecs = v_lst
return neighbor_obj
# include periodic boundaries
# translate radius in boundary layer with relative coordinates
# TODO: introduce a more rigorous definition
radius = 3 * num_neighbors ** (1. / 3.)
rel_width = [radius / np.sqrt(np.dot(a_i, a_i)) for a_i in self.cell]
rel_width_scalar = np.max(rel_width)
# construct cell with additional atoms bounding original cell
boundary_atoms = self.get_boundary_region(rel_width_scalar)
extended_cell = self + boundary_atoms
# build index to map boundary atoms back to original cell
map_to_cell = np.append(np.arange(len(self)), self._ia_bounds)
# transfer relative to absolute coordinates
tree = cKDTree(extended_cell.positions)
if id_list is None:
positions = self.positions
else:
positions = np.array([self.positions[i] for i in id_list])
# print ("len positions: ", len(positions))
if cutoff_radius is None:
neighbors = tree.query(positions, k=num_neighbors)
else:
neighbors = tree.query(positions, k=num_neighbors, distance_upper_bound=cutoff_radius)
# print ("neighbors: ", neighbors)
self.neighbor_distance = [] # neighbors[0]
self.neighbor_distance_vec = []
self.neighbor_index = []
self.neighbor_shellOrder = []
# tolerance = 2 # tolerance for round floating point
def f_ind_ext(x):
return x < len(extended_cell)
neighbor_index = map(lambda x: filter(f_ind_ext, x), neighbors[1])
num_neighbors = []
for i, index in enumerate(neighbor_index):
# print "i, index: ", i, index
index = list(index) # Filter conversion for python 3 compatibility
nbrs_distances = neighbors[0][i][i_start:len(index)]
# if radius: # reduce neighborlist based on radius
# new_index_lst, new_dist_lst = [], []
# for index_red, dis_red in zip(index, nbrs_distances):
# if dis_red < radius:
# new_index_lst.append(index_red)
# new_dist_lst.append(dis_red)
# index, nbrs_distances= new_index_lst, new_dist_lst
self.neighbor_distance.append(nbrs_distances)
self.neighbor_index.append(map_to_cell[index][i_start:])
u, indices = np.unique(np.around(nbrs_distances, decimals=tolerance), return_inverse=True)
self.neighbor_shellOrder.append(indices + 1) # this gives the shellOrder of neighboring atoms back
if t_vec:
nbr_dist = []
if len(index) == 0:
self.neighbor_distance_vec.append(nbr_dist)
continue
vec0 = self.positions[index[0]]
for i_nbr, ind in enumerate(index[i_start:]):
# ind0 = map_to_cell[ind]
vec_r_ij = extended_cell.positions[ind] - vec0
dd0 = neighbors[0][i][i_nbr + i_start]
dd = np.sqrt(np.dot(vec_r_ij, vec_r_ij))
if not (dd - dd0 < 0.001):
raise AssertionError()
# if (dd - dd0 > 0.001):
# print "wrong: ", vec_r_ij, dd,dd0,i_nbr,ind,ind0,i
# print self.positions[ind0], extended_cell.positions[ind], vec0
nbr_dist.append(vec_r_ij)
self.neighbor_distance_vec.append(nbr_dist)
num_neighbors.append(len(index) - i_start)
min_nbr, max_nbr = min(num_neighbors), max(num_neighbors)
if max_nbr == num_neighbors:
# print "neighbor distance: ", self.neighbor_distance
raise ValueError("Increase max_num_neighbors! " + str(max_nbr) + " " + str(num_neighbors))
self.min_nbr_number = min_nbr
self.max_nbr_number = max_nbr
neighbor_obj.distances = self.neighbor_distance
neighbor_obj.vecs = self.neighbor_distance_vec
neighbor_obj.indices = self.neighbor_index
neighbor_obj.shells = self.neighbor_shellOrder
return neighbor_obj
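# Added usage sketch (illustrative only, for an Atoms instance `structure`):
# neigh = structure.get_neighbors(num_neighbors=8)
# neigh.distances[i]  -> distances from atom i to its neighbors
# neigh.indices[i]    -> the corresponding atom indices (mapped back into the original cell)
# neigh.vecs[i]       -> the distance vectors (filled only when t_vec=True)
# neigh.shells[i]     -> the neighbor-shell index of each neighbor (1 = nearest shell)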
def get_neighborhood(box, position, num_neighbors=12, t_vec=True, include_boundary=True,
tolerance=2, id_list=None, cutoff=None, cutoff_radius=None):
"""
Args:
position: position in a box whose neighborhood information is analysed
num_neighbors:
t_vec (bool): True: compute distance vectors
(pbc are automatically taken into account)
include_boundary (bool): True: search for neighbors assuming periodic boundary conditions
False is needed e.g. in plot routines to avoid showing incorrect bonds
tolerance (int): tolerance (round decimal points) used for computing neighbor shells
id_list:
cutoff (float/ None): Upper bound of the distance to which the search must be done
cutoff_radius (float/ None): Upper bound of the distance to which the search must be done
Returns:
pyiron.atomistics.structure.atoms.Neighbors: Neighbors instances with the neighbor indices, distances
and vectors
"""
class NeighTemp(object):
pass
box = box.copy()
box += box[-1]
pos = box.positions
pos[-1] = np.array(position)
box.positions = pos
neigh = box.get_neighbors(num_neighbors=num_neighbors, t_vec=t_vec,
include_boundary=include_boundary, exclude_self=True,
tolerance=tolerance, id_list=id_list, cutoff=cutoff, cutoff_radius=cutoff_radius)
neigh_return = NeighTemp()
setattr(neigh_return, 'distances', neigh.distances[-1])
setattr(neigh_return, 'shells', neigh.shells[-1])
setattr(neigh_return, 'vecs', neigh.vecs[-1])
setattr(neigh_return, 'indices', neigh.indices[-1])
neigh_return.distances = neigh_return.distances[neigh_return.indices!=len(box)-1]
neigh_return.shells = neigh_return.shells[neigh_return.indices!=len(box)-1]
neigh_return.vecs = np.array(neigh_return.vecs)[neigh_return.indices!=len(box)-1]
neigh_return.indices = neigh_return.indices[neigh_return.indices!=len(box)-1]
return neigh_return
def get_shells(self, id_list=None, max_shell=2, max_num_neighbors=100):
"""
Args:
id_list: List of atom ids around which the shells are computed (default: [0])
max_shell: Maximum shell index to include
max_num_neighbors: Maximum number of neighbors queried via get_neighbors
Returns:
dict: Mapping from shell index (starting at 1) to the mean neighbor distance of that shell
"""
if id_list is None:
id_list = [0]
neighbors = self.get_neighbors(num_neighbors=max_num_neighbors,
id_list=id_list)
shells = neighbors.shells[0]
dist = neighbors.distances[0]
shell_dict = {}
for i_shell in set(shells):
if i_shell > max_shell:
break
shell_dict[i_shell] = np.mean(dist[shells == i_shell])
# print ("shells: ", i_shell, shell_dict[i_shell])
if not (max(shell_dict.keys()) == max_shell):
raise AssertionError()
return shell_dict
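# Added example (hypothetical numbers): for an fcc crystal with lattice constant a,
# get_shells(max_shell=2) would return approximately {1: a/sqrt(2), 2: a}, i.e. the
# mean distance of the first two coordination shells around atom 0.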
def get_shell_matrix(self, shell, id_list=None, restraint_matrix=None, max_num_neighbors=100):
"""
Args:
shell (int): Neighbor shell for which the pair matrix is built (1 = nearest-neighbor shell)
id_list: cf. get_neighbors
max_num_neighbors: cf. get_neighbors
restraint_matrix: NxN matrix with True or False, where False will remove the entries.
If a list of two chemical symbols is given, only pairs between those two species
are kept (set to True) and all other entries are set to False
Returns:
NxN matrix with the number of neighbors in the given shell for each pair of atoms
"""
assert isinstance(shell, int) and shell > 0, "Parameter 'shell' must be an integer greater than 0"
neigh_list = self.get_neighbors(num_neighbors=max_num_neighbors,
id_list=id_list)
Natom = len(neigh_list.shells)
if restraint_matrix is None:
restraint_matrix = (np.ones((Natom, Natom)) == 1)
elif type(restraint_matrix) == list and len(restraint_matrix) == 2:
restraint_matrix = np.outer(1 * (self.get_chemical_symbols() == restraint_matrix[0]),
1 * (self.get_chemical_symbols() == restraint_matrix[1]))
restraint_matrix = ((restraint_matrix + restraint_matrix.transpose()) > 0)
shell_matrix = np.zeros((Natom, Natom))
for ii, ss in enumerate(neigh_list.shells):
unique, counts = np.unique(neigh_list.indices[ii][ss == np.array(shell)], return_counts=True)
shell_matrix[ii][unique] = counts
shell_matrix[restraint_matrix == False] = 0
return shell_matrix
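# Added note on restraint_matrix (assumes a binary Ni/Al structure as an example):
# passing restraint_matrix=['Ni', 'Al'] keeps only Ni-Al (and Al-Ni) pairs in the
# returned NxN matrix, since the boolean outer product above is symmetrized before
# masking; all other entries are set to 0.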
def get_shell_radius(self, shell=1, id_list=None):
"""
Args:
shell:
id_list:
Returns:
"""
if id_list is None:
id_list = [0]
shells = self.get_shells(id_list=id_list, max_shell=shell + 1)
return np.mean(list(shells.values())[shell - 1:])
def occupy_lattice(self, **qwargs):
"""
Replaces specified indices with a given species
"""
new_species = list(np.array(self.species).copy())
new_indices = np.array(self.indices.copy())
for key, i_list in qwargs.items():
el = self._pse.element(key)
if el.Abbreviation not in [spec.Abbreviation for spec in new_species]:
new_species.append(el)
new_indices[i_list] = len(new_species) - 1
else:
index = np.argwhere(np.array(new_species) == el).flatten()
new_indices[i_list] = index
delete_species_indices = list()
retain_species_indices = list()
for i, el in enumerate(new_species):
if len(np.argwhere(new_indices == i).flatten()) == 0:
delete_species_indices.append(i)
else:
retain_species_indices.append(i)
for i in delete_species_indices:
new_indices[new_indices >= i] += -1
new_species = np.array(new_species)[retain_species_indices]
self.set_species(new_species)
self.indices = new_indices
def cluster_analysis(self, id_list, neighbors=None, radius=None, return_cluster_sizes=False):
"""
Args:
id_list:
neighbors:
radius:
return_cluster_sizes:
Returns:
"""
if neighbors is None:
if radius is None:
radius = self.get_shell_radius()
# print "radius: ", radius
neighbors = self.get_neighbors(radius, t_vec=False)
self._neighbor_index = neighbors.indices
self._cluster = [0] * len(self)
c_count = 1
# element_list = self.get_atomic_numbers()
for ia in id_list:
# el0 = element_list[ia]
nbrs = self._neighbor_index[ia]
# print ("nbrs: ", ia, nbrs)
if self._cluster[ia] == 0:
self._cluster[ia] = c_count
self.__probe_cluster(c_count, nbrs, id_list)
c_count += 1
cluster = np.array(self._cluster)
cluster_dict = {i_c: np.where(cluster == i_c)[0].tolist() for i_c in range(1, c_count)}
if return_cluster_sizes:
sizes = [self._cluster.count(i_c + 1) for i_c in range(c_count - 1)]
return cluster_dict, sizes
return cluster_dict # sizes
def __probe_cluster(self, c_count, neighbors, id_list):
"""
Args:
c_count:
neighbors:
id_list:
Returns:
"""
for nbr_id in neighbors:
if self._cluster[nbr_id] == 0:
if nbr_id in id_list: # TODO: check also for ordered structures
self._cluster[nbr_id] = c_count
nbrs = self._neighbor_index[nbr_id]
self.__probe_cluster(c_count, nbrs, id_list)
# TODO: combine with corresponding routine in plot3d
def get_bonds(self, radius=None, max_shells=None, prec=0.1, num_neighbors=20):
"""
Args:
radius:
max_shells:
prec: minimum distance between any two clusters (if smaller considered to be single cluster)
num_neighbors:
Returns:
"""
def get_cluster(dist_vec, ind_vec, prec=prec):
ind_where = np.where(np.diff(dist_vec) > prec)[0] + 1
ind_vec_cl = [np.sort(group) for group in np.split(ind_vec, ind_where)]
dist_vec_cl = [np.mean(group) for group in np.split(dist_vec, ind_where)]
return ind_vec_cl, dist_vec_cl
neighbors = self.get_neighbors(cutoff_radius=radius,
num_neighbors=num_neighbors)
dist = neighbors.distances
ind = neighbors.indices
el_list = self.get_chemical_symbols()
ind_shell = []
for i_a, (d, i) in enumerate(zip(dist, ind)):
id_list, dist_lst = get_cluster(d[d < radius], i[d < radius])
# print ("id: ", d[d<radius], id_list, dist_lst)
ia_shells_dict = {}
for i_shell_list in id_list:
ia_shell_dict = {}
for i_s in i_shell_list:
el = el_list[i_s]
if el not in ia_shell_dict:
ia_shell_dict[el] = []
ia_shell_dict[el].append(i_s)
for el, ia_lst in ia_shell_dict.items():
if el not in ia_shells_dict:
ia_shells_dict[el] = []
if max_shells is not None:
if len(ia_shells_dict[el]) + 1 > max_shells:
continue
ia_shells_dict[el].append(ia_lst)
ind_shell.append(ia_shells_dict)
return ind_shell
# spglib calls
def get_symmetry(self, use_magmoms=False, use_elements=True, symprec=1e-5, angle_tolerance=-1.0):
"""
Args:
use_magmoms:
use_elements: True or False. If False, chemical elements will be ignored
symprec:
angle_tolerance:
Returns:
"""
lattice = np.array(self.get_cell().T, dtype='double', order='C')
positions = np.array(self.get_scaled_positions(wrap=False), dtype='double', order='C')
if use_elements:
numbers = np.array(self.get_atomic_numbers(), dtype='intc')
else:
numbers = np.ones_like(self.get_atomic_numbers(), dtype='intc')
if use_magmoms:
magmoms = self.get_initial_magnetic_moments()
return spglib.get_symmetry(cell=(lattice, positions, numbers, magmoms),
symprec=symprec,
angle_tolerance=angle_tolerance)
else:
return spglib.get_symmetry(cell=(lattice, positions, numbers),
symprec=symprec,
angle_tolerance=angle_tolerance)
def group_points_by_symmetry(self, points):
"""
This function classifies the points into groups according to the box symmetry given by spglib.
Args:
points: (np.array/list) nx3 array which contains positions
Returns: list of arrays containing geometrically equivalent positions
It is possible that the original points are not found in the returned list, as positions outside
the box will be projected back into the box.
"""
struct_copy = self.copy()
points = np.array(points).reshape(-1, 3)
struct_copy += Atoms(elements=len(points)*['Hs'], positions=points)
struct_copy.center_coordinates_in_unit_cell()
group_IDs = struct_copy.get_symmetry()['equivalent_atoms'][struct_copy.select_index('Hs')]
return [np.round(points[group_IDs==ID], decimals=8) for ID in np.unique(group_IDs)]
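# Added example (hypothetical, for a cubic cell of unit length whose spglib symmetry
# contains the full cubic point group): the face-centre points [0, 0, 0.5] and
# [0.5, 0, 0] would end up in the same group, while a low-symmetry point such as
# [0.25, 0.1, 0] would typically form its own group.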
def _get_voronoi_vertices(self, minimum_dist=0.1):
"""
This function gives the positions of Voronoi vertices
This function does not work if there are Hs atoms in the box
Args:
minimum_dist: Minimum distance between two Voronoi vertices to be considered as one
Returns: Positions of Voronoi vertices, box
"""
vor = Voronoi(self.repeat(3*[2]).positions) # Voronoi package does not have periodic boundary conditions
b_cell_inv = np.linalg.inv(self.cell)
voro_vert = vor.vertices
for ind, v in enumerate(voro_vert):
pos = np.mean(voro_vert[(np.linalg.norm(voro_vert-v, axis=-1)<minimum_dist)], axis=0) # Find all points which are within minimum_dist
voro_vert[(np.linalg.norm(voro_vert-v, axis=-1)<0.5)] = np.array(3*[-10]) # Mark atoms to be deleted afterwards
voro_vert[ind] = pos
voro_vert = voro_vert[np.min(voro_vert, axis=-1)>-5]
voro_vert = | np.dot(b_cell_inv.T, voro_vert.T) | numpy.dot |
import rospy
from std_msgs.msg import String
from skeleton_markers.msg import Skeleton
import numpy as np
class SkeletonAngles():
def __init__(self):
self.pub = rospy.Publisher ('skeleton_angles', String, queue_size=10)
self.names = ['head', 'neck', 'torso', 'left_shoulder', 'left_elbow', 'left_hand',
'right_shoulder', 'right_elbow', 'right_hand',
'left_hip', 'left_knee', 'left_foot', 'right_hip', 'right_knee', 'right_foot']
self.positions = {}
for name in self.names:
self.positions[name] = {'x': None, 'y': None, 'z': None}
self.skeleton_angles = np.zeros([8])
def start(self):
#init a listener to kinect and
rospy.init_node('skeleton_angle')
rospy.Subscriber("skeleton", Skeleton, self.callback)
rospy.spin()
def callback(self, data):
positions = data.position
for name in self.names:
self.positions[name]['x'] = positions[self.names.index(name)].x
self.positions[name]['y'] = positions[self.names.index(name)].y
self.positions[name]['z'] = positions[self.names.index(name)].z
#print(self.positions)
#x_0
x_0=np.array([self.positions["left_shoulder"]['x']-self.positions["right_shoulder"]['x'],
self.positions["left_shoulder"]['y']-self.positions["right_shoulder"]['y'],
self.positions["left_shoulder"]['z']-self.positions["right_shoulder"]['z']])
x_0=(x_0/np.linalg.norm(x_0))
#y_0
mid_shoulder=np.array([self.positions["left_shoulder"]['x']+self.positions["right_shoulder"]['x'],
self.positions["left_shoulder"]['y']+self.positions["right_shoulder"]['y'],
self.positions["left_shoulder"]['z']+self.positions["right_shoulder"]['z']])/2
mid_hip=np.array([self.positions["left_hip"]['x']+self.positions["right_hip"]['x'],
self.positions["left_hip"]['y']+self.positions["right_hip"]['y'],
self.positions["left_hip"]['z']+self.positions["right_hip"]['z']])/2
torso=np.array([self.positions["torso"]['x'],
self.positions["torso"]['y'],
self.positions["torso"]['z']])
y_0= mid_shoulder-torso
y_0= y_0/np.linalg.norm(y_0)
#z_0
z_0=np.cross(x_0,y_0)
#z_l2
z_l2=np.array([self.positions["left_elbow"]['x']-self.positions["left_shoulder"]['x'],
self.positions["left_elbow"]['y']-self.positions["left_shoulder"]['y'],
self.positions["left_elbow"]['z']-self.positions["left_shoulder"]['z']])
z_l2=z_l2/ | np.linalg.norm(z_l2) | numpy.linalg.norm |
#-- -- -- -- Intermediate Python
# Used for Data Scientist Training Path
# FYI: it is a compilation of how to work
# with different commands.
####### -----> Matplotlib
### --------------------------------------------------------
## Line plot - ex#0
# Print the last item from year and pop
print(year[-1])
print(pop[-1])
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Make a line plot: year on the x-axis, pop on the y-axis
plt.plot(year, pop)
# Display the plot with plt.show()
plt.show()
### --------------------------------------------------------
## Line plot - ex#1
import matplotlib.pyplot as plt
# Print the last item of gdp_cap and life_exp
print(gdp_cap[-1])
print(life_exp[-1])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
plt.plot(gdp_cap, life_exp)
# Display the plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex0
import matplotlib.pyplot as plt
# Change the line plot below to a scatter plot
plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale
plt.xscale('log')
# Show plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex1
# Import package
import matplotlib.pyplot as plt
# Build Scatter plot
plt.scatter(pop, life_exp)
# Show plot
plt.show()
## HISTOGRAMS
### --------------------------------------------------------
### -> Build a histogram
import matplotlib.pyplot as plt
# Create histogram of life_exp data
plt.hist(life_exp)
# Display histogram
plt.show()
### --------------------------------------------------------
## Build a histogram --- bins
import matplotlib.pyplot as plt
# Build histogram with 5 bins
plt.hist(life_exp, bins=5)
# Show and clean up plot
plt.show()
plt.clf()
# Build histogram with 20 bins
plt.hist(life_exp, bins=20)
# Show and clean up again
plt.show()
plt.clf()
### --------------------------------------------------------
## Build a histogram --- compare
import matplotlib.pyplot as plt
# Histogram of life_exp, 15 bins
plt.hist(life_exp, bins=15)
# Show and clear plot
plt.show()
plt.clf()
# Histogram of life_exp1950, 15 bins
plt.hist(life_exp1950, bins=15)
# Show and clear plot again
plt.show()
plt.clf()
### --------------------------------------------------------
# You're a professor teaching Data Science with Python,
# and you want to visually assess if the grades on
# your exam follow a particular distribution.
# Which plot do you use?
# R/ Histogram
### --------------------------------------------------------
# You're a professor in Data Analytics with Python, and you
# want to visually assess if longer answers on exam
# questions lead to higher grades.
# Which plot do you use?
# Scatter plot
### --------------------------------------------------------
### Labels
import matplotlib.pyplot as plt
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
## Ticks
import matplotlib.pyplot as plt
# Scatter plot
plt.scatter(gdp_cap, life_exp)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Definition of tick_val and tick_lab
tick_val = [1000, 10000, 100000]
tick_lab = ['1k', '10k', '100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
#Sizes
# Import numpy as np
import numpy as np
# Store pop as a numpy array: np_pop
np_pop = | np.array(pop) | numpy.array |
#!/usr/bin/env python3
# command line program
import argparse
# deepcopy
import copy
# file
import os.path
# numpy
import numpy as np
import scipy as sp
import scipy.integrate
import scipy.linalg
import scipy.interpolate
# internal modules
import libpost
# tmp plot
import matplotlib.pyplot as plt
# nu: 0.000185
# epsilon: 0.103
# kolmogorov length scale: 0.0028
# kolmogorov time scale: 0.0424
# batchelor scale: (nu D^2 / epsilon)^(1/4)
# batchelor (scale < kolmogorov length) eq (D < nu)
# D < 1.85e-4
sc = 1e0
D = 1.85e-4 / sc
s0 = 1e0 * 0.0028 # initial thickness
dA0 = (1e0 * 0.0028)**2 # initial surface
c_nb = 100
c_array = np.linspace(0.0, 1.0, num=c_nb)
C_nb = c_nb
C_array = np.linspace(0.0, 1.0, num=C_nb)
rho_nb = c_nb
log_rho_array = np.linspace(-np.log(10), np.log(10000), num=rho_nb)
#process_times = [0.0, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128]
process_times = [0.002, 0.004, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 1.536, 2.048, 4.096, 8.192] # step 0.002
def parse():
parser = argparse.ArgumentParser(description='Computes statistics of the lagrangian gradients matrix (computed along particle trajectories)')
return parser.parse_args()
def dL_dt(foperator, L, t):
return np.matmul(foperator(t), L.reshape((3,3), order='C')).flatten(order='C')
def F(a):
return np.log((4.0/a + 1) + np.sqrt((4.0/a + 1)**2 - 1.0)) - 1.6/np.sqrt(a + 1.4)
def P(rho, mu_pp, mu_p, mu):
# init result
result = | np.zeros((rho.size, mu.size)) | numpy.zeros |
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as st
from abc import ABCMeta, abstractmethod
from .mvar.comp import ldl
from .mvarmodel import Mvar
from .aec.utils import filter_band, calc_ampenv, FQ_BANDS
import six
from six.moves import map
from six.moves import range
from six.moves import zip
########################################################################
# Spectrum functions:
########################################################################
def spectrum(acoef, vcoef, fs=1, resolution=100):
"""
Computes the spectral matrices A(f), H(f) and S(f) from MVAR coefficients.
Args:
*acoef* : numpy.array
array of shape (p, k, k) where *k* is the number of channels and
*p* is the model order (the first axis runs over lags).
*vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
Returns:
*A_z* : numpy.array
z-transformed A(f) complex matrix in shape (*resolution*, k, k)
*H_z* : numpy.array
inversion of *A_z*
*S_z* : numpy.array
spectrum matrix (*resolution*, k, k)
References:
.. [1] <NAME>, <NAME>, <NAME> (2004) “Granger causality
and information flow in multivariate processes”
Physical Review E 70, 050902.
"""
p, k, k = acoef.shape
freqs = np.linspace(0, fs*0.5, resolution)
A_z = np.zeros((len(freqs), k, k), complex)
H_z = np.zeros((len(freqs), k, k), complex)
S_z = np.zeros((len(freqs), k, k), complex)
I = np.eye(k, dtype=complex)
for e, f in enumerate(freqs):
epot = np.zeros((p, 1), complex)
ce = np.exp(-2.j*np.pi*f*(1./fs))
epot[0] = ce
for k in range(1, p):
epot[k] = epot[k-1]*ce
A_z[e] = I - np.sum([epot[x]*acoef[x] for x in range(p)], axis=0)
H_z[e] = np.linalg.inv(A_z[e])
S_z[e] = np.dot(np.dot(H_z[e], vcoef), H_z[e].T.conj())
return A_z, H_z, S_z
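# Added usage sketch (illustrative; assumes `acoef` and `vcoef` come from an MVAR fit,
# e.g. via the Mvar class imported above):
# A_z, H_z, S_z = spectrum(acoef, vcoef, fs=128, resolution=256)
# S_z[f, i, j] is the cross-spectral density between channels i and j at the f-th
# frequency bin of np.linspace(0, fs/2, resolution).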
def spectrum_inst(acoef, vcoef, fs=1, resolution=100):
"""
Computes the spectral matrices from MVAR coefficients, taking
zero-lag (instantaneous) effects into account.
Args:
*acoef* : numpy.array
array of shape (p, k, k) where *k* is the number of channels and the
first axis runs over lags. acoef[0] is the (k, k) matrix for zero lag,
acoef[1] for a one-sample lag and so on.
*vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
Returns:
*A_z* : numpy.array
z-transformed A(f) complex matrix in shape (*resolution*, k, k)
*H_z* : numpy.array
inversion of *A_z*
*S_z* : numpy.array
spectrum matrix (*resolution*, k, k)
References:
.. [1] <NAME>, Multivariate Autoregressive Model with
Instantaneous Effects to Improve Brain Connectivity Estimation,
Int. J. Bioelectromagn. 11, 74–79 (2009).
"""
p, k, k = acoef.shape
freqs = np.linspace(0, fs/2, resolution)
B_z = np.zeros((len(freqs), k, k), complex)
L, U, Lt = ldl(vcoef)
Linv = np.linalg.inv(L)
I = np.eye(k, dtype=complex)
bcoef = np.array([np.dot(Linv, acoef[x]) for x in range(p)])
b0 = np.eye(k) - Linv
for e, f in enumerate(freqs):
epot = np.zeros((p, 1), complex)
ce = | np.exp(-2.j*np.pi*f*(1./fs)) | numpy.exp |
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import animation
import seaborn as sns
import numpy as np
import cmocean
import os
from mpl_toolkits.axes_grid1 import AxesGrid
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy
import scipy.ndimage
from scipy.stats import norm
import matplotlib.image as mpimg
class Plotter():
def __init__(self, dic_data, deck, data_modes,
plot_deltas = False):
self.zz = deck.targetplot
plot_contour_linear = deck.doc["Plots"]["Contour Plots"]["Linear"]["Plot_it"]
plot_contour_log = deck.doc["Plots"]["Contour Plots"]["Log"]["Plot_it"]
plot_quiver = deck.doc["Plots"]["Quiver"]["Plot_it"]
plot_streamplots = deck.doc["Plots"]["Streamplots"]["Plot_it"]
gif_heatmaps = deck.doc["Plots"]["Heatmaps"]["Gif_it"]
gif_contourlin = deck.doc["Plots"]["Contour Plots"]["Linear"]["Gif_it"]
gif_contourlog = deck.doc["Plots"]["Contour Plots"]["Log"]["Gif_it"]
for self.index, dic_image in enumerate(dic_data.dataframe):
index = self.index
if plot_contour_linear.lower() == "true":
self.create_contourplot_linear(dic_data.dic_paths[index], dic_image, deck, data_modes)
if plot_contour_log.lower() == "true":
self.create_contourplot_log(dic_data.dic_paths[index], dic_image, deck, data_modes)
if plot_quiver.lower() == "true":
self.create_quiver(dic_data.dic_paths[index], dic_image, deck)
if plot_streamplots.lower() == "true":
self.create_streamplot(dic_data.dic_paths[index], dic_image, deck)
# Do we really need this ?
self.plot_dataset(dic_data.dic_paths[index], dic_image, deck)
if plot_deltas == True:
if index == 0:
pass
else:
self.plot_deltas(dic_data.dic_paths[index], dic_image, deck)
if deck.plot_heatmaps.lower() == "true":
for index2, gdf in enumerate(data_modes.grouped):
if index == index2:
self.build_deltaheatmaps(dic_data.dic_paths[index], gdf, deck, data_modes.scale_min, data_modes.scale_max)
if gif_heatmaps == "true":
self.create_heatmaps_gif(data_modes.grouped, deck, data_modes.scale_min, data_modes.scale_max)
if gif_contourlin.lower() == "true":
self.create_contourplotlin_gif(dic_data.dataframe, deck, data_modes, dic_data.dic_paths)
if gif_contourlog.lower() == "true":
self.create_contourplotlog_gif(dic_data.dataframe, deck, data_modes, dic_data.dic_paths)
def filter_NaN_Matrix(self, U, sigVal):
# Function to limit the propagation of NaNs in the Gaussian filter used to smooth the image (normalized convolution)
V=U.copy()
V[np.isnan(U)]=0
VV=scipy.ndimage.gaussian_filter(V,sigma=sigVal)
W=0*U.copy()+1
W[np.isnan(U)]=0
WW=scipy.ndimage.gaussian_filter(W,sigma=sigVal)
| np.seterr(divide='ignore', invalid='ignore') | numpy.seterr |
import gan
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Reference (Japanese article, roughly "GAN basics it feels too late to ask about"):
# https://qiita.com/triwave33/items/1890ccc71fab6cbca87e
class GanTest2D:
def __init__(self, data_num, latent_dim, train_epoch):
self.DATA_NUM = data_num
self.LATENT_DIM = latent_dim
self.TRAIN_EPOCH = train_epoch
self.real_datas = None
return
# run
def run(self):
# make real data
self.make_real_data(self.DATA_NUM)
self.__plot_scat1(self.real_datas[:,0], self.real_datas[:,1], label='real data')
# make gan model
self.make_gan_model()
#self.make_gan_model_separating_disc_gene()
# graph of real and judged as true by discriminator data
self.__check_disc(self.gan, 100)
return
# data
def make_real_data(self, data_num):
self.real_datas = self.__sample_data_in_circle(data_num, radius=0.5)
#self.real_datas = self.__sample_data_in_half_circle(data_num, radius=0.5)
return
def __sample_data_in_circle(self, data_num, radius):
#
center = np.array([0.5, 0.5])
#center = np.array([0.0, 0.0])
# sampling num
sampling_margin = 2
sampling_num = int((1.0 * 1.0) / (radius * radius * 3.14) * data_num * sampling_margin)
# sampling
end_sampling_flag = False
x = np.empty((0,2), float)
# sampling roop
while not end_sampling_flag:
# x in [-1,1)
x_sampled = np.random.rand(sampling_num, 2) * 2.0 - 1.0
x_sampled = x_sampled[np.sqrt(np.sum(np.square(x_sampled - center), axis=1)) <= radius]
#
x = np.append(x, x_sampled, axis=0)
# check flag
end_sampling_flag = x.shape[0] >= data_num
# extract
x = x[0:data_num]
return x
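# Added note: this is plain rejection sampling - candidates are drawn uniformly in
# [-1, 1)^2 and kept only if they lie within `radius` of `center`; `sampling_margin`
# oversamples so the while loop usually finishes after one or two passes.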
def __sample_data_in_half_circle(self, data_num, radius):
#
center = np.array([0.5, 0.5])
#center = np.array([0.0, 0.0])
# sampling num
sampling_margin = 2
sampling_num = int((1.0 * 1.0) / (radius * radius * 3.14) * data_num * sampling_margin)
# sampling
end_sampling_flag = False
x = np.empty((0,2), float)
# sampling roop
while not end_sampling_flag:
# x in [-1,1)
x_sampled = np.random.rand(sampling_num, 2) * 2.0 - 1.0
x_sampled = x_sampled[np.sqrt(np.sum(np.square(x_sampled - center), axis=1)) <= radius]
x_sampled = x_sampled[x_sampled[:,1] < center[1]]
#
x = np.append(x, x_sampled, axis=0)
# check flag
end_sampling_flag = x.shape[0] >= data_num
# extract
x = x[0:data_num]
return x
# gan
def make_gan_model(self):
# make model
self.gan = gan.GAN(latent_dim=self.LATENT_DIM, data_dim=self.real_datas.shape[1])
#self.gan.make_model(gene_hidden_neurons=[32, 16, 16], disc_hidden_neurons=[32, 16, 16])
self.gan.make_model(gene_hidden_neurons=[32, 16, 16], disc_hidden_neurons=[124, 64, 16])
# train gan model
fig = plt.figure()
ims = []
ims.append([self.__plot_gene_data(self.gan, data_num=3000, show=False)])
# training epoch roop
for iep in range(self.TRAIN_EPOCH):
self.gan.train_step(self.real_datas, batch_size=32, now_epoch=iep)
#self.gan.train_step_test1(self.real_datas, batch_size=32, now_epoch=iep)
# images for animation
ims.append([self.__plot_gene_data(self.gan, data_num=3000, show=False)])
# graph of real and generated data
ani = animation.ArtistAnimation(fig, ims, interval=100)
ani.save('generated_point.gif', writer='pillow')
plt.show()
return
def make_gan_model_separating_disc_gene(self):
# make model
self.gan = gan.GAN(latent_dim=self.LATENT_DIM, data_dim=self.real_datas.shape[1])
#self.gan.make_model(gene_hidden_neurons=[32, 16, 16], disc_hidden_neurons=[32, 16, 16])
self.gan.make_model(gene_hidden_neurons=[32, 16, 16], disc_hidden_neurons=[248, 124, 16])
# train disc model
for iep in range(self.TRAIN_EPOCH):
self.gan.train_step_only_disc_with_random_noise(self.real_datas, batch_size=32, now_epoch=iep)
# train gene model
fig = plt.figure()
ims = []
ims.append([self.__plot_gene_data(self.gan, data_num=3000, show=False)])
# training epoch roop
for iep in range(self.TRAIN_EPOCH):
self.gan.train_step_only_gene(self.real_datas, batch_size=32, now_epoch=iep)
# images for animation
ims.append([self.__plot_gene_data(self.gan, data_num=3000, show=False)])
# graph of real and generated data
ani = animation.ArtistAnimation(fig, ims, interval=100)
ani.save('generated_point.gif', writer='pillow')
plt.show()
return
def __plot_gene_data(self, gan, data_num, title=None, show=True):
'''
plot generated data
'''
latents = np.random.normal(0, 1, (data_num, self.LATENT_DIM))  # use the data_num argument instead of a hard-coded 300
gene_datas = gan.gene_model.predict(latents)
image = self.__plot_scat1(gene_datas[:,0], gene_datas[:,1], color='c', title=title, show=show)
return image
def __plot_disc_predict(self, gan, data_num, binary=False, save=False, savefilename=''):
'''
plot discrimination model prediction
'''
# grid [-1,1] and [-1,1]
x1d = np.linspace(start=-1, stop=1, num=data_num)
x1, x2 = np.meshgrid(x1d, x1d)
x1 = | np.ravel(x1) | numpy.ravel |
import numpy as np
import math
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from random import sample
class RansacPointGenerator:
"""generates a set points - linear distributed + a set of outliers"""
def __init__(self, numpointsInlier, numpointsOutlier):
self.numpointsInlier = numpointsInlier
self.numpointsOutlier = numpointsOutlier
self.points = []
pure_x = np.linspace(0, 1, numpointsInlier)
pure_y = np.linspace(0, 1, numpointsInlier)
noise_x = np.random.normal(np.random.rand(), 0.025, numpointsInlier)
noise_y = np.random.normal(0, 0.025, numpointsInlier)
outlier_x = np.random.random_sample((numpointsOutlier,))
outlier_y = | np.random.random_sample((numpointsOutlier,)) | numpy.random.random_sample |
'''
File taken from <NAME>'s github, 2020-12-9. Credit for
original development goes to <NAME>. Modifications made
by <NAME>.
'''
import numpy as np
import math
import pickle
from scipy.io import readsav
def get_sigma(oom,nlev):
sigma=np.zeros(nlev)  # np.empty()*0.0 can propagate inf/nan garbage; zeros is the safe initialization
if oom==0:
stp=1.0/(nlev+1.)
sigma[nlev-1]=1.0-stp
for n in range(nlev-2,-1,-1):
sigma[n]=sigma[n+1]-stp
if oom>0:
stp=-1.0*oom/nlev
sigma[nlev-1]=10.**(stp/2.)
for n in range(nlev-2,-1,-1):
sigma[n]=sigma[n+1]*10.**(stp)
return sigma
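# Added worked example (not in the original): get_sigma(oom=0, nlev=4) returns the
# evenly spaced levels [0.2, 0.4, 0.6, 0.8], while oom > 0 spaces the levels
# logarithmically over `oom` orders of magnitude in pressure, with sigma[nlev-1]
# (sigma = P / P_surface) closest to the surface.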
####################
def fort26(path,runname,oom, surfp,ver,savet,fortfile,):
with open(path+runname+'/'+fortfile) as f:
first_line=f.readline()
nlat,nlon,nlev=first_line.split()
nlat,nlon,nlev=int(nlat),int(nlon),int(nlev)
print(' ')
print(' ....reading ',fortfile)
print(' nlat=', nlat, 'nlon=', nlon, 'nlev=', nlev)
f.close()
data26=np.empty([nlon*nlat*nlev, 6])
l=0
lp=0
with open(path+runname+'/'+fortfile) as f:
for line in f:
if l==0:
l+=1
continue
elif l%2==1 and l<=nlon*nlat*nlev*2.:
line_pair=np.empty([6])
lon, lat, lev, u, v = line.split()
line_pair[:5] = np.float32(lon), np.float32(lat), int(lev), np.float32(u), np.float32(v)
elif l%2==0 and l<=nlon*nlat*nlev*2.:
line_pair[5]=np.float32(line)
data26[lp,:]=line_pair
lp+=1
elif l>nlon*nlat*nlev*2.:
print(' END OF FILE: DONE')
break
l+=1
f.close()
lon_arr_f=data26[:,0]
lon_arr=np.array([])
for l in range(0,len(lon_arr_f)):
el=lon_arr_f[l]
if not el in lon_arr:
lon_arr=np.append(lon_arr,el)
lat_arr_f=data26[:,1]
lat_arr=np.array([])
for l in range(0,len(lat_arr_f)):
el=lat_arr_f[l]
if not el in lat_arr:
lat_arr=np.append(lat_arr,el)
lev_arr_f=data26[:,2]
lev_arr=np.array([])
for l in range(0,len(lev_arr_f)):
el=lev_arr_f[l]
if not el in lev_arr:
lev_arr= | np.append(lev_arr,el) | numpy.append |
from KASD.initializers import initializers, serialize as _serialize_initializer, get as _get_initializer
from KASD.regularizers import regularizers, serialize as _serialize_regularizer, get as _get_regularizer
from KASD.constraints import constraints, serialize as _serialize_constraint, get as _get_constraint
from KASD.activations import activations
from ..initializers import randomize as _randomize_initializer
from ..regularizers import randomize as _randomize_regularizer
from ..constraints import randomize as _randomize_constraint
from ..utils.math import factors
from ..utils.rand_funcs import*
from .. import Process, run
from collections import deque
from copy import deepcopy
import numpy as np
data_formats = ['channels_last', 'channels_first']
paddings = ['valid', 'causal', 'same']
interpolations = ['nearest', 'bilinear']
implementations = [1, 2]
merge_modes = ['sum', 'mul', 'concat', 'ave']
def regularizer_(reg):
if reg is None: return None
reg = _get_regularizer(reg)
reg = _serialize_regularizer(reg)
_randomize_regularizer(reg)
return reg
def initializer_(init):
if init is None: return None
init = _get_initializer(init)
init = _serialize_initializer(init)
_randomize_initializer(init)
return init
def constraint_(const, input_shape):
if const is None: return None
const = _get_constraint(const)
const = _serialize_constraint(const)
_randomize_constraint(const, input_shape)
return const
default_ranges = {
'Dense/units': [1, 128],
'Dropout/seed': [1, 1024],
'RepeatVector/n': [1, 64],
'ActivityRegularization/l1': [-1.0, 1.0],
'ActivityRegularization/l2': [-1.0, 1.0],
'SpatialDropout1D/seed': [1, 1024],
'SpatialDropout2D/seed': [1, 1024],
'SpatialDropout3D/seed': [1, 1024],
'Conv1D/filters': [1, 128],
'Conv2D/filters': [1, 128],
'SeparableConv1D/filters': [1, 128],
'SeparableConv1D/depth_multiplier': [1, 32],
'SeparableConv2D/filters': [1, 128],
'SeparableConv2D/depth_multiplier': [1, 32],
'DepthwiseConv2D/filters': [1, 128],
'DepthwiseConv2D/depth_multiplier': [1, 32],
'Conv2DTranspose/filters': [1, 128],
'Conv3D/filters': [1, 128],
'Conv3DTranspose/filters': [1, 128],
'UpSampling1D/size': [2, 32],
'UpSampling2D/size': ([2, 32], [2, 32]),
'UpSampling3D/size': ([2, 32], [2, 32], [2, 32]),
'ZeroPadding1D/padding': (([0, 32], [0, 32]),),
'ZeroPadding2D/padding': (([0, 32], [0, 32]), ([0, 32], [0, 32])),
'ZeroPadding3D/padding': (([0, 32], [0, 32]), ([0, 32], [0, 32]), ([0, 32], [0, 32])),
'SimpleRNN/units': [1, 128],
'GRU/units': [1, 128],
'LSTM/units': [1, 128],
'SimpleRNNCell/units': [1, 128],
'GRUCell/units': [1, 128],
'LSTMCell/units': [1, 128],
'CuDNNGRU/units': [1, 128],
'CuDNNLSTM/units': [1, 128],
'BatchNormalization/momentum': [-10, 10],
'BatchNormalization/epsilon': [1e-5, 1e-2],
'GaussianNoise/stddev': [1e-3, 10],
'AlphaDropout/seed': [1, 1024],
'LeakyReLU/alpha': [0, 16],
'ELU/alpha': [0, 16],
'ThresholdedReLU/theta': [0, 10],
'ReLU/threshold/max_value': [0, 16],
'ReLU/negative_slope': [0, 16],
'ConvLSTM2D/filters': [1, 128],
'ConvLSTM2DCell/filters': [1, 128]}
###Layer Samples###
def _sample_null(serial, attributes=[], ranges=default_ranges): pass
InputLayer_sample=Add_sample=Subtract_sample=Multiply_sample=Average_sample=Maximum_sample=Minimum_sample=Concatenate_sample=Lambda_sample=_sample_null
def Dot_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def normalize():
return bool(np.random.randint(0, 2))
run(queue, attributes, locals())
def Dense_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def units():
return np.random.randint(ranges['Dense/units'][0], ranges['Dense/units'][1]+1)
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice())
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def Activation_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def activation():
return activations.choice()
run(queue, attributes, locals())
def Dropout_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def rate():
return np.random.sample()
@Process(serial, queue)
def noise_shape():
return noise_shape_(input_shape)
@Process(serial, queue)
def seed():
return np.random.randint(ranges['Dropout/seed'][0], ranges['Dropout/seed'][1]+1)
run(queue, attributes, locals())
def Flatten_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats+[None])
run(queue, attributes, locals())
def Reshape_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def target_shape():
_factors = factors(np.prod(input_shape[1:]), dims=np.array(output_shape[1:]).shape[0])
if not isinstance(_factors, (list, np.ndarray)):
_factors = np.array([[_factors]])
        _factors = np.concatenate((_factors, np.flip(_factors, axis=-1)))
import gym
from gym import error, spaces, utils
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from gym.utils import seeding
import struct
from array import array
import os
import math
import numpy as np
def state_transition_from_direction(d):
# Five actions for each block: nothing(0), right(1), up(2), left(3), down(4):
if type(d) != int or d < 0 or d > 4:
raise ValueError('Unsupported action, got {}'.format(d))
else:
if d == 0:
return (0, 0)
elif d == 1:
return (0, 1)
elif d == 2:
return (-1, 0)
elif d == 3:
return (0, -1)
else:
return (1, 0)
def get_transition(action):
'''
Five actions for each block: nothing(0), right(1), up(2), left(3), down(4):
For pair of blocks the following table of actions is available:
00 | 0 10 | 5 20 | 10 30 | 15 40 | 20
01 | 1 11 | 6 21 | 11 31 | 16 41 | 21
02 | 2 12 | 7 22 | 12 32 | 17 42 | 22
03 | 3 13 | 8 23 | 13 33 | 18 43 | 23
04 | 4 14 | 9 24 | 14 34 | 19 44 | 24
'''
if type(action) != int or action < 0 or action > 24:
raise ValueError('Unsupported action, got {}'.format(action))
else:
st1 = state_transition_from_direction(action // 5)
st2 = state_transition_from_direction(action % 5)
return np.array([st1[0], st1[1], st2[0], st2[1]])
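# --- Added illustrative sketch (helper name is hypothetical). An action in [0, 24]
# encodes a pair of per-block moves as 5*d1 + d2, where each d is one of
# nothing(0)/right(1)/up(2)/left(3)/down(4); get_transition() returns the
# concatenated (row, col) deltas for the two blocks.
def _transition_example():
    delta = get_transition(13)  # 13 // 5 == 2 -> block 1 moves up, 13 % 5 == 3 -> block 2 moves left
    assert (delta == np.array([-1, 0, 0, -1])).all()
    return delta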
def load_mnist():
path_to_mnist = os.environ['MNIST_TRAIN_PATH']
with open(path_to_mnist, 'rb') as file:
magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
if magic != 2051:
raise ValueError('Magic number mismatch, expected 2051, got {}'.format(magic))
image_data = array("B", file.read())
images = []
for i in range(size):
images.append([0] * rows * cols)
for i in range(size):
images[i][:] = image_data[i * rows * cols:(i + 1) * rows * cols]
return np.array(images)
def randomize_trans_mnist(image, in_length, out_length):
image = image.reshape(in_length, in_length)
    top_left = np.random.randint(low=0, high=out_length - in_length + 1, size=2)  # random_integers is deprecated; randint's upper bound is exclusive
trans_mnist = np.zeros((out_length, out_length))
trans_mnist[top_left[0]:top_left[0] + in_length, top_left[1]:top_left[1] + in_length] = image
return trans_mnist, top_left
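# --- Added illustrative sketch (helper name is hypothetical; random noise stands in
# for a real MNIST row). It shows the contract of randomize_trans_mnist(): a flat
# 28*28 image is embedded at a random offset inside a 40x40 canvas, and the chosen
# top-left corner is returned alongside the canvas.
def _translate_example():
    fake_digit = np.random.rand(28 * 28)
    canvas, top_left = randomize_trans_mnist(fake_digit, 28, 40)
    assert canvas.shape == (40, 40)
    assert 0 <= top_left[0] <= 12 and 0 <= top_left[1] <= 12
    return canvas, top_left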
class MnistPairEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
self.mnist = load_mnist()
self.in_image_length = int(np.sqrt(self.mnist.shape[1]))
self.number_of_images = self.mnist.shape[0]
self.out_image_length = 40
self.state = np.array([0, 0, 0, 0]) # format (top_left_1_x, top_left_1_y, top_left_2_x, top_left_2_y)
self.total_episode_steps = 20
self.idle_discouragement = -0.5
self.solution_reward = 10
self.current_step = 0
self.pair1, top_left1 = randomize_trans_mnist(self.mnist[0, :], self.in_image_length, self.out_image_length)
self.pair2, top_left2 = randomize_trans_mnist(self.mnist[0, :], self.in_image_length, self.out_image_length)
self.action_space = spaces.Discrete(25)
self.observation_space = spaces.Discrete((self.out_image_length - self.in_image_length + 1) ** 2)
self.solution = np.concatenate((top_left1, top_left2))
fig = plt.figure()
fig.add_axes([0, 0, 0.5, 1.0])
fig.add_axes([0.5, 0, 0.5, 1.0])
self.ax1 = fig.axes[0]
self.ax2 = fig.axes[1]
self.ax1.set_yticklabels([])
self.ax1.set_xticklabels([])
self.ax2.set_yticklabels([])
self.ax2.set_xticklabels([])
def step(self, action):
self.state += get_transition(action)
self.current_step += 1
np.clip(self.state, 0, self.out_image_length - self.in_image_length, out=self.state)
crop1 = self.pair1[self.state[0]:self.state[0]+self.in_image_length, self.state[1]:self.state[1]+self.in_image_length]
crop2 = self.pair2[self.state[2]:self.state[2]+self.in_image_length, self.state[3]:self.state[3]+self.in_image_length]
observation = np.concatenate((np.ravel(crop1), np.ravel(crop2)))
if np.array_equal(self.state, self.solution):
return observation, self.solution_reward, True, {'successful': True}
else:
reward = np.corrcoef(crop1.ravel(), crop2.ravel())[0, 1]
if np.isnan(reward):
reward = self.idle_discouragement
return observation, reward, self.current_step >= self.total_episode_steps, {'successful': False}
def reset(self):
        self.state = np.array([0, 0, 0, 0])
import numpy as np
from phonopy.harmonic.dynamical_matrix import get_dynamical_matrix
from phonopy.units import VaspToTHz, Hbar, EV, Angstrom, THz, AMU
from phono3py.phonon.solver import set_phonon_c, set_phonon_py
from phono3py.phonon3.real_to_reciprocal import RealToReciprocal
from phono3py.phonon3.reciprocal_to_normal import ReciprocalToNormal
from phono3py.phonon3.triplets import (get_triplets_at_q,
get_nosym_triplets_at_q,
get_bz_grid_address)
class Interaction(object):
def __init__(self,
supercell,
primitive,
mesh,
symmetry,
fc3=None,
band_indices=None,
constant_averaged_interaction=None,
frequency_factor_to_THz=VaspToTHz,
frequency_scale_factor=None,
unit_conversion=None,
is_mesh_symmetry=True,
symmetrize_fc3q=False,
cutoff_frequency=None,
lapack_zheev_uplo='L'):
if frequency_scale_factor is None:
self._set_fc3(fc3)
else:
self._set_fc3(fc3 * frequency_scale_factor ** 2)
self._supercell = supercell
self._primitive = primitive
        self._mesh = np.array(mesh, dtype='intc')
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial.legendre import leggauss as p_roots
from .Elemento import *
class FEMSections:
"""# Clase FEMSections
***
Clase que define un problema de elementos finitos"""
defaultConfig = {'numeroElementos': 40, 'ordenAproximacion': 1, 'puntosGauss': 3, 'h': 1 / 1000}
"""Configuracion por defecto para una instancia de la clase,
esta se usara a menos de que se especifique otra configuracion.
"""
x = lambda z, xa, he: ((xa + he) - xa) / 2 * z + (xa + (xa + he)) / 2
"""Funcion de transformacion de coordenadas naturales a globales
"""
pesos = [0, 0, 0, 0, 0, 0]
"""Diccionario que contiene los pesos de la cuadratura de gauss
para evaluarlos, cada indice corresponde a una lista
de los numeros de gauss a ser multiplicados, por ejemplo, si se
escribe FEMSections.pesos[2] se obtienen los pesos correspondientes
a integrar con 3 puntos en la cuadratura de gauss
"""
pesos[0] = [2]
pesos[1] = [1, 1]
pesos[2] = [5 / 9, 8 / 9, 5 / 9]
pesos[3] = [(18 + np.sqrt(30)) / (36), (18 + np.sqrt(30)) / (36), (18 - np.sqrt(30)) / (36),
(18 - np.sqrt(30)) / (36)]
pesos[4] = [(322 + 13 * np.sqrt(70)) / (900), (322 + 13 * np.sqrt(70)) / (900), 128 / 225,
(322 - 13 * np.sqrt(70)) / (900), (322 - 13 * np.sqrt(70)) / (900)]
pesos[5] = [0.4679139346, 0.4679139346, 0.3607615730, 0.3607615730, 0.1713244924, 0.1713244924]
puntos = [0, 0, 0, 0, 0, 0]
"""Diccionario que contiene los puntos de la cuadratura de gauss para
evaluarlos, cada indice corresponde a una lista de los numeros de
gauss a ser evaluados, por ejemplo, si se escribe FEMSections.puntos[2]
se obtienen los puntos correspondientes a integrar con 3 puntos
en la cuadratura de gauss
"""
puntos[0] = [0]
puntos[1] = [-np.sqrt(1 / 3), np.sqrt(1 / 3)]
puntos[2] = [-np.sqrt(3 / 5), 0, np.sqrt(3 / 5)]
puntos[3] = [-np.sqrt(3 / 7 - 2 / 7 * np.sqrt(6 / 5)), np.sqrt(3 / 7 - 2 / 7 * np.sqrt(6 / 5)),
-np.sqrt(3 / 7 + 2 / 7 * np.sqrt(6 / 5)), np.sqrt(3 / 7 + 2 / 7 * np.sqrt(6 / 5))]
puntos[4] = [-1 / 3 * np.sqrt(5 - 2 * np.sqrt(10 / 7)), 1 / 3 * np.sqrt(5 - 2 * np.sqrt(10 / 7)), 0,
                 -1 / 3 * np.sqrt(5 + 2 * np.sqrt(10 / 7)), 1 / 3 * np.sqrt(5 + 2 * np.sqrt(10 / 7))]
import numpy as np
import scipy.sparse as sparse
from tqdm import tqdm
def compute_normals(vertices, faces):
"""
Compute normals of a triangular mesh
Parameters
-----------------------------
vertices : (n,3) array of vertices coordinates
faces : (m,3) array of vertex indices defining faces
Output
-----------------------------
normals : (m,3) array of normalized per-face normals
"""
v1 = vertices[faces[:, 0]]
v2 = vertices[faces[:, 1]]
v3 = vertices[faces[:, 2]]
normals = np.cross(v2-v1, v3-v1)
normals /= np.linalg.norm(normals, axis=1, keepdims=True)
return normals
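# --- Added minimal sketch (helper name is hypothetical): for a single right triangle
# in the z = 0 plane with counter-clockwise vertex order, the unit normal is +z.
def _normals_example():
    verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    faces = np.array([[0, 1, 2]])
    n = compute_normals(verts, faces)
    assert np.allclose(n, [[0., 0., 1.]])
    return n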
def compute_faces_areas(vertices, faces):
"""
Compute per-face areas of a triangular mesh
Parameters
-----------------------------
vertices : (n,3) array of vertices coordinates
faces : (m,3) array of vertex indices defining faces
Output
-----------------------------
faces_areas : (m,) array of per-face areas
"""
v1 = vertices[faces[:,0]] # (m,3)
v2 = vertices[faces[:,1]] # (m,3)
v3 = vertices[faces[:,2]] # (m,3)
faces_areas = 0.5 * np.linalg.norm(np.cross(v2-v1,v3-v1),axis=1) # (m,)
return faces_areas
def compute_vertex_areas(vertices, faces, faces_areas=None):
"""
Compute per-vertex areas of a triangular mesh.
Area of a vertex, approximated as one third of the sum of the area
of its adjacent triangles
Parameters
-----------------------------
vertices : (n,3) array of vertices coordinates
faces : (m,3) array of vertex indices defining faces
Output
-----------------------------
vert_areas : (n,) array of per-vertex areas
"""
N = vertices.shape[0]
if faces_areas is None:
faces_areas = compute_faces_areas(vertices,faces) # (m,)
# This is way faster than using np.add.at for some reason...
I = np.concatenate([faces[:,0], faces[:,1], faces[:,2]])
J = np.zeros_like(I)
V = np.concatenate([faces_areas, faces_areas, faces_areas])/3
# Get the (n,) array of vertex areas
vertex_areas = np.array(sparse.coo_matrix((V, (I, J)), shape=(N, 1)).todense()).flatten()
return vertex_areas
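# --- Added note and sketch (helper name is hypothetical): the sparse-matrix
# accumulation above is equivalent to the more obvious np.add.at formulation;
# this checks the two agree on a tiny two-triangle square mesh.
def _vertex_area_example():
    verts = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
    faces = np.array([[0, 1, 2], [0, 2, 3]])
    fa = compute_faces_areas(verts, faces)            # both triangles have area 0.5
    via_sparse = compute_vertex_areas(verts, faces, fa)
    via_add_at = np.zeros(len(verts))
    np.add.at(via_add_at, faces, (fa / 3)[:, None])   # one third of each incident face
    assert np.allclose(via_sparse, via_add_at)
    return via_sparse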
def neigh_faces(faces):
"""
    Return the indices of neighboring faces for each vertex. This assumes all vertices
    appear in the face list.
Parameters
--------------------
faces : (m,3) list of faces
Output
--------------------
neighbors : (n,) list of indices of neighbor faces for each vertex
"""
n_vertices = 1+faces.max()
neighbors = [[] for i in range(n_vertices)]
for face_ind, (i,j,k) in enumerate(faces):
neighbors[i].append(face_ind)
neighbors[j].append(face_ind)
neighbors[k].append(face_ind)
neighbors = [np.unique(x) for x in neighbors]
return neighbors
def per_vertex_normal(vertices, faces, face_normals=None):
"""
Computes per-vertex normals as an average of adjacent face normals.
Parameters
--------------------
vertices : (n,3) coordinates of vertices
faces : (m,3) faces defined as indices of vertices
face_normals : (m,3) per-face normals (optional)
Output
--------------------
vert_normals : (n,3) array of per-vertex normals
"""
if face_normals is None:
face_normals = compute_normals(vertices, faces) # (m,3)
vert2faces = neigh_faces(faces) # (n, p_i)
vert_normals = np.array([face_normals[vert2faces[i]].mean(0) for i in range(vertices.shape[0])])
vert_normals /= np.linalg.norm(vert_normals, axis=1, keepdims=True)
return vert_normals
def edges_from_faces(faces):
"""
Compute all edges in the mesh
Parameters
--------------------------------
faces : (m,3) array defining faces with vertex indices
Output
--------------------------
edges : (p,2) array of all edges defined by vertex indices
with no particular order
"""
    # Number of vertices
N = 1 + np.max(faces)
# Use a sparse matrix and find non-zero elements
I = np.concatenate([faces[:,0], faces[:,1], faces[:,2]])
J = np.concatenate([faces[:,1], faces[:,2], faces[:,0]])
V = np.ones_like(I)
M = sparse.coo_matrix((V, (I, J)), shape=(N, N))
inds1,inds2 = M.nonzero() # (p,), (p,)
edges = np.hstack([inds1[:,None], inds2[:,None]])
edges = np.sort(edges, axis=1)
return np.unique(edges, axis=0)
def geodesic_distmat(vertices, faces):
"""
Compute geodesic distance matrix using Dijkstra algorithm.
"""
N = vertices.shape[0]
edges = edges_from_faces(faces)
I = edges[:, 0] # (p,)
J = edges[:, 1] # (p,)
V = np.linalg.norm(vertices[J] - vertices[I], axis=1) # (p,)
In = np.concatenate([I, J])
Jn = np.concatenate([J, I])
Vn = np.concatenate([V, V])
graph = sparse.coo_matrix((Vn, (In, Jn)), shape=(N, N)).tocsc()
geod_dist = sparse.csgraph.dijkstra(graph)
return geod_dist
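# --- Added minimal sketch (helper name is hypothetical): on the unit square split
# into two triangles sharing the 0-2 diagonal, the Dijkstra distances follow mesh
# edges, so opposite corners are either sqrt(2) apart (along the shared diagonal)
# or 2.0 apart (around two unit edges when no direct edge exists).
def _geodesic_example():
    verts = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
    faces = np.array([[0, 1, 2], [0, 2, 3]])
    D = geodesic_distmat(verts, faces)
    assert np.isclose(D[0, 2], np.sqrt(2.0))
    assert np.isclose(D[1, 3], 2.0)
    return D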
def heat_geodesic_from(inds, vertices, faces, normals, A, W=None, t=1e-3, face_areas=None,
vert_areas=None, grads=None, solver_heat=None, solver_lap=None):
"""
Computes geodesic distances between vertices of index inds and all other vertices
using the Heat Method
Parameters
-------------------------
inds : int or (p,) array of ints - index of the source vertex (or vertices)
vertices : (n,3) vertices coordinates
faces : (m,3) triangular faces defined by 3 vertices index
normals : (m,3) per-face normals
A : (n,n) sparse - area matrix of the mesh so that the laplacian L = A^-1 W
W : (n,n) sparse - stiffness matrix so that the laplacian L = A^-1 W.
Optional if solvers are given !
t : float - time parameter for which to solve the heat equation
face_area : (m,) - Optional, array of per-face area, for faster computation
vert_areas : (n,) - Optional, array of per-vertex area, for faster computation
solver_heat : callable -Optional, solver for (A + tW)x = b given b
solver_lap : callable -Optional, solver for Wx = b given b
"""
n_vertices = vertices.shape[0]
n_inds = len(inds) if type(inds) in [np.ndarray, list] else 1
if face_areas is None:
face_areas = compute_faces_areas(vertices, faces)
if vert_areas is None:
vert_areas = compute_vertex_areas(vertices, faces)
if grads is None:
grads = _get_grad_dir(vertices, faces, normals, face_areas=face_areas) # (3,m,3)
# grads = None
# Define the dirac function d on the given index. Not that the area normalization
# will be simplified later on so this is actually A*d with A the area matrix
delta = np.zeros((n_vertices, n_inds)) # (n,p)
delta[(inds,np.arange(n_inds))] = 1 # works even if inds is an int
delta = delta.squeeze() # (n,) if n_inds is 1
# Solve (I + tL)u = d. Actually (A + tW)u = Ad
if solver_heat is not None:
u = solver_heat(delta)
else:
u = sparse.linalg.spsolve(A + t*W, delta) # (n,) or (n,p)
# Compute and normalize the gradient of the solution
g = grad_f(u, vertices, faces, normals, face_areas=face_areas, grads=grads) # (m,3) or (m,p,3)
h = - g / np.linalg.norm(g, axis=-1, keepdims=True) # (m,3) or (m,p,3)
# Solve L*phi = div(h). Actually W*phi = A*div(h)
div_h = div_f(h, vertices, faces, normals, vert_areas=vert_areas, grads=grads) # (n,) or (n,p)
if solver_lap is not None:
phi = solver_lap(A@div_h) # (n,) or (n,p)
else:
phi = sparse.linalg.spsolve(W, A @ div_h) # (n,) or (n,p)
# Phi is defined up to an additive constant. Minimum distance is 0
phi -= np.min(phi, axis=0, keepdims=True) # (n,) or (n,p)
if n_inds > 1:
phi[(inds, np.arange(n_inds))] = 0
else:
phi[inds] = 0
return phi.squeeze()
def heat_geodmat(vertices, faces, normals, A, W, t=1e-3, face_areas=None, vert_areas=None,
batch_size=None, verbose=False):
"""
Computes geodesic distances between all pairs of vertices using the Heat Method
Parameters
-------------------------
vertices : (n,3) vertices coordinates
faces : (m,3) triangular faces defined by 3 vertices index
normals : (m,3) per-face normals
A : (n,n) sparse - area matrix of the mesh so that the laplacian L = A^-1 W
W : (n,n) sparse - stiffness matrix so that the laplacian L = A^-1 W
t : float - time parameter for which to solve the heat equation
face_areas : (m,) - Optional, array of per-face area, for faster computation
vert_areas : (n,) - Optional, array of per-vertex area, for faster computation
batch_size : int - size of batches to use for computation. None means full shape
"""
n_vertices = vertices.shape[0]
if face_areas is None:
face_areas = compute_faces_areas(vertices, faces)
if vert_areas is None:
vert_areas = compute_vertex_areas(vertices, faces, face_areas)
# Prefactor linear systems
solver_heat = sparse.linalg.factorized(A.tocsc() + t * W)
solver_lap = sparse.linalg.factorized(W)
# Precompute gradient directions for each shapes
grads = _get_grad_dir(vertices, faces, normals, face_areas=face_areas) # (3,m,3)
batch_size = n_vertices if batch_size is None else batch_size
n_batches = n_vertices // batch_size + int(n_vertices % batch_size > 0)
distmat = np.zeros((n_vertices, n_vertices))
if verbose:
ind_list = tqdm(range(n_batches))
else:
ind_list = range(n_batches)
for batch_ind in ind_list:
        # Handle a batch size of 1 (and possibly a last batch of size 1)
if batch_size > 1:
batch = np.arange(batch_ind*batch_size, min(n_vertices, (1 + batch_ind) * batch_size))
else:
batch = batch_ind
if batch_ind == n_batches - 1 and n_vertices % batch_size == 1:
batch = batch[0]
distmat[:,batch] = heat_geodesic_from(batch, vertices, faces, normals, A, W=None, t=t,
face_areas=face_areas, vert_areas=vert_areas, grads=grads,
solver_heat=solver_heat, solver_lap=solver_lap)
return distmat
def farthest_point_sampling(d, k, random_init=True, n_points=None):
"""
Samples points using farthest point sampling using either a complete distance matrix
or a function giving distances to a given index i
Parameters
-------------------------
d : (n,n) array or callable - Either a distance matrix between points or
a function computing geodesic distance from a given index.
k : int - number of points to sample
random_init : Whether to sample the first point randomly or to take the furthest away
from all the other ones. Only used if d is a distance matrix
n_points : In the case where d is callable, specifies the size of the output
Output
--------------------------
fps : (k,) array of indices of sampled points
"""
if callable(d):
return farthest_point_sampling_call(d, k, n_points=n_points)
else:
if d.shape[0] != d.shape[1]:
raise ValueError(f"D should be a n x n matrix not a {d.shape[0]} x {d.shape[1]}")
return farthest_point_sampling_distmat(d, k, random_init=random_init)
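# --- Added minimal sketch (helper name is hypothetical): for three points on a line
# at x = 0, 1, 10, farthest point sampling with the non-random start picks the two
# extremes, since they maximise the minimum distance to the points already chosen.
def _fps_example():
    x = np.array([0.0, 1.0, 10.0])
    D = np.abs(x[:, None] - x[None, :])   # pairwise distance matrix
    inds = farthest_point_sampling(D, 2, random_init=False)
    assert set(inds.tolist()) == {0, 2}
    return inds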
def farthest_point_sampling_distmat(D, k, random_init=True):
"""
Samples points using farthest point sampling using a complete distance matrix
Parameters
-------------------------
D : (n,n) distance matrix between points
k : int - number of points to sample
random_init : Whether to sample the first point randomly or to
take the furthest away from all the other ones
Output
--------------------------
fps : (k,) array of indices of sampled points
"""
if random_init:
rng = np.random.default_rng()
inds = [rng.integers(D.shape[0])]
else:
inds = [np.argmax(D.sum(1))]
dists = D[inds[0]]
for _ in range(k-1):
newid = np.argmax(dists)
inds.append(newid)
dists = np.minimum(dists, D[newid])
return np.asarray(inds)
def farthest_point_sampling_call(d_func, k, n_points=None, verbose=False):
"""
Samples points using farthest point sampling, initialized randomly
Parameters
-------------------------
d_func : callable - for index i, d_func(i) is a (n_points,) array of geodesic distance to
other points
k : int - number of points to sample
n_points : Number of points. If not specified, checks d_func(0)
Output
--------------------------
fps : (k,) array of indices of sampled points
"""
rng = np.random.default_rng()
if n_points is None:
        n_points = d_func(0).shape[0]  # .shape alone is a tuple, which would break rng.integers below
else:
assert n_points > 0
inds = [rng.integers(n_points)]
dists = d_func(inds[0])
iterable = range(k-1) if not verbose else tqdm(range(k))
for i in iterable:
if i == k-1:
continue
newid = np.argmax(dists)
inds.append(newid)
dists = np.minimum(dists, d_func(newid))
return np.asarray(inds)
def _get_grad_dir(vertices, faces, normals, face_areas=None):
"""
Compute the gradient directions for each face using linear interpolationof the hat
basis on each face.
Parameters
--------------------------
vertices : (n,3) coordinates of vertices
faces : (m,3) indices of vertices for each face
normals : (m,3) normals coordinate for each face
face_areas : (m,) - Optional, array of per-face area, for faster computation
Output
--------------------------
grads : (3,m,3) array of per-face gradients.
"""
v1 = vertices[faces[:, 0]] # (m,3)
v2 = vertices[faces[:, 1]] # (m,3)
v3 = vertices[faces[:, 2]] # (m,3)
if face_areas is None:
face_areas = 0.5 * np.linalg.norm(np.cross(v2-v1, v3-v1), axis=1) # (m,)
grad1 = np.cross(normals, v3-v2)/(2*face_areas[:, None]) # (m,3)
grad2 = np.cross(normals, v1-v3)/(2*face_areas[:, None]) # (m,3)
grad3 = np.cross(normals, v2-v1)/(2*face_areas[:, None]) # (m,3)
    return np.asarray([grad1, grad2, grad3])
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Given multimodal queries, complete the outfit wiht bi-LSTM and VSE model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import os
import pickle as pkl
import tensorflow as tf
import numpy as np
import configuration
import polyvore_model_bi as polyvore_model
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "",
"Model checkpoint file or directory containing a "
"model checkpoint file.")
tf.flags.DEFINE_string("image_dir", "", "Directory containing images.")
tf.flags.DEFINE_string("feature_file", "", "File which contains the features.")
tf.flags.DEFINE_string("word_dict_file", "", "File containing word list.")
tf.flags.DEFINE_string("query_file", "",
"A json file containing the query to generate outfit.")
tf.flags.DEFINE_string("result_dir", "results",
"Directory to save the results.")
tf.flags.DEFINE_float("balance_factor", 2.0,
"Trade off between image and text input."
"Larger balance_factor encourages higher correlation with text query")
def norm_row(a):
"""L2 normalize each row of a given set."""
try:
return a / np.linalg.norm(a, axis=1)[:, np.newaxis]
except:
return a / np.linalg.norm(a)
def rnn_one_step(sess, input_feed, lstm_state, direction='f'):
"""Run one step of the RNN."""
if direction == 'f':
# Forward
[lstm_state, lstm_output] = sess.run(
fetches=['lstm/f_state:0', 'f_logits/f_logits/BiasAdd:0'],
feed_dict={'lstm/f_input_feed:0': input_feed,
'lstm/f_state_feed:0': lstm_state})
else:
# Backward
[lstm_state, lstm_output] = sess.run(
fetches=['lstm/b_state:0', 'b_logits/b_logits/BiasAdd:0'],
feed_dict={'lstm/b_input_feed:0': input_feed,
'lstm/b_state_feed:0': lstm_state})
return lstm_state, lstm_output
def run_forward_rnn(sess, test_idx, test_feat, num_lstm_units):
""" Run forward RNN given a query."""
res_set = []
lstm_state = np.zeros([1, 2 * num_lstm_units])
for test_id in test_idx:
input_feed = np.reshape(test_feat[test_id], [1, -1])
# Run first step with all zeros initial state.
[lstm_state, lstm_output] = rnn_one_step(
sess, input_feed, lstm_state, direction='f')
        # Generate at most 3 additional items in this direction (range(3) below).
for step in range(3):
curr_score = np.exp(np.dot(lstm_output, np.transpose(test_feat)))
curr_score /= np.sum(curr_score)
next_image = np.argsort(-curr_score)[0][0]
            # 0.001 is used as a probability threshold to stop the generation,
            # i.e. if the prob of end-of-set is larger than 0.001, then stop.
if next_image == test_feat.shape[0] - 1 or curr_score[0][-1] > 0.001:
# print('OVER')
break
else:
input_feed = np.reshape(test_feat[next_image], [1, -1])
[lstm_state, lstm_output] = rnn_one_step(
sess, input_feed, lstm_state, direction='f')
res_set.append(next_image)
return res_set
def run_backward_rnn(sess, test_idx, test_feat, num_lstm_units):
""" Run backward RNN given a query."""
res_set = []
lstm_state = np.zeros([1, 2 * num_lstm_units])
for test_id in reversed(test_idx):
input_feed = np.reshape(test_feat[test_id], [1, -1])
[lstm_state, lstm_output] = rnn_one_step(
sess, input_feed, lstm_state, direction='b')
for step in range(10):
curr_score = np.exp(np.dot(lstm_output, np.transpose(test_feat)))
curr_score /= np.sum(curr_score)
next_image = np.argsort(-curr_score)[0][0]
            # 0.001 is used as a probability threshold to stop the generation,
            # i.e. if the prob of end-of-set is larger than 0.001, then stop.
if next_image == test_feat.shape[0] - 1 or curr_score[0][-1] > 0.001:
# print('OVER')
break
else:
input_feed = np.reshape(test_feat[next_image], [1, -1])
[lstm_state, lstm_output] = rnn_one_step(
sess, input_feed, lstm_state, direction='b')
res_set.append(next_image)
return res_set
def run_fill_rnn(sess, start_id, end_id, num_blank, test_feat, num_lstm_units):
"""Fill in the blanks between start and end."""
if num_blank == 0:
return [start_id, end_id]
lstm_f_outputs = []
lstm_state = np.zeros([1, 2 * num_lstm_units])
input_feed = np.reshape(test_feat[start_id], [1, -1])
[lstm_state, lstm_output] = rnn_one_step(
sess, input_feed, lstm_state, direction='f')
f_outputs = []
for i in range(num_blank):
f_outputs.append(lstm_output[0])
curr_score = np.exp(np.dot(lstm_output, np.transpose(test_feat)))
        curr_score /= np.sum(curr_score)
import numpy as np
import time
import h5py
import matplotlib.pyplot as plt
import scipy
from sklearn.model_selection import train_test_split
np.random.seed(1)
def sigmoid(Z):
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def sigmoid_backward(dA, cache):
Z = cache
s = 1/(1+np.exp(-Z))
dA = np.squeeze(np.asarray(dA))
s = np.squeeze(np.asarray(s))
dZ = dA * s * (1-s)
if (Z.shape[0] == 1):
dZ = dZ.reshape((1, dZ.shape[0]))
assert (dZ.shape == Z.shape)
return dZ
def relu(Z):
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
Z = cache
dZ = np.array(dA, copy=True)
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def initialize_parameters_deep(layer_dims):
np.random.seed(5)
parameters = {}
L = len(layer_dims)
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
# print(parameters)
return parameters
def linear_forward(A, W, b):
Z = np.dot(W, A) + b
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
if activation == "sigmoid":
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
caches = []
A = X
L = len(parameters) // 2
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], "sigmoid")
caches.append(cache)
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def compute_cost(AL, Y):
m = Y.shape[1]
# Compute loss from aL and y.
cost = -np.sum(np.multiply(np.log(AL),Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m
cost = np.squeeze(cost)
assert(cost.shape == ())
return cost
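# --- Added minimal sketch (helper name is hypothetical): wire the pieces above
# together for a tiny 4-3-1 sigmoid network on a toy batch and evaluate the cost.
def _forward_cost_example():
    X = np.random.randn(4, 5)            # 4 features, 5 examples
    Y = np.array([[1, 0, 1, 0, 1]])      # binary labels, shape (1, 5)
    params = initialize_parameters_deep([4, 3, 1])
    AL, caches = L_model_forward(X, params)
    cost = compute_cost(AL, Y)
    assert AL.shape == (1, 5) and cost >= 0
    return cost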
def linear_backward(dZ, cache):
A_prev, W, b = cache
m = A_prev.shape[1]
dW = np.dot(dZ, A_prev.T) / m
db = np.sum(dZ, axis=1, keepdims=True) / m
dA_prev = np.dot(W.T, dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape)
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
from .Ball3D import Ball3D
import numpy as np
from numpy.matlib import repmat
from numpy import zeros, eye, ones, matrix
from numpy import cos, sin, arccos, sqrt, pi, arctan2
class HumanBall3D(Ball3D):
"""
This the passive human 3D ball model. We assume a ball is control by human.
The human has no response to the robot.
"""
def __init__(self, agent, dT, auto = True, init_state=[5,5,0,0,0,0]):
self.max_a = 4
Ball3D.__init__(self, agent, dT, auto, init_state)
self.RLS_cache['pred_delay'] = 3
self.RLS_cache['A'] = self.A()
self.RLS_cache['B'] = matrix(zeros((6, self.RLS_cache['pred_delay'] * 6 + 6)))
self.RLS_cache['F'] = matrix(zeros((20, 20)))
self.RLS_cache['alpha'] = 0.8
self.RLS_cache['lambd'] = 0.98
def rls_estimate_state(self, obstacle):
u_cl = np.vstack([obstacle.m_his[:,-1], obstacle.m_his[:,-2], obstacle.m_his[:,-3], self.goal])
phi = np.vstack([self.x_est, u_cl])
A = self.RLS_cache['A']
B = self.RLS_cache['B']
F = self.RLS_cache['F']
alpha = self.RLS_cache['alpha']
lambd = self.RLS_cache['lambd']
#\hat x(k|k-1)
x_pred = A * self.x_est + B * u_cl
#\hat x(k|k)
x_est = (1 - alpha) * x_pred + alpha * self.observe(self.x)
F = 1/lambd * ( F - (F * phi * phi.T * F) / (lambd + phi.T * F * phi))
        AB = np.hstack([A, B])
import cv2
import numpy as np
import dito.core
# often-used constants
sqrt_05 = np.sqrt(0.5)
def draw_circle(image, center, radius, color, thickness, line_type, start_angle=None, end_angle=None):
"""
TODO: fix round corners when using start_angle/end_angle and thickness != cv2.FILLED
"""
if (start_angle is None) and (end_angle is None):
cv2.circle(img=image, center=dito.core.tir(center), radius=radius, color=color, thickness=thickness, lineType=line_type)
else:
if start_angle is None:
start_angle = 0.0
if end_angle is None:
end_angle = 360.0
cv2.ellipse(img=image, center=dito.core.tir(center), axes=(radius, radius), angle=0.0, startAngle=start_angle, endAngle=end_angle, color=color, thickness=thickness, lineType=line_type)
def draw_ring(image, center, radius1, radius2, color, thickness, line_type, start_angle=None, end_angle=None):
if thickness == cv2.FILLED:
# draw circle outline with thickness equal to the radius difference
circle_radius = (radius1 + radius2) // 2
circle_thickness = abs(radius1 - radius2)
draw_circle(image=image, center=center, radius=circle_radius, color=color, thickness=circle_thickness, line_type=line_type, start_angle=start_angle, end_angle=end_angle)
else:
# draw two circles
draw_circle(image=image, center=center, radius=radius1, color=color, thickness=thickness, line_type=line_type, start_angle=start_angle, end_angle=end_angle)
draw_circle(image=image, center=center, radius=radius2, color=color, thickness=thickness, line_type=line_type, start_angle=start_angle, end_angle=end_angle)
def draw_polygon(image, points, color, thickness, line_type):
points_int = np.round(np.array(points)).astype(np.int32)
if thickness == cv2.FILLED:
cv2.fillPoly(img=image, pts=[points_int], color=color, lineType=line_type)
else:
cv2.polylines(img=image, pts=[points_int], isClosed=True, color=color, thickness=thickness, lineType=line_type)
def draw_regular_polygon(image, point_count, position, radius, color, thickness, line_type, angle_offset=0.0):
(x, y) = position
points = []
for angle in np.linspace(start=0.0, stop=2.0 * np.pi, num=point_count, endpoint=False):
points.append([
radius * np.cos(angle + angle_offset) + x,
radius * np.sin(angle + angle_offset) + y,
])
draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)
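# --- Added minimal sketch (helper name is hypothetical): draw a filled hexagon on a
# small blank canvas; this code path only needs cv2 and numpy.
def _polygon_example():
    canvas = np.zeros((100, 100, 3), dtype=np.uint8)
    draw_regular_polygon(canvas, point_count=6, position=(50, 50), radius=30,
                         color=(0, 255, 0), thickness=cv2.FILLED, line_type=cv2.LINE_AA)
    return canvas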
def draw_regular_star(image, point_count, position, radius_outer, radius_inner, color, thickness, line_type, angle_offset=0.0):
(x, y) = position
points = []
for (n_point, angle) in enumerate(np.linspace(start=0.0, stop=2.0 * np.pi, num=2 * point_count, endpoint=False)):
radius = radius_outer if (n_point % 2) == 0 else radius_inner
points.append([
radius * np.cos(angle + angle_offset) + x,
radius * np.sin(angle + angle_offset) + y,
])
draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)
def draw_regular_skeleton(image, point_count, position, radius, color, thickness, line_type, angle_offset=0.0):
thickness = 1 if thickness == cv2.FILLED else thickness
(x, y) = position
    for angle in np.linspace(start=0.0, stop=2.0 * np.pi, num=point_count, endpoint=False):
import numpy as np
import logging
import pathlib
import xml.etree.ElementTree as ET
import cv2
import os
class VOCCOCODataset:
def __init__(self, root, transform=None, target_transform=None, is_test=False, keep_difficult=False, label_file=None):
"""Dataset for VOC data.
Args:
root: the root of the VOC2007 or VOC2012 dataset, the directory contains the following sub-directories:
Annotations, ImageSets, JPEGImages, SegmentationClass, SegmentationObject.
"""
self.root = pathlib.Path(root)
self.transform = transform
self.target_transform = target_transform
self.is_test = is_test
if is_test:
image_sets_file = self.root / "ImageSets/Main/test.txt"
else:
image_sets_file = self.root / "ImageSets/Main/trainval.txt"
# if the labels file exists, read in the class names
label_file_name = self.root / "labels.txt"
if os.path.isfile(label_file_name):
class_string = ""
with open(label_file_name, 'r') as infile:
for line in infile:
class_string += line.rstrip()
# classes should be a comma separated list
classes = class_string.split(',')
# prepend BACKGROUND as first class
classes.insert(0, 'BACKGROUND')
classes = [ elem.replace(" ", "") for elem in classes]
self.class_names = tuple(classes)
logging.info("VOC Labels read from file: " + str(self.class_names))
else:
logging.info("No labels file, using default VOC classes.")
# self.class_names = ('BACKGROUND',
# 'aeroplane', 'bicycle', 'bird', 'boat',
# 'bottle', 'bus', 'car', 'cat', 'chair',
# 'cow', 'diningtable', 'dog', 'horse',
# 'motorbike', 'person', 'pottedplant',
# 'sheep', 'sofa', 'train', 'tvmonitor')
self.class_names = ('BACKGROUND',
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
self.ids = self._read_image_ids(image_sets_file)
self.keep_difficult = keep_difficult
self.class_dict = {class_name: i for i, class_name in enumerate(self.class_names)}
def __getitem__(self, index):
image_id = self.ids[index]
boxes, labels, is_difficult = self._get_annotation(image_id)
if not self.keep_difficult:
boxes = boxes[is_difficult == 0]
labels = labels[is_difficult == 0]
image = self._read_image(image_id)
if self.transform:
# print(image_id)
image, boxes, labels = self.transform(image, boxes, labels)
if self.target_transform:
boxes, labels = self.target_transform(boxes, labels)
return image, boxes, labels
def get_image(self, index):
image_id = self.ids[index]
image = self._read_image(image_id)
if self.transform:
image, _ = self.transform(image)
return image
def get_annotation(self, index):
image_id = self.ids[index]
return image_id, self._get_annotation(image_id)
def __len__(self):
return len(self.ids)
def _read_image_ids(self, image_sets_file):
valid_inds = []
with open(image_sets_file) as f:
for line in f:
image_id = line.rstrip()
if not self.is_test: # filter images without gt_bbox
annotation_file = self.root / f"Annotations/{image_id}.xml"
objects = ET.parse(annotation_file).findall("object")
for object in objects:
_class_name = object.find('name')
if _class_name is not None:
class_name = _class_name.text.lower().strip()
if class_name in self.class_names:
valid_inds.append(image_id)
break
else:
valid_inds.append(image_id)
return valid_inds
# def _filter_imgs(self):
# """Filter images without annotation."""
# valid_inds = []
# for i, img_info in enumerate(self.data_infos):
# img_id = img_info['id']
# xml_path = os.path.join(self.img_prefix, 'Annotations',
# f'{img_id}.xml')
# tree = ET.parse(xml_path)
# root = tree.getroot()
# for obj in root.findall('object'):
# name = obj.find('name').text
# if name in self.CLASSES:
# valid_inds.append(i)
# break
# return valid_inds
def _get_annotation(self, image_id):
annotation_file = self.root / f"Annotations/{image_id}.xml"
objects = ET.parse(annotation_file).findall("object")
boxes = []
labels = []
is_difficult = []
for object in objects:
_class_name = object.find('name')
if _class_name is not None:
class_name = _class_name.text.lower().strip()
else:
continue
# we're only concerned with clases in our list
if class_name in self.class_dict:
bbox = object.find('bndbox')
# VOC dataset format follows Matlab, in which indexes start from 0
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
boxes.append([x1, y1, x2, y2])
labels.append(self.class_dict[class_name])
is_difficult_str = object.find('difficult').text
is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)
        return (np.array(boxes, dtype=np.float32),
                np.array(labels, dtype=np.int64),
                np.array(is_difficult, dtype=np.uint8))
import arrayfire as af
from arrayfire.library import Dtype
import collections
import math
import numpy as np
from types import ModuleType
import typing as tp
from ._arith import \
exp, \
log, \
minimum, \
sqrt, \
minimum
from ._array import asscalar, ndarray
from ._conversion import \
convert_numpy_to_af_type, \
convert_af_to_numpy_type
from cocos.numerics.linalg import cholesky
from cocos.options import \
GPUOptions, \
RandomNumberGenerator
SIZE_TYPE = tp.Optional[tp.Union[int, tp.Sequence]]
def map_rng_to_random_engine(rng: RandomNumberGenerator):
if rng == RandomNumberGenerator.PHILOX_4X32_10:
return af.random.RANDOM_ENGINE.PHILOX_4X32_10
elif rng == RandomNumberGenerator.THREEFRY_2X32_16:
return af.random.RANDOM_ENGINE.THREEFRY_2X32_16
elif rng == RandomNumberGenerator.MERSENNE_GP11213:
return af.random.RANDOM_ENGINE.MERSENNE_GP11213
elif rng == RandomNumberGenerator.PHILOX:
return af.random.RANDOM_ENGINE.PHILOX
elif rng == RandomNumberGenerator.THREEFRY:
return af.random.RANDOM_ENGINE.THREEFRY
elif rng == RandomNumberGenerator.DEFAULT:
return af.random.RANDOM_ENGINE.DEFAULT
else:
raise ValueError("The requested random number generator "
"is not supported.")
# initialized default random number engine
random_engine \
= af.random.Random_Engine(
engine_type=map_rng_to_random_engine(GPUOptions.default_rng))
################################################################################
# functions to get and set the seed
################################################################################
def seed(seed: tp.Optional[int] = None):
"""
Seed the generator.
"""
if seed is None:
seed = 0
af.set_seed(seed)
def get_seed() -> int:
"""
Returns the current seed of the generator.
"""
return af.get_seed()
################################################################################
# supporting functions for antithetic random numbers
################################################################################
def get_antithetic_slices(shape: tp.Sequence[int],
antithetic_dimension: int) \
-> tp.Tuple[slice, ...]:
"""
This function generates a tuple of slices to index the original array of
    random numbers to take either half or one less than half of the original
random numbers along the antithetic dimension and all of the random numbers
along the other dimensions.
"""
slices = []
for axis, dimension in enumerate(shape):
if axis == antithetic_dimension:
s = slice(0, math.floor(dimension / 2), 1)
else:
s = slice(0, dimension, 1)
slices.append(s)
return tuple(slices)
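# --- Added minimal sketch (helper name is hypothetical): for a (6, 4) draw with
# antithetic dimension 0, the slices select the first three rows and every column;
# the callers below mirror that half and concatenate it back.
def _antithetic_slices_example():
    slices = get_antithetic_slices((6, 4), antithetic_dimension=0)
    assert slices == (slice(0, 3, 1), slice(0, 4, 1))
    return slices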
def verify_shape_and_antithetic_dimension(shape: tp.Sequence[int],
antithetic_dimension: tp.Optional[
int] = None):
"""
This function makes sure that the length shape argument is between 1 and 4
and checks that the antithetic dimension is one of the dimensions in the
shape argument.
"""
if len(shape) > 4:
raise ValueError('arrays with more than 4 axes are not supported')
if len(shape) < 1:
raise ValueError('array must have at least one axis')
if antithetic_dimension < 0 or antithetic_dimension > len(shape) - 1:
raise ValueError(
f'antithetic dimension must be None or between 0 and {len(shape)}')
################################################################################
# Basic continuous random number generators
################################################################################
def rand(d0: int,
d1: tp.Optional[int] = None,
d2: tp.Optional[int] = None,
d3: tp.Optional[int] = None,
dtype: np.generic = np.float32) -> ndarray:
"""
Random values in a given shape.
"""
af_type = convert_numpy_to_af_type(dtype)
af_array = af.data.randu(d0, d1, d2, d3, dtype=af_type)
return ndarray(af_array)
def randn(d0: int,
d1: tp.Optional[int] = None,
d2: tp.Optional[int] = None,
d3: tp.Optional[int] = None,
dtype: np.generic = np.float32):
"""
Return a sample (or samples) from the “standard normal” distribution.
"""
af_type = convert_numpy_to_af_type(dtype)
af_array = af.data.randn(d0, d1, d2, d3, dtype=af_type)
return ndarray(af_array)
def _random_with_dtype_internal(shape: tp.Sequence[int],
rng_function: tp.Callable,
dtype: np.generic = np.float32,
num_pack: ModuleType = np):
draw_shape = list(shape)
if num_pack == np:
x = rng_function(*draw_shape)
if x.dtype != dtype:
x = x.astype(dtype)
else:
x = rng_function(*draw_shape, dtype=dtype)
return x
def rand_with_dtype(shape: tp.Sequence[int],
dtype: np.generic = np.float32,
num_pack: ModuleType = np):
return _random_with_dtype_internal(shape=shape,
rng_function=num_pack.random.rand,
dtype=dtype,
num_pack=num_pack)
def randn_with_dtype(shape: tp.Sequence[int],
dtype: np.generic = np.float32,
num_pack: ModuleType = np):
return _random_with_dtype_internal(shape=shape,
rng_function=num_pack.random.randn,
dtype=dtype,
num_pack=num_pack)
def randn_antithetic(shape: tp.Sequence[int],
antithetic_dimension: tp.Optional[int] = None,
dtype: np.generic = np.float32,
num_pack: ModuleType = np):
verify_shape_and_antithetic_dimension(shape, antithetic_dimension)
draw_shape = list(shape)
if antithetic_dimension is not None:
# adjust dimension over which antithetic random numbers are to be drawn
draw_shape[antithetic_dimension] \
= math.ceil(shape[antithetic_dimension] / 2)
# draw original random numbers
if num_pack == np:
z = num_pack.random.randn(*draw_shape)
if z.dtype != dtype:
z = z.astype(dtype)
else:
z = num_pack.random.randn(*draw_shape, dtype=dtype)
if antithetic_dimension is not None:
# reflect random numbers at 0 and concatenate to original random numbers
slices = get_antithetic_slices(shape, antithetic_dimension)
z = num_pack.concatenate((z, -z[slices]),
axis=antithetic_dimension)
return z
def rand_antithetic(shape: tp.Sequence[int],
antithetic_dimension: tp.Optional[int] = None,
dtype: np.generic = np.float32,
num_pack: ModuleType = np):
verify_shape_and_antithetic_dimension(shape, antithetic_dimension)
draw_shape = list(shape)
if antithetic_dimension is not None:
# adjust dimension over which antithetic random numbers are to be drawn
draw_shape[antithetic_dimension] \
= math.ceil(shape[antithetic_dimension] / 2)
# draw original random numbers
if num_pack == np:
u = num_pack.random.rand(*draw_shape)
if u.dtype != dtype:
u = u.astype(dtype)
else:
u = num_pack.random.rand(*draw_shape, dtype=dtype)
if antithetic_dimension is not None:
# reflect random numbers at 0 and concatenate to original random numbers
slices = get_antithetic_slices(shape, antithetic_dimension)
u = num_pack.concatenate((u, 1.0 - u[slices]),
axis=antithetic_dimension)
return u
################################################################################
# Basic discrete random number generators
################################################################################
def randint(low: int,
high: tp.Optional[int] = None,
size: tp.Optional[tp.Union[tp.Tuple[int, ...], int]] = None,
dtype: np.generic = np.int32) \
-> ndarray:
"""
Draws an array of random integers ranging from low to high-1 of the
specified shape.
:param low: lowest number to draw
:param high: highest integer to draw (excluding)
:param size: shape of output array
:param dtype: data type of integer to be generated
:return: an ndarray of random integers
"""
if not high:
high = low
low = 0
if not size:
size = (1,)
elif isinstance(size, int):
size = (size,)
n = np.prod(size)
divisor = 1.0 / (high - low)
u = rand(n)
u = minimum(u, 1.0 - np.finfo(np.float32).eps)
if dtype != np.int32:
i = (u / divisor).astype(np.int64) + low
i = i.astype(dtype)
else:
i = (u / divisor).astype(np.int32) + low
return i.reshape(size)
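# --- Added illustrative sketch (helper name is hypothetical). randint() above is
# built on the ArrayFire-backed rand(), so actually running this requires a working
# ArrayFire backend; it fills a (2, 3) array with integers drawn from [0, 10).
def _randint_example():
    return randint(0, 10, size=(2, 3))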
def choice(a: ndarray,
size: tp.Optional[tp.Union[tp.Tuple[int, ...], int]] = None,
replace: bool = True,
p: tp.Optional[ndarray] = None) -> ndarray:
if p:
raise ValueError('p != None is not supported')
if not replace:
raise ValueError('replace=False is not supported')
i = randint(0, a.size, size=size)
if not isinstance(size, int):
return a[i].reshape(size)
else:
return a[i]
def _draw_and_reshape(size: SIZE_TYPE,
rng_func: tp.Callable[[int], ndarray]) \
-> ndarray:
if not size:
n = 1
elif isinstance(size, int):
n = size
elif isinstance(size, (list, tuple)):
n = np.prod(size)
else:
raise TypeError("size must be either of type int or tuple")
random_numbers = rng_func(n)
if size is None:
random_numbers = asscalar(random_numbers)
elif not isinstance(size, int):
random_numbers = random_numbers.reshape(size)
return random_numbers
def uniform(low: float = 0.0,
high: float = 1.0,
size: tp.Optional[SIZE_TYPE] = None):
"""
Draw samples from a uniform distribution.
"""
if high < low:
raise ValueError("high must not be less than low")
u = _draw_and_reshape(size, rand)
return u * (high - low) + low
def _exponential_internal(scale: float,
n: int,
antithetic: bool = False) -> ndarray:
u = rand(n)
u = minimum(u, 1.0 - np.finfo(np.float32).eps)
x: ndarray = log(1.0 - u) * (-scale)
return x
def exponential(scale: float=1.0,
size: tp.Optional[SIZE_TYPE] = None,
antithethic: bool = False) -> ndarray:
return _draw_and_reshape(size,
lambda n: _exponential_internal(
scale=scale,
n=n,
antithetic=antithethic))
def standard_exponential(size: tp.Optional[SIZE_TYPE] = None) -> ndarray:
return exponential(size=size)
################################################################################
# gamma random number generator by Marsaglia and Tsang
# using Cocos vectorization
################################################################################
def gamma_rand_marsaglia_and_tsang_arrayfire(alpha: float,
lambda_: float,
n: int) \
-> af.array:
random_numbers = af.constant(0, n, dtype=Dtype.f32)
# Gamma(alpha, lambda) generator using Marsaglia and Tsang method
# Algorithm 4.33
if alpha >= 1.0:
d = alpha - 1 / 3
c = 1.0 / np.sqrt(9.0 * d)
number_generated = 0
number_generated_total = 0
while number_generated < n:
number_left = n - number_generated
z = af.randn(number_left, dtype=Dtype.f32)
y = (1.0 + c * z)
v = y * y * y
accept_index_1 = ((z >= -1.0 / c) & (v > 0.0))
z_accept_1 = z[accept_index_1]
# del z
v_accept_1 = v[accept_index_1]
# del v
u_accept_1 = af.randu(v_accept_1.elements(), dtype=Dtype.f32)
# del U
accept_index_2 = \
u_accept_1 < af.exp((0.5 * z_accept_1 * z_accept_1 + d - d * v_accept_1 + d * af.log(v_accept_1)))
x_accept = d * v_accept_1[accept_index_2] / lambda_
number_accept = x_accept.elements()
            random_numbers[number_generated:np.minimum(n, number_generated + number_accept)] = \
                x_accept[:np.minimum(number_left, number_accept)]
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 5 17:32:01 2021
Basically we need to find the optimal parameters using the validation results,
and index the test results for the same parameters.
@author: oscar
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
import re
# To plot in a separate window in Spyder
# %matplotlib qt
# Specify root directory (where directories.txt file is located)
root_directory = r'D:\Dropbox (Imperial NGNI)\NGNI Share\Workspace\Oscar\Work\MUA compression\Upload code'
##########################################################################
# Read directories.txt file
with open(root_directory + '\\directories.txt') as f:
lines = f.readlines()
# Get path to BDP results
for path in lines:
if path.startswith('BDP_results'):
pattern = "'(.*?)'"
BDP_results_directory = re.search(pattern, path).group(1)
# Get SCLV directory
for path in lines:
if path.startswith('SCLV_path'):
pattern = "'(.*?)'"
SCLV_directory = re.search(pattern, path).group(1)
BP_vec = [1,5,10,20,50,100]
S_vector = np.arange(2,40)
time_steps_vec = [5, 10, 15]
lag_values_vec = [0, 5, 10]
window_len_vec = [0, 0.05, 0.1, 0.2]
alpha_vec = [0, 1e-4, 1e-2]
degree_vec = [2, 3, 4]
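# --- Added sketch in plain numpy (hypothetical helper, not used by the script): the
# selection rule applied below in miniature -- pick the hyper-parameter index with the
# best *validation* score, then report the *test* score stored at that same index.
def _select_by_validation(valid_scores, test_scores):
    best = np.unravel_index(np.nanargmax(valid_scores), valid_scores.shape)
    return best, test_scores[best]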
for Sabes_or_Flint in ['Flint']: #'Sabes',
for train_or_test in ['test']: #'train',
print(Sabes_or_Flint + ' - ' + train_or_test)
if Sabes_or_Flint == 'Sabes':
# Sabes, test
if train_or_test == 'test':
file_names = root_directory + '\\filenames_Sabes_test.txt'
result_folder = BDP_results_directory + '\\results_test_Sabes\\'
# Sabes, train
if train_or_test == 'train':
file_names = root_directory + '\\filenames_Sabes_train.txt'
result_folder = BDP_results_directory + '\\results_train_Sabes\\'
elif Sabes_or_Flint == 'Flint':
# Flint, test
if train_or_test == 'test':
file_names = root_directory + '\\filenames_Flint_test.txt'
result_folder = BDP_results_directory + '\\results_test_Flint\\'
# Flint, train
if train_or_test == 'train':
file_names = root_directory + '\\filenames_Flint_train.txt'
result_folder = BDP_results_directory + '\\results_train_Flint\\'
# Look at all stored file names, we have it this way so the indexing is
# consistent, always relative to the .txt file
with open(file_names) as f:
lines = f.readlines()
# Validation results
all_results = np.zeros((len(lines), len(BP_vec), len(time_steps_vec),
len(lag_values_vec), len(
window_len_vec), len(alpha_vec),
len(degree_vec), 60))
# Test results
all_test_results = np.zeros((len(lines), len(BP_vec), len(time_steps_vec),
len(lag_values_vec), len(
window_len_vec), len(alpha_vec),
len(degree_vec), 60))
for filename_index in np.arange(len(lines)):
file_name = lines[filename_index].replace('\n','')
for delta_count, delta_time in enumerate(BP_vec):
for time_step_counter, timesteps in enumerate(time_steps_vec):
for lag_c, lag_value in enumerate(lag_values_vec):
for window_c, wdw in enumerate(window_len_vec):
for a_count, alpha in enumerate(alpha_vec):
for degree_count, degree in enumerate(degree_vec):
for S in S_vector:
try: # If result exists for that param combo, some jobs in HPC failed
# Storing evaluation results into hdf5 file
result_filename = result_folder+file_name +\
'_delta_'+str(int(delta_time))+'ms_S_'+str(int(S)) +\
'_wdw_' + str(int(wdw*1000)) + '_lag_'+str(lag_value)\
+ '_timestep_'+str(timesteps) +\
'_alpha_' + str(alpha) + '_deg_' \
+ str(degree) + '.pkl'
with open(result_filename, 'rb') as file:
results = pickle.load(file)
result = np.mean(results['cc_valid'])
test_result = np.mean(results['cc_test'])
# Validation results
# NOTE: we assume no information is
# lost from increasing S, so BDP
# should never decrease from
# increasing S, so we take the max.
all_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, S] = np.max(np.hstack((result,all_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, :])))
# Test results
all_test_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, S] = np.max(np.hstack((test_result,all_test_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, :])))
except: # If the S/BP combo doesn't exist, replace with max value from a smaller S value for the same BP value
# Validation results
all_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, S] = np.max(all_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, :])
# Test results
all_test_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, S] = np.max(all_test_results[filename_index, delta_count,
time_step_counter, lag_c, window_c, a_count,
degree_count, :])
print('Formatting done \n')
# Finding the best parameters for each BP/S combo
best_res = np.zeros((len(BP_vec), np.max(S_vector)+1))
best_params = np.zeros((len(BP_vec), np.max(S_vector)+1, len(lines), 5))
for BP_count, delta_time in enumerate(BP_vec):
for S in S_vector:
all_temp = 0
all_temp_counter = 0
for rec in np.arange(len(lines)):
xx = all_results[rec, BP_count, :, :, :, :, :, S]
# Find index of maximum value from 2D numpy array
y = np.where(xx == np.nanmax(xx))
temp = np.copy(xx)
for yc, yy in enumerate(y): # iterating through axes
try:
if np.nanmax(xx) != 0: # If max is not 0, should be a unique solution
best_params[BP_count, S, rec, yc] = yy[0]
temp = temp[yy[0]] # reduce to smaller set
                        except:  # Occurs if yy is empty because there are only NaNs in the slice
best_params[BP_count, S, rec, yc] = float('NaN')
temp = 0
# If we have an actual BDP value
if temp != 0:
all_temp_counter += 1
all_temp += temp
if all_temp != 0: # keep from dividing 0 by 0
all_temp = all_temp / all_temp_counter
best_res[BP_count, S] += all_temp
else:
best_res[BP_count, S] += best_res[BP_count, S-1]
# Use validated params to get test results
best_val_val_params = np.zeros((len(BP_vec), np.max(S_vector)+1, len(lines)))
best_test_val_params = np.zeros((len(BP_vec), np.max(S_vector)+1, len(lines)))
for delta_count, delta_time in enumerate(BP_vec):
for S in S_vector:
xx = 0
xx_count = 0
xx_test = 0
xx_test_count = 0
for rec in np.arange(len(lines)):
params = best_params[delta_count, S, rec]
# Verify results match, make sure param indexing is correct
# Validation results
try:
temp_val = all_results[rec, delta_count, int(params[0]), int(params[1]),
int(params[2]), int(params[3]), int(params[4]), S]
except: # If params don't exist, if it's NaN (no file for that combo)
temp_val = float('NaN')
if not np.isnan(temp_val) and temp_val != 0:
xx_count += 1
xx += temp_val
# Test results, same params
try:
temp_test = all_test_results[rec, delta_count, int(params[0]), int(params[1]),
int(params[2]), int(params[3]), int(params[4]), S]
except:
temp_test = float('NaN')
# Get validation performance using optimal parameters for each rec
best_val_val_params[delta_count, S, rec] = temp_val
# Test results with the same
best_test_val_params[delta_count, S, rec] = temp_test
# Make sure each S is at least as good as all S before it
for BP_counter, BP in enumerate(BP_vec):
for rec in np.arange(len(lines)):
for S in S_vector:
best_val_val_params[BP_counter,S,rec] = np.max(best_val_val_params[BP_counter,1:S+1,rec] )
best_test_val_params[BP_counter,S,rec] = np.max(best_test_val_params[BP_counter,1:S+1,rec] )
        # Store (hyper-)parameter optimised results, so we can load them
# easily for later use
with open(BDP_results_directory+'\\S_vs_BDP_'+train_or_test+'_'+Sabes_or_Flint + '.pkl', 'wb') as file:
results = {'best_test_val_params':best_test_val_params,
'best_val_val_params': best_val_val_params}
# A new file will be created
pickle.dump(results, file)
        # Boxplot for each BP: show a boxplot for each S, labelled with the
        # number of successful recordings
S_vector_plot = np.arange(40)
for BP_counter, BP in enumerate(BP_vec):
fig = plt.figure()
            b = np.transpose(best_test_val_params[BP_counter,:,:])
from __future__ import division
import numpy as np
from chainercv.datasets import CamVidDataset
n_class = 11
dataset = CamVidDataset(split="train")
n_cls_pixels = np.zeros((n_class,))
n_img_pixels = np.zeros((n_class,))
for img, label in dataset:
for cls_i in np.unique(label):
if cls_i == -1:
continue
n_cls_pixels[cls_i] += np.sum(label == cls_i)
n_img_pixels[cls_i] += label.size
freq = n_cls_pixels / n_img_pixels
median_freq = np.median(freq)
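# Median-frequency balancing weights (e.g. for weighting a SegNet-style training
# loss) would typically follow from the statistics above; a sketch only, since
# the original snippet ends here:
#   class_weight = median_freq / freq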
# Particle Filter
'''
This function was originally written by <NAME>, an intern at the University of Leeds
It has been adapted to work with the BusSim models
A particle filter designed to work with agent base models.
model requires: agents2state(), state2agents()
model requests: boundaries, agents[:].active
step requires: measured_state
save requests: true_state
'''
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
class ParticleFilter:
'''
A particle filter to model the dynamics of the
state of the model as it develops in time.
Parameters:
    'model': The base model to track; it must provide agents2state() and
        state2agents() methods
    'number_of_particles': The number of particles used to simulate the model
    'arr_std': Standard deviation of the noise added to the arrival-related
        entries of the particle states at resampling
    'dep_std': Standard deviation of the noise added to the departure-related
        entries of the particle states at resampling
    'traffic_std': Standard deviation of the noise added to the traffic-speed
        entry of the particle states at resampling
    'resample_window': The number of iterations between resampling particles
    'do_copies': If False, re-initialise each particle model (only relevant
        when the model initialisation is random)
    'do_save': Boolean to determine if data should be saved and stats printed
'''
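    # Typical usage (a sketch; `model` is assumed to be a BusSim-style model
    # exposing agents2state()/state2agents(), and `measured_state` a noisy
    # observation of the true state at each time step):
    #   pf = ParticleFilter(model, number_of_particles=100, do_save=True)
    #   for measured_state, true_state in observations:
    #       pf.step(measured_state, true_state)
    #   pf.plot_save()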
def __init__(self, model, number_of_particles, arr_std=0,dep_std=0, traffic_std=0, resample_window=10, do_copies=True, do_save=False):
'''
Initialise Particle Filter
Firstly, set all attributes using filter parameters. Set time and
initialise base model using model parameters. Initialise particle
models using a deepcopy of base model. Determine particle filter
dimensions, set state of particles to the state of the base model,
and then initialise all remaining arrays.
'''
self.time = 0
# Dimensions
self.number_of_particles = number_of_particles
self.dimensions = len(model.agents2state().T)
# Models
self.models = list([deepcopy(model) for _ in range(self.number_of_particles)])
for unique_id in range(len(self.models)):
self.models[unique_id].unique_id = unique_id
if not do_copies:
            # Only relevant if there is randomness in the initialisation of the model.
for model in self.models:
model.__init__(*model.params)
# Filter
self.states = np.empty((self.number_of_particles, self.dimensions))
for particle in range(self.number_of_particles):
self.states[particle] = self.models[particle].agents2state()
self.weights = np.ones(self.number_of_particles)
# Params
self.arr_std = arr_std
self.dep_std = dep_std
self.traffic_std=traffic_std
self.resample_window = resample_window
# Save
self.do_save = do_save
if self.do_save:
self.active = []
self.means = []
self.mean_errors = []
self.variances = []
return
def step(self, measured_state, true_state=None):
'''
Step Particle Filter
Loop through process. Predict the base model and particles
forward. If the resample window has been reached,
reweight particles based on distance to base model and resample
particles choosing particles with higher weights. Then save
        and animate the data. When done, call plot_save() to plot summary figures.
'''
self.predict(measured_state)
self.reweight(measured_state)
self.resample(measured_state)
if self.do_save:
self.save(true_state)
return
def reweight(self, measured_state):
        '''
        Reweight
        Given the measured base model state, calculate
the distance between the particle states and the measured base model
state and then calculate the new particle weights as 1/distance.
Add a small term to avoid dividing by 0. Normalise the weights.
'''
states = self.states[:, :len(measured_state)] # For shorter measurements to state vectors
#print(states)
distance = np.linalg.norm(states - measured_state, axis=1) #Frobenius norm
#print(distance)
self.weights = 1 / np.fmax(distance, 1e-99) # to avoid fp_err
#self.weights = np.exp(-np.fmax(distance, 1e-99)) # to avoid fp_err
self.weights /= np.sum(self.weights)
#print(self.weights)
return
def resample(self,measured_state): # systematic sampling
'''
Resample
Calculate a random partition of (0,1) and then
take the cumulative sum of the particle weights.
Carry out a systematic resample of particles.
Set the new particle states and weights and then
update agent locations in particle models.
'''
if not self.time % self.resample_window:
offset = (np.arange(self.number_of_particles) + np.random.uniform()) / self.number_of_particles
cumsum = np.cumsum(self.weights)
i, j = 0, 0
indexes = np.zeros(self.number_of_particles, 'i')
while i < self.number_of_particles and j < self.number_of_particles:
                if offset[i] < cumsum[j]:  # select particle j for slot i
indexes[i] = j
i += 1
else:
j += 1
self.states = self.states[indexes]
#print(self.states)
self.weights = self.weights[indexes]
self.states[:,len(measured_state)+1:len(measured_state)+self.models[0].FleetSize-1] += np.random.normal(0, self.arr_std, self.states[:,len(measured_state)+1:len(measured_state)+self.models[0].FleetSize-1].shape)
self.states[:,len(measured_state)+self.models[0].FleetSize+2:-3] += np.random.normal(0, self.dep_std, self.states[:,len(measured_state)+self.models[0].FleetSize+2:-3].shape)
self.states[:,-1] += np.random.normal(0, self.traffic_std, self.states[:,-1].shape)
#apply the measured_state
#for s in range(len(self.states)):
# self.states[s,:len(measured_state)]=measured_state
return
def predict(self,measured_state):
'''
Predict
Increment time. Step the base model. For each particle,
step the particle model and then set the particle states
as the agent locations with some added noise. Reassign the
locations of the particle agents using the new particle
states.
This is the main interaction between the
model and the particle filter.
'''
for particle in range(self.number_of_particles):
self.models[particle].state2agents(self.states[particle])
self.models[particle].step()
self.states[particle] = self.models[particle].agents2state()
self.time += 1
return
def predict_one(self, particle): # not working
map(self.predict_one, np.arange(self.number_of_particles))
self.models[particle].state2agents(self.states[particle])
self.models[particle].step()
self.states[particle] = self.models[particle].agents2state()
return
def save(self, true_state):
'''
Save and Plot Save
Calculate number of active agents, mean, and variance
of particles and calculate mean error between the mean
        and the true base model state. Plot active agents, mean
error and mean variance.
'''
states = self.states[:, :len(true_state)] # For shorter measurements to state vectors
try:
activity = sum([agent.active for agent in self.model.agents])
print('act')
except AttributeError:
activity = None
self.active.append(activity)
mean = np.average(states, weights=self.weights, axis=0)
self.means.append(mean)
variance = np.average((states - mean)**2, weights=self.weights, axis=0)
self.variances.append(np.average(variance))
if true_state is None:
self.mean_errors.append(None)
else:
self.mean_errors.append(np.linalg.norm(mean - true_state, axis=0))
return
def plot_save(self):
plt.figure(1)
plt.plot(self.active)
plt.ylabel('Active agents')
plt.figure(2)
plt.plot(self.mean_errors, '-k')
plt.ylabel('Particle Mean Error')
ax2 = plt.twinx()
ax2.plot(self.variances)
ax2.set_ylabel('Particle Variance Error')
plt.show()
print('Max mean error = ',max(self.mean_errors))
print('Average mean error = ',np.average(self.mean_errors))
print('Max mean variance = ',max(self.variances))
print('Average mean variance = ',np.average(self.variances))
return
def ani(self, model, pf_agents=2):
'''
Animate
Plot the base model state and some of the
particles. Only do this if there is at least 1 active
agent in the base model. We adjust the markersizes of
each particle to represent the weight of that particle.
We then plot some of the agent locations in the particles
and draw lines between the particle agent location and
the agent location in the base model.
'''
plt.figure(1)
plt.clf()
# pf
if np.std(self.weights):
markersizes = self.weights
            markersizes *= 4 / np.std(markersizes)  # rescale so std = 4
            markersizes += 4 - np.mean(markersizes)  # re-centre so mean = 4
markersizes = np.clip(markersizes, 1, 8) # clip
else:
markersizes = 8*np.ones(self.weights.shape)
for unique_id in range(pf_agents):
for particle in range(self.number_of_particles):
loc0 = model.agents[unique_id].location
loc = self.models[particle].agents[unique_id].location
                locs = np.array([loc0, loc])
#!/usr/bin/env python3
import copy
import json
import os.path
import sys
from pprint import pprint
import numpy as np
import yaml
from tf_pwa.config_loader import ConfigLoader
from tf_pwa.data import data_index, data_mask, data_to_numpy
from tf_pwa.phasespace import PhaseSpaceGenerator
this_dir = os.path.dirname(__file__)
sys.path.insert(0, this_dir + "/..")
def gen_phasespace(top, finals, number):
a = PhaseSpaceGenerator(top, finals)
flat_mc_data = a.generate(number)
return flat_mc_data
def simple_select(phsp, amp):
weights = amp(phsp)
max_weights = np.max(weights) * 1.01
rnd = np.random.random(weights.shape)
select_index = weights / max_weights > rnd
select_data = data_mask(phsp, select_index)
return select_data
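# Note: simple_select above performs standard acceptance-rejection sampling:
# each phase-space event is kept with probability weight / (1.01 * max_weight),
# so the retained events follow the amplitude-weighted distribution.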
def save_dat(file_name, data, config):
idx = [("particle", i, "p") for i in config.get_dat_order(True)]
data = data_to_numpy(data)
data_p = np.array([data_index(data, i) for i in idx])
dat = np.transpose(data_p, (1, 0, 2)).reshape((-1, 4))
np.savetxt(file_name, dat)
    dat = np.loadtxt(file_name)
"""
Set of programs to read and interact with output from Bifrost
"""
import numpy as np
import os
from glob import glob
from . import cstagger
class BifrostData(object):
"""
Reads data from Bifrost simulations in native format.
"""
def __init__(self, file_root, snap=None, meshfile=None, fdir='.',
verbose=True, dtype='f4', big_endian=False,
ghost_analyse=False):
"""
Loads metadata and initialises variables.
Parameters
----------
file_root - string
Basename for all file names. Snapshot number will be added
afterwards, and directory will be added before.
snap - integer, optional
Snapshot number. If None, will read first snapshot in sequence.
meshfile - string, optional
File name (including full path) for file with mesh. If set
to None (default), a uniform mesh will be created.
fdir - string, optional
Directory where simulation files are. Must be a real path.
verbose - bool, optional
If True, will print out more diagnostic messages
dtype - string, optional
Data type for reading variables. Default is 32 bit float.
big_endian - string, optional
If True, will read variables in big endian. Default is False
(reading in little endian).
ghost_analyse - bool, optional
If True, will read data from ghost zones when this is saved
to files. Default is never to read ghost zones.
Examples
--------
This reads snapshot 383 from simulation "cb24bih", whose file
root is "cb24bih_", and is found at directory /data/cb24bih:
        >>> a = BifrostData("cb24bih_", snap=383, fdir="/data/cb24bih")
Scalar variables do not need de-staggering and are available as
memory map (only loaded to memory when needed), e.g.:
>>> a.r.shape
(504, 504, 496)
Composite variables need to be obtained by get_var():
>>> vx = a.get_var("ux")
"""
self.fdir = fdir
self.verbose = verbose
self.file_root = os.path.join(self.fdir, file_root)
self.meshfile = meshfile
self.ghost_analyse = ghost_analyse
# endianness and data type
if big_endian:
self.dtype = '>' + dtype
else:
self.dtype = '<' + dtype
self.set_snap(snap)
def _set_snapvars(self):
"""
        Sets list of available variables
"""
self.snapvars = ['r', 'px', 'py', 'pz', 'e']
self.auxvars = self.params['aux'].split()
if (self.do_mhd):
self.snapvars += ['bx', 'by', 'bz']
self.hionvars = []
if 'do_hion' in self.params:
if self.params['do_hion'] > 0:
self.hionvars = ['hionne', 'hiontg', 'n1',
'n2', 'n3', 'n4', 'n5', 'n6', 'fion', 'nh2']
self.compvars = ['ux', 'uy', 'uz', 's', 'ee']
self.simple_vars = self.snapvars + self.auxvars + self.hionvars
self.auxxyvars = []
# special case for the ixy1 variable, lives in a separate file
if 'ixy1' in self.auxvars:
self.auxvars.remove('ixy1')
self.auxxyvars.append('ixy1')
self.vars2d = []
# special case for 2D variables, stored in a separate file
        for var in list(self.auxvars):  # iterate over a copy since items are removed below
if any(i in var for i in ('xy', 'yz', 'xz')):
self.auxvars.remove(var)
self.vars2d.append(var)
def set_snap(self, snap):
"""
Reads metadata and sets variable memmap links for a given snapshot
number.
Parameters
----------
snap - integer
Number of simulation snapshot to load.
"""
if snap is None:
try:
tmp = sorted(glob("%s*idl" % self.file_root))[0]
snap = int(tmp.split(self.file_root + '_')[1].split(".idl")[0])
except IndexError:
raise ValueError(("(EEE) set_snap: snapshot not defined and no"
" .idl files found"))
self.snap = snap
self.snap_str = '_%03i' % snap
self._read_params()
# Read mesh for all snaps because meshfiles could differ
self.__read_mesh(self.meshfile)
# variables: lists and initialisation
self._set_snapvars()
self._init_vars()
def _read_params(self):
"""
Reads parameter file (.idl)
"""
if (self.snap < 0):
filename = self.file_root + '.idl.scr'
elif (self.snap == 0):
filename = self.file_root + '.idl'
else:
filename = self.file_root + self.snap_str + '.idl'
self.params = read_idl_ascii(filename)
# assign some parameters as attributes
for p in ['x', 'y', 'z', 'b']:
try:
setattr(self, 'n' + p, self.params['m' + p])
except KeyError:
raise KeyError(('read_params: could not find '
'm%s in idl file!' % p))
for p in ['dx', 'dy', 'dz', 'do_mhd']:
try:
setattr(self, p, self.params[p])
except KeyError:
raise KeyError(('read_params: could not find '
'%s in idl file!' % p))
try:
if self.params['boundarychk'] == 1:
self.nzb = self.nz + 2 * self.nb
else:
self.nzb = self.nz
except KeyError:
self.nzb = self.nz
# check if units are there, if not use defaults and print warning
unit_def = {'u_l': 1.e8, 'u_t': 1.e2, 'u_r': 1.e-7,
'u_b': 1.121e3, 'u_ee': 1.e12}
for unit in unit_def:
if unit not in self.params:
print(("(WWW) read_params:"" %s not found, using "
"default of %.3e" % (unit, unit_def[unit])))
self.params[unit] = unit_def[unit]
def __read_mesh(self, meshfile):
"""
Reads mesh file
"""
if meshfile is None:
meshfile = os.path.join(self.fdir, self.params['meshfile'].strip())
if os.path.isfile(meshfile):
f = open(meshfile, 'r')
for p in ['x', 'y', 'z']:
dim = int(f.readline().strip('\n').strip())
assert dim == getattr(self, 'n' + p)
# quantity
setattr(self, p, np.array(
[float(v) for v in f.readline().strip('\n').split()]))
# quantity "down"
setattr(self, p + 'dn', np.array(
[float(v) for v in f.readline().strip('\n').split()]))
# up derivative of quantity
setattr(self, 'd%sid%sup' % (p, p), np.array(
[float(v) for v in f.readline().strip('\n').split()]))
# down derivative of quantity
setattr(self, 'd%sid%sdn' % (p, p), np.array(
[float(v) for v in f.readline().strip('\n').split()]))
f.close()
if self.ghost_analyse:
# extend mesh to cover ghost zones
self.z = np.concatenate((
self.z[0] - np.linspace(self.dz*self.nb, self.dz, self.nb),
self.z,
self.z[-1] + np.linspace(self.dz, self.dz*self.nb, self.nb)))
self.zdn = np.concatenate((
self.zdn[0] - np.linspace(self.dz*self.nb, self.dz, self.nb),
self.zdn, (self.zdn[-1] +
np.linspace(self.dz, self.dz*self.nb, self.nb))))
self.dzidzup = np.concatenate((
np.repeat(self.dzidzup[0], self.nb),
self.dzidzup,
np.repeat(self.dzidzup[-1], self.nb)))
self.dzidzdn = np.concatenate((
np.repeat(self.dzidzdn[0], self.nb),
self.dzidzdn,
np.repeat(self.dzidzdn[-1], self.nb)))
self.nz = self.nzb
else: # no mesh file
print('(WWW) Mesh file %s does not exist.' % meshfile)
if self.dx == 0.0:
self.dx = 1.0
if self.dy == 0.0:
self.dy = 1.0
if self.dz == 0.0:
self.dz = 1.0
print(('(WWW) Creating uniform grid with [dx,dy,dz] = '
'[%f,%f,%f]') % (self.dx, self.dy, self.dz))
# x
self.x = np.arange(self.nx) * self.dx
self.xdn = self.x - 0.5 * self.dx
self.dxidxup = np.zeros(self.nx) + 1. / self.dx
self.dxidxdn = np.zeros(self.nx) + 1. / self.dx
# y
self.y = np.arange(self.ny) * self.dy
self.ydn = self.y - 0.5 * self.dy
self.dyidyup = np.zeros(self.ny) + 1. / self.dy
self.dyidydn = np.zeros(self.ny) + 1. / self.dy
# z
if self.ghost_analyse:
self.nz = self.nzb
self.z = np.arange(self.nz) * self.dz
self.zdn = self.z - 0.5 * self.dz
self.dzidzup = np.zeros(self.nz) + 1. / self.dz
self.dzidzdn = np.zeros(self.nz) + 1. / self.dz
def _init_vars(self, *args, **kwargs):
"""
Memmaps "simple" variables, and maps them to methods.
Also, sets file name[s] from which to read a data
"""
self.variables = {}
for var in self.simple_vars:
try:
self.variables[var] = self._get_simple_var(
var, *args, **kwargs)
setattr(self, var, self.variables[var])
except Exception:
if self.verbose:
print(('(WWW) init_vars: could not read '
'variable %s' % var))
for var in self.auxxyvars:
try:
self.variables[var] = self._get_simple_var_xy(var, *args,
**kwargs)
setattr(self, var, self.variables[var])
except Exception:
if self.verbose:
print(('(WWW) init_vars: could not read '
'variable %s' % var))
rdt = self.r.dtype
cstagger.init_stagger(self.nz, self.dx, self.dy, self.z.astype(rdt),
self.zdn.astype(rdt), self.dzidzup.astype(rdt),
self.dzidzdn.astype(rdt))
def get_var(self, var, snap=None, *args, **kwargs):
"""
Reads a given variable from the relevant files.
Parameters
----------
var - string
Name of the variable to read. Must be Bifrost internal names.
snap - integer, optional
Snapshot number to read. By default reads the loaded snapshot;
if a different number is requested, will load that snapshot
by running self.set_snap(snap).
"""
if (snap is not None) and (snap != self.snap):
self.set_snap(snap)
if var in self.simple_vars: # is variable already loaded?
return self._get_simple_var(var, *args, **kwargs)
elif var in self.auxxyvars:
return self._get_simple_var_xy(var, *args, **kwargs)
elif var in self.compvars: # add to variable list
self.variables[var] = self._get_composite_var(var, *args, **kwargs)
setattr(self, var, self.variables[var])
return self.variables[var]
else:
raise ValueError(
("get_var: could not read variable %s. Must be "
"one of %s" %
(var, (self.simple_vars + self.compvars + self.auxxyvars))))
def _get_simple_var(self, var, order='F', mode='r', *args, **kwargs):
"""
Gets "simple" variable (ie, only memmap, not load into memory).
Parameters
----------
var - string
Name of the variable to read. Must be Bifrost internal names.
order - string, optional
Must be either 'C' (C order) or 'F' (Fortran order, default).
mode - string, optional
numpy.memmap read mode. By default is read only ('r'), but
you can use 'r+' to read and write. DO NOT USE 'w+'.
Returns
-------
result - numpy.memmap array
Requested variable.
"""
if self.snap < 0:
filename = self.file_root
fsuffix_b = '.scr'
elif self.snap == 0:
filename = self.file_root
fsuffix_b = ''
else:
filename = self.file_root + self.snap_str
fsuffix_b = ''
if var in (self.snapvars):
fsuffix_a = '.snap'
idx = (self.snapvars).index(var)
filename += fsuffix_a + fsuffix_b
elif var in self.auxvars:
fsuffix_a = '.aux'
idx = self.auxvars.index(var)
filename += fsuffix_a + fsuffix_b
elif var in self.hionvars:
idx = self.hionvars.index(var)
isnap = self.params['isnap']
if isnap <= -1:
filename = filename + '.hion.snap.scr'
elif isnap == 0:
filename = filename + '.hion.snap'
elif isnap > 0:
filename = '%s.hion_%s.snap' % (self.file_root, isnap)
else:
raise ValueError(('_get_simple_var: could not find variable '
'%s. Available variables:' % (var) +
'\n' + repr(self.simple_vars)))
dsize = np.dtype(self.dtype).itemsize
if self.ghost_analyse:
offset = self.nx * self.ny * self.nzb * idx * dsize
ss = (self.nx, self.ny, self.nzb)
else:
offset = (self.nx * self.ny *
(self.nzb + (self.nzb - self.nz) // 2) * idx * dsize)
ss = (self.nx, self.ny, self.nz)
return np.memmap(filename, dtype=self.dtype, order=order, mode=mode,
offset=offset, shape=ss)
def _get_simple_var_xy(self, var, order='F', mode='r'):
"""
Reads a given 2D variable from the _XY.aux file
"""
if var in self.auxxyvars:
fsuffix = '_XY.aux'
idx = self.auxxyvars.index(var)
filename = self.file_root + fsuffix
else:
raise ValueError(('_get_simple_var_xy: variable'
' %s not available. Available vars:'
% (var) + '\n' + repr(self.auxxyvars)))
# Now memmap the variable
if not os.path.isfile(filename):
raise IOError(('_get_simple_var_xy: variable'
' %s should be in %s file, not found!' %
(var, filename)))
# size of the data type
dsize = np.dtype(self.dtype).itemsize
offset = self.nx * self.ny * idx * dsize
return np.memmap(filename, dtype=self.dtype, order=order, mode=mode,
offset=offset, shape=(self.nx, self.ny))
def _get_composite_var(self, var, *args, **kwargs):
"""
Gets composite variables (will load into memory).
"""
if var in ['ux', 'uy', 'uz']: # velocities
p = self._get_simple_var('p' + var[1], order='F')
if getattr(self, 'n' + var[1]) < 5:
return p / self.r # do not recentre for 2D cases (or close)
else: # will call xdn, ydn, or zdn to get r at cell faces
return p / cstagger.do(self.r, var[1] + 'dn')
elif var == 'ee': # internal energy
return self.e / self.r
elif var == 's': # entropy?
return np.log(self.p) - self.params['gamma'] * np.log(self.r)
else:
raise ValueError(('_get_composite_var: do not know (yet) how to'
'get composite variable %s.' % var))
def get_quantity(self, quant, *args, **kwargs):
"""
Calculates a quantity from the simulation quantiables.
Parameters
----------
quant - string
Name of the quantity to calculate (see below for some categories).
Returns
-------
array - ndarray
Array with the dimensions of the simulation.
Notes
-----
Not all possibilities for quantities are shown here. But there are
a few main categories:
- DERIV_QUANT: allows to calculate derivatives of any variable.
It must start with d followed with the varname and
ending with dxdn etc, e.g., 'dbxdxdn'
- CENTRE_QUANT: allows to center any vector. It must end with xc
etc, e.g., 'ixc',
- MODULE_QUANT: allows to calculate the module of any vector.
It must start with 'mod' followed with the root
letter of varname, e.g., 'modb'
- DIV_QUANT: allows to calculate the divergence of any vector.
It must start with div followed with the root letter
of the varname, e.g., 'divb'
- SQUARE_QUANT: allows to calculate the squared modules for any
                        vector. It must end with 2 after the root letter
of the varname, e.g. 'u2'.
"""
quant = quant.lower()
DERIV_QUANT = ['dxup', 'dyup', 'dzup', 'dxdn', 'dydn', 'dzdn']
CENTRE_QUANT = ['xc', 'yc', 'zc']
MODULE_QUANT = ['mod']
DIV_QUANT = ['div']
SQUARE_QUANT = ['2']
if (quant[:3] in MODULE_QUANT) or (quant[-1] in SQUARE_QUANT):
# Calculate module of vector quantity
            if quant[:3] in MODULE_QUANT:
                q = quant[3:]
            else:  # ends in '2' (squared module), e.g. 'u2'
                q = quant[:-1]
if q == 'b':
if not self.do_mhd:
raise ValueError("No magnetic field available.")
if getattr(self, 'nx') < 5: # 2D or close
result = getattr(self, q + 'x') ** 2
else:
result = self.get_quantity(q + 'xc') ** 2
if getattr(self, 'ny') < 5: # 2D or close
result += getattr(self, q + 'y') ** 2
else:
result += self.get_quantity(q + 'yc') ** 2
if getattr(self, 'nz') < 5: # 2D or close
result += getattr(self, q + 'z') ** 2
else:
result += self.get_quantity(q + 'zc') ** 2
if quant[:3] in MODULE_QUANT:
return np.sqrt(result)
elif quant[-1] in SQUARE_QUANT:
return result
elif quant[0] == 'd' and quant[-4:] in DERIV_QUANT:
# Calculate derivative of quantity
axis = quant[-3]
q = quant[1:-4] # base variable
try:
var = getattr(self, q)
except AttributeError:
var = self.get_var(q)
if getattr(self, 'n' + axis) < 5: # 2D or close
return np.zeros_like(var)
else:
return cstagger.do(var, 'd' + quant[-4:])
elif quant[-2:] in CENTRE_QUANT:
# This brings a given vector quantity to cell centres
axis = quant[-2]
q = quant[:-1] # base variable
if q[:-1] == 'i' or q == 'e':
AXIS_TRANSFORM = {'x': ['yup', 'zup'],
'y': ['xup', 'zup'],
'z': ['xup', 'yup']}
else:
AXIS_TRANSFORM = {'x': ['xup'],
'y': ['yup'],
'z': ['zup']}
transf = AXIS_TRANSFORM[axis]
try:
var = getattr(self, q)
except AttributeError:
var = self.get_var(q)
if getattr(self, 'n' + axis) < 5: # 2D or close
return var
else:
if len(transf) == 2:
tmp = cstagger.do(var, transf[0])
return cstagger.do(tmp, transf[1])
else:
return cstagger.do(var, transf[0])
elif quant[:3] in DIV_QUANT:
# Calculates divergence of vector quantity
q = quant[3:] # base variable
try:
varx = getattr(self, q + 'x')
vary = getattr(self, q + 'y')
varz = getattr(self, q + 'z')
except AttributeError:
varx = self.get_var(q + 'x')
vary = self.get_var(q + 'y')
varz = self.get_var(q + 'z')
if getattr(self, 'nx') < 5: # 2D or close
result = np.zeros_like(varx)
else:
result = cstagger.ddxup(varx)
if getattr(self, 'ny') > 5:
result += cstagger.ddyup(vary)
if getattr(self, 'nz') > 5:
result += cstagger.ddzup(varz)
return result
else:
raise ValueError(('get_quantity: do not know (yet) how to '
'calculate quantity %s. Note that simple_var '
'available variables are: %s.\nIn addition, '
'get_quantity can read others computed variables'
' see e.g. help(self.get_quantity) for guidance'
'.' % (quant, repr(self.simple_vars))))
def write_rh15d(self, outfile, desc=None, append=True,
sx=slice(None), sy=slice(None), sz=slice(None)):
"""
Writes snapshot in RH 1.5D format.
Parameters
----------
outfile - string
File name to write
append - bool, optional
If True (default) will append output as a new snapshot in file.
Otherwise, creates new file (will fail if file exists).
desc - string, optional
Description string
sx, sy, sz - slice object
Slice objects for x, y, and z dimensions, when not all points
are needed. E.g. use slice(None) for all points, slice(0, 100, 2)
for every second point up to 100.
Returns
-------
None.
"""
from . import rh15d
# unit conversion to SI
ul = self.params['u_l'] / 1.e2 # to metres
ur = self.params['u_r'] # to g/cm^3 (for ne_rt_table)
ut = self.params['u_t'] # to seconds
uv = ul / ut
ub = self.params['u_b'] * 1e-4 # to Tesla
ue = self.params['u_ee'] # to erg/g
hion = False
if 'do_hion' in self.params:
if self.params['do_hion'] > 0:
hion = True
if self.verbose:
print('Slicing and unit conversion...')
temp = self.tg[sx, sy, sz]
rho = self.r[sx, sy, sz]
rho = rho * ur
# TIAGO: must get this at cell centres!
if self.do_mhd:
Bx = self.bx[sx, sy, sz]
By = self.by[sx, sy, sz]
Bz = self.bz[sx, sy, sz]
# Change sign of Bz (because of height scale) and By
# (to make right-handed system)
Bx = Bx * ub
By = -By * ub
Bz = -Bz * ub
else:
Bx = By = Bz = None
# TIAGO: must get this at cell centres!
vz = self.get_var('uz')[sx, sy, sz]
vz *= -uv
x = self.x[sx] * ul
y = self.y[sy] * (-ul)
z = self.z[sz] * (-ul)
# convert from rho to H atoms, ideally from subs.dat. Otherwise
# default.
if hion:
print('Getting hion data...')
ne = self.get_var('hionne')
# slice and convert from cm^-3 to m^-3
ne = ne[sx, sy, sz]
ne = ne * 1.e6
# read hydrogen populations (they are saved in cm^-3)
nh = np.empty((6,) + temp.shape, dtype='Float32')
for k in range(6):
nv = self.get_var('n%i' % (k + 1))
nh[k] = nv[sx, sy, sz]
nh = nh * 1.e6
else:
ee = self.get_var('ee')[sx, sy, sz]
ee = ee * ue
if os.access('%s/subs.dat' % self.fdir, os.R_OK):
grph = subs2grph('%s/subs.dat' % self.fdir)
else:
grph = 2.380491e-24
nh = rho / grph * 1.e6 # from rho to nH in m^-3
# interpolate ne from the EOS table
if self.verbose:
print('ne interpolation...')
eostab = Rhoeetab(fdir=self.fdir)
ne = eostab.tab_interp(rho, ee, order=1) * 1.e6 # cm^-3 to m^-3
if desc is None:
desc = 'BIFROST snapshot from sequence %s, sx=%s sy=%s sz=%s.' % \
(self.file_root, repr(sx), repr(sy), repr(sz))
if hion:
desc = 'hion ' + desc
# write to file
if self.verbose:
print('Write to file...')
rh15d.make_xarray_atmos(outfile, temp, vz, nh, z, ne=ne, x=x, y=y,
append=append, Bx=Bx, By=By, Bz=Bz, desc=desc,
snap=self.snap)
def write_multi3d(self, outfile, mesh='mesh.dat', desc=None,
sx=slice(None), sy=slice(None), sz=slice(None)):
"""
Writes snapshot in Multi3D format.
Parameters
----------
outfile - string
File name to write
mesh - string, optional
File name of the mesh file to write.
desc - string, optional
Description string
sx, sy, sz - slice object
Slice objects for x, y, and z dimensions, when not all points
are needed. E.g. use slice(None) for all points, slice(0, 100, 2)
for every second point up to 100.
Returns
-------
None.
"""
from .multi3dn import Multi3dAtmos
# unit conversion to cgs and km/s
ul = self.params['u_l'] # to cm
ur = self.params['u_r'] # to g/cm^3 (for ne_rt_table)
ut = self.params['u_t'] # to seconds
uv = ul / ut / 1e5 # to km/s
ue = self.params['u_ee'] # to erg/g
nh = None
hion = False
if 'do_hion' in self.params:
if self.params['do_hion'] > 0:
hion = True
if self.verbose:
print('Slicing and unit conversion...')
temp = self.tg[sx, sy, sz]
rho = self.r[sx, sy, sz]
rho = rho * ur
# Change sign of vz (because of height scale) and vy (to make
# right-handed system)
vx = self.get_var('ux')[sx, sy, sz]
vx *= uv
vy = self.get_var('uy')[sx, sy, sz]
vy *= -uv
vz = self.get_var('uz')[sx, sy, sz]
vz *= -uv
x = self.x[sx] * ul
y = self.y[sy] * ul
z = self.z[sz] * (-ul)
# if Hion, get nH and ne directly
if hion:
print('Getting hion data...')
ne = self.get_var('hionne')
# slice and convert from cm^-3 to m^-3
ne = ne[sx, sy, sz]
ne = ne * 1.e6
# read hydrogen populations (they are saved in cm^-3)
nh = np.empty((6,) + temp.shape, dtype='Float32')
for k in range(6):
nv = self.get_var('n%i' % (k + 1))
nh[k] = nv[sx, sy, sz]
nh = nh * 1.e6
else:
ee = self.get_var('ee')[sx, sy, sz]
ee = ee * ue
# interpolate ne from the EOS table
print('ne interpolation...')
eostab = Rhoeetab(fdir=self.fdir)
ne = eostab.tab_interp(rho, ee, order=1)
# write to file
print('Write to file...')
nx, ny, nz = temp.shape
fout = Multi3dAtmos(outfile, nx, ny, nz, mode="w+")
fout.ne[:] = ne
fout.temp[:] = temp
fout.vx[:] = vx
fout.vy[:] = vy
fout.vz[:] = vz
fout.rho[:] = rho
# write mesh?
if mesh is not None:
fout2 = open(mesh, "w")
fout2.write("%i\n" % nx)
x.tofile(fout2, sep=" ", format="%11.5e")
fout2.write("\n%i\n" % ny)
y.tofile(fout2, sep=" ", format="%11.5e")
fout2.write("\n%i\n" % nz)
z.tofile(fout2, sep=" ", format="%11.5e")
fout2.close()
def write_mesh(self, x=None, y=None, z=None, nx=None, ny=None, nz=None,
dx=None, dy=None, dz=None, meshfile="newmesh.mesh"):
"""
Writes mesh to ascii file.
"""
def __xxdn(f):
'''
f is centered on (i-.5,j,k)
'''
nx = len(f)
d = -5. / 2048
c = 49. / 2048
b = -245. / 2048
a = .5 - b - c - d
x = (a * (f + np.roll(f, 1)) +
                 b * (np.roll(f, -1)
import numpy as np
import json
def smooth_raman_json(
min_freq: float,
max_freq: float,
points: int,
width: float,
num_acoustic: int,
filename: str,
) -> np.ndarray:
frequency = []
intensity = []
file = json.loads(open(filename).read())
# Load the json file data into lists
for i in range(len(file['frequency'])):
# Check for the max and min frequency
if max_freq >= file['frequency'][i] >= min_freq:
frequency.append(file['frequency'][i])
intensity.append(file['average-3d'][i])
return smooth_raman(
min_freq,
max_freq,
points,
width,
num_acoustic,
frequency=np.array(frequency),
intensity=np.array(intensity),
)
def smooth_raman(
min_freq: float,
max_freq: float,
points: int,
width: float,
num_acoustic: int,
frequency: np.ndarray,
intensity: np.ndarray,
) -> np.ndarray:
assert 0 <= num_acoustic <= 6
assert points > 0
# Skip the acoustic modes, which are not Raman active (but can have
# non-zero Raman intensity due to numerical issues as their frequencies
# are close to zero)
intensity = intensity[num_acoustic:]
frequency = frequency[num_acoustic:]
# Only select peaks within the given frequency range
peaks_in_range = (min_freq <= frequency) & (frequency <= max_freq)
intensity = intensity[peaks_in_range]
frequency = frequency[peaks_in_range]
# Lorentzian distribution smoothing for the data set
    frequency_smooth = np.linspace(min_freq, max_freq, points)
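    # The original snippet ends here; below is a minimal, assumed completion that
    # broadens each peak with a Lorentzian profile (treating `width` as the full
    # width at half maximum) and returns the smoothed intensity array.
    half_width = 0.5 * width
    intensity_smooth = np.zeros_like(frequency_smooth)
    for peak_freq, peak_int in zip(frequency, intensity):
        intensity_smooth += peak_int * half_width**2 / (
            (frequency_smooth - peak_freq) ** 2 + half_width**2
        )
    return intensity_smooth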
import argparse
import os
import random
import time
from collections import deque
import numpy as np
import torch
from gym_mdptetris.envs import board, piece, tetris
from mdptetris_experiments.agents.FFNN import (NN1D, NNHeuristic,
NNHeuristicSimple)
from mdptetris_experiments.agents.linear_agent import (LinearGame,
LinearGameStandard)
from torch import nn
from torch.utils.tensorboard import SummaryWriter
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, default='0',
help="Select the GPU to run the model on.")
parser.add_argument("--test", action='store_true',
help="Test a trained model")
parser.add_argument("--render", action='store_true',
help="Render the environment")
parser.add_argument("--one_piece", action='store_true',
help="Only train or test model on one piece per episode.")
parser.add_argument("--board_height", type=int,
default=20, help="Height for the Tetris board")
parser.add_argument("--board_width", type=int, default=10,
help="Width for the Tetris board")
parser.add_argument("--replay_buffer_length", type=int, default=20000,
help="Number of timesteps to store in the replay memory buffer")
parser.add_argument("--training_start", type=int, default=2000,
help="Minimum timesteps for training to start.")
parser.add_argument("--batch_size", type=int, default=512,
help="Timestep batch size for training the model.")
parser.add_argument("--alpha", type=float, default=1e-3,
help="Adam optimiser learning rate.")
parser.add_argument("--gamma", type=float, default=0.99,
help="Future reward discount rate")
parser.add_argument("--init_epsilon", type=float, default=1,
help="Initial epsilon value for random action selection.")
parser.add_argument("--final_epsilon", type=float, default=1e-3,
help="Minimum epsilon value for exploration.")
parser.add_argument("--epochs", type=int, default=3000,
help="Number of epochs to train the agent.")
parser.add_argument("--target_network_update", type=int, default=5,
help="Epoch interval to update the target network.")
parser.add_argument("--saving_interval", type=int, default=500,
help="Epoch interval between model checkpoints.")
parser.add_argument("--epsilon_decay_period", type=int, default=2000,
help="Number of epochs to linearly decay the epsilon over.")
parser.add_argument("--state_rep", type=str, default="heuristic",
help="State representation for the Tetris game. Heuristic or 1D.")
parser.add_argument("--log_dir", type=str, default="runs",
help="Directory to save TensorBoard data to.")
parser.add_argument("--load_file", type=str, default=None,
help="Path to partially trained model")
parser.add_argument("--save_dir", type=str, default=f"runs/run-info",
help="Directory to save model and run info to")
parser.add_argument("--seed", type=int, default=None,
help="Seed value for environment.")
parser.add_argument("--comment", type=str, default=None,
help="Run comment for TensorBoard writer.")
args = parser.parse_args()
return args
# Define state representations and respective NN architectures
state_rep = {
"heuristic": [NNHeuristic, LinearGame],
"heuristic-simplenet": [NNHeuristicSimple, LinearGame],
"1D": [NN1D, LinearGameStandard]
}
class MBDQN:
def __init__(self, args: argparse.Namespace):
"""
Class that implements a model-based DQN agent (MBDQN) to learn a game of
Tetris. The model for the environment is provided in the linear_agent
file, which allows generation of subsequent states, and retrieval of
their representation as either the full board, or as a set of features.
Attribution: Approach inspired by <NAME>'s: https://github.com/uvipen/Tetris-deep-Q-learning-pytorch
:param args: A Namespace object containing experiment hyperparameters
"""
self.env = state_rep[args.state_rep][1](board_height=args.board_height,
board_width=args.board_width)
self._init_hyperparams(args)
# Initialise models
input_dims = args.board_height * args.board_width if args.state_rep == "1D" else 6
self.model = state_rep[args.state_rep][0](input_dims).to(self.device)
self.target = state_rep[args.state_rep][0](input_dims).to(self.device)
self.target.load_state_dict(self.model.state_dict())
self.target.eval()
self.optimizer = torch.optim.Adam(
self.model.parameters(), lr=args.alpha)
self.replay_buffer = deque(maxlen=args.replay_buffer_length)
self.loss_criterion = nn.MSELoss()
def train(self):
"""
Method to train the agent. Iterates through timesteps to gather training
data, which is then stored in the buffer. After an episode concludes, makes
a training step. Outputs information on the current training status
of the agent while training, and saves the trained model at intervals.
"""
self.epochs = []
self.timesteps = []
state = self.env.reset().to(self.device)
self.epoch = 0
self.timestep = 0
ep_score = 0
while self.epoch < self.total_epochs:
action, new_state = self.get_action_and_new_state()
if self.render:
self.env.render()
reward, done = self.env.step(action, self.one_piece)
ep_score += reward
self.timestep += 1
self.replay_buffer.append([state, reward, new_state, done])
self.timesteps.append(reward)
# Train the model if the episode has concluded and update log
if done:
self.update_model()
if self.epoch > 0:
self._log(ep_score)
ep_score = 0
state = self.env.reset().to(self.device)
else:
state = new_state.to(self.device)
def test(self, nb_episodes: int = 1000):
"""
Method to test the performance of a trained agent for specified
number of episodes. Outputs performance during testing and saves
results to csv files. The agent is loaded from the pre-specified
load file passed when the agent is instantiated.
:param nb_episodes: Number of episodes to test the trained agent for.
"""
self.load()
episode_rewards = []
episode_durations = []
self.epsilon = 0
print("Testing start:")
for i in range(nb_episodes):
done = False
state = self.env.reset()
ep_score = 0
timesteps = 0
while not done:
if self.render:
self.env.render()
action, _ = self.get_action_and_new_state()
reward, done = self.env.step(action, self.one_piece)
ep_score += reward
timesteps += 1
if not self.one_piece and ep_score > 100:
break
episode_rewards.append(ep_score)
episode_durations.append(timesteps)
print(
f"Episode: {i}, Episode reward: {ep_score}, Episode duration: {timesteps}")
self.writer.add_scalar(
f"DQN-{self.runid}/Episode reward", ep_score, i)
self.writer.add_scalar(
f"DQN-{self.runid}/Episode duration", timesteps, i)
np.array(episode_rewards).tofile(
f"{self.save_dir}/DQN-test-rewards-{self.runid}.csv", sep=',')
        np.array(episode_durations)
#!/usr/bin/env python3
# Standard library
import datetime as dt
import functools
import json
import logging as log
import os
import re
import warnings
from multiprocessing import Pool
# Third-party
import matplotlib as mpl
import matplotlib.pyplot as plt
import netCDF4 as nc4
import numpy as np
import scipy as sp
import shapely.geometry as geo
from descartes.patch import PolygonPatch
from matplotlib import cm
from mpl_toolkits.basemap import Basemap
try:
from numpy.ma.core import MaskedArrayFutureWarning
except ImportError:
MaskedArrayFutureWarning = None # type: ignore
# Local
from ..utils.netcdf import nc_prepare_file
from ..utils.spatial import path_along_domain_boundary
from .utilities_misc import Domain
from .utilities_misc import Field2D
from .utilities_misc import inds2lonlat
from .utilities_misc import order_dict
__all__ = []
# Plot precip
PRECIP_LEVELS_PSEUDO_LOG_ORIG = np.array(
[
0.1,
0.2,
1.0,
2.0,
4.0,
6.0,
10.0,
20.0,
40.0,
60.0,
]
)
PRECIP_LEVELS_PSEUDO_LOG = np.array(
[
0.1,
0.22,
0.46,
1,
2.2,
4.6,
10,
22,
46,
100,
]
)
PRECIP_LEVELS_PSEUDO_LOG_NARROW = np.array(
[
1,
1.5,
2.2,
3.2,
4.6,
7,
10,
15,
22,
32,
] # 46,
)
PRECIP_LEVELS_LOG = 10 ** np.arange(-1, 2.1, 0.2)
PRECIP_LEVELS_LOG_NARROW = 10 ** np.arange(0, 1.6, 0.1)
assert len(PRECIP_LEVELS_LOG) == 16
# Precip NCL colormap 'precip2_17lev'
# src: www.ncl.ucar.edu/Document/Graphics/ColorTables/precip2_17lev.shtml
PRECIP_COLORS_RGB_RADAR = [
(100, 100, 100),
(150, 130, 150),
(4, 2, 252),
(4, 142, 44),
(4, 254, 4),
(252, 254, 4),
(252, 202, 4),
(252, 126, 4),
(252, 26, 4),
(172, 2, 220),
]
PRECIP_COLORS_HEX_RADAR = [
"{:02X}{:02X}{:02X}".format(r, g, b) for r, g, b in PRECIP_COLORS_RGB_RADAR
]
PRECIP_COLORS_RGB_MCH17 = [
(255, 255, 255),
# (235, 246, 255),
(214, 226, 255),
(181, 201, 255),
(142, 178, 255),
(127, 150, 255),
(114, 133, 248),
(99, 112, 248),
(0, 158, 30),
(60, 188, 61),
(179, 209, 110),
(185, 249, 110),
(255, 249, 19),
(255, 163, 9),
(229, 0, 0),
(189, 0, 0),
(129, 0, 0),
# ( 0, 0, 0),
]
PRECIP_COLORS_HEX_MCH17 = [
"{:02X}{:02X}{:02X}".format(r, g, b) for r, g, b in PRECIP_COLORS_RGB_RADAR
]
def create_cmap_precip(
colors_rgb=PRECIP_COLORS_RGB_RADAR,
levels=PRECIP_LEVELS_PSEUDO_LOG,
over="black",
lognorm=False,
):
"""Create precipitation colormap."""
if len(levels) != len(colors_rgb):
err = ("numbers of precip levels and colors differ: {} != {}").format(
len(levels), len(colors_rgb)
)
raise ValueError(err)
if lognorm:
levels = np.log10(levels)
cols = np.array(colors_rgb) / 255
fct = lambda l: (l - levels[0]) / (levels[-1] - levels[0])
cols_cmap = [(fct(l), c) for l, c in zip(levels, cols)]
cmap = mpl.colors.LinearSegmentedColormap.from_list("precip", cols_cmap)
cmap.set_under("white", alpha=0)
cmap.set_over(over)
return cmap
cmap_precip_pseudo_log = create_cmap_precip(
PRECIP_COLORS_RGB_RADAR, PRECIP_LEVELS_PSEUDO_LOG
)
cmap_precip_pseudo_log__lognorm = create_cmap_precip(
PRECIP_COLORS_RGB_RADAR, PRECIP_LEVELS_PSEUDO_LOG, lognorm=True
)
cmap_precip_pseudo_log_narrow__lognorm = create_cmap_precip(
PRECIP_COLORS_RGB_RADAR, PRECIP_LEVELS_PSEUDO_LOG_NARROW, lognorm=True
)
cmap_precip_log = create_cmap_precip(PRECIP_COLORS_RGB_MCH17, PRECIP_LEVELS_LOG)
def plot_precip(
outfile,
title,
fld,
lon=None,
lat=None,
*,
grid=None,
levels=None,
topo=None,
cmap_topo="terrain",
cmap=None,
clabel=None,
map_limits=None,
logtrans=False,
title_standalone=False,
cbar_standalone=False,
cbar_extend="max",
cbar_orientation="horizontal",
cbar_ticklabel_rotation=None,
cbar_ticklabel_offset=0,
cbar_ticklabel_stride=1,
draw_gridlines=True,
title_x=0.5,
title_y=1.02,
dpi=300,
title_fs=12,
fsS=14,
fsM=16,
fsL=18,
fsScale=1,
):
if title_standalone or cbar_standalone:
outfile = outfile.replace(".png", ".plot.png")
print("plot " + outfile)
if lon is None or lat is None:
if grid is None:
raise ValueError("must pass lon/lat or grid")
lon, lat = grid["lon"], grid["lat"]
n_levels_default = 10
auto_levels = levels is None
fsS *= fsScale
fsM *= fsScale
fsL *= fsScale
fig, ax = plt.subplots()
w_standalone = 0.6 * fig.get_size_inches()[0]
m = setup_map_crclim(
lon,
lat,
ax=ax,
lw_coasts=2,
map_limits=map_limits,
draw_gridlines=draw_gridlines,
)
mlon, mlat = m(lon, lat)
if topo is not None:
# Plot topography
# SR_TMP<
topo_mode = "color"
# SR_TMP>
if topo_mode == "color":
levels_topo = np.arange(0, 4001, 500)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=MaskedArrayFutureWarning)
ax.contourf(mlon, mlat, topo, levels=levels_topo, cmap=cmap_topo)
elif topo_mode == "contour":
levels_topo = np.arange(0, 4001, 1000)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=MaskedArrayFutureWarning)
ax.contour(
mlon, mlat, topo, levels=levels_topo, colors="black", linewidths=0.5
)
else:
raise ValueError("invalid topography plot mode: " + topo_mode)
if auto_levels and logtrans:
# Try to derive levels somewhat smartly
# If it fails, leave it to matplotlib
try:
logmin = np.log10(np.percentile(fld[fld > 0], 1))
logmax = np.log10(np.percentile(fld[fld > 0], 99))
if logmin == logmax:
levels = None
else:
levels = 10 ** np.linspace(logmin, logmax, n_levels_default)
except:
levels = None
if not logtrans:
fld_plt = fld
levels_plt = levels
else:
# Use logarithmic contour levels
# Manual transformation rather than LogNorm() to allow 'extend'
with np.errstate(divide="ignore"):
fld_plt = np.where(fld > 0, np.log10(fld), np.nan)
levels_plt = np.log10(levels) if levels is not None else None
# Plot field
_lvls = n_levels_default if auto_levels else levels_plt
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=MaskedArrayFutureWarning)
p = ax.contourf(mlon, mlat, fld_plt, _lvls, cmap=cmap, extend=cbar_extend)
if levels_plt is None:
# Extract levels that matplotlib computed on its own
        levels_plt = np.asarray(p.levels)
import math
import numpy as np
import pretty_midi
from midi2audio import FluidSynth
from config.load_yaml import configs
# Instruments ordered roughly from light and bright to heavy and dark timbres
instruments = ['Music Box', 'Cello', 'Acoustic Grand Piano', 'Pan Flute', 'Viola',
'Electric Piano 1', 'Church Organ', 'Acoustic Guitar (nylon)', ' Fretless Bass']
# instruments = ['Pan Flute', 'Cello', 'Contrabass', 'Viola']
def cluster_to_notes_and_cc(c_label, start):
'''
:param c_label:
:param start:
:return:
'''
# Control Change
cc_value = get_cc_value(c_label, func='logistic')
cc = pretty_midi.ControlChange(number=10, value=cc_value, time=start * configs['packet_time'] / 1e6)
# Note
velocity = get_note_velocity(c_label)
note_off_param = get_note_off_param(c_label)
pitch_max = int(np.max(c_label[:, 1]) / (configs['height'] / (configs['oct_num']*12)))
pitch_min = int(np.min(c_label[:, 1]) / (configs['height'] / (configs['oct_num']*12)))
pitch_mean = int((np.mean(c_label[:, 1])) / (configs['height'] / (configs['oct_num']*12)))
pitch_played = np.array(configs['pitch_major'][pitch_max-pitch_min]) + configs['root_pitch'] + pitch_mean
note_list = []
for pitch in pitch_played:
note = pretty_midi.Note(velocity=velocity, pitch=pitch,
start=start * configs['packet_time'] / 1e6,
end=(start + note_off_param) * configs['packet_time'] / 1e6)
note_list.append(note)
return note_list, cc
def get_cc_value(label, func='logistic'):
'''
    Mapping between the cluster's x-axis coordinate and the pan (CC 10) position.
:param label:
:param func:
:return:
'''
mean_label_x = np.mean(label[:, 0])
if func == 'linear':
cc_value = int(mean_label_x * 127 / configs['width'])
elif func == 'logistic':
cc_value = int(128 / (1 + np.exp(-0.03 * (mean_label_x - configs['width']/2))))
else:
raise AttributeError(f'func only has linear and logistic, but got {func}.')
return cc_value
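# Note on the logistic mapping above: an object centred at x = width / 2 maps to
# CC10 = 64 (centre pan), and the value saturates towards 0 (hard left) or 127
# (hard right) as the object approaches the edges of the frame.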
def get_note_velocity(label, mapping='constant'):
'''
    Auditory response curve: the more events there are, the larger the velocity.
    :param label:
    :param mapping: 'constant': use a constant velocity
                    'area': velocity determined by the estimated cluster area
:return:
'''
if mapping == 'area':
        obj_area = len(label)  # estimate the object area from the number of events
total_area = configs['width'] * configs['height']
obj_ratio = 1.0 * obj_area / total_area
        # A log function that satisfies both (x=0.001, y=60) and (x=1, y=120)
if obj_ratio < 0.001:
velocity = 0
else:
velocity = int(20 * math.log(obj_ratio, 10) + 120)
elif mapping == 'constant':
velocity = 100
else:
raise AttributeError(f'get wrong mapping method {mapping}')
return velocity
def get_note_off_param(label):
'''
    Return the number of frames the note should be sustained for.
:param label:
:return:
'''
c_width = np.max(label[:, 0]) - np.min(label[:, 0])
noff_param = 10 * c_width // configs['width'] + 1
return noff_param
def label_list_to_midi(label_list, midi_path):
'''
    Designed to be usable interactively: only the current frame is analysed, without
    looking ahead to later frames.
    Overall design: define a sound mapping for each frame. First decide whether the
    frame should produce any sound; if not, move on to the next frame, otherwise
    produce the sound mapping for it.
    :param label_list: array of length evt_num, where each element is three-dimensional (x, y, label)
:param midi_path:
:return:
'''
init = True
midi_data = pretty_midi.PrettyMIDI()
cluster_idx_list = []
for i, label in enumerate(label_list):
if label is not None:
            cluster_idx = np.unique(label[:, 2])
# pylint: disable=redefined-outer-name, comparison-with-callable
"""Test helper functions."""
import gzip
import importlib
import logging
import os
import pickle
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pytest
from _pytest.outcomes import Skipped
from packaging.version import Version
from ..data import InferenceData, from_dict
_log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def eight_schools_params():
"""Share setup for eight schools."""
return {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": | np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]) | numpy.array |
import numpy as np
from scipy.linalg import block_diag
from scipy.stats import norm
import matplotlib.pyplot as plt
import os
#from pyprobml_utils import save_fig
figdir = "../figures";
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
def gauss_plot2d(mu, sigma, plot_options):
plt.scatter(mu[0], mu[1],marker="x", c=plot_options['color'])
plt.plot(*cov_to_pts(sigma)+mu.reshape(2,1), '-o',c=plot_options['color'], markersize=0.1)
def cov_to_pts( cov ):
"""helper function to get covariance interval for plotting, this can likely be included in the utils folder"""
circ = np.linspace( 0, 2*np.pi, 100 )
sf = np.asarray( [ np.cos( circ ), np.sin( circ ) ] )
[u,s,v] = np.linalg.svd( cov )
pmat = u*2.447*np.sqrt(s) # 95% confidence
return np.dot( pmat, sf )
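# Note: the factor 2.447 above is approximately sqrt(chi2.ppf(0.95, df=2)), the
# Mahalanobis radius that encloses 95% of the probability mass of a 2-D Gaussian.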
def gauss_soft_condition(pmu, py, A, y):
sy_inv = np.linalg.inv(py['sigma'])
smu_inv = np.linalg.inv(pmu['sigma'])
post = {}
post['sigma'] = np.linalg.inv(smu_inv + A.T.dot(sy_inv).dot(A))
# reshape is needed to assist in + broadcasting
ny = py['mu'].shape[0] # 4
nm = pmu['mu'].shape[0] # 2
post['mu'] = post['sigma'].dot(A.T.dot(sy_inv).dot(y.reshape(ny,1) - py['mu']) +
smu_inv.dot(pmu['mu']).reshape(nm,1))
# these values are unused
model = norm(loc=A.dot(pmu['mu']) + py['mu'], scale=py['sigma'] + A.dot(pmu['sigma']).dot(A.T))
log_evidence = norm.pdf(y)
return post, log_evidence
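# For reference, gauss_soft_condition implements the standard linear-Gaussian
# conditioning for y = A @ mu + e with e ~ N(mu_y, Sigma_y) and mu ~ N(mu_0, Sigma_0):
#   Sigma_post = (Sigma_0^{-1} + A^T Sigma_y^{-1} A)^{-1}
#   mu_post    = Sigma_post @ (A^T Sigma_y^{-1} (y - mu_y) + Sigma_0^{-1} mu_0)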
def sensor_fusion():
sigmas = [0.01 * np.eye(2), 0.01*np.eye(2)]
helper(sigmas)
save_fig("demoGaussBayes2dEqualSpherical.pdf")
plt.show()
sigmas = [ 0.05*np.eye(2), 0.01*np.eye(2) ]
helper(sigmas)
save_fig("demoGaussBayes2dUnequalSpherical.pdf")
plt.show()
    sigmas = [0.01*np.array([[10, 1], [1, 1]]), 0.01*np.array([[1, 1], [1, 10]])]
""" Plot data split by compartments
Classes:
* :py:class:`CompartmentPlot`: compartment plotting tool
"""
# Standard lib
from typing import Tuple, Optional, Dict
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Our own imports
from .styling import set_plot_style
from .utils import bootstrap_ci, get_histogram
# Classes
class CompartmentPlot(object):
""" Plot data split by multiple compartments
:param int n_compartments:
How many compartments to split the data into
:param int topk:
How many samples to take from each compartment
"""
def __init__(self,
n_compartments: int,
topk: Optional[int] = None,
figsize: Tuple[int] = (8, 8),
plot_style: str = 'dark',
suffix: str = '.png'):
self.n_compartments = n_compartments
self.topk = topk
self.figsize = figsize
self.plot_style = plot_style
self.suffix = suffix
# Color palettes for the different compartments
self.colors = (['blue', 'orange', 'green', 'red', 'purple', 'grey'])[:n_compartments]
self.palletes = [sns.color_palette(c.capitalize()+'s', n_colors=10)
for c in self.colors]
# Calculated values
self._bin_indices = None
self._bin_values = None
self._xdata = None
self._xcolumn = None
self._ycolumn = None
self._plotdata = None
self._distdata = None
self._total_count = None
def calc_indices(self, values: np.ndarray):
""" Calculate the indicies for each bin
:param ndarray values:
The values to use to generate the bins
"""
if self.topk is None:
self.topk = values.shape[0] // self.n_compartments
if values.shape[0] < self.topk * self.n_compartments:
err = 'Got too few values for {} samples of {} compartments: {}'
err = err.format(self.topk, self.n_compartments, values.shape[0])
raise ValueError(err)
print(f'Spliting into {self.n_compartments} compartments of {self.topk} samples each')
# Sort all the indices
        indices = np.argsort(values)
import time, os, math, inspect, subprocess
import numpy as np
import multiprocessing as mp
import json
import warnings
from copy import deepcopy
import pickle as pickle
from scipy.linalg import solve_triangular
from typing import List
from flare.struc import Structure
from flare.env import AtomicEnvironment
from flare.gp import GaussianProcess
from flare.gp_algebra import partition_c
from flare.gp_algebra import en_kern_vec_unit as en_kern_vec
from flare.kernels.utils import from_mask_to_args, str_to_kernel_set
from flare.cutoffs import quadratic_cutoff
from flare.util import Z_to_element
from flare.mgp.utils import get_bonds, get_triplets, get_triplets_en, \
get_2bkernel, get_3bkernel
from flare.mgp.splines_methods import PCASplines, CubicSpline
from flare.util import Z_to_element, NumpyEncoder
class MappedGaussianProcess:
'''
Build Mapped Gaussian Process (MGP)
and automatically save coefficients for LAMMPS pair style.
:param: struc_params : Parameters for a dummy structure which will be
internally used to probe/store forces associated with different atomic
configurations
:param: grid_params : Parameters for the mapping itself, such as
grid size of spline fit, etc.
:param: mean_only : if True: only build mapping for mean (force)
:param: container_only : if True: only build splines container
(with no coefficients)
:param: GP: None or a GaussianProcess object. If a GP is input,
and autorun is true, automatically build a mapping corresponding
to the GaussianProcess.
:param: lmp_file_name : LAMMPS coefficient file name
:param: autorun: Attempt to build map immediately
Examples:
>>> struc_params = {'species': [0, 1],
'cube_lat': cell, # should input the cell matrix
'mass_dict': {'0': 27 * unit, '1': 16 * unit}}
>>> grid_params = {'bounds_2': [[1.2], [3.5]],
# [[lower_bound], [upper_bound]]
# These describe the lower and upper
# bounds used to specify the 2-body spline
# fits.
'bounds_3': [[1.2, 1.2, 1.2], [3.5, 3.5, 3.5]],
# [[lower,lower,lower],[upper,upper,upper]]
# Values describe lower and upper bounds
# for the bondlength-bondlength-bondlength
# grid used to construct and fit 3-body
# kernels; note that for force MGPs
# bondlength-bondlength-costheta
# are the bounds used instead.
'bodies': [2, 3] # use 2+3 body
'grid_num_2': 64,# Fidelity of the grid
'grid_num_3': [16, 16, 16],# Fidelity of the grid
'svd_rank_2': 64, #Fidelity of uncertainty estimation
'svd_rank_3': 16**3,
'update': True, # if True: accelerating grids
# generating by saving intermediate
# coeff when generating grids
'load_grid': None # Used to load from file
}
'''
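    # Construction sketch (assumes `gp` is a trained GaussianProcess and the
    # two parameter dicts follow the format documented above):
    #
    #   mgp = MappedGaussianProcess(grid_params, struc_params, GP=gp,
    #                               container_only=False)
    #
    # With container_only=False and a GP that has training data, the spline
    # maps are built immediately and the LAMMPS coefficient file is written.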
def __init__(self,
grid_params: dict,
struc_params: dict,
GP: GaussianProcess=None,
mean_only: bool=False,
container_only: bool=True,
lmp_file_name: str='lmp.mgp',
n_cpus: int =None,
n_sample:int =100,
autorun: bool = True):
# load all arguments as attributes
self.mean_only = mean_only
self.lmp_file_name = lmp_file_name
self.n_cpus = n_cpus
self.n_sample = n_sample
self.grid_params = grid_params
self.struc_params = struc_params
# arg_dict = inspect.getargvalues(inspect.currentframe())[3]
# del arg_dict['self'], arg_dict['GP']
# self.__dict__.update(arg_dict)
self.__dict__.update(grid_params)
# if GP exists, the GP setup overrides the grid_params setup
if GP is not None:
self.cutoffs = GP.cutoffs
self.bodies = []
if "two" in GP.kernel_name:
self.bodies.append(2)
self.kernel2b_info = get_2bkernel(GP)
if "three" in GP.kernel_name:
self.bodies.append(3)
self.kernel3b_info = get_3bkernel(GP)
self.build_bond_struc(struc_params)
self.maps_2 = []
self.maps_3 = []
self.build_map_container()
self.mean_only = mean_only
if not container_only and (GP is not None) and \
(len(GP.training_data) > 0) and autorun:
self.build_map(GP)
def build_map_container(self):
'''
construct an empty spline container without coefficients.
'''
if 2 in self.bodies:
for b_struc in self.bond_struc[0]:
map_2 = Map2body(self.grid_num_2, self.bounds_2,
b_struc, self.svd_rank_2,
self.mean_only, self.n_cpus, self.n_sample)
self.maps_2.append(map_2)
if 3 in self.bodies:
for b_struc in self.bond_struc[1]:
map_3 = Map3body(self.grid_num_3, self.bounds_3,
b_struc, self.svd_rank_3,
self.mean_only,
self.grid_params['load_grid'],
self.update, self.n_cpus, self.n_sample)
self.maps_3.append(map_3)
def build_map(self, GP):
'''
generate/load grids and get spline coefficients
'''
if 2 in self.bodies:
self.kernel2b_info = get_2bkernel(GP)
if 3 in self.bodies:
self.kernel3b_info = get_3bkernel(GP)
for map_2 in self.maps_2:
map_2.build_map(GP)
for map_3 in self.maps_3:
map_3.build_map(GP)
# write to lammps pair style coefficient file
self.write_lmp_file(self.lmp_file_name)
def build_bond_struc(self, struc_params):
'''
        Build a bond structure, used for grid generation.
'''
cutoff = np.min(self.cutoffs)
cell = struc_params['cube_lat']
mass_dict = struc_params['mass_dict']
species_list = struc_params['species']
N_spc = len(species_list)
# 2 body (2 atoms (1 bond) config)
bond_struc_2 = []
spc_2 = []
spc_2_set = []
if 2 in self.bodies:
bodies = 2
for spc1_ind, spc1 in enumerate(species_list):
for spc2 in species_list[spc1_ind:]:
species = [spc1, spc2]
spc_2.append(species)
spc_2_set.append(set(species))
positions = [[(i+1)/(bodies+1)*cutoff, 0, 0]
for i in range(bodies)]
spc_struc = \
Structure(cell, species, positions, mass_dict)
spc_struc.coded_species = np.array(species)
bond_struc_2.append(spc_struc)
# 3 body (3 atoms (1 triplet) config)
bond_struc_3 = []
spc_3 = []
if 3 in self.bodies:
bodies = 3
for spc1_ind in range(N_spc):
spc1 = species_list[spc1_ind]
for spc2_ind in range(N_spc): # (spc1_ind, N_spc):
spc2 = species_list[spc2_ind]
for spc3_ind in range(N_spc): # (spc2_ind, N_spc):
spc3 = species_list[spc3_ind]
species = [spc1, spc2, spc3]
spc_3.append(species)
positions = [[(i+1)/(bodies+1)*cutoff, 0, 0]
for i in range(bodies)]
spc_struc = Structure(cell, species, positions,
mass_dict)
spc_struc.coded_species = np.array(species)
bond_struc_3.append(spc_struc)
# if spc1 != spc2:
# species = [spc2, spc3, spc1]
# spc_3.append(species)
# positions = [[(i+1)/(bodies+1)*cutoff, 0, 0] \
# for i in range(bodies)]
# spc_struc = Structure(cell, species, positions,
# mass_dict)
# spc_struc.coded_species = np.array(species)
# bond_struc_3.append(spc_struc)
# if spc2 != spc3:
# species = [spc3, spc1, spc2]
# spc_3.append(species)
# positions = [[(i+1)/(bodies+1)*cutoff, 0, 0] \
# for i in range(bodies)]
# spc_struc = Structure(cell, species, positions,
# mass_dict)
# spc_struc.coded_species = np.array(species)
# bond_struc_3.append(spc_struc)
self.bond_struc = [bond_struc_2, bond_struc_3]
self.spcs = [spc_2, spc_3]
self.spcs_set = [spc_2_set, spc_3]
def predict(self, atom_env: AtomicEnvironment, mean_only: bool=False)-> \
(float, 'ndarray','ndarray', float):
'''
predict force, variance, stress and local energy for given atomic environment
Args:
atom_env: atomic environment (with a center atom and its neighbors)
mean_only: if True: only predict force (variance is always 0)
Return:
force: 3d array of atomic force
variance: 3d array of the predictive variance
stress: 6d array of the virial stress
energy: the local energy (atomic energy)
'''
        if self.mean_only:  # the variance mapping was not built
mean_only = True
# ---------------- predict for two body -------------------
f2 = vir2 = kern2 = v2 = e2 = 0
if 2 in self.bodies:
f2, vir2, kern2, v2, e2 = \
self.predict_multicomponent(2, atom_env, self.kernel2b_info,
self.spcs_set[0],
self.maps_2, mean_only)
# ---------------- predict for three body -------------------
f3 = vir3 = kern3 = v3 = e3 = 0
if 3 in self.bodies:
f3, vir3, kern3, v3, e3 = \
self.predict_multicomponent(3, atom_env, self.kernel3b_info,
self.spcs[1], self.maps_3,
mean_only)
force = f2 + f3
variance = kern2 + kern3 - | np.sum((v2 + v3)**2, axis=0) | numpy.sum |
"""
legacyhalos.awesome
===================
Miscellaneous code pertaining to the project of DESI Awesome Cluster Finder.
- Last update: 2021-08-25
"""
import os, time, shutil, pdb
import numpy as np
import astropy
import fitsio
import legacyhalos.io
ZCOLUMN = 'Z_BEST'
RACOLUMN = 'RA'
DECCOLUMN = 'DEC'
DIAMCOLUMN = 'RADIUS_MOSAIC' # [radius, arcsec]
GALAXYCOLUMN = 'ID_S16A'
REFIDCOLUMN = 'ID_S16A'
RADIUS_CLUSTER_KPC = 250.0 # default cluster radius
RADIUS_CLUSTER_LOWZ_KPC = 150.0 # default cluster radius
SBTHRESH = [23.0, 24.0, 25.0, 26.0] # surface brightness thresholds
APERTURES = [0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0] # multiples of MAJORAXIS
def get_galaxy_galaxydir(cat, datadir=None, htmldir=None, html=False):
"""Retrieve the galaxy name and the (nested) directory.
"""
import astropy
#import healpy as hp
from legacyhalos.misc import radec2pix
#nside = 8 # keep hard-coded
if datadir is None:
datadir = legacyhalos.io.legacyhalos_data_dir()
if htmldir is None:
htmldir = legacyhalos.io.legacyhalos_html_dir()
if not 'ID_S16A' in cat.colnames:
# need to handle the lowz catalog
print('Missing ID_S16A and NAME in catalog!')
raise ValueError()
if type(cat) is astropy.table.row.Row:
ngal = 1
galaxy = ['{:017d}'.format(cat[GALAXYCOLUMN])]
subdir = [galaxy[0][:4]]
#pixnum = [radec2pix(nside, cat[RACOLUMN], cat[DECCOLUMN])]
else:
ngal = len(cat)
galaxy = np.array(['{:017d}'.format(gid) for gid in cat[GALAXYCOLUMN]])
subdir = [gal[:4] for gal in galaxy]
#pixnum = radec2pix(nside, cat[RACOLUMN], cat[DECCOLUMN]).data
galaxydir = np.array([os.path.join(datadir, sdir, gal) for sdir, gal in zip(subdir, galaxy)])
#galaxydir = np.array([os.path.join(datadir, '{}'.format(nside), '{}'.format(pix), gal)
# for pix, gal in zip(pixnum, gal)])
if html:
htmlgalaxydir = np.array([os.path.join(htmldir, sdir, gal) for sdir, gal in zip(subdir, galaxy)])
#htmlgalaxydir = np.array([os.path.join(htmldir, '{}'.format(nside), '{}'.format(pix), gal)
# for pix, gal in zip(pixnum, galaxy)])
if ngal == 1:
galaxy = galaxy[0]
galaxydir = galaxydir[0]
if html:
htmlgalaxydir = htmlgalaxydir[0]
if html:
return galaxy, galaxydir, htmlgalaxydir
else:
return galaxy, galaxydir
def missing_files(args, sample, size=1, clobber_overwrite=None):
from astrometry.util.multiproc import multiproc
from legacyhalos.io import _missing_files_one
dependson = None
galaxy, galaxydir = get_galaxy_galaxydir(sample)
if args.coadds:
suffix = 'coadds'
filesuffix = '-custom-coadds.isdone'
elif args.pipeline_coadds:
suffix = 'pipeline-coadds'
if args.just_coadds:
filesuffix = '-pipeline-image-grz.jpg'
else:
filesuffix = '-pipeline-coadds.isdone'
elif args.ellipse:
suffix = 'ellipse'
filesuffix = '-custom-ellipse.isdone'
dependson = '-custom-coadds.isdone'
elif args.build_catalog:
suffix = 'build-catalog'
filesuffix = '-custom-ellipse.isdone'
#dependson = '-custom-ellipse.isdone'
elif args.htmlplots:
suffix = 'html'
if args.just_coadds:
filesuffix = '-custom-montage-grz.png'
else:
filesuffix = '-ccdpos.png'
#filesuffix = '-custom-maskbits.png'
galaxy, _, galaxydir = get_galaxy_galaxydir(sample, htmldir=args.htmldir, html=True)
elif args.htmlindex:
suffix = 'htmlindex'
filesuffix = '-custom-montage-grz.png'
galaxy, _, galaxydir = get_galaxy_galaxydir(sample, htmldir=args.htmldir, html=True)
else:
print('Nothing to do.')
return
# Make clobber=False for build_SGA and htmlindex because we're not making
# the files here, we're just looking for them. The argument args.clobber
# gets used downstream.
if args.htmlindex or args.build_catalog:
clobber = False
else:
clobber = args.clobber
if clobber_overwrite is not None:
clobber = clobber_overwrite
if type(sample) is astropy.table.row.Row:
ngal = 1
else:
ngal = len(sample)
indices = np.arange(ngal)
mp = multiproc(nthreads=args.nproc)
missargs = []
for gal, gdir in zip(np.atleast_1d(galaxy), np.atleast_1d(galaxydir)):
#missargs.append([gal, gdir, filesuffix, dependson, clobber])
checkfile = os.path.join(gdir, '{}{}'.format(gal, filesuffix))
if dependson:
missargs.append([checkfile, os.path.join(gdir, '{}{}'.format(gal, dependson)), clobber])
else:
missargs.append([checkfile, None, clobber])
todo = np.array(mp.map(_missing_files_one, missargs))
itodo = np.where(todo == 'todo')[0]
idone = np.where(todo == 'done')[0]
ifail = np.where(todo == 'fail')[0]
if len(ifail) > 0:
fail_indices = [indices[ifail]]
else:
fail_indices = [np.array([])]
if len(idone) > 0:
done_indices = [indices[idone]]
else:
done_indices = [np.array([])]
if len(itodo) > 0:
_todo_indices = indices[itodo]
todo_indices = np.array_split(_todo_indices, size) # unweighted
## Assign the sample to ranks to make the D25 distribution per rank ~flat.
#
## https://stackoverflow.com/questions/33555496/split-array-into-equally-weighted-chunks-based-on-order
#weight = np.atleast_1d(sample[DIAMCOLUMN])[_todo_indices]
#cumuweight = weight.cumsum() / weight.sum()
#idx = np.searchsorted(cumuweight, np.linspace(0, 1, size, endpoint=False)[1:])
#if len(idx) < size: # can happen in corner cases
# todo_indices = np.array_split(_todo_indices, size) # unweighted
#else:
# todo_indices = np.array_split(_todo_indices, idx) # weighted
else:
todo_indices = [np.array([])]
return suffix, todo_indices, done_indices, fail_indices
def mpi_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--nproc', default=1, type=int, help='number of multiprocessing processes per MPI rank.')
parser.add_argument('--mpi', action='store_true', help='Use MPI parallelism')
parser.add_argument('--first', type=int, help='Index of first object to process.')
parser.add_argument('--last', type=int, help='Index of last object to process.')
    parser.add_argument('--galaxylist', type=int, nargs='*', default=None, help='List of galaxy names to process.')
parser.add_argument('--coadds', action='store_true', help='Build the custom coadds.')
parser.add_argument('--pipeline-coadds', action='store_true', help='Build the pipelinecoadds.')
    parser.add_argument('--just-coadds', action='store_true', help='Just build the coadds and return (using --early-coadds in runbrick.py).')
parser.add_argument('--ellipse', action='store_true', help='Do the ellipse fitting.')
parser.add_argument('--integrate', action='store_true', help='Integrate the surface brightness profiles.')
parser.add_argument('--htmlplots', action='store_true', help='Build the HTML output.')
parser.add_argument('--htmlindex', action='store_true', help='Build HTML index.html page.')
parser.add_argument('--htmlhome', default='index.html', type=str, help='Home page file name (use in tandem with --htmlindex).')
parser.add_argument('--htmldir', type=str, help='Output directory for HTML files.')
parser.add_argument('--pixscale', default=0.262, type=float, help='pixel scale (arcsec/pix).')
parser.add_argument('--unwise', action='store_true', help='Build unWISE coadds or do forced unWISE photometry.')
parser.add_argument('--no-cleanup', action='store_false', dest='cleanup', help='Do not clean up legacypipe files after coadds.')
parser.add_argument('--sky-tests', action='store_true', help='Test choice of sky apertures in ellipse-fitting.')
parser.add_argument('--force', action='store_true', help='Use with --coadds; ignore previous pickle files.')
parser.add_argument('--count', action='store_true', help='Count how many objects are left to analyze and then return.')
parser.add_argument('--debug', action='store_true', help='Log to STDOUT and build debugging plots.')
parser.add_argument('--verbose', action='store_true', help='Enable verbose output.')
parser.add_argument('--clobber', action='store_true', help='Overwrite existing files.')
parser.add_argument('--build-refcat', action='store_true', help='Build the legacypipe reference catalog.')
parser.add_argument('--build-catalog', action='store_true', help='Build the final photometric catalog.')
args = parser.parse_args()
return args
def legacyhsc_cosmology(WMAP=False, Planck=False):
"""Establish the default cosmology for the project."""
if WMAP:
from astropy.cosmology import WMAP9 as cosmo
elif Planck:
from astropy.cosmology import Planck15 as cosmo
else:
from astropy.cosmology import FlatLambdaCDM
#params = dict(H0=70, Om0=0.3, Ob0=0.0457, Tcmb0=2.7255, Neff=3.046)
#sigma8 = 0.82
#ns = 0.96
#cosmo = FlatLambdaCDM(name='FlatLambdaCDM', **params)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, name='FlatLambdaCDM')
return cosmo
def cutout_radius_kpc(redshift, pixscale=None, radius_kpc=RADIUS_CLUSTER_KPC, cosmo=None):
"""Get a cutout radius of RADIUS_KPC [in pixels] at the redshift of the cluster.
"""
if cosmo is None:
cosmo = legacyhsc_cosmology()
arcsec_per_kpc = cosmo.arcsec_per_kpc_proper(redshift).value
radius = radius_kpc * arcsec_per_kpc # [float arcsec]
if pixscale:
radius = np.rint(radius / pixscale).astype(int) # [integer/rounded pixels]
return radius
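# Worked example (sketch; numbers are approximate): for the default cosmology,
# the 250 kpc default radius at z ~ 0.3 subtends roughly 56 arcsec, i.e. about
# 210 pixels at the 0.262 arcsec/pixel Legacy Surveys scale:
#
#   rad_pix = cutout_radius_kpc(redshift=0.3, pixscale=0.262)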
def get_integrated_filename():
"""Return the name of the file containing the integrated photometry."""
integratedfile = os.path.join(hsc_dir(), 'integrated-flux.fits')
return integratedfile
def read_sample(first=None, last=None, galaxylist=None, verbose=False):
"""Read/generate the parent HSC sample by combining the low-z and intermediate-z
samples.
from astropy.table import Table, Column, vstack
s1 = Table(fitsio.read('low-z-shape-for-john.fits', upper=True))
s2 = Table(fitsio.read('s16a_massive_z_0.5_logm_11.4_decals_full_fdfc_bsm_ell.fits', upper=True))
s1out = s1['NAME', 'RA', 'DEC', 'Z', 'MEAN_E', 'MEAN_PA']
s1out.rename_column('Z', 'Z_BEST')
s1out.add_column(Column(name='ID_S16A', dtype=s2['ID_S16A'].dtype, length=len(s1out)), index=1)
s1out['ID_S16A'] = -1
s2out = s2['ID_S16A', 'RA', 'DEC', 'Z_BEST', 'MEAN_E', 'MEAN_PA']
s2out.add_column(Column(name='NAME', dtype=s1['NAME'].dtype, length=len(s2out)), index=0)
sout = vstack((s1out, s2out))
sout.write('hsc-sample-s16a-lowz.fits', overwrite=True)
"""
cosmo = legacyhsc_cosmology()
# Hack for MUSE proposal
#samplefile = os.path.join(hdir, 's18a_z0.07_0.12_rcmod_18.5_etg_muse_massive_0313.fits')
# intermediate-z sample only
samplefile = os.path.join(legacyhalos.io.legacyhalos_dir(), 's16a_massive_z_0.5_logm_11.4_decals_full_fdfc_bsm_ell.fits')
#samplefile = os.path.join(hdir, 's16a_massive_z_0.5_logm_11.4_dec_30_for_john.fits')
# low-z sample only
#samplefile = os.path.join(hdir, 'low-z-shape-for-john.fits')
# Investigate a subset of galaxies.
#cat1 = fitsio.read(os.path.join(hdir, 'hsc-sample-s16a-lowz.fits'), upper=True)
#cat2 = fitsio.read(os.path.join(hdir, 'DECaLS_negative_gal.fits'), upper=True)
#keep = np.isin(cat1['ID_S16A'], cat2['ID_S16A'])
#fitsio.write(os.path.join(hdir, 'temp-hsc-sample-s16a-lowz.fits'), cat1[keep], clobber=True)
# combined sample (see comment block above)
#if False:
#print('Temporary sample!!')
#samplefile = os.path.join(hdir, 'temp-hsc-sample-s16a-lowz.fits')
#samplefile = os.path.join(hdir, 'hsc-sample-s16a-lowz.fits')
if first and last:
if first > last:
print('Index first cannot be greater than index last, {} > {}'.format(first, last))
raise ValueError()
ext = 1
info = fitsio.FITS(samplefile)
nrows = info[ext].get_nrows()
# select a "test" subset
if False:
nrows = 200
rows = np.arange(nrows)
else:
rows = None
if first is None:
first = 0
if last is None:
last = nrows
if rows is None:
rows = np.arange(first, last)
else:
rows = rows[np.arange(first, last)]
else:
if last >= nrows:
print('Index last cannot be greater than the number of rows, {} >= {}'.format(last, nrows))
raise ValueError()
if rows is None:
rows = np.arange(first, last+1)
else:
rows = rows[np.arange(first, last+1)]
sample = astropy.table.Table(info[ext].read(rows=rows, upper=True))
if verbose:
if len(rows) == 1:
print('Read galaxy index {} from {}'.format(first, samplefile))
else:
print('Read galaxy indices {} through {} (N={}) from {}'.format(
first, last, len(sample), samplefile))
# pre-compute the diameter of the mosaic (=2*RADIUS_CLUSTER_KPC kpc) for each cluster
sample[DIAMCOLUMN] = cutout_radius_kpc(redshift=sample[ZCOLUMN], cosmo=cosmo, # diameter, [arcsec]
radius_kpc=2 * RADIUS_CLUSTER_KPC)
# Add an (internal) index number:
#sample.add_column(astropy.table.Column(name='INDEX', data=rows), index=0)
if galaxylist is not None:
if verbose:
print('Selecting specific galaxies.')
these = np.in1d(sample[GALAXYCOLUMN], galaxylist)
if np.count_nonzero(these) == 0:
print('No matching galaxies!')
return astropy.table.Table()
else:
sample = sample[these]
return sample
def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,
threshmask=0.001, r50mask=0.1, maxshift=5,
sigmamask=3.0, neighborfactor=3.0, verbose=False):
"""Wrapper to mask out all sources except the galaxy we want to ellipse-fit.
r50mask - mask satellites whose r50 radius (arcsec) is > r50mask
    threshmask - mask satellites whose flux ratio is > threshmask relative to
the central galaxy.
"""
import numpy.ma as ma
from copy import copy
from legacyhalos.mge import find_galaxy
from legacyhalos.misc import srcs2image, ellipse_mask
import matplotlib.pyplot as plt
from astropy.visualization import simple_norm
bands, refband = data['bands'], data['refband']
#residual_mask = data['residual_mask']
#nbox = 5
#box = np.arange(nbox)-nbox // 2
#box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2
xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]
# If the row-index of the central galaxy is not provided, use the source
# nearest to the center of the field.
if 'galaxy_indx' in data.keys():
galaxy_indx = np.atleast_1d(data['galaxy_indx'])
else:
galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +
(tractor.by - data['refband_width']/2)**2)])
data['galaxy_indx'] = np.atleast_1d(galaxy_indx)
data['galaxy_id'] = ''
#print('Import hack!')
#norm = simple_norm(img, 'log', min_percent=0.05, clip=True)
#import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm
## Get the PSF sources.
#psfindx = np.where(tractor.type == 'PSF')[0]
#if len(psfindx) > 0:
# psfsrcs = tractor.copy()
# psfsrcs.cut(psfindx)
#else:
# psfsrcs = None
def tractor2mge(indx, factor=1.0):
# Convert a Tractor catalog entry to an MGE object.
class MGEgalaxy(object):
pass
ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])
ba = (1 - ee) / (1 + ee)
pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))
pa = pa % 180
if tractor.shape_r[indx] < 1:
print('Galaxy half-light radius is < 1 arcsec!')
raise ValueError
majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]
mgegalaxy = MGEgalaxy()
mgegalaxy.xmed = tractor.by[indx]
mgegalaxy.ymed = tractor.bx[indx]
mgegalaxy.xpeak = tractor.by[indx]
mgegalaxy.ypeak = tractor.bx[indx]
mgegalaxy.eps = 1 - ba
mgegalaxy.pa = pa
mgegalaxy.theta = (270 - pa) % 180
mgegalaxy.majoraxis = majoraxis
objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True
mgegalaxy.majoraxis,
mgegalaxy.majoraxis * (1-mgegalaxy.eps),
np.radians(mgegalaxy.theta-90), xobj, yobj)
return mgegalaxy, objmask
# Now, loop through each 'galaxy_indx' from bright to faint.
data['mge'] = []
for ii, central in enumerate(galaxy_indx):
        print('Determining the geometry for galaxy {}/{}.'.format(
ii+1, len(galaxy_indx)))
        # [1] Determine the non-parametric geometry of the galaxy of interest
# in the reference band. First, subtract all models except the galaxy
# and galaxies "near" it. Also restore the original pixels of the
# central in case there was a poor deblend.
largeshift = False
mge, centralmask = tractor2mge(central, factor=neighborfactor)
#plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('junk-mask.png') ; pdb.set_trace()
        iclose = np.where([centralmask[int(by), int(bx)]
for by, bx in zip(tractor.by, tractor.bx)])[0]
srcs = tractor.copy()
srcs.cut(np.delete(np.arange(len(tractor)), iclose))
model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],
band=refband.lower(),
pixelized_psf=data['{}_psf'.format(refband.lower())])
img = data[refband].data - model
img[centralmask] = data[refband].data[centralmask]
mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])
#mask = np.logical_or(data[refband].mask, data['residual_mask'])
mask[centralmask] = False
img = ma.masked_array(img, mask)
ma.set_fill_value(img, fill_value)
mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('debug.png')
#plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')
##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')
#pdb.set_trace()
# Did the galaxy position move? If so, revert back to the Tractor geometry.
if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:
print('Large centroid shift! (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(
mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))
largeshift = True
mgegalaxy = copy(mge)
radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(
mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals
radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(
mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals
mge = {
'largeshift': largeshift,
'ra': tractor.ra[central], 'dec': tractor.dec[central],
'bx': tractor.bx[central], 'by': tractor.by[central],
#'mw_transmission_g': tractor.mw_transmission_g[central],
#'mw_transmission_r': tractor.mw_transmission_r[central],
#'mw_transmission_z': tractor.mw_transmission_z[central],
'ra_moment': radec_med[0], 'dec_moment': radec_med[1],
#'ra_peak': radec_med[0], 'dec_peak': radec_med[1]
}
for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):
mge[key] = np.float32(getattr(mgegalaxy, key))
if key == 'pa': # put into range [0-180]
mge[key] = mge[key] % np.float32(180)
data['mge'].append(mge)
#if False:
# #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')
# plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)
# plt.savefig('/mnt/legacyhalos-data/debug.png')
# [2] Create the satellite mask in all the bandpasses. Use srcs here,
# which has had the satellites nearest to the central galaxy trimmed
# out.
print('Building the satellite mask.')
satmask = np.zeros(data[refband].shape, bool)
for filt in bands:
cenflux = getattr(tractor, 'flux_{}'.format(filt))[central]
satflux = getattr(srcs, 'flux_{}'.format(filt))
if cenflux <= 0.0:
raise ValueError('Central galaxy flux is negative!')
satindx = np.where(np.logical_or(
(srcs.type != 'PSF') * (srcs.shape_r > r50mask) *
(satflux > 0.0) * ((satflux / cenflux) > threshmask),
srcs.ref_cat == 'R1'))[0]
#if np.isin(central, satindx):
# satindx = satindx[np.logical_not(np.isin(satindx, central))]
if len(satindx) == 0:
print('Warning! All satellites have been dropped from band {}!'.format(filt))
else:
satsrcs = srcs.copy()
#satsrcs = tractor.copy()
satsrcs.cut(satindx)
satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt)],
band=filt.lower(),
pixelized_psf=data['{}_psf'.format(filt)])
thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]
satmask = np.logical_or(satmask, thissatmask)
#if True:
# import matplotlib.pyplot as plt
# plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')
# #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')
# pdb.set_trace()
# [3] Build the final image (in each filter) for ellipse-fitting. First,
# subtract out the PSF sources. Then update the mask (but ignore the
# residual mask). Finally convert to surface brightness.
for filt in bands:
mask = np.logical_or(ma.getmask(data[filt]), satmask)
mask[centralmask] = False
#plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')
varkey = '{}_var'.format(filt)
imagekey = '{}_masked'.format(filt)
psfimgkey = '{}_psfimg'.format(filt)
thispixscale = filt2pixscale[filt]
if imagekey not in data.keys():
data[imagekey], data[varkey], data[psfimgkey] = [], [], []
img = ma.getdata(data[filt]).copy()
# Get the PSF sources.
psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]
if len(psfindx) > 0:
psfsrcs = tractor.copy()
psfsrcs.cut(psfindx)
else:
psfsrcs = None
if psfsrcs:
psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],
band=filt.lower(),
pixelized_psf=data['{}_psf'.format(filt.lower())])
if False:
#import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)
im = ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)
im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)
im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)
plt.savefig('qa-psf-{}.png'.format(filt.lower()))
#if filt == 'W4':# or filt == 'r':
# pdb.set_trace()
img -= psfimg
else:
psfimg = np.zeros((2, 2), 'f4')
data[psfimgkey].append(psfimg)
img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]
var = data['{}_var_'.format(filt)] / thispixscale**4 # [nanomaggies**2/arcsec**4]
            # fill masked pixels with the chosen fill value
ma.set_fill_value(img, fill_value)
#if ii == 1 and filt == 'r': #filt == 'W1' or
# plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('junk-img-{}.png'.format(filt.lower()))
# plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask-{}.png'.format(filt.lower()))
### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))
# pdb.set_trace()
data[imagekey].append(img)
data[varkey].append(var)
#test = data['r_masked'][0]
#plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')
#pdb.set_trace()
# Cleanup?
for filt in bands:
del data[filt]
del data['{}_var_'.format(filt)]
return data
def read_multiband(galaxy, galaxydir, galaxy_id, filesuffix='custom',
refband='r', bands=['g', 'r', 'z'], pixscale=0.262,
redshift=None, galex=False, unwise=False,
fill_value=0.0, sky_tests=False, verbose=False):
"""Read the multi-band images (converted to surface brightness) and create a
masked array suitable for ellipse-fitting.
"""
import fitsio
from astropy.table import Table
from astrometry.util.fits import fits_table
from legacypipe.bits import MASKBITS
from legacyhalos.io import _get_psfsize_and_depth, _read_image_data
#galaxy_id = np.atleast_1d(galaxy_id)
#if len(galaxy_id) > 1:
# raise ValueError('galaxy_id in read_multiband cannot be a >1-element vector for now!')
#galaxy_id = galaxy_id[0]
#assert(np.isscalar(galaxy_id))
# Dictionary mapping between optical filter and filename coded up in
# coadds.py, galex.py, and unwise.py, which depends on the project.
data, filt2imfile, filt2pixscale = {}, {}, {}
for band in bands:
filt2imfile.update({band: {'image': '{}-image'.format(filesuffix),
'model': '{}-model'.format(filesuffix),
'invvar': '{}-invvar'.format(filesuffix),
'psf': '{}-psf'.format(filesuffix),
}})
filt2pixscale.update({band: pixscale})
filt2imfile.update({'tractor': '{}-tractor'.format(filesuffix),
'sample': 'sample',
'maskbits': '{}-maskbits'.format(filesuffix),
})
# Do all the files exist? If not, bail!
missing_data = False
for filt in bands:
for ii, imtype in enumerate(filt2imfile[filt].keys()):
#if imtype == 'sky': # this is a dictionary entry
# continue
imfile = os.path.join(galaxydir, '{}-{}-{}.fits.fz'.format(galaxy, filt2imfile[filt][imtype], filt))
#print(imtype, imfile)
if os.path.isfile(imfile):
filt2imfile[filt][imtype] = imfile
else:
if verbose:
print('File {} not found.'.format(imfile))
missing_data = True
break
data['failed'] = False # be optimistic!
data['missingdata'] = False
data['filesuffix'] = filesuffix
if missing_data:
data['missingdata'] = True
return data, None
# Pack some preliminary info into the output dictionary.
data['bands'] = bands
data['refband'] = refband
data['refpixscale'] = np.float32(pixscale)
# We ~have~ to read the tractor catalog using fits_table because we will
# turn these catalog entries into Tractor sources later.
tractorfile = os.path.join(galaxydir, '{}-{}.fits'.format(galaxy, filt2imfile['tractor']))
if verbose:
print('Reading {}'.format(tractorfile))
cols = ['ra', 'dec', 'bx', 'by', 'type', 'ref_cat', 'ref_id',
'sersic', 'shape_r', 'shape_e1', 'shape_e2',
'flux_g', 'flux_r', 'flux_z',
'flux_ivar_g', 'flux_ivar_r', 'flux_ivar_z',
'nobs_g', 'nobs_r', 'nobs_z',
#'mw_transmission_g', 'mw_transmission_r', 'mw_transmission_z',
'psfdepth_g', 'psfdepth_r', 'psfdepth_z',
'psfsize_g', 'psfsize_r', 'psfsize_z']
#if galex:
# cols = cols+['flux_fuv', 'flux_nuv']
#if unwise:
# cols = cols+['flux_w1', 'flux_w1', 'flux_w1', 'flux_w1']
tractor = fits_table(tractorfile, columns=cols)
hdr = fitsio.read_header(tractorfile)
if verbose:
print('Read {} sources from {}'.format(len(tractor), tractorfile))
data.update(_get_psfsize_and_depth(tractor, bands, pixscale, incenter=False))
# Read the maskbits image and build the starmask.
maskbitsfile = os.path.join(galaxydir, '{}-{}.fits.fz'.format(galaxy, filt2imfile['maskbits']))
if verbose:
print('Reading {}'.format(maskbitsfile))
maskbits = fitsio.read(maskbitsfile)
# initialize the mask using the maskbits image
starmask = ( (maskbits & MASKBITS['BRIGHT'] != 0) | (maskbits & MASKBITS['MEDIUM'] != 0) |
(maskbits & MASKBITS['CLUSTER'] != 0) | (maskbits & MASKBITS['ALLMASK_G'] != 0) |
(maskbits & MASKBITS['ALLMASK_R'] != 0) | (maskbits & MASKBITS['ALLMASK_Z'] != 0) )
# Are we doing sky tests? If so, build the dictionary of sky values here.
# subsky - dictionary of additional scalar value to subtract from the imaging,
# per band, e.g., {'g': -0.01, 'r': 0.002, 'z': -0.0001}
if sky_tests:
#imfile = os.path.join(galaxydir, '{}-{}-{}.fits.fz'.format(galaxy, filt2imfile[refband]['image'], refband))
hdr = fitsio.read_header(filt2imfile[refband]['image'], ext=1)
nskyaps = hdr['NSKYANN'] # number of annuli
# Add a list of dictionaries to iterate over different sky backgrounds.
data.update({'sky': []})
for isky in np.arange(nskyaps):
subsky = {}
subsky['skysuffix'] = '{}-skytest{:02d}'.format(filesuffix, isky)
for band in bands:
refskymed = hdr['{}SKYMD00'.format(band.upper())]
skymed = hdr['{}SKYMD{:02d}'.format(band.upper(), isky)]
subsky[band] = refskymed - skymed # *add* the new correction
print(subsky)
data['sky'].append(subsky)
# Read the basic imaging data and masks.
data = _read_image_data(data, filt2imfile, starmask=starmask,
filt2pixscale=filt2pixscale,
fill_value=fill_value, verbose=verbose)
# Find the galaxies of interest.
samplefile = os.path.join(galaxydir, '{}-{}.fits'.format(galaxy, filt2imfile['sample']))
sample = Table(fitsio.read(samplefile))
print('Read {} sources from {}'.format(len(sample), samplefile))
# keep all objects
galaxy_indx = []
galaxy_indx = np.hstack([np.where(sid == tractor.ref_id)[0] for sid in sample[REFIDCOLUMN]])
assert(np.all(sample[REFIDCOLUMN] == tractor.ref_id[galaxy_indx]))
#data['galaxy_indx'] = []
#data['galaxy_id'] = []
#for galid in np.atleast_1d(galaxy_id):
# galindx = np.where((tractor.ref_cat == 'R1') * (tractor.ref_id == galid))[0]
# if len(galindx) != 1:
# raise ValueError('Problem finding the central galaxy {} in the tractor catalog!'.format(galid))
# data['galaxy_indx'].append(galindx[0])
# data['galaxy_id'].append(galid)
#
# # Is the flux and/or ivar negative (and therefore perhaps off the
# # footprint?) If so, drop it here.
# for filt in bands:
# cenflux = getattr(tractor, 'flux_{}'.format(filt))[galindx[0]]
# cenivar = getattr(tractor, 'flux_ivar_{}'.format(filt))[galindx[0]]
# if cenflux <= 0.0 or cenivar <= 0.0:
# print('Central galaxy flux is negative. Off footprint or gap in coverage?')
# data['failed'] = True
# return data, []
# Do we need to take into account the elliptical mask of each source??
srt = np.argsort(tractor.flux_r[galaxy_indx])[::-1]
galaxy_indx = galaxy_indx[srt]
print('Sort by flux! ', tractor.flux_r[galaxy_indx])
galaxy_id = tractor.ref_id[galaxy_indx]
data['galaxy_id'] = galaxy_id
data['galaxy_indx'] = galaxy_indx
# Now build the multiband mask.
data = _build_multiband_mask(data, tractor, filt2pixscale,
fill_value=fill_value,
verbose=verbose)
# Gather some additional info that we want propagated to the output ellipse
# catalogs.
if redshift:
allgalaxyinfo = []
for galaxy_id, galaxy_indx in zip(data['galaxy_id'], data['galaxy_indx']):
galaxyinfo = { # (value, units) tuple for the FITS header
'id_cent': (galaxy_id, ''),
'redshift': (redshift, '')}
allgalaxyinfo.append(galaxyinfo)
else:
allgalaxyinfo = None
return data, allgalaxyinfo
def call_ellipse(onegal, galaxy, galaxydir, pixscale=0.262, nproc=1,
filesuffix='custom', bands=['g', 'r', 'z'], refband='r',
input_ellipse=None,
sky_tests=False, unwise=False, verbose=False,
clobber=False, debug=False, logfile=None):
"""Wrapper on legacyhalos.mpi.call_ellipse but with specific preparatory work
and hooks for the legacyhalos project.
"""
import astropy.table
from copy import deepcopy
from legacyhalos.mpi import call_ellipse as mpi_call_ellipse
if type(onegal) == astropy.table.Table:
onegal = onegal[0] # create a Row object
galaxy_id = onegal[GALAXYCOLUMN]
if logfile:
from contextlib import redirect_stdout, redirect_stderr
with open(logfile, 'a') as log:
with redirect_stdout(log), redirect_stderr(log):
data, galaxyinfo = read_multiband(galaxy, galaxydir, galaxy_id, bands=bands,
filesuffix=filesuffix, refband=refband,
pixscale=pixscale, redshift=onegal[ZCOLUMN],
sky_tests=sky_tests, verbose=verbose)
else:
data, galaxyinfo = read_multiband(galaxy, galaxydir, galaxy_id, bands=bands,
filesuffix=filesuffix, refband=refband,
pixscale=pixscale, redshift=onegal[ZCOLUMN],
sky_tests=sky_tests, verbose=verbose)
maxsma, delta_logsma = None, 4
#maxsma, delta_logsma = 200, 10
if sky_tests:
from legacyhalos.mpi import _done
def _wrap_call_ellipse():
skydata = deepcopy(data) # necessary?
for isky in np.arange(len(data['sky'])):
skydata['filesuffix'] = data['sky'][isky]['skysuffix']
for band in bands:
# We want to *add* this delta-sky because it is defined as
# sky_annulus_0 - sky_annulus_isky
delta_sky = data['sky'][isky][band]
                    print(' Adjusting {}-band sky background by {:4g} nanomaggies.'.format(band, delta_sky))
for igal in np.arange(len( | np.atleast_1d(data['galaxy_indx']) | numpy.atleast_1d |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
=================
desiutil.funcfits
=================
Module for fitting simple functions to 1D arrays
<NAME>, UC Santa Cruz
Fall 2015
"""
from __future__ import (print_function, absolute_import, division,
unicode_literals)
import numpy as np
import copy
import warnings
def func_fit(x, y, func, deg, xmin=None, xmax=None, w=None, **kwargs):
"""Simple function fit to 2 arrays.
Modified code originally from Ryan Cooke (PYPIT).
Parameters
----------
x : :class:`~numpy.ndarray`
Independent data values.
y : :class:`~numpy.ndarray`
Dependent data to fit.
func : :class:`str`
Name of the fitting function: polynomial, legendre, chebyshev.
deg : :class:`int` or :class:`dict`
Order of the fit.
xmin : :class:`float`, optional
Minimum value in the array (or the left limit for a
legendre/chebyshev polynomial).
xmax : :class:`float`, optional
        Maximum value in the array (or the right limit for a
legendre/chebyshev polynomial).
w : :class:`~numpy.ndarray`, optional
Weights to be used in the fitting (weights = 1/sigma).
Returns
-------
:class:`dict`
Dictionary describing the Fit including the coefficients.
"""
# Normalize
if xmin is None or xmax is None:
if x.size == 1:
xmin, xmax = -1.0, 1.0
else:
xmin, xmax = x.min(), x.max()
xv = 2.0 * (x-xmin)/(xmax-xmin) - 1.0
# Fit
fitters = {'polynomial': np.polynomial.polynomial.polyfit,
'legendre': np.polynomial.legendre.legfit,
'chebyshev': np.polynomial.chebyshev.chebfit}
try:
fit = fitters[func](xv, y, deg, w=w)
except KeyError:
raise ValueError("Fitting function '{0:s}' is not implemented yet.".format(func))
# Finish
fit_dict = dict(coeff=fit, order=deg, func=func, xmin=xmin, xmax=xmax,
**kwargs)
return fit_dict
def func_val(x, fit_dict):
"""Get values from a fit_dict.
Modified code originally from Ryan Cooke (PYPIT).
Parameters
----------
    x : :class:`~numpy.ndarray`
        Evaluate the fit at these coordinates.
    fit_dict : :class:`dict`
        Fit dictionary produced by :func:`func_fit`, containing the
        coefficients, function name and normalization limits.
Returns
-------
:class:`~numpy.ndarray`
Array containing the values.
"""
xv = 2.0 * (x-fit_dict['xmin'])/(fit_dict['xmax']-fit_dict['xmin']) - 1.0
values = {'polynomial': np.polynomial.polynomial.polyval,
'legendre': np.polynomial.legendre.legval,
'chebyshev': np.polynomial.chebyshev.chebval}
try:
val = values[fit_dict['func']](xv, fit_dict['coeff'])
except KeyError:
raise ValueError("Fitting function '{0:s}' is not implemented yet.".format(fit_dict['func']))
return val
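# Example usage (illustrative sketch with made-up data): fit a quadratic and
# evaluate it on a finer grid; func_fit normalizes x to [-1, 1] internally, so
# the returned dictionary can be evaluated at arbitrary coordinates later.
#
#   xx = np.linspace(0.0, 10.0, 50)
#   yy = 0.5 * xx**2 - 2.0 * xx + 1.0
#   fdict = func_fit(xx, yy, 'legendre', 2)
#   yfit = func_val(np.linspace(0.0, 10.0, 500), fdict)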
def iter_fit(xarray, yarray, func, order, weights=None, sigma=None,
max_rej=None, maxone=True, sig_rej=3.0, initialmask=None,
forceimask=False, xmin=None, xmax=None, niter=999, **kwargs):
"""A "robust" fit with iterative rejection is performed to the
`xarray`, `yarray` pairs.
Modified code originally from Ryan Cooke (PYPIT).
Parameters
----------
xarray : :class:`~numpy.ndarray`
Independent variable values.
yarray : :class:`~numpy.ndarray`
Dependent variable values.
func : :class:`str`
Name of the fitting function: polynomial, legendre, chebyshev.
order : :class:`int`
The order of the function to be used in the fitting.
sigma : :class:`~numpy.ndarray`, optional
Error in the yvalues. Used only for rejection.
weights : :class:`~numpy.ndarray`, optional
Weights to be used in the fitting (weights = 1/sigma).
maxone : :class:`bool`, optional [True]
If ``True``, only the most deviant point in a given iteration will
be removed.
sig_rej : :class:`float`, optional [3.0]
Confidence interval for rejection.
max_rej : :class:`int`, optional [None]
Maximum number of points to reject.
initialmask : :class:`~numpy.ndarray`
A mask can be supplied as input, these values will be masked for
the first iteration. 1 = value masked.
forceimask : :class:`bool`, optional [False]
If ``True``, the initialmask will be forced for all iterations.
niter : :class:`int`, optional [999]
Maximum number of iterations.
xmin : :class:`float`
Minimum value in the array (or the left limit for a
legendre/chebyshev polynomial).
xmax : :class:`float`
Maximum value in the array (or the right limit for a
legendre/chebyshev polynomial).
Returns
-------
:func:`tuple`
The tuple contains a dict containing the fit and a mask array
containing masked values.
"""
# Setup the initial mask
if initialmask is None:
mask = | np.zeros(xarray.size, dtype=np.int) | numpy.zeros |
# AUTOGENERATED! DO NOT EDIT! File to edit: 03_shape.ipynb (unless otherwise specified).
__all__ = ['getElemetType', 'tria_scheme', 'tetra_scheme', 'getGaussPoints', 'getShapeLine2', 'getShapeLine3',
'getShapeTria3', 'getShapeTria6', 'getShapeQuad4', 'getShapeQuad8', 'getShapeQuad9', 'getShapeTetra4',
'getShapeTetra10', 'getShapeHexa8', 'getShapeHexa20', 'getAllShapeFunctions']
# Cell
import numpy as np
from scipy.special import p_roots as gauss_scheme
np.set_printoptions(precision=4)
# Cell
def getElemetType(elemCoords):
"Determine the element type"
dict = {
"numDim_1": {
"numNodes_2": "Line2",
"numNodes_3": "Line3"
},
"numDim_2": {
"numNodes_3": "Tria3",
"numNodes_4": "Quad4",
"numNodes_6": "Tria6",
"numNodes_8": "Quad8",
"numNodes_9": "Quad9",
},
"numDim_3": {
"numNodes_4": "Tetra4",
"numNodes_8": "Hexa8",
"numNodes_10": "Tetra10",
"numNodes_20": "Hexa20"
},
}
try:
numNodes = elemCoords.shape[0]
numDim = elemCoords.shape[1] if elemCoords.shape[1] else 1
ElemType = dict.get(f"numDim_{numDim}").get(f"numNodes_{numNodes}")
if ElemType:
return ElemType
else:
raise NotImplementedError(
f"No {numDim}D element with {numNodes} nodes is available"
)
except NotImplementedError as error:
print(error)
except IndexError:
print("No valid coordinates array")
except AttributeError:
print("No valid coordinates array")
except TypeError:
print("No valid coordinates array")
# Cell
def tria_scheme(order):
    if order == 1:
xi = [[1./3., 1./3.]]
weight = [[1.]]
    elif order == 3:
r1 = 1./6.
r2 = 2./3.
w1 = 1./3.
xi = [[r1,r1],[r2,r1],[r1,r2]]
weight = [[w1],[w1],[w1]]
    elif order == 4:
r1 = 1./5.
r2 = 3./5.
r3 = 1./3.
w1 = 0.52083333
w2 = 0.52083333
w3 = 0.52083333
w4 = -0.56250000
xi = [[r1,r1],[r2,r1],[r1,r2],[r3,r3]]
weight = [[w1],[w2],[w3],[w4]]
return xi, weight
# Cell
def tetra_scheme(order):
    if order == 1:
xi = [[1./4., 1./4., 1./4.]]
weight = [[1.]]
    elif order == 4:
r1 = 0.5854102
r2 = 0.1381966
w1 = 1./4.
xi = [[r1,r2,r2],[r2,r1,r2],[r2,r2,r1],[r2,r2,r2]]
weight = [[w1],[w1],[w1],[w1]]
    elif order == 5:
r1 = 1./4.
r2 = 1./2.
r3 = 1./6.
w1 = 9./20.
w2 = -4./5.
xi = [[r2,r3,r3],[r3,r2,r3],[r3,r3,r2],[r3,r3,r3],[r1,r1,r1]]
weight = [[w1],[w1],[w1],[w1],[w2]]
return xi, weight
# Cell
def getGaussPoints(elemType, reduced=False):
point = []
weight = []
if "Line" in elemType:
stdOrder = 2 if "2" in elemType else 3
if reduced: stdOrder -= 1
ip, w = gauss_scheme(stdOrder)
point = [[ip[i]] for i in range(stdOrder)]
weight = [[w[i]] for i in range(stdOrder)]
elif "Tria" in elemType:
stdOrder = 1 if "3" in elemType else 4
if stdOrder == 4 and reduced: stdOrder = 3
point, weight = tria_scheme(stdOrder)
elif "Quad" in elemType:
stdOrder = 2 if "4" in elemType else 3
if reduced: stdOrder -= 1
ip, w = gauss_scheme(stdOrder)
point = [[ip[j], ip[i]] for i in range(stdOrder) for j in range(stdOrder)]
weight = [[w[j]*w[i]] for i in range(stdOrder) for j in range(stdOrder)]
elif "Tetra" in elemType:
stdOrder = 1 if "4" in elemType else 5
if stdOrder == 5 and reduced: stdOrder = 4
point, weight = tetra_scheme(stdOrder)
elif "Hexa" in elemType:
stdOrder = 2 if "8" in elemType else 3
if reduced: stdOrder -= 1
ip, w = gauss_scheme(stdOrder)
point = [[ip[k], ip[j], ip[i]] for i in range(stdOrder) for j in range(stdOrder) for k in range(stdOrder)]
weight = [[w[k]*w[j]*w[i]] for i in range(stdOrder) for j in range(stdOrder) for k in range(stdOrder)]
return np.array(point), np.array(weight)
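# Quadrature sketch over the Quad4 reference square [-1, 1] x [-1, 1]:
# integrating f(xi, eta) = xi**2 with the returned points and weights should
# give 4/3.
#
#   pts, wts = getGaussPoints("Quad4")
#   approx = sum(w[0] * p[0]**2 for p, w in zip(pts, wts))   # ~1.3333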
# Cell
def getShapeLine2(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 1:
raise NotImplementedError("1D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
############################################################################################################
# Tuple with xi_a combinatory
xi_comb = [-1,1]
############################################################################################################
# Calculate shape functions
N = np.array([0.5*(1+sign*xi) for sign in xi_comb])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN = np.array([0.5*sign for sign in xi_comb])
return N, dN
# Cell
def getShapeLine3(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 1:
raise NotImplementedError("1D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
############################################################################################################
# Tuple with xi_a combinatory
xi_comb = [-1,1]
############################################################################################################
# Calculate shape functions
N_lateral = np.array([0.5*item*xi*(1+item*xi) for item in xi_comb])
N_middle = np.array([(1+xi)*(1-xi)])
N = np.hstack((N_lateral[0], N_middle, N_lateral[1]))
############################################################################################################
# Calculate derivatives of shape functions -> xi
dN_lateral = np.array([0.5*item*(1+2.*item*xi) for item in xi_comb])
dN_middle = np.array([-2.*xi])
dN = np.hstack((dN_lateral[0], dN_middle, dN_lateral[1]))
return N, dN
# Cell
def getShapeTria3(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
L1 = gaussPoint[0]
L2 = gaussPoint[1]
L3 = 1-L1-L2
############################################################################################################
# Calculate shape functions
N = np.array([L1, L2, L3])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN_dxi = np.array([1., 0., -1.])
# Calculate derivatives of shape functions-> eta
dN_deta = np.array([0., 1., -1.])
# Calculate derivatives of shape functions
dN = np.vstack((dN_dxi, dN_deta))
return N, dN
# Cell
def getShapeTria6(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
L1 = gaussPoint[0]
L2 = gaussPoint[1]
L3 = 1-gaussPoint[0]-gaussPoint[1]
############################################################################################################
# Calculate shape functions
N = np.array([L1*(2.*L1-1.), L2*(2.*L2-1.), L3*(2.*L3-1.), 4*L1*L2, 4*L2*L3, 4*L1*L3])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN_dxi = np.array([4.*L1-1, 0., -4.*L3+1, 4.*L2, -4.*L2, 4.*(L3-L1)])
# Calculate derivatives of shape functions-> eta
dN_deta = np.array([0., 4.*L2-1, -4.*L3+1, 4.*L1, 4.*(L3-L2), -4.*L1])
# Calculate derivatives of shape functions
dN = np.vstack((dN_dxi, dN_deta))
return N, dN
# Cell
def getShapeQuad4(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
eta = gaussPoint[1]
############################################################################################################
# Tuple with xi_a and eta_a combinatory
xi_eta_comb = [(-1,-1),(1,-1),(1,1),(-1,1)]
############################################################################################################
# Calculate shape functions
N = np.array([0.25*(1.0+sign[0]*xi)*(1.0+sign[1]*eta) for sign in xi_eta_comb])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN_dxi = np.array([0.25*sign[0]*(1+sign[1]*eta) for sign in xi_eta_comb])
# Calculate derivatives of shape functions-> eta
dN_deta = np.array([0.25*sign[1]*(1+sign[0]*xi) for sign in xi_eta_comb])
# Calculate derivatives of shape functions
dN = np.vstack((dN_dxi, dN_deta))
return N, dN
# Cell
def getShapeQuad8(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
eta = gaussPoint[1]
############################################################################################################
# Tuple with xi_a and eta_a combinatory
xi_eta_comb = [(-1,-1),(1,-1),(1,1),(-1,1)]
############################################################################################################
# Calculate shape functions
# Nodes -> 1,2,3,4
N_lateral = np.array([0.25*(1+sign[0]*xi)*(1+sign[1]*eta)*(sign[0]*xi+sign[1]*eta-1) for sign in xi_eta_comb])
# Nodes -> 5,7
N_middle_xi = np.array([0.5*(1-xi**2)*(1+sign*eta) for sign in [-1,1]])
# Nodes -> 6,8
N_middle_eta = | np.array([0.5*(1-eta**2)*(1+sign*xi) for sign in [1,-1]]) | numpy.array |
import os
import numpy as np
import pandas as pd
baseline_data_dir = "../data/baselines"
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2015_WP #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
class WP_2015(object):
def __init__(self,):
pass
@classmethod
def Fig_6(self, direction, sectors):
def turbine_array(direction):
assert direction in [270, 222, 312], \
"Invalid wind direction in WP_2015!"
if direction == 270:
wt_array_270 = np.array([1, 9, 17, 25, 33, 41, 49, 57])
wts_270 = np.zeros((8, 8))
for i in range(8):
wts_270[i, :] = wt_array_270 + i * 1
                return wts_270.astype(int)
if direction == 222:
wt_array_222 = np.array([8, 15, 22, 29, 36])
wts_222 = np.zeros((8, 5))
wts_222[3, :] = wt_array_222
for i in range(3):
wts_222[i, :] = wt_array_222 - (3 - i)
for i in range(4):
wts_222[4 + i] = wt_array_222 + 8 * (i + 1)
                return wts_222.astype(int)
else:
wt_array_312 = np.array([1, 10, 19, 28, 37])
wts_312 = | np.zeros((8, 5)) | numpy.zeros |
import numpy as np
class Benchmark:
def __init__(self, dim=None):
self.dim = dim
self.bounds = None
class Alpine01(Benchmark):
def __init__(self, dim=2):
super(Alpine01, self).__init__(dim)
self.bounds = np.array(list(zip([-10.0] * dim, [10.0] * dim)))
def __call__(self, x):
assert x.shape[1] == self.dim
return np.sum(np.abs(x * np.sin(x) + 0.1 * x), axis=1)
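# Evaluation convention (sketch): each benchmark is called on an
# (n_points, dim) array and returns one value per point, e.g.
#
#   f = Alpine01(dim=2)
#   f(np.zeros((1, 2)))   # -> array([0.]), the global minimum at the origin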
class HumpCamel6(Benchmark):
def __init__(self, dim=2):
assert dim == 2
super(HumpCamel6, self).__init__(dim)
self.bounds = np.array([[-2., 2.], [-1.5, 1.5]])
def __call__(self, x):
assert x.shape[1] == self.dim
return (4.-2.1* x[:,0] ** 2 + x[:,0] ** 4 / 3) * x[:,0] ** 2 + x[:,0] * x[:,1] + (-4+4* x[:,1] ** 2) * x[:,1] ** 2
class Langermann(Benchmark):
def __init__(self, dim=2):
assert dim == 2
super(Langermann, self).__init__(dim)
self.bounds = np.array([[0., 10.], [0., 10.]])
def __call__(self, x):
assert x.shape[1] == self.dim
a = np.array([3, 5, 2, 1, 7], dtype=float)
b = np.array([5, 2, 1, 4, 9], dtype=float)
c = np.array([1, 2, 5, 2, 3], dtype=float)
numerators = np.cos(np.pi * ((np.vstack([x[:,0]] * 5).T - a) ** 2 + (np.vstack([x[:,1]] * 5).T - b) ** 2)) * c
denominators = np.exp(((np.vstack([x[:,0]] * 5).T - a) ** 2 + (np.vstack([x[:,1]] * 5).T - b) ** 2) / np.pi)
return -(numerators / denominators).sum(axis=1)
class Qing(Benchmark):
def __init__(self, dim=2):
super(Qing, self).__init__(dim)
self.bounds = np.array(list(zip([-self.dim-0.5] * dim, [self.dim+0.5] * dim)))
def __call__(self, x):
assert x.shape[1] == self.dim
return np.sum((x ** 2 - np.arange(1., self.dim+1, 1)) ** 2, axis=1)
class PenHolder(Benchmark):
def __init__(self, dim=2):
assert dim == 2
super(PenHolder, self).__init__(dim)
self.bounds = np.array(list(zip([-11.0] * dim, [11.0] * dim)))
def __call__(self, x):
assert x.shape[1] == self.dim
a = np.abs(1. - (np.sqrt(x[:, 0] ** 2 + x[:, 1] ** 2) / np.pi))
b = np.cos(x[:, 0]) * np.cos(x[:, 1]) * np.exp(a)
return -np.exp(-np.abs(b) ** -1)
class Schwefel26(Benchmark):
def __init__(self, dim=2):
super(Schwefel26, self).__init__(dim)
self.bounds = np.array(list(zip([-500.0] * dim, [500.0] * dim)))
def __call__(self, x):
assert x.shape[1] == self.dim
return 418.982887 * self.dim - np.sum(x * np.sin(np.sqrt(np.abs(x))), axis=1)
class Tripod(Benchmark):
def __init__(self, dim=2):
assert dim == 2
super(Tripod, self).__init__(dim)
self.bounds = np.array(list(zip([-100.0] * dim, [100.0] * dim)))
def __call__(self, x):
assert x.shape[1] == self.dim
p1 = (x[:, 0] >= 0).astype(float)
p2 = (x[:, 1] >= 0).astype(float)
return (p2 * (1.0 + p1) + np.abs(x[:,0] + 50.0 * p2 * (1.0 - 2.0 * p1)) + \
np.abs(x[:,1] + 50.0 * (1.0 - 2.0 * p2)))
class HolderTable(Benchmark):
def __init__(self, dim=2):
assert dim == 2
super(HolderTable, self).__init__(dim)
self.bounds = np.array([(-10., 10.), (-10., 10.)])
def __call__(self, x):
assert x.shape[1] == self.dim
return -np.abs(np.sin(x[:, 0]) * np.cos(x[:, 1]) * np.exp(np.abs(1-np.linalg.norm(x, axis=1) / np.pi)))
class UrsemWaves(Benchmark):
def __init__(self, dim=2):
assert dim == 2
super(UrsemWaves, self).__init__(dim)
self.bounds = np.array([(-0.9, 1.2), (-1.2, 1.2)])
def __call__(self, x):
assert x.shape[1] == self.dim
u = -0.9 * x[:, 0] ** 2
v = (x[:, 1] ** 2 - 4.5 * x[:, 1] ** 2) * x[:, 0] * x[:, 1]
w = 4.7 * np.cos(3 * x[:, 0] - x[:, 1] ** 2 * (2 + x[:, 0])) * np.sin(2.5 * np.pi * x[:, 0])
return u + v + w
class VenterSobiezcczanskiSobieski(Benchmark):
def __init__(self, dim=2):
assert dim == 2
super(VenterSobiezcczanskiSobieski, self).__init__(dim)
self.bounds = np.array(list(zip([-50.0] * dim, [50.0] * dim)))
def __call__(self, x):
assert x.shape[1] == self.dim
u = x[:, 0] ** 2.0 - 100.0 * | np.cos(x[:, 0]) | numpy.cos |
from __future__ import division
import datetime
import os
import numpy as np
from scipy import linalg
import matplotlib
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
else:
matplotlib.use('Qt5Agg')
from matplotlib import rcParams
import matplotlib.pyplot as plt
# import bokeh.plotting as b_plt
# from bokeh.io import vplot, hplot, output_file, show
# from bokeh.models.tools import WheelZoomTool
from alg_tools_1d import dirac_recon_time, periodicSinc, distance
# for latex rendering
os.environ['PATH'] = os.environ['PATH'] + ':/usr/texbin' + \
':/opt/local/bin' + ':/Library/TeX/texbin/'
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
if __name__ == '__main__':
# various experiment settings
save_fig = True # save figure or not
fig_format = r'png' # file type used to save the figure, e.g., pdf, png, etc.
stop_cri = 'max_iter' # stopping criteria: 1) mse; or 2) max_iter
web_fig = False # generate html file for the figures
K = 5 # number of Diracs
M = K * 8 # number of Fourier samples (at least K)
tau = 1 # period of the Dirac stream
# number of time domain samples
L = (2 * M + 1)
Tmax = tau / L # the average sampling step size (had we used a uniform sampling setup)
# generate the random sampling time instances
t_samp = np.arange(0, L, dtype=float) * Tmax
t_samp += np.sign(np.random.randn(L)) * np.random.rand(L) * Tmax / 2.
# round t_samp to [0, tau)
t_samp -= np.floor(t_samp / tau) * tau
# generate parameters for the periodic stream of Diracs
B = (2. * M + 1.) / tau # bandwidth of the sampling filter
'''
# generate random values for Dirac amplitudes and locations
# amplitudes of the Diracs
ak = np.sign(np.random.randn(K)) * (1 + (np.random.rand(K) - 0.5) / 1.)
# locations of the Diracs
if K == 1:
tk = np.random.rand()
else:
a = 4. / L
uk = np.random.exponential(scale=1. / K, size=(K - 1, 1))
tk = np.cumsum(a + (1. - K * a) * (1 - 0.1 * np.random.rand()) / uk.sum() * uk)
tk = np.sort(np.hstack((np.random.rand() * tk[0] / 2., tk)) + (1 - tk[-1]) / 2.) * tau
# save Dirac parameter
time_stamp = datetime.datetime.now().strftime("%-d-%-m_%H_%M")
file_name = './data/Dirac_Data_' + time_stamp + '.npz'
np.savez(file_name, tk=tk, ak=ak, K=K, time_stamp=time_stamp)
'''
# load saved data
time_stamp = '20-12_02_22'
stored_param = np.load('./data/Dirac_Data_' + time_stamp + '.npz')
tk = stored_param['tk']
ak = stored_param['ak']
print('time stamp: ' + time_stamp +
'\n=======================================\n')
# compute the noiseless Fourier series coefficients
tk_grid, m_grid_gt = np.meshgrid(tk, np.arange(-np.floor(B * tau / 2.), 1 + np.floor(B * tau / 2.)))
x_hat_noiseless = 1. / tau * np.dot(np.exp(-2j * np.pi / tau * m_grid_gt * tk_grid), ak)
m_grid, t_samp_grid = np.meshgrid(np.arange(-np.floor(B * tau / 2.), 1 + np.floor(B * tau / 2.)), t_samp)
# build the linear transformation matrix that links x_hat with the samples
G = 1. / B * np.exp(2j * np.pi / tau * m_grid * t_samp_grid)
y_ell_noiseless = np.real(np.dot(G, x_hat_noiseless))
# add noise
P = 5
noise = np.random.randn(L)
noise = noise / linalg.norm(noise) * linalg.norm(y_ell_noiseless) * 10 ** (-P / 20.)
y_ell = y_ell_noiseless + noise
# noise energy, in the noiseless case 1e-10 is considered as 0
noise_level = np.max([1e-10, linalg.norm(noise)])
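    # Illustrative check (added sketch, no effect on the reconstruction below):
    # the noise vector was scaled above so that the empirical SNR matches the
    # target level P in dB.
    snr_empirical = 20 * np.log10(linalg.norm(y_ell_noiseless) / linalg.norm(noise))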
max_ini = 100 # maximum number of random initialisations
# FRI reconstruction
xhat_recon, min_error, c_opt, ini = dirac_recon_time(G, y_ell, K, noise_level, max_ini, stop_cri)
print(r'Noise level: {0:.2e}'.format(noise_level))
print(r'Minimum approximation error |a - Gb|_2: {0:.2e}'.format(min_error))
# reconstruct Diracs' locations tk
z = np.roots(c_opt)
z = z / np.abs(z)
tk_recon = np.real(tau * 1j / (2 * np.pi) * np.log(z))
tk_recon = np.sort(tk_recon - np.floor(tk_recon / tau) * tau)
# reconstruct amplitudes ak
Phi_recon = periodicSinc(np.pi * B * (np.reshape(t_samp, (-1, 1), order='F') -
np.reshape(tk_recon, (1, -1), order='F')),
B * tau)
ak_recon = np.real(linalg.lstsq(Phi_recon, y_ell)[0])
# location estimation error
t_error = distance(tk_recon, tk)[0]
# plot reconstruction
plt.close()
fig = plt.figure(num=1, figsize=(5.5, 2.5), dpi=90)
# sub-figure 1
ax1 = plt.axes([0.125, 0.59, 0.85, 0.31])
markerline211_1, stemlines211_1, baseline211_1 = \
ax1.stem(tk, ak, label='Original Diracs')
plt.setp(stemlines211_1, linewidth=1.5, color=[0, 0.447, 0.741])
plt.setp(markerline211_1, marker='^', linewidth=1.5, markersize=8,
markerfacecolor=[0, 0.447, 0.741], mec=[0, 0.447, 0.741])
plt.setp(baseline211_1, linewidth=0)
markerline211_2, stemlines211_2, baseline211_2 = \
plt.stem(tk_recon, ak_recon, label='Estimated Diracs')
plt.setp(stemlines211_2, linewidth=1.5, color=[0.850, 0.325, 0.098])
plt.setp(markerline211_2, marker='*', linewidth=1.5, markersize=10,
markerfacecolor=[0.850, 0.325, 0.098], mec=[0.850, 0.325, 0.098])
plt.setp(baseline211_2, linewidth=0)
plt.axhline(0, color='k')
plt.xlim([0, tau])
plt.ylim([1.17 * np.min(np.concatenate((ak, ak_recon, np.array(0)[np.newaxis]))),
1.17 * np.max(np.concatenate((ak, ak_recon, np.array(0)[np.newaxis])))])
# plt.xlabel(r'$t$', fontsize=12)
plt.ylabel('amplitudes', fontsize=12)
ax1.yaxis.set_label_coords(-0.095, 0.5)
plt.legend(numpoints=1, loc=0, fontsize=9, framealpha=0.3,
handletextpad=.2, columnspacing=0.6, labelspacing=0.05, ncol=2)
    t_error_pow = int(np.floor(np.log10(t_error)))
if np.isinf(P):
plt.title(r'$K={0}$, $L={1}$, '
r'$\mbox{{SNR}}=\mbox{{inf }}$dB, '
r'$t_{{\mbox{{\footnotesize err}}}}={2:.2f}\times10^{other}$'.format(repr(K), repr(L),
t_error / 10 ** t_error_pow,
other='{' + str(
t_error_pow) + '}'),
fontsize=12)
else:
plt.title(r'$K={0}$, $L={1}$, '
r'$\mbox{{SNR}}={2}$dB, '
r'$t_{{\mbox{{\footnotesize err}}}}={3:.2f}\times10^{other}$'.format(repr(K), repr(L), repr(P),
t_error / 10 ** t_error_pow,
other='{' + str(
t_error_pow) + '}'),
fontsize=12)
# sub-figure 2
t_plt = np.linspace(0, tau, num=np.max([10 * L, 1000]))
    m_plt_grid, t_plt_grid = np.meshgrid(np.arange(-np.floor(B * tau / 2.), 1 + np.floor(B * tau / 2.)), t_plt)
import numpy as np
from scipy.spatial import distance
import matplotlib.pyplot as plt
import itertools
import sys
import math
import random
import pandas
# todo combine data gen and neural network into one script
size_training_set = 20000.
nx_sensor = 5.
ny_sensor = 5.
nz_sensor = 1.
number_training_jammers = 400
nx_quadrants = 9.
ny_quadrants = 9.
x_min_quadrant = 0.
x_max_quadrant = 10000.
y_min_quadrant = 0.
y_max_quadrant = 10000.
x_length_quadrant = (x_max_quadrant - x_min_quadrant)/nx_quadrants
y_length_quadrant = (y_max_quadrant - y_min_quadrant)/ny_quadrants
quad_labels = range(0,int(nx_quadrants*ny_quadrants))
quad_labels = np.array(quad_labels)
quad_labels = np.reshape(quad_labels, (int(nx_quadrants), int(ny_quadrants)))
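# Illustrative helper (added sketch, not used by the generation code below):
# map a point (x, y) to its quadrant label on the grid defined above. Boundary
# points are clipped into the grid; the row/column convention is an assumption.
def quadrant_label(x, y):
    ix = int(np.clip((x - x_min_quadrant) // x_length_quadrant, 0, nx_quadrants - 1))
    iy = int(np.clip((y - y_min_quadrant) // y_length_quadrant, 0, ny_quadrants - 1))
    return quad_labels[ix, iy]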
# np.random.seed(42)
nx = 20
ny = 20
nz = 1
nP = 1
num_quads = int(nx_quadrants*ny_quadrants)
max_combo = 3
min_combo = 1
max_jammers = nx*ny
# quad_combos = []
# for L in range(min_combo, max_combo+1):
# for subset in itertools.combinations(range(0,num_quads),L):
# quad_combos.append(list(subset))
# print("Quad Combo Level Complete: %d" % L)
# print(len(quad_combos))
def nCr(n,r):
f = math.factorial
return f(n) / f(r) / f(n-r)
combo_1 = max_jammers
combo_2 = nCr(max_jammers,2)
x_j = np.linspace(100, 6900, nx)
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import PIL.Image
from lmnet.data_processor import Processor
def resize(image, size=[256, 256]):
"""Resize an image.
Args:
image (np.ndarray): an image numpy array.
size: [height, width]
"""
width = size[1]
height = size[0]
if width == image.shape[1] and height == image.shape[0]:
return image
image = PIL.Image.fromarray(np.uint8(image))
image = image.resize([width, height])
image = np.array(image)
assert image.shape[0] == height
assert image.shape[1] == width
return image
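# Usage sketch (illustrative): shrink a random RGB image to 256x256.
#
#   img = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
#   small = resize(img, size=[256, 256])
#   assert small.shape == (256, 256, 3)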
def square(image, gt_boxes, fill=127.5):
"""Square an image.
Args:
image: An image numpy array.
gt_boxes: Python list ground truth boxes in the image. shape is [num_boxes, 5(x, y, width, height)].
fill: Fill blank by this number. (Default value = 127.5)
"""
origin_width = image.shape[1]
origin_height = image.shape[0]
diff = abs(origin_width - origin_height)
if diff == 0:
return image, gt_boxes
if origin_width < origin_height:
top = bottom = 0
size = origin_height
result = np.full((size, size, image.shape[2]), fill, dtype=image.dtype)
if diff % 2 == 0:
left = right = int(diff / 2)
else:
left = diff // 2
right = left + 1
result[:, left:-right, :] = image
else:
left = right = 0
size = origin_width
result = np.full((size, size, image.shape[2]), fill, dtype=image.dtype)
if diff % 2 == 0:
top = bottom = int(diff / 2)
else:
top = diff // 2
bottom = top + 1
result[top:-bottom, :, :] = image
if gt_boxes is not None and len(gt_boxes) != 0:
gt_boxes[:, 0] = gt_boxes[:, 0] + left
gt_boxes[:, 1] = gt_boxes[:, 1] + top
gt_boxes[:, 2] = gt_boxes[:, 2]
gt_boxes[:, 3] = gt_boxes[:, 3]
return result, gt_boxes
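# Usage sketch (illustrative): pad a landscape image to a square; the ground
# truth boxes are shifted by the same top/left padding offset.
#
#   img = np.zeros((300, 400, 3))
#   boxes = np.array([[10., 20., 50., 60., 1.]])
#   squared, shifted = square(img, boxes)
#   assert squared.shape[:2] == (400, 400)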
def resize_with_gt_boxes(image, gt_boxes, size=(256, 256)):
"""Resize an image and gt_boxes.
Args:
image (np.ndarray): An image numpy array.
gt_boxes (np.ndarray): Ground truth boxes in the image. shape is [num_boxes, 5(x, y, width, height)].
size: [height, width]
"""
origin_width = image.shape[1]
origin_height = image.shape[0]
width = size[1]
height = size[0]
resized_image = resize(image, (height, width))
if gt_boxes is None:
return resized_image, None
resized_gt_boxes = gt_boxes.copy()
scale = [height / origin_height, width / origin_width]
if gt_boxes is not None and len(gt_boxes) != 0:
resized_gt_boxes[:, 0] = gt_boxes[:, 0] * scale[1]
resized_gt_boxes[:, 1] = gt_boxes[:, 1] * scale[0]
resized_gt_boxes[:, 2] = gt_boxes[:, 2] * scale[1]
resized_gt_boxes[:, 3] = gt_boxes[:, 3] * scale[0]
# move boxes beyond boundary of image for scaling error.
resized_gt_boxes[:, 0] = np.minimum(resized_gt_boxes[:, 0],
width - resized_gt_boxes[:, 2])
resized_gt_boxes[:, 1] = np.minimum(resized_gt_boxes[:, 1],
height - resized_gt_boxes[:, 3])
return resized_image, resized_gt_boxes
def resize_keep_ratio_with_gt_boxes(image, gt_boxes, size=(256, 256)):
"""Resize keeping ratio an image and gt_boxes.
Args:
image (np.ndarray): An image numpy array.
gt_boxes (list): Python list ground truth boxes in the image. shape is
[num_boxes, 5(x, y, width, height)].
size: [height, width]
"""
origin_width = image.shape[1]
origin_height = image.shape[0]
if origin_width < origin_height:
height = size[0]
width = int(origin_width * height / origin_height)
else:
width = size[1]
height = int(origin_height * width / origin_width)
resized_image = resize(image, (height, width))
scale = [height / origin_height, width / origin_width]
if gt_boxes is not None and len(gt_boxes) != 0:
gt_boxes[:, 0] = gt_boxes[:, 0] * scale[1]
gt_boxes[:, 1] = gt_boxes[:, 1] * scale[0]
gt_boxes[:, 2] = gt_boxes[:, 2] * scale[1]
gt_boxes[:, 3] = gt_boxes[:, 3] * scale[0]
return resized_image, gt_boxes
def resize_with_joints(image, joints, image_size):
"""Resize image with joints to target image_size.
Args:
image: a numpy array of shape (height, width, 3).
joints: a numpy array of shape (num_joints, 3).
image_size: a tuple, (new_height, new_width).
Returns:
resized_image: a numpy array of shape (new_height, new_width, 3).
new_joints: a numpy array of shape (num_joints, 3).
"""
original_height, original_width, _ = image.shape
scale_height = image_size[0] / original_height
scale_width = image_size[1] / original_width
resized_image = resize(image, image_size)
new_joints = joints.copy()
new_joints[:, 0] *= scale_width
new_joints[:, 1] *= scale_height
return resized_image, new_joints
def per_image_standardization(image):
"""Image standardization per image.
https://www.tensorflow.org/api_docs/python/image/image_adjustments#per_image_standardization
Args:
image: An image numpy array.
"""
image = image.astype(np.float32)
mean = image.mean()
stddev = np.std(image)
adjusted_stddev = max(stddev, 1.0 / np.sqrt(image.size))
image -= mean
image /= adjusted_stddev
return image
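# Quick check (illustrative): the standardized output has approximately zero
# mean and unit standard deviation.
#
#   img = np.random.rand(32, 32, 3) * 255
#   out = per_image_standardization(img)
#   assert abs(out.mean()) < 1e-3 and abs(out.std() - 1.0) < 1e-3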
def per_image_linear_quantize(image, bit):
r"""Linear quantize per image.
.. math::
\mathbf{Y} =
\frac{\text{round}\big(\frac{\mathbf{X}}{max\_value} \cdot (2^{bit}-1)\big)}{2^{bit}-1} \cdot max\_value
Args:
image: An image numpy array.
bit: Quantize bit.
"""
    min_value = np.min(image)
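    # A minimal sketch of the quantization formula from the docstring above
    # (assumed continuation, not the original implementation; max_value is
    # taken here as the absolute maximum of the image):
    #
    #   max_value = np.max(np.abs(image))
    #   n_levels = 2 ** bit - 1
    #   return np.round(image / max_value * n_levels) / n_levels * max_value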
#coding=utf-8
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import Image, NavSatFix
from map_generator.msg import tjy
from nav_msgs.msg import Path
import numpy as np
import time
from googleplaces import GooglePlaces
import googlemaps
import time
import sys
import math
from math import cos,sin,tan,sqrt
from visualization_msgs.msg import Marker
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
C_EARTH = 6378137.0
class GoogleMaps(object):
def __init__(self):
self._GOOGLE_MAPS_KEY = "<KEY>"
self._Google_Places = GooglePlaces(self._GOOGLE_MAPS_KEY)
self._Google_Geocod = googlemaps.Client(key=self._GOOGLE_MAPS_KEY)
def _nearby_search(self, lng, lat, language, radius, result=None):
if result is None:
nearby_query_result = self._Google_Places.nearby_search(language=language,
lat_lng={'lat': lat, 'lng': lng}, radius=radius)
else:
if result.has_next_page_token:
#print(result.next_page_token)
nearby_query_result = self._Google_Places.nearby_search(
pagetoken=result.next_page_token, lat_lng={'lat': lat, 'lng': lng}, radius=radius)
else:
nearby_query_result = None
return nearby_query_result
def get_all_data(self, lng, lat, language='en', radius=100):
count = 0
list_return_info = []
list_nearby_search_result = self._nearby_search(lng, lat, language, radius)
while(list_nearby_search_result is not None):
for place in list_nearby_search_result.places:
# Returned places from a query are place summaries.
print(place.name)
print(place.geo_location['lng'])
print(place.geo_location['lat'])
print(count)
count = count+1
list_return_info.append({"name":place.name, "lng":place.geo_location['lng'], "lat":place.geo_location['lat']})
#print place.place_id
# The following method has to make a further API call.
#place.get_details()
# Referencing any of the attributes below, prior to making a call to
# get_details() will raise a googleplaces.GooglePlacesAttributeError.
#print place.details # A dict matching the JSON response from Google.
#print place.local_phone_number
#print place.international_phone_number
#print place.website
#print place.url
# Are there any additional pages of results?
list_nearby_search_result = self._nearby_search(lng, lat, language, radius, list_nearby_search_result)
return list_return_info
class Transform(object):
def __init__(self):
self.R = None
self.t = None
def centroid_point(self, samples):
means = np.mean(samples, axis=0)
return means
def transform_lamda(self, A, B):
A_norm = np.sum(A*A,axis=1)
        B_norm = np.sum(B * B, axis=1)
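    def estimate_rigid_transform_sketch(self, A, B):
        """Illustrative sketch (not an original method of this class): estimate
        a rotation R and translation t with B ~= A @ R.T + t using the
        Kabsch/SVD approach, consistent with the R and t attributes above."""
        import numpy as np
        centroid_A = np.mean(A, axis=0)
        centroid_B = np.mean(B, axis=0)
        H = (A - centroid_A).T.dot(B - centroid_B)
        U, _, Vt = np.linalg.svd(H)
        R = Vt.T.dot(U.T)
        if np.linalg.det(R) < 0:
            # fix an improper rotation (reflection)
            Vt[-1, :] *= -1
            R = Vt.T.dot(U.T)
        t = centroid_B - R.dot(centroid_A)
        return R, t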
import os
import random
import sys
import joblib
import math
import lightgbm as lgb
import xgboost as xgb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.svm as svm
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from catboost import CatBoostClassifier
from sklearn.metrics import (accuracy_score, average_precision_score,
classification_report, confusion_matrix, f1_score,
precision_recall_curve, roc_auc_score, roc_curve)
from sklearn.model_selection import GroupKFold
from sklearn.naive_bayes import GaussianNB
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
import keras.callbacks as kcallbacks
from utilClass import RocAucMetricCallback
from utils import series_to_supervised
from tqdm import tqdm
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
sys.path.append('..')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
class AtoA:
def __init__(self, mode, type='df', seed=2021, scale='all'):
self.seed = seed
        self.window = 0  # length of the sliding window used for online data reading
        self.win_df = pd.DataFrame()  # dataframe kept at the maximum sliding-window length during online reading
        self.mode = mode  # offline or online
        self.type = type  # df or dcs
        self.scale = scale  # model scale
        self.current_row = None  # sample at the current time step
        self.pre_row = None  # sample at the previous time step
    # dogfight feature-engineering utility
    def FE_DF(self, data):
        """ DF feature engineering
        Args:
            data (dataframe): raw data
        Returns:
            DataFrame: data after feature engineering
        """
data = data.sort_values(by=['id'])
if self.scale == 'all':
# 计算敌机的速度,先用diffh函数得到和上一时刻xyz差值,然后除以时间得到速度
for f in ['x', 'y', 'z']:
data['enemy_v_{}'.format(f)] = data.groupby('id')[
'enemy_{}'.format(f)].diff(1) / 0.02
# 敌我两机加速度,先用diffh函数得到和上一时刻v_x,v_y,v_z差值,然后除以时间得到加速度
for f in ['v_x', 'v_y', 'v_z']:
data[f'my_{f}_acc'] = data.groupby(
'id')[f'my_{f}'].diff() / 0.2
data[f'enemy_{f}_acc'] = data.groupby(
'id')[f'enemy_{f}'].diff() / 0.2
# 敌我两机速度与位置交互式差值
for f in ['x', 'y', 'z', 'v_x', 'v_y', 'v_z']:
data[f'{f}_me_minus'] = data[f'my_{f}'] - data[f'enemy_{f}']
# 飞机之间的距离
data['distance'] = ((data['my_x'] - data['enemy_x'])**2 +
(data['my_y'] - data['enemy_y'])**2 +
(data['my_z'] - data['enemy_z'])**2)**0.5
# 瞄准夹角
data['cos'] = ((data['my_v_x'] * (data['enemy_x'] - data['my_x'])) +
(data['my_v_y'] * (data['enemy_y'] - data['my_y'])) +
(data['my_v_z'] * (data['enemy_z'] - data['my_z'])))
# 合速度
data['speedAll'] = ((data['my_v_x']**2 + data['my_v_y']**2 +
data['my_v_z']**2)**0.5)
# 夹角cos值
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
# 缺失值补0
data.fillna(0, inplace=True)
return data
    def online_FE_DF(self, row_dict):
        """ DF online feature engineering
        Args:
            row_dict(dict): one incoming record as a dict
        Returns:
            DataFrame: record dataframe with the added features
        """
        # convert the dict to a dataframe
data = pd.DataFrame(row_dict, index=[0])
# 飞机之间的距离
data['distance'] = ((data['my_x'] - data['enemy_x'])**2 +
(data['my_y'] - data['enemy_y'])**2 +
(data['my_z'] - data['enemy_z'])**2)**0.5
# 瞄准夹角
data['cos'] = ((data['my_v_x'] * (data['enemy_x'] - data['my_x'])) +
(data['my_v_y'] * (data['enemy_y'] - data['my_y'])) +
(data['my_v_z'] * (data['enemy_z'] - data['my_z'])))
# 合速度
data['speedAll'] = ((data['my_v_x']**2 + data['my_v_y']**2 +
data['my_v_z']**2)**0.5)
# 夹角cos值
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
# 缺失值补0
data.fillna(0, inplace=True)
return data
# DCS特征工程工具函数
    def FE_DCS(self, data_):
        """ DCS feature engineering
        Args:
            data (dataframe): raw data
        Returns:
            DataFrame: data after feature engineering
        """
data = data_.copy(deep=True)
if self.mode == 'offline':
# 如果是离线训练,需要根据id进行数据分组
data = data.sort_values(by=['id'])
# 飞机之间的距离
data['distance'] = (
(data['my_position_x'] - data['enemy_position_x'])**2 +
(data['my_position_y'] - data['enemy_position_y'])**2 +
(data['my_position_z'] - data['enemy_position_z'])**2)**0.5
# 向量乘法,向量 a = (x,y,z) b = (x2,y2,z2) c = (x3,y3,z3),a代表我机速度向量
# b代表位置向量,c代表敌机位置向量,我机中心到敌机中心向量d = c - b
# d与a之间cos = d×a/(|d|*|a|)
data['cos'] = ((data['my_speed_x'] *
(data['enemy_position_x'] - data['my_position_x'])) +
(data['my_speed_y'] *
(data['enemy_position_y'] - data['my_position_y'])) +
(data['my_speed_z'] *
(data['enemy_position_z'] - data['my_position_z'])))
# 速度向量
data['speedAll'] = ((data['my_speed_x']**2 + data['my_speed_y']**2 +
data['my_speed_z']**2)**0.5)
# 向量之间夹角
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
# 敌我两机位置交互式差值
for f in ['position_x', 'position_y', 'position_z']:
data[f'{f}_diff'] = data[f'my_{f}'] - data[f'enemy_{f}']
return data
@staticmethod
def _caculate_speed_connect_cos(x, y, z, enemy_x, enemy_y, enemy_z,
speed_x, speed_y, speed_z):
"""
        Compute the angle between the own-to-enemy line-of-sight vector and the velocity vector.
        Args:
            x, y, z: own-aircraft coordinates
            enemy_x, enemy_y, enemy_z: enemy-aircraft coordinates
            speed_x, speed_y, speed_z: own-aircraft or enemy velocity
        Returns:
            speed_connect_cos: cosine of the angle between the line-of-sight vector and the velocity vector
"""
connect_vec = np.array([enemy_x - x, enemy_y - y, enemy_z - z])
my_speed_vec = np.array([speed_x, speed_y, speed_z])
speed_connect_cos = connect_vec.dot(my_speed_vec) / np.sqrt(
connect_vec.dot(connect_vec) * my_speed_vec.dot(my_speed_vec))
return speed_connect_cos
@staticmethod
def _caculate_speed_cos(speed_x, speed_y, speed_z, enemy_speed_x,
enemy_speed_y, enemy_speed_z):
"""
计算我机速度矢量与敌机速度矢量夹角
Args:
speed_x, speed_y, speed_z:我机速度
enemy_speed_x, enemy_speed_y, enemy_speed_z: 敌机速度
Returns:
speed_cos:敌机速度与我机速度矢量夹角余弦值
"""
my_speed_vec = np.array([speed_x, speed_y, speed_z])
enemy_speed_vec = np.array(
[enemy_speed_x, enemy_speed_y, enemy_speed_z])
speed_cos = my_speed_vec.dot(enemy_speed_vec) / np.sqrt(
my_speed_vec.dot(my_speed_vec) *
enemy_speed_vec.dot(enemy_speed_vec))
return speed_cos
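    # Usage sketch (illustrative): the cosine is close to 1 for two aircraft
    # flying in the same direction and close to -1 for a head-on geometry.
    #
    #   AtoA._caculate_speed_cos(100., 0., 0., 100., 0., 0.)    # ->  1.0
    #   AtoA._caculate_speed_cos(100., 0., 0., -100., 0., 0.)   # -> -1.0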
    def FE_DCS_new(self, data_):
        """Feature engineering for the new DCS task
        Args:
            data_ (dataframe): raw data
        Returns:
            data: data after feature engineering
"""
data = data_.copy()
data = data.sort_values(by=['id', 'ISO time'])
data.reset_index(drop=True, inplace=True)
data.rename(columns={
'U': 'x',
'V': 'z',
'Altitude': 'y',
'enemy_U': 'enemy_x',
'enemy_V': 'enemy_z',
'enemy_Altitude': 'enemy_y',
},
inplace=True)
if self.mode == 'offline':
if self.scale == 'all':
# 计算我机速度
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data.groupby('id')['x'].diff(),
'speed_y': data.groupby('id')['y'].diff(),
'speed_z': data.groupby('id')['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# 计算敌机速度
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x':
data.groupby('id')['enemy_x'].diff(),
'enemy_speed_y':
data.groupby('id')['enemy_y'].diff(),
'enemy_speed_z':
data.groupby('id')['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# 计算敌我距离
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# 计算我机速度与敌我连线夹角余弦值
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# 计算敌机速度与敌我连线夹角余弦值
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# 计算两机速度夹角余弦值
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# 我机朝向处理
data['Heading'] = data['Heading'] % 360
# 计算相对位置与速度
for f in [
'x', 'y', 'z', 'speed_x', 'speed_y', 'speed_z', 'speed'
]:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# 计算是否领先追逐
data['is_lead_chase'] = data.apply(
lambda x: self._is_lead_chase(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'],
x['enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'],
x['enemy_speed_x'], x['enemy_speed_y'], x[
'enemy_speed_z'], x['speed_connect_cos'], x[
'enemy_speed_connect_cos']),
axis=1)
# 筛除不能开火标签(两机距离大于1000或背对)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
elif self.scale == 'light':
# 计算我机速度
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data.groupby('id')['x'].diff(),
'speed_y': data.groupby('id')['y'].diff(),
'speed_z': data.groupby('id')['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# 计算敌机速度
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x':
data.groupby('id')['enemy_x'].diff(),
'enemy_speed_y':
data.groupby('id')['enemy_y'].diff(),
'enemy_speed_z':
data.groupby('id')['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# 计算敌我距离
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# 计算我机速度与敌我连线夹角余弦值
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# 计算敌机速度与敌我连线夹角余弦值
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# 计算两机速度夹角余弦
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# 计算相对位置
for f in ['z', 'speed']:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# 计算是否领先追逐
data['is_lead_chase'] = data.apply(
lambda x: self._is_lead_chase(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'],
x['enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'],
x['enemy_speed_x'], x['enemy_speed_y'], x[
'enemy_speed_z'], x['speed_connect_cos'], x[
'enemy_speed_connect_cos']),
axis=1)
# 筛除不能开火标签(两机距离大于1000或背对)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
if self.mode == 'online':
if self.scale == 'all':
# 计算我机速度
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data['x'].diff(),
'speed_y': data['y'].diff(),
'speed_z': data['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# 计算敌机速度
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x': data['enemy_x'].diff(),
'enemy_speed_y': data['enemy_y'].diff(),
'enemy_speed_z': data['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# 计算敌我距离
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# 计算我机速度与敌我连线夹角余弦值
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# 计算敌机速度与敌我连线夹角余弦值
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# 计算两机速度夹角余弦值
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# 我机朝向处理
data['Heading'] = data['Heading'] % 360
# 计算相对位置与速度
for f in [
'x', 'y', 'z', 'speed_x', 'speed_y', 'speed_z', 'speed'
]:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# 筛除不能开火标签(两机距离大于1000或背对)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
elif self.scale == 'light':
# 计算我机速度
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data['x'].diff(),
'speed_y': data['y'].diff(),
'speed_z': data['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# 计算敌机速度
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x': data['enemy_x'].diff(),
'enemy_speed_y': data['enemy_y'].diff(),
'enemy_speed_z': data['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# 计算敌我距离
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# 计算我机速度与敌我连线夹角余弦值
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# 计算敌机速度与敌我连线夹角余弦值
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# 计算两机速度夹角余弦
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# 计算相对位置
for f in ['y', 'speed']:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# 筛除不能开火标签(两机距离大于1000或背对)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
    # DCS online feature engineering
    def online_FE_DCS(self, row_dict):
        """ DCS online feature engineering
        Args:
            row_dict(dict): one incoming record as a dict
        Returns:
            DataFrame: record dataframe with the added features
        """
        # convert the dict to a dataframe
        row = pd.DataFrame(row_dict, index=[0])
        # reuse the offline feature-engineering function
        FE_row = self.FE_DCS(row)
        return FE_row
    # DCS online feature engineering
    def online_FE_DCS_new(self, row_dict):
        """ AtoA online feature engineering
        Args:
            row_dict(dict): one incoming record as a dict
        Returns:
            DataFrame: record dataframe with the added features
        """
row = pd.DataFrame(row_dict, index=[0])
self.current_row = row
if self.pre_row is None:
FE_row = self.online_FE_DCS(self.current_row)
else:
window = pd.concat([self.pre_row, self.current_row], axis=0)
FE_row = self.online_FE_DCS(window)[-1:]
self.pre_row = self.current_row
return FE_row
    def train_val_split(self, df_train, percent=0.8):
        """ Dataset split
        Split the dataset into a training set and a validation set
        Args:
            df_train(dataframe): raw data
            percent(int): split ratio
        Returns:
            train(dataframe): training set
            val_data(dataframe): validation set
        """
# 获取所有id
all_ids = df_train['id'].values.tolist()
# id去重
all_ids = list(set(all_ids))
# 每次 set 的结果都不一样,所以要先排序,防止结果不可复现
all_ids.sort()
# random.seed 只能生效一次,所以每次 random.sample 之前都要设置
random.seed(self.seed)
# 训练集id采样
train_ids = random.sample(all_ids, int(len(all_ids) * percent))
# 获取验证集id
val_ids = list(set(all_ids) - set(train_ids))
# 根据id获取训练数据
train_data = df_train[df_train['id'].isin(train_ids)]
# 根据id获取验证数据
val_data = df_train[df_train['id'].isin(val_ids)]
# 连续序列数据,但是是以单个样本建模的情况下,需要 shuffle 打乱
train_data = train_data.sample(
frac=1, random_state=self.seed).reset_index(drop=True)
return train_data, val_data
def smote(self, data_):
data = data_.copy()
over = SMOTE(sampling_strategy=0.2, random_state=self.seed)
under = RandomUnderSampler(sampling_strategy=1.0,
random_state=self.seed)
steps = [('o', over), ('u', under)]
pipeline = Pipeline(steps=steps)
X, y = pipeline.fit_resample(
data[[i for i in data.columns if i not in ['label']]],
data['label'])
return pd.concat([X, y], axis=1)
    def _feature_name(self):
        """ Get the list of retained feature columns
        Returns:
            feature_names(list): column names
        """
        # fixed order; otherwise model prediction will be wrong
if self.type == 'df':
if self.scale == 'all':
feature_names = [
'my_x', 'my_y', 'my_z', 'my_v_x', 'my_v_y', 'my_v_z',
'my_rot_x', 'my_rot_y', 'my_rot_z', 'enemy_x', 'enemy_y',
'enemy_z', 'enemy_v_x', 'enemy_v_y', 'enemy_v_z',
'my_v_x_acc', 'enemy_v_x_acc', 'my_v_y_acc',
'enemy_v_y_acc', 'my_v_z_acc', 'enemy_v_z_acc',
'x_me_minus', 'y_me_minus', 'z_me_minus', 'v_x_me_minus',
'v_y_me_minus', 'v_z_me_minus', 'distance', 'cos',
'speedAll', 'cosValue'
]
else:
feature_names = ['cosValue', 'speedAll', 'distance']
elif self.type == 'dcs':
if self.scale == 'all':
feature_names = [
'z', 'Roll', 'Pitch', 'Yaw', 'x', 'y', 'Heading',
'enemy_z', 'enemy_x', 'enemy_y', 'speed_x', 'speed_y',
'speed_z', 'enemy_speed_x', 'enemy_speed_y',
'enemy_speed_z', 'distance', 'speed', 'speed_connect_cos',
'enemy_speed_connect_cos', 'relative_x', 'relative_z',
'relative_y', 'relative_speed_x', 'relative_speed_y',
'relative_speed_z', 'relative_speed', 'speed_cos'
]
elif self.scale == 'light':
feature_names = [
'distance', 'speed_connect_cos', 'enemy_speed_connect_cos',
'relative_y', 'speed_cos'
]
else:
feature_names = [
'z', 'Roll', 'Pitch', 'Yaw', 'x', 'y', 'Heading',
'enemy_z', 'enemy_x', 'enemy_y'
]
return feature_names
    # hold-out data
    def _hold_out(self, raw_train, percent_train):
        """ Get training data for the hold-out method
        Args:
            raw_train(dataframe): raw data
            percent_train(int): training-set proportion
        Returns:
            train(dataframe): training set
            val(dataframe): validation set
        """
# 获取保留的列名
feature_names = self._feature_name()
# 切分训练集、验证集
train_data, val_data = self.train_val_split(raw_train,
percent=percent_train)
if self.type == 'dcs':
train_data = self.smote(train_data)
# 获取训练验证数据和标签数据
X_train, X_val, y_train, y_val = train_data[feature_names], val_data[
feature_names], train_data['label'], val_data['label']
return X_train, X_val, y_train, y_val
    # k-fold cross-validation data
    def _k_fold(self, raw_train, k):
        """ Get cross-validation data
        Args:
            raw_train(dataframe): raw data
            k(int): number of folds
        Returns:
            train(dataframe): k-fold cross-validation training set
            val(dataframe): validation set
        """
# 获取保留列名
feature_names = self._feature_name()
# 根据id分组
groups = list(raw_train['id'])
# 分组交叉验证
gkf = GroupKFold(n_splits=k)
data_list = []
# 获取交叉验证数据
for train_index, val_index in gkf.split(raw_train[feature_names],
raw_train['label'],
groups=groups):
# 根据index索引获取每一折数据
X_train, y_train, X_val, y_val = raw_train.iloc[train_index][feature_names], \
raw_train.iloc[train_index]['label'], \
raw_train.iloc[val_index][feature_names], \
raw_train.iloc[val_index]['label']
# 将数据加入列表保存
data_list.append([X_train, X_val, y_train, y_val])
# 返回列表
return data_list
    def _bootstrap(self, raw_train):
        """ Get bootstrap data
        Args:
            raw_train(dataframe): raw data
        Returns:
            train(dataframe): bootstrap training set
            val(dataframe): validation set
        """
# 获取保留列名
feature_names = self._feature_name()
# 获取所有数据id,并去重
ids = pd.DataFrame(set(raw_train['id']), columns=['id'], index=None)
random.seed(self.seed)
# 根据id采样
train_group_ids = ids.sample(frac=1.0,
replace=True,
random_state=self.seed)
# 总id减去训练集的id,得到验证集id
val_group_ids = ids.loc[ids.index.difference(
train_group_ids.index)].copy()
# 创建两个dataframe
train_data = pd.DataFrame()
val_data = pd.DataFrame()
# 获取训练与验证数据id号
train_group_ids = list(train_group_ids['id'])
val_group_ids = list(val_group_ids['id'])
# 根据id获取数据
for train_group_id in train_group_ids:
train_data = train_data.append(
raw_train[raw_train['id'] == train_group_id])
for val_group_id in val_group_ids:
val_data = val_data.append(
raw_train[raw_train['id'] == val_group_id])
# 切分训练数据与真实标签
X_train, X_val, y_train, y_val = train_data[feature_names], val_data[
feature_names], train_data['label'], val_data['label']
return X_train, X_val, y_train, y_val
    # define the LSTM model
def _lstm(self, n_steps, n_features):
model = Sequential()
model.add(
LSTM(units=100,
activation='relu',
input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
return model
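    # Shape note (illustrative): the LSTM expects input of shape
    # (batch, n_steps, n_features). In _train_lstm below the flat feature
    # matrix is windowed first, e.g. (names taken from that method):
    #
    #   X_train = series_to_supervised(X_train, n_steps)
    #   model = self._lstm(n_steps=n_steps, n_features=X_train.shape[-1])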
    # define the LightGBM model
def _lgb(self):
lgb_model = lgb.LGBMClassifier(objective='binary',
boosting_type='gbdt',
num_leaves=32,
max_depth=6,
learning_rate=0.01,
n_estimators=100000,
subsample=0.8,
feature_fraction=0.6,
reg_alpha=10,
reg_lambda=12,
random_state=self.seed,
is_unbalance=True,
metric='auc')
return lgb_model
def _xgb(self):
xgb_model = xgb.XGBClassifier(booster='gbtree',
objective='binary:logistic',
eval_metric='auc',
silent=0,
eta=0.01,
gamma=0.1,
max_depth=6,
min_child_weight=3,
subsample=0.7,
colsample_bytree=0.5,
reg_alpha=0,
reg_lambda=1,
n_estimators=100000,
seed=2021)
return xgb_model
    # define the SVM model
@staticmethod
def _svm():
svm_model = svm.SVC(C=1.0,
kernel='rbf',
degree=3,
gamma='auto',
coef0=0.0,
shrinking=True,
probability=True,
tol=0.001,
class_weight=None,
verbose=True,
max_iter=1000,
decision_function_shape='ovr',
random_state=None)
return svm_model
    # define the ensemble (voting) model
@staticmethod
def _ensemble():
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(n_estimators=50, random_state=1)
clf3 = CatBoostClassifier(iterations=100,
depth=5,
learning_rate=0.5,
loss_function='Logloss',
logging_level='Verbose')
ensemble_model = VotingClassifier(estimators=[('lr', clf1),
('rf', clf2),
('gnb', clf3)],
voting='soft')
return ensemble_model
    def _train_lstm(self, raw_train, n_steps, val_type, k, percent_train=0.8):
        """ Train the LSTM model
        Args:
            raw_train(dataframe): raw data
            n_steps: number of look-back time steps
            val_type(string): validation method
            k(int): number of folds
            percent_train(float): training-set proportion
        Returns:
            model: trained model
            importance(dataframe): feature importances
            best_thread(float): best threshold
        """
# 获取保留列名
if val_type == 'hold-out': # 留出法
X_train, X_val, y_train, y_val = self._hold_out(
raw_train, percent_train)
# 数据准备
X_train = series_to_supervised(X_train, n_steps)
X_val = series_to_supervised(X_val, n_steps)
y_train = y_train[n_steps - 1:]
y_val = y_val[n_steps - 1:]
lstm_model = self._lstm(n_steps=n_steps,
n_features=X_train.shape[-1])
# 模型训练,使用早停策略
my_callbacks = [
RocAucMetricCallback(), # include it before EarlyStopping!
kcallbacks.EarlyStopping(monitor='val_auc',
patience=3,
verbose=1,
mode='max')
]
lstm_model.fit(X_train,
y_train,
epochs=20,
batch_size=256,
validation_data=(X_val, y_val),
verbose=True,
shuffle=False,
callbacks=my_callbacks)
# 模型预测
pred_val = lstm_model.predict(X_val)
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return lstm_model, None, best_thread
elif val_type == 'k-fold':
# 创建模型保存列表
model_list = []
# 创建最佳阈值列表
BC_list = []
# 获取k折数据
data_list = self._k_fold(raw_train, k)
# 实现可变变量名
names = locals()
for member, i in zip(data_list, range(k)):
X_train, X_val, y_train, y_val = member[0], member[1], member[
2], member[3]
y_train = to_categorical(y_train.values.tolist(),
num_classes=None)
y_val = to_categorical(y_val.values.tolist(), num_classes=None)
# 获取lgb模型并且编号
                names['lstm_%s' % i] = self._lstm(n_steps=20,
                                                  n_features=len(
                                                      X_train.columns))
# 模型训练,使用早停策略
names['lstm_%s' % i].fit(X_train,
y_train,
epochs=100,
batch_size=256,
validation_data=(X_val, y_val),
verbose=True,
shuffle=False)
# 预测验证集
pred_val = names['lstm_%s' % i].predict(X_val)[:, 1]
# 获取最佳阈值
names['best_thread % s' % i] = self._BC_thread_search(
y_val, pred_val)
# 保存k个模型
model_list.append(names['lstm_%s' % i])
# 保存k个阈值
BC_list.append(names['best_thread % s' % i])
return model_list, None, BC_list
elif val_type == 'bootstrap':
# 获取提升法数据
X_train, X_val, y_train, y_val = self._bootstrap(raw_train)
y_train = to_categorical(y_train.values.tolist(), num_classes=None)
y_val = to_categorical(y_val.values.tolist(), num_classes=None)
# 获取lgb模型
            lstm_model = self._lstm(n_steps=20,
                                    n_features=len(X_train.columns))
# 模型训练,使用早停策略
lstm_model.fit(X_train,
y_train,
epochs=100,
batch_size=256,
validation_data=(X_val, y_val),
verbose=True,
shuffle=False)
# 模型预测
pred_val = lstm_model.predict(X_val)[:, :1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return lstm_model, None, best_thread
else:
            print("No such validation method!")
exit(-1)
def _train_xgb(self, raw_train, val_type, k, percent_train=0.8):
# 获取保留列名
feature_names = self._feature_name()
if val_type == 'hold-out': # 留出法
X_train, X_val, y_train, y_val = self._hold_out(
raw_train, percent_train)
xgb_model = self._xgb()
xgb_model.fit(X_train,
y_train,
eval_set=[(X_train, y_train), (X_val, y_val)],
verbose=100,
early_stopping_rounds=50)
# 获取特征重要性
df_importance = pd.DataFrame({
'column':
feature_names,
'importance':
xgb_model.feature_importances_,
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
# 最大最小归一
df_importance['importance'] = (
df_importance['importance'] -
df_importance['importance'].min()) / (
df_importance['importance'].max() -
df_importance['importance'].min())
# 模型预测
pred_val = xgb_model.predict_proba(X_val)[:, 1]
X_val['pred_prob'] = pred_val
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return xgb_model, df_importance, best_thread
elif val_type == 'k-fold':
# 创建模型保存列表
model_list = []
# 创建阈值保存列表
BC_list = []
# 创建重要性保存列表
importance_list = []
# 获取k折数据
data_list = self._k_fold(raw_train, k)
# 实现可变变量名
names = locals()
for member, i in zip(data_list, range(k)):
X_train, X_val, y_train, y_val = member[0], member[1], member[
2], member[3]
# 获取lgb模型并且编号
names['lgb_%s' % i] = self._xgb()
# 模型训练,使用早停策略
names['lgb_%s' % i].fit(X_train,
y_train,
eval_set=[(X_train, y_train),
(X_val, y_val)],
verbose=100,
early_stopping_rounds=50)
# 获取特征重要性
df_importance = pd.DataFrame({
'column':
feature_names,
'importance':
names['lgb_%s' % i].feature_importances_,
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
# 预测验证集
pred_val = names['lgb_%s' % i].predict_proba(X_val)[:, 1]
# 获取最佳阈值
names['best_thread % s' % i] = self._BC_thread_search(
y_val, pred_val)
# 保存k个模型
model_list.append(names['lgb_%s' % i])
# 保存k个重要性
importance_list.append(df_importance)
# 保存k个阈值
BC_list.append(names['best_thread % s' % i])
mean_dict = dict()
# 获取平均特征重要度
for feat in feature_names:
mean_dict[feat] = 0
for df_importance in importance_list:
for feat in feature_names:
mean_dict[feat] += int(
df_importance[df_importance['column'] ==
feat]['importance'].values[0])
for feat in feature_names:
mean_dict[feat] /= k
# 重要度排序
mean_imp_df = pd.DataFrame({
'column': feature_names,
'importance': list(mean_dict.values()),
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
            # min-max normalisation of the averaged importances
            mean_imp_df['importance'] = (
                mean_imp_df['importance'] -
                mean_imp_df['importance'].min()) / (
                    mean_imp_df['importance'].max() -
                    mean_imp_df['importance'].min())
# 获取平均最佳阈值
mean_BC = np.array(BC_list).mean()
return model_list, mean_imp_df, mean_BC
elif val_type == 'bootstrap':
# 获取提升法数据
X_train, X_val, y_train, y_val = self._bootstrap(raw_train)
# 获取lgb模型
xgb_model = self._xgb()
# 模型训练,使用早停策略
xgb_model.fit(X_train,
y_train,
eval_set=[(X_train, y_train), (X_val, y_val)],
verbose=100,
early_stopping_rounds=50)
# 获取模型的特征重要性特征
df_importance = pd.DataFrame({
'column':
feature_names,
'importance':
xgb_model.feature_importances_,
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
# 最大最小归一
df_importance['importance'] = (
df_importance['importance'] -
df_importance['importance'].min()) / (
df_importance['importance'].max() -
df_importance['importance'].min())
# 模型预测
            pred_val = xgb_model.predict_proba(X_val)[:, 1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return xgb_model, df_importance, best_thread
else:
            print("No such validation method!")
exit(-1)
    def _train_lgb(self, raw_train, val_type, k, percent_train=0.8):
        """ Train the LightGBM model
        Args:
            raw_train(dataframe): raw data
            val_type(string): validation method
            k(int): number of folds
            percent_train(float): training-set proportion
        Returns:
            model: trained model
            importance(dataframe): feature importances
            best_thread(float): best threshold
        """
# 获取保留列名
feature_names = self._feature_name()
if val_type == 'hold-out': # 留出法
X_train, X_val, y_train, y_val = self._hold_out(
raw_train, percent_train)
# 获取lgb模型
lgb_model = self._lgb()
# 模型训练,使用早停策略
lgb_model.fit(X_train,
y_train,
eval_names=['train', 'valid'],
eval_set=[(X_train, y_train), (X_val, y_val)],
verbose=100,
early_stopping_rounds=50)
# 获取特征重要性
df_importance = pd.DataFrame({
'column':
feature_names,
'importance':
lgb_model.feature_importances_,
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
# 最大最小归一
df_importance['importance'] = (
df_importance['importance'] -
df_importance['importance'].min()) / (
df_importance['importance'].max() -
df_importance['importance'].min())
print(df_importance)
# 模型预测
pred_val = lgb_model.predict_proba(X_val)[:, 1]
X_val['pred_prob'] = pred_val
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return lgb_model, df_importance, best_thread
elif val_type == 'k-fold':
# 创建模型保存列表
model_list = []
# 创建阈值保存列表
BC_list = []
# 创建重要性保存列表
importance_list = []
# 获取k折数据
data_list = self._k_fold(raw_train, k)
# 实现可变变量名
names = locals()
for member, i in zip(data_list, range(k)):
X_train, X_val, y_train, y_val = member[0], member[1], member[
2], member[3]
# 获取lgb模型并且编号
names['lgb_%s' % i] = self._lgb()
# 模型训练,使用早停策略
names['lgb_%s' % i].fit(X_train,
y_train,
eval_names=['train', 'valid'],
eval_set=[(X_train, y_train),
(X_val, y_val)],
verbose=100,
early_stopping_rounds=50)
# 获取特征重要性
df_importance = pd.DataFrame({
'column':
feature_names,
'importance':
names['lgb_%s' % i].feature_importances_,
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
# 预测验证集
pred_val = names['lgb_%s' % i].predict_proba(X_val)[:, 1]
# 获取最佳阈值
names['best_thread % s' % i] = self._BC_thread_search(
y_val, pred_val)
# 保存k个模型
model_list.append(names['lgb_%s' % i])
# 保存k个重要性
importance_list.append(df_importance)
# 保存k个阈值
BC_list.append(names['best_thread % s' % i])
mean_dict = dict()
# 获取平均特征重要度
for feat in feature_names:
mean_dict[feat] = 0
for df_importance in importance_list:
for feat in feature_names:
mean_dict[feat] += int(
df_importance[df_importance['column'] ==
feat]['importance'].values[0])
for feat in feature_names:
mean_dict[feat] /= k
# 重要度排序
mean_imp_df = pd.DataFrame({
'column': feature_names,
'importance': list(mean_dict.values()),
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
            # min-max normalisation of the averaged importances
            mean_imp_df['importance'] = (
                mean_imp_df['importance'] -
                mean_imp_df['importance'].min()) / (
                    mean_imp_df['importance'].max() -
                    mean_imp_df['importance'].min())
# 获取平均最佳阈值
mean_BC = np.array(BC_list).mean()
return model_list, mean_imp_df, mean_BC
elif val_type == 'bootstrap':
# 获取提升法数据
X_train, X_val, y_train, y_val = self._bootstrap(raw_train)
# 获取lgb模型
lgb_model = self._lgb()
# 模型训练,使用早停策略
lgb_model.fit(X_train,
y_train,
eval_names=['train', 'valid'],
eval_set=[(X_train, y_train), (X_val, y_val)],
verbose=100,
early_stopping_rounds=50)
# 获取模型的特征重要性特征
df_importance = pd.DataFrame({
'column':
feature_names,
'importance':
lgb_model.feature_importances_,
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
# 最大最小归一
df_importance['importance'] = (
df_importance['importance'] -
df_importance['importance'].min()) / (
df_importance['importance'].max() -
df_importance['importance'].min())
# 模型预测
            pred_val = lgb_model.predict_proba(X_val)[:, 1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return lgb_model, df_importance, best_thread
else:
            print("No such validation method!")
exit(-1)
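    # The threshold search helper _BC_thread_search used above is defined
    # elsewhere in this class; a minimal sketch of such a search (illustrative
    # only, maximising F1 over a grid of cut-offs) could look like:
    #
    #   thresholds = np.arange(0.05, 0.95, 0.01)
    #   scores = [f1_score(y_val, (pred_val >= t).astype(int)) for t in thresholds]
    #   best_thread = thresholds[int(np.argmax(scores))]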
    def _train_nb(self, raw_train, val_type, k, percent_train=0.8):
        """ Train the naive Bayes model
        Args:
            raw_train(dataframe): raw data
            val_type(string): validation method
            k(int): number of folds
            percent_train(float): training-set proportion
        Returns:
            model: trained model
            best_thread(float): best threshold
        """
if val_type == 'hold-out': # 留出法
# 获取训练集、验证集
X_train, X_val, y_train, y_val = self._hold_out(
raw_train, percent_train)
# 获取朴素贝叶斯模型
gnb_model = GaussianNB()
# 模型训练
gnb_model.fit(X_train, y_train)
# 模型预测
            pred_val = gnb_model.predict_proba(X_val)[:, 1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return gnb_model, None, best_thread
elif val_type == 'k-fold':
# 创建模型保存列表
model_list = []
# 创建阈值保存列表
BC_list = []
# 获取k折交叉验证数据
data_list = self._k_fold(raw_train, k)
# 实现可变变量名
names = locals()
for member, i in zip(data_list, range(k)):
X_train, X_val, y_train, y_val = member[0], member[1], member[
2], member[3]
names['gnb_%s' % i] = GaussianNB()
# 模型训练
names['gnb_%s' % i].fit(X_train, y_train)
# 模型预测
pred_val = names['gnb_%s' % i].predict_proba(X_val)[:, 1]
# 阈值搜索
names['best_thread % s' % i] = self._BC_thread_search(
y_val, pred_val)
# 模型加入列表
model_list.append(names['gnb_%s' % i])
# 阈值加入列表
BC_list.append(names['best_thread % s' % i])
# 平均最佳阈值
mean_BC = np.array(BC_list).mean()
return model_list, None, mean_BC
elif val_type == 'bootstrap':
# 提升法获取训练、验证数据
X_train, X_val, y_train, y_val = self._bootstrap(raw_train)
# 获取模型
gnb_model = GaussianNB()
# 模型训练
gnb_model.fit(X_train, y_train)
# 模型预测
            pred_val = gnb_model.predict_proba(X_val)[:, 1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return gnb_model, None, best_thread
else:
            print("No such validation method!")
exit(-1)
    def _train_linearReg(self, raw_train, val_type, k, percent_train=0.8):
        """ Train the linear regression model
        Args:
            raw_train(dataframe): raw data
            val_type(string): validation method
            k(int): number of folds
            percent_train(float): training-set proportion
        Returns:
            model: trained model
            best_thread(float): best threshold
        """
if val_type == 'hold-out': # 留出法
# 获取训练集、验证集
X_train, X_val, y_train, y_val = self._hold_out(
raw_train, percent_train)
# 获取线性回归模型
linear_model = LinearRegression()
# 模型训练
linear_model.fit(X_train, y_train)
# 模型预测
pred_val = linear_model.predict(X_val)
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return linear_model, None, best_thread
elif val_type == 'k-fold':
# 创建模型保存列表
model_list = []
# 阈值保存列表
BC_list = []
# k折数据
data_list = self._k_fold(raw_train, k)
# 实现可变变量名
names = locals()
for member, i in zip(data_list, range(k)):
X_train, X_val, y_train, y_val = member[0], member[1], member[
2], member[3]
# 获取模型并编号
names['linear_%s' % i] = LinearRegression()
# 模型训练
names['linear_%s' % i].fit(X_train, y_train)
# 模型预测
pred_val = names['linear_%s' % i].predict(X_val)
# 阈值搜索
names['best_thread % s' % i] = self._BC_thread_search(
y_val, pred_val)
# 模型加入列表
model_list.append(names['linear_%s' % i])
# 阈值加入列表
BC_list.append(names['best_thread % s' % i])
# 平均最佳阈值
mean_BC = np.array(BC_list).mean()
return model_list, None, mean_BC
elif val_type == 'bootstrap':
# 提升法获取训练、验证数据
X_train, X_val, y_train, y_val = self._bootstrap(raw_train)
linear_model = LinearRegression()
# 模型训练
linear_model.fit(X_train, y_train)
# 模型预测
pred_val = linear_model.predict(X_val)
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return linear_model, None, best_thread
else:
            print("No such validation method!")
exit(-1)
    def _train_logisticReg(self, raw_train, val_type, k, percent_train=0.8):
        """ Train the logistic regression model
        Args:
            raw_train(dataframe): raw data
            val_type(string): validation method
            k(int): number of folds
            percent_train(float): training-set proportion
        Returns:
            model: trained model
            best_thread(float): best threshold
        """
if val_type == 'hold-out': # 留出法
# 获取训练集、验证集
X_train, X_val, y_train, y_val = self._hold_out(
raw_train, percent_train)
# 获取逻辑回归模型
logistic_model = LogisticRegression(C=1.0, penalty='l2', tol=0.01)
# 模型训练
logistic_model.fit(X_train, y_train)
# 模型预测
            pred_val = logistic_model.predict_proba(X_val)[:, 1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return logistic_model, None, best_thread
elif val_type == 'k-fold':
# 模型保存列表
model_list = []
# 阈值保存列表
BC_list = []
# k交叉数据
data_list = self._k_fold(raw_train, k)
# 实现可变变量名
names = locals()
for member, i in zip(data_list, range(k)):
X_train, X_val, y_train, y_val = member[0], member[1], member[
2], member[3]
# 创建模型并命名
names['logistic_%s' % i] = LogisticRegression(C=1.0,
penalty='l2',
tol=0.01)
# 模型训练
names['logistic_%s' % i].fit(X_train, y_train)
# 模型预测
pred_val = names['logistic_%s' % i].predict_proba(X_val)[:, 1]
# 阈值搜索
names['best_thread % s' % i] = self._BC_thread_search(
y_val, pred_val)
# 加入模型列表
model_list.append(names['logistic_%s' % i])
# 加入阈值列表
BC_list.append(names['best_thread % s' % i])
# 平均最佳阈值
mean_BC = np.array(BC_list).mean()
return model_list, None, mean_BC
elif val_type == 'bootstrap':
# 提升法获取训练、验证数据
X_train, X_val, y_train, y_val = self._bootstrap(raw_train)
# 获取模型
logistic_model = LogisticRegression(C=1.0, penalty='l2', tol=0.01)
# 模型训练
logistic_model.fit(X_train, y_train)
# 模型预测
            pred_val = logistic_model.predict_proba(X_val)[:, 1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return logistic_model, None, best_thread
else:
            print("No such validation method!")
exit(-1)
    def _train_svm(self, raw_train, val_type, k, percent_train=0.8):
        """ Train the support vector machine model
        Args:
            raw_train(dataframe): raw data
            val_type(string): validation method
            k(int): number of folds
            percent_train(float): training-set proportion
        Returns:
            model: trained model
            best_thread(float): best threshold
        """
if val_type == 'hold-out': # 留出法
# 留出法获得训练集、验证集
X_train, X_val, y_train, y_val = self._hold_out(
raw_train, percent_train)
# 获取模型
svm_model = self._svm()
# 模型训练
svm_model.fit(X_train, y_train)
# 模型预测
            pred_val = svm_model.predict_proba(X_val)[:, 1]
# 搜寻最佳阈值
best_thread = self._BC_thread_search(y_val, pred_val)
return svm_model, None, best_thread
elif val_type == 'k-fold':
# 模型保存列表
model_list = []
# 阈值保存列表
BC_list = []
# k交叉数据
data_list = self._k_fold(raw_train, k)
# 实现可变变量名
names = locals()
for member, i in zip(data_list, range(k)):
X_train, X_val, y_train, y_val = member[0], member[1], member[
2], member[3]
# 获取模型并命名
names['svm_%s' % i] = self._svm()
# 模型训练
names['svm_%s' % i].fit(X_train, y_train)
# 模型预测
pred_val = names['svm_%s' % i].predict_proba(X_val)[:, 1]
# 阈值搜索
names['best_thread % s' % i] = self._BC_thread_search(
y_val, pred_val)
# 模型保存
model_list.append(names['svm_%s' % i])
# 阈值保存
BC_list.append(names['best_thread % s' % i])
# 平均最佳阈值
            mean_BC = np.array(BC_list).mean()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A python class to read Kongsberg KMALL data format for swath mapping
bathymetric echosounders.
"""
import pandas as pd
import sys
import numpy as np
import struct
import datetime
import argparse
import os
import re
import bz2
import copy
from pyproj import Proj
from scipy import stats
class kmall():
""" A class for reading a Kongsberg KMALL data file. """
def __init__(self, filename=None):
self.verbose = 0
self.filename = filename
self.FID = None
self.file_size = None
self.header_size = None
self.Index = None
self.pingDataCheck = None
self.navDataCheck = None
self.datagram_ident_search = self._build_startbytesearch()
self.read_methods = [method_name for method_name in dir(self) if method_name[0:4] == 'read']
self.datagram_ident = None
self.datagram_data = None
self.read_method = None
self.eof = False
def decode_datagram(self):
"""
Assumes the file pointer is at the correct position to read the size of the dgram and the identifier
Stores the datagram identifier and the read method as attributes. read method is the name of the class
method that we would use to read the datagram
"""
self.datagram_ident = None
self.read_method = None
if self.FID is None:
self.OpenFiletoRead()
if self.file_size is None: # need file size to determine end of file, init if not done already
filelen = self._initialize_sequential_read(0, 0)
num_bytes = self.FID.read(4)
dgram = self.FID.read(4)
if not self.FID.tell() == self.file_size: # end of file
self.FID.seek(-8, 1)
is_valid_identifier = self.datagram_ident_search.search(dgram, 0)
# dgram passes first check, starts with # and is 3 capital letters after
if is_valid_identifier:
# now compare dgram identifier with the last three letters of each read method to find the right one
self.datagram_ident = dgram[-3:].decode()
read_method = [rm for rm in self.read_methods if rm[-3:] == self.datagram_ident]
if not len(read_method) > 1:
self.read_method = read_method[0]
else:
raise ValueError('Found multiple valid read methods for {}: {}'.format(dgram, read_method))
else:
raise ValueError('Did not find valid datagram identifier: {}'.format(dgram))
else:
self.eof = True
def read_datagram(self):
"""
Reads the datagram data and stores the data in self.datagram_data
Will always translate the installation parameters record (translate=True)
To get the first record:
km = kmall.kmall(r"C:\\Users\\zzzz\\Downloads\\0007_20190513_154724_ASVBEN.kmall")
km.decode_datagram()
km.read_datagram()
Or to get the first MRZ record:
km = kmall.kmall(r"C:\\Users\\zzzz\\Downloads\\0007_20190513_154724_ASVBEN.kmall")
while not km.eof:
km.decode_datagram()
if km.datagram_ident != 'MRZ':
km.skip_datagram()
else:
km.read_datagram()
break
"""
if self.read_method is not None: # is None when decode fails or is at the end of file
if self.read_method in ['read_EMdgmIIP', 'read_EMdgmIOP']:
self.datagram_data = getattr(self, self.read_method)(translate=True)
else:
self.datagram_data = getattr(self, self.read_method)()
def skip_datagram(self):
"""
After decoding, use this to skip to the next datagram if you don't want to read this one
"""
if self.read_method is not None:
format_to_unpack = "1I"
numbytes = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))[0]
self.FID.seek(numbytes - struct.Struct(format_to_unpack).size, 1)
def read_first_datagram(self, datagram_identifier):
"""
Uses read_datagram to quickly read the first instance of a datagram in a file
datagram_identifier is a 3 letter string identifier, ex: 'IIP' or 'MRZ'
"""
self.datagram_data = None
self.eof = False
if self.FID is None:
self.OpenFiletoRead()
else:
self.FID.seek(0)
while not self.eof:
self.decode_datagram()
if self.datagram_ident != datagram_identifier:
self.skip_datagram()
else:
self.read_datagram()
break
if self.datagram_data is None:
print('Unable to find {} in file'.format(datagram_identifier))
return self.datagram_data
def read_EMdgmHeader(self):
"""
Read general datagram header.
:return: A dictionary containing EMdgmHeader ('header').
"""
# LMD tested.
dg = {}
format_to_unpack = "1I4s2B1H2I"
self.header_size = struct.Struct(format_to_unpack).size
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Datagram length in bytes. The length field at the start (4 bytes) and end
# of the datagram (4 bytes) are included in the length count.
dg['numBytesDgm'] = fields[0]
# Array of length 4. Multibeam datagram type definition, e.g. #AAA
dg['dgmType'] = fields[1]
# Datagram version.
dg['dgmVersion'] = fields[2]
# System ID. Parameter used for separating datagrams from different echosounders
# if more than one system is connected to SIS/K-Controller.
dg['systemID'] = fields[3]
# Echo sounder identity, e.g. 122, 302, 710, 712, 2040, 2045, 850.
dg['echoSounderID'] = fields[4]
# UTC time in seconds + Nano seconds remainder. Epoch 1970-01-01.
dg['dgtime'] = fields[5] + fields[6] / 1.0E9
dg['dgdatetime'] = datetime.datetime.utcfromtimestamp(dg['dgtime'])
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmIIP(self, translate=False):
"""
Read #IIP - installation parameters and sensor format settings.
If translate is True, the returned install_txt will be a dict with human readable key: value pairs.
self.read_datagram will always use translate=True
:return: A dictionary containing EMdgmIIP.
"""
# LMD tested.
dg = {}
dg['header'] = self.read_EMdgmHeader()
format_to_unpack = "3H1B"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Size in bytes of body part struct. Used for denoting size of rest of the datagram.
dg['numBytesCmnPart'] = fields[0]
# Information. For future use.
dg['info'] = fields[1]
# Status. For future use.
dg['status'] = fields[2]
# Installation settings as text format. Parameters separated by ; and lines separated by , delimiter.
tmp = self.FID.read(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size)
i_text = tmp.decode('UTF-8')
if translate:
i_text = self.translate_installation_parameters_todict(i_text)
dg['install_txt'] = i_text
# remainder = total bytes - (header bytes + data bytes)
expected_unknown_size = dg['header']['numBytesDgm'] - (self.header_size + dg['numBytesCmnPart'])
# Skip unknown fields.
self.FID.seek(expected_unknown_size, 1)
return dg
def read_EMdgmIOP(self, translate=False):
"""
Read #IOP - runtime parameters, exactly as chosen by operator in K-Controller/SIS menus.
If translate is True, the returned runtime_txt will be a dict with human readable key: value pairs.
self.read_datagram will always use translate=True
:return: A dictionary containing EMdgmIOP.
"""
# LMD tested.
dg = {}
dg['header'] = self.read_EMdgmHeader()
format_to_unpack = "3H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Size in bytes of body part struct. Used for denoting size of rest of the datagram.
dg['numBytesCmnPart'] = fields[0]
# Information. For future use.
dg['info'] = fields[1]
# Status. For future use.
dg['status'] = fields[2]
# Runtime parameters as text format. Parameters separated by ; and lines separated by , delimiter.
# Text strings refer to names in menus of the K-Controller/SIS.
tmp = self.FID.read(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size)
rt_text = tmp.decode('UTF-8')
# print(rt_text)
if translate:
rt_text = self.translate_runtime_parameters_todict(rt_text)
dg['runtime_txt'] = rt_text
# remainder = total bytes - (header bytes + data bytes)
expected_unknown_size = dg['header']['numBytesDgm'] - (self.header_size + dg['numBytesCmnPart'])
# Skip unknown fields.
self.FID.seek(expected_unknown_size, 1)
return dg
def read_EMdgmIB(self):
"""
Read #IB - results from online built-in test (BIST). Definition used for three different BIST datagrams,
i.e. #IBE (BIST Error report), #IBR (BIST reply) or #IBS (BIST short reply).
:return: A dictionary containing EMdgmIB.
"""
# LMD added, untested.
# TODO: Test with file containing BIST.
print("WARNING: You are using an incomplete, untested function: read_EMdgmIB.")
dg = {}
dg['header'] = self.read_EMdgmHeader()
format_to_unpack = "1H3B1b1B"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Size in bytes of body part struct. Used for denoting size of rest of the datagram.
dg['numBytesCmnPart'] = fields[0]
# 0 = last subset of the message; 1 = more messages to come
dg['BISTInfo'] = fields[1]
# 0 = plain text; 1 = use style sheet
dg['BISTStyle'] = fields[2]
# The BIST number executed.
dg['BISTNumber'] = fields[3]
# 0 = BIST executed with no errors; positive number = warning; negative number = error
dg['BISTStatus'] = fields[4]
# Result of the BIST. Starts with a synopsis of the result, followed by detailed descriptions.
tmp = self.FID.read(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size)
bist_text = tmp.decode('UTF-8')
# print(bist_text)
dg['BISTText'] = bist_text
# Skip any remaining bytes; the BIST text read above already consumed the rest of the common part.
self.FID.seek(dg['header']['numBytesDgm'] - (self.header_size + dg['numBytesCmnPart']), 1)
'''
if self.verbose > 2:
self.print_datagram(dg)
'''
return dg
def read_EMdgmMpartition(self):
"""
Read multibeam (M) datagrams - data partition info. General for all M datagrams.
Kongsberg documentation: "If a multibeam depth datagram (or any other large datagram) exceeds the limit of a
UDP package (64 kB), the datagram is split into several datagrams =< 64 kB before sending from the PU.
The parameters in this struct will give information of the partitioning of datagrams. K-Controller/SIS merges
all UDP packets/datagram parts to one datagram, and store it as one datagram in the .kmall files. Datagrams
stored in .kmall files will therefore always have numOfDgm = 1 and dgmNum = 1, and may have size > 64 kB.
The maximum number of partitions from PU is given by MAX_NUM_MWC_DGMS and MAX_NUM_MRZ_DGMS."
:return: A dictionary containing EMdgmMpartition ('partition').
"""
# LMD tested.
dg = {}
format_to_unpack = "2H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Number of datagram parts to re-join to get one Multibeam datagram. E.g. 3.
dg['numOfDgms'] = fields[0]
# Datagram part number, e.g. 2 (of 3).
dg['dgmNum'] = fields[1]
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmMbody(self):
"""
Read multibeam (M) datagrams - body part. Start of body of all M datagrams.
Contains information of transmitter and receiver used to find data in datagram.
:return: A dictionary containing EMdgmMbody ('cmnPart').
"""
# LMD tested.
dg = {}
format_to_unpack = "2H8B"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Used for denoting size of current struct.
dg['numBytesCmnPart'] = fields[0]
# A ping is made of one or more RX fans and one or more TX pulses transmitted at approximately the same time.
# Ping counter is incremented at every set of TX pulses
# (one or more pulses transmitted at approximately the same time).
dg['pingCnt'] = fields[1]
# Number of rx fans per ping gives information of how many #MRZ datagrams are generated per ping.
# Combined with swathsPerPing, number of datagrams to join for a complete swath can be found.
dg['rxFansPerPing'] = fields[2]
# Index 0 is the aft swath, port side.
dg['rxFanIndex'] = fields[3]
# Number of swaths per ping. A swath is a complete set of across track data.
# A swath may contain several transmit sectors and RX fans.
dg['swathsPerPing'] = fields[4]
# Alongship index for the location of the swath in multi swath mode. Index 0 is the aftmost swath.
dg['swathAlongPosition'] = fields[5]
# Transducer used in this tx fan. Index: 0 = TRAI_TX1; 1 = TRAI_TX2 etc.
dg['txTransducerInd'] = fields[6]
# Transducer used in this rx fan. Index: 0 = TRAI_RX1; 1 = TRAI_RX2 etc.
dg['rxTransducerInd'] = fields[7]
# Total number of receiving units.
dg['numRxTransducers'] = fields[8]
# For future use. 0 - current algorithm, >0 - future algorithms.
dg['algorithmType'] = fields[9]
# Skip unknown fields.
self.FID.seek(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size, 1)
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmMRZ_pingInfo(self):
"""
Read #MRZ - ping info. Information on vessel/system level,
i.e. information common to all beams in the current ping.
:return: A dictionary containing EMdgmMRZ_pingInfo ('pingInfo').
"""
# LMD tested.
dg = {}
format_to_unpack_a = "2H1f6B1H11f2h2B1H1I3f2H1f2H6f4B"
fields = struct.unpack(format_to_unpack_a, self.FID.read(struct.Struct(format_to_unpack_a).size))
# Number of bytes in current struct.
dg['numBytesInfoData'] = fields[0]
# Byte alignment.
dg['padding0'] = fields[1]
# # # # # Ping Info # # # # #
# Ping rate. Filtered/averaged.
dg['pingRate_Hz'] = fields[2]
# 0 = Equidistance; 1 = Equiangle; 2 = High density
dg['beamSpacing'] = fields[3]
# Depth mode. Describes setting of depth in K-Controller. Depth mode influences the PUs choice of pulse length
# and pulse type. If operator has manually chosen the depth mode to use, this is flagged by adding 100 to the
# mode index. 0 = Very Shallow; 1 = Shallow; 2 = Medium; 3 = Deep; 4 = Deeper; 5 = Very Deep; 6 = Extra Deep;
# 7 = Extreme Deep
dg['depthMode'] = fields[4]
# For advanced use when depth mode is set manually. 0 = Sub depth mode is not used (when depth mode is auto).
dg['subDepthMode'] = fields[5]
# Achieved distance between swaths, in percent relative to required swath distance.
# 0 = function is not used; 100 = achieved swath distance equals required swath distance.
dg['distanceBtwSwath'] = fields[6]
# Detection mode. Bottom detection algorithm used. 0 = normal; 1 = waterway; 2 = tracking;
# 3 = minimum depth; If system running in simulation mode: detectionmode + 100 = simulator.
dg['detectionMode'] = fields[7]
# Pulse forms used for current swath. 0 = CW; 1 = mix; 2 = FM
dg['pulseForm'] = fields[8]
# TODO: Kongsberg documentation lists padding1 as "Ping rate. Filtered/averaged." This appears to be incorrect.
# In testing, padding1 prints all zeros. I'm assuming this is for byte alignment, as with other 'padding' cases.
# Byte alignment.
dg['padding1'] = fields[9]
# Ping frequency in hertz. E.g. for EM 2040: 200 000 Hz, 300 000 Hz or 400 000 Hz.
# If the value is less than 100, it refers to a code defined below:
# -1 = Not used; 0 = 40 - 100 kHz, EM 710, EM 712; 1 = 50 - 100 kHz, EM 710, EM 712;
# 2 = 70 - 100 kHz, EM 710, EM 712; 3 = 50 kHz, EM 710, EM 712; 4 = 40 kHz, EM 710, EM 712;
# 180 000 - 400 000 = 180-400 kHz, EM 2040C (10 kHz steps)
# 200 000 = 200 kHz, EM 2040; 300 000 = 300 kHz, EM 2040; 400 000 = 400 kHz, EM 2040
dg['frequencyMode_Hz'] = fields[10]
# Lowest centre frequency of all sectors in this swath. Unit hertz. E.g. for EM 2040: 260 000 Hz.
dg['freqRangeLowLim_Hz'] = fields[11]
# Highest centre frequency of all sectors in this swath. Unit hertz. E.g. for EM 2040: 320 000 Hz.
dg['freqRangeHighLim_Hz'] = fields[12]
# Total signal length of the sector with longest tx pulse. Unit second.
dg['maxTotalTxPulseLength_sec'] = fields[13]
# Effective signal length (-3dB envelope) of the sector with longest effective tx pulse. Unit second.
dg['maxEffTxPulseLength_sec'] = fields[14]
# Effective bandwidth (-3dB envelope) of the sector with highest bandwidth.
dg['maxEffTxBandWidth_Hz'] = fields[15]
# Average absorption coefficient, in dB/km, for vertical beam at current depth. Not currently in use.
dg['absCoeff_dBPerkm'] = fields[16]
# Port sector edge, used by beamformer. Coverage is referred to z of SCS. Unit degree.
dg['portSectorEdge_deg'] = fields[17]
# Starboard sector edge, used by beamformer. Coverage is referred to z of SCS. Unit degree.
dg['starbSectorEdge_deg'] = fields[18]
# Coverage achieved, corrected for raybending. Coverage is referred to z of SCS. Unit degree.
dg['portMeanCov_deg'] = fields[19]
# Coverage achieved, corrected for raybending. Coverage is referred to z of SCS. Unit degree.
dg['stbdMeanCov_deg'] = fields[20]
# Coverage achieved, corrected for raybending. Coverage is referred to z of SCS. Unit meter.
dg['portMeanCov_m'] = fields[21]
# Coverage achieved, corrected for raybending. Unit meter.
dg['starbMeanCov_m'] = fields[22]
# Modes and stabilisation settings as chosen by operator. Each bit refers to one setting in K-Controller.
# Unless otherwise stated, default: 0 = off, 1 = on/auto.
# Bit: 1 = Pitch stabilisation; 2 = Yaw stabilisation; 3 = Sonar mode; 4 = Angular coverage mode;
# 5 = Sector mode; 6 = Swath along position (0 = fixed, 1 = dynamic); 7-8 = Future use
dg['modeAndStabilisation'] = fields[23]
# Filter settings as chosen by operator. Refers to settings in runtime display of K-Controller.
# Each bit refers to one filter setting. 0 = off, 1 = on/auto.
# Bit: 1 = Slope filter; 2 = Aeration filter; 3 = Sector filter;
# 4 = Interference filter; 5 = Special amplitude detect; 6-8 = Future use
dg['runtimeFilter1'] = fields[24]
# Filter settings as chosen by operator. Refers to settings in runtime display of K-Controller. 4 bits used per filter.
# Bits: 1-4 = Range gate size: 0 = small, 1 = normal, 2 = large
# 5-8 = Spike filter strength: 0 = off, 1= weak, 2 = medium, 3 = strong
# 9-12 = Penetration filter: 0 = off, 1 = weak, 2 = medium, 3 = strong
# 13-16 = Phase ramp: 0 = short, 1 = normal, 2 = long
dg['runtimeFilter2'] = fields[25]
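# Illustrative decoding of the 4-bit fields above, assuming bits 1-4 occupy the least significant nibble:
# range_gate_size = dg['runtimeFilter2'] & 0x000F
# spike_filter_strength = (dg['runtimeFilter2'] >> 4) & 0x000F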
# Pipe tracking status. Describes how angle and range of top of pipe is determined.
# 0 = for future use; 1 = PU uses guidance from SIS.
dg['pipeTrackingStatus'] = fields[26]
# Transmit array size used. Direction along ship. Unit degree.
dg['transmitArraySizeUsed_deg'] = fields[27]
# Receiver array size used. Direction across ship. Unit degree.
dg['receiveArraySizeUsed_deg'] = fields[28]
# Operator selected tx power level re maximum. Unit dB. E.g. 0 dB, -10 dB, -20 dB.
dg['transmitPower_dB'] = fields[29]
# For marine mammal protection. The parameter describes the time remaining until max source level (SL) is achieved.
# Unit %.
dg['SLrampUpTimeRemaining'] = fields[30]
# Byte alignment.
dg['padding2'] = fields[31]
# Yaw correction angle applied. Unit degree.
dg['yawAngle_deg'] = fields[32]
# # # # # Info of Tx Sector Data Block # # # # #
# Number of transmit sectors. Also called Ntx in documentation. Denotes how
# many times the struct EMdgmMRZ_txSectorInfo is repeated in the datagram.
dg['numTxSectors'] = fields[33]
# Number of bytes in the struct EMdgmMRZ_txSectorInfo, containing tx sector
# specific information. The struct is repeated numTxSectors times.
dg['numBytesPerTxSector'] = fields[34]
# # # # # Info at Time of Midpoint of First Tx Pulse # # # # #
# Heading of vessel at time of midpoint of first tx pulse. From active heading sensor.
dg['headingVessel_deg'] = fields[35]
# At time of midpoint of first tx pulse. Value as used in depth calculations.
# Source of sound speed defined by user in K-Controller.
dg['soundSpeedAtTxDepth_mPerSec'] = fields[36]
# Tx transducer depth in meters below waterline, at time of midpoint of first tx pulse.
# For the tx array (head) used by this RX-fan. Use depth of TX1 to move depth point (XYZ)
# from water line to transducer (reference point of old datagram format).
dg['txTransducerDepth_m'] = fields[37]
# Distance between water line and vessel reference point in meters. At time of midpoint of first tx pulse.
# Measured in the surface coordinate system (SCS). See 'Coordinate systems' for definition.
# Use this to move the depth point (XYZ) from the vessel reference point to the waterline.
dg['z_waterLevelReRefPoint_m'] = fields[38]
# Distance between *.all reference point and *.kmall reference point (vessel reference point) in meters,
# in the surface coordinate system, at time of midpoint of first tx pulse. Use this to move the depth point (XYZ)
# from the vessel reference point to the horizontal location (X,Y) of the active position sensor's reference point
# (old datagram format).
dg['x_kmallToall_m'] = fields[39]
# Distance between *.all reference point and *.kmall reference point (vessel reference point) in meters,
# in the surface coordinate system, at time of midpoint of first tx pulse. Use this to move the depth point (XYZ)
# from the vessel reference point to the horizontal location (X,Y) of the active position sensor's reference point
# (old datagram format).
dg['y_kmallToall_m'] = fields[40]
# Method of position determination from position sensor data:
# 0 = last position received; 1 = interpolated; 2 = processed.
dg['latLongInfo'] = fields[41]
# Status/quality for data from active position sensor. 0 = valid data, 1 = invalid data, 2 = reduced performance
dg['posSensorStatus'] = fields[42]
# Status/quality for data from active attitude sensor. 0 = valid data, 1 = invalid data, 2 = reduced performance
dg['attitudeSensorStatus'] = fields[43]
# Padding for byte alignment.
dg['padding3'] = fields[44]
# The unpack is split in two because struct uses native alignment by default; combining the format strings
# would insert padding bytes before the doubles and misalign the remaining fields.
format_to_unpack_b = "2d1f"
fields = struct.unpack(format_to_unpack_b, self.FID.read(struct.Struct(format_to_unpack_b).size))
# Latitude (decimal degrees) of vessel reference point at time of midpoint of first tx pulse.
# Negative on southern hemisphere. Parameter is set to define UNAVAILABLE_LATITUDE if not available.
dg['latitude_deg'] = fields[0]
# Longitude (decimal degrees) of vessel reference point at time of midpoint of first tx pulse.
# Negative on western hemisphere. Parameter is set to define UNAVAILABLE_LONGITUDE if not available.
dg['longitude_deg'] = fields[1]
# Height of vessel reference point above the ellipsoid, derived from active GGA sensor.
# ellipsoidHeightReRefPoint_m is GGA height corrected for motion and installation offsets
# of the position sensor.
dg['ellipsoidHeightReRefPoint_m'] = fields[2]
# Skip unknown fields.
self.FID.seek(dg['numBytesInfoData'] - struct.Struct(format_to_unpack_a).size
- struct.Struct(format_to_unpack_b).size, 1)
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmMRZ_txSectorInfo(self):
"""
Read #MRZ - sector info. Information specific to each transmitting sector.
sectorInfo is repeated numTxSectors (Ntx)- times in datagram.
:return: A dictionary containing EMdgmMRZ_txSectorInfo ('sectorInfo').
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# LMD tested.
dg = {}
format_to_unpack = "4B7f2B1H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# TX sector index number, used in the sounding section. Starts at 0.
dg['txSectorNumb'] = fields[0]
# TX array number. Single TX, txArrNumber = 0.
dg['txArrNumber'] = fields[1]
# Default = 0. E.g. for EM2040, the transmitted pulse consists of three sectors, each transmitted from separate
# txSubArrays. Orientation and numbers are relative to the array coordinate system. Sub array installation offsets
# can be found in the installation datagram, #IIP. 0 = Port subarray; 1 = middle subarray; 2 = starboard subarray
dg['txSubArray'] = fields[2]
# Byte alignment.
dg['padding0'] = fields[3]
# Transmit delay of the current sector/subarray. Delay is the time from the midpoint of the current transmission
# to midpoint of the first transmitted pulse of the ping, i.e. relative to the time used in the datagram header.
dg['sectorTransmitDelay_sec'] = fields[4]
# Along ship steering angle of the TX beam (main lobe of transmitted pulse),
# angle referred to transducer array coordinate system. Unit degree.
dg['tiltAngleReTx_deg'] = fields[5]
# Unit dB re 1 microPascal.
dg['txNominalSourceLevel_dB'] = fields[6]
# 0 = no focusing applied.
dg['txFocusRange_m'] = fields[7]
# Centre frequency. Unit hertz.
dg['centreFreq_Hz'] = fields[8]
# FM mode: effective bandwidth; CW mode: 1/(effective TX pulse length)
dg['signalBandWidth_Hz'] = fields[9]
# Also called pulse length. Unit second.
dg['totalSignalLength_sec'] = fields[10]
# Transmit pulse is shaded in time (tapering). Amplitude shading in %.
# cos2- function used for shading the TX pulse in time.
dg['pulseShading'] = fields[11]
# Transmit signal wave form. 0 = CW; 1 = FM upsweep; 2 = FM downsweep.
dg['signalWaveForm'] = fields[12]
# Byte alignment.
dg['padding1'] = fields[13]
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmMRZ_rxInfo(self):
"""
Read #MRZ - receiver specific information. Information specific to the receiver unit used in this swath.
:return: A dictionary containing EMdgmMRZ_rxInfo ('rxInfo').
"""
# LMD tested.
dg = {}
format_to_unpack = "4H4f4H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Bytes in current struct.
dg['numBytesRxInfo'] = fields[0]
# Maximum number of main soundings (bottom soundings) in this datagram, extra detections
# (soundings in water column) excluded. Also referred to as Nrx. Denotes how many bottom points
# (or loops) given in the struct EMdgmMRZ_sounding_def.
dg['numSoundingsMaxMain'] = fields[1]
# Number of main soundings of valid quality. Extra detections not included.
dg['numSoundingsValidMain'] = fields[2]
# Bytes per loop of sounding (per depth point), i.e. bytes per loops of the struct EMdgmMRZ_sounding_def.
dg['numBytesPerSounding'] = fields[3]
# Sample frequency divided by water column decimation factor. Unit hertz.
dg['WCSampleRate'] = fields[4]
# Sample frequency divided by seabed image decimation factor. Unit hertz.
dg['seabedImageSampleRate'] = fields[5]
# Backscatter level, normal incidence. Unit dB.
dg['BSnormal_dB'] = fields[6]
# Backscatter level, oblique incidence. Unit dB.
dg['BSoblique_dB'] = fields[7]
# extraDetectionAlarmFlag = sum of alarm flags. Range 0-10.
dg['extraDetectionAlarmFlag'] = fields[8]
# Sum of extra detections from all classes. Also referred to as Nd.
dg['numExtraDetections'] = fields[9]
# Range 0-10.
dg['numExtraDetectionClasses'] = fields[10]
# Number of bytes in the struct EMdgmMRZ_extraDetClassInfo_def.
dg['numBytesPerClass'] = fields[11]
# Skip unknown fields.
self.FID.seek(dg['numBytesRxInfo'] - struct.Struct(format_to_unpack).size, 1)
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmMRZ_extraDetClassInfo(self):
"""
Read #MRZ - extra detection class information. To be entered in loop numExtraDetectionClasses times.
:return: A dictionary containing EMdgmMRZ_extra DetClassInfo ('extraDetClassInfo').
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# TODO: Need to test with file containing extra detections.
dg = {}
format_to_unpack = "1H1b1B"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Number of extra detection in this class.
dg['numExtraDetInClass'] = fields[0]
# Byte alignment.
dg['padding'] = fields[1]
# 0 = no alarm; 1 = alarm.
dg['alarmFlag'] = fields[2]
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmMRZ_sounding(self):
"""
Read #MRZ - data for each sounding, e.g. XYZ, reflectivity, two way travel time etc. Also contains
information necessary to read seabed image following this datablock (number of samples in SI etc.).
To be entered in loop (numSoundingsMaxMain + numExtraDetections) times.
:return: A dictionary containing EMdgmMRZ_sounding ('sounding').
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# LMD tested.
dg = {}
format_to_unpack = "1H8B1H6f2H18f4H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Sounding index. Cross reference for seabed image.
# Valid range: 0 to (numSoundingsMaxMain+numExtraDetections)-1, i.e. 0 - (Nrx+Nd)-1.
dg['soundingIndex'] = fields[0]
# Transmitting sector number. Valid range: 0-(Ntx-1), where Ntx is numTxSectors.
dg['txSectorNumb'] = fields[1]
# # # # # D E T E C T I O N I N F O # # # # #
# Bottom detection type. Normal bottom detection, extra detection, or rejected.
# 0 = normal detection; 1 = extra detection; 2 = rejected detection
# In case 2, the estimated range has been used to fill in amplitude samples in the seabed image datagram.
dg['detectionType'] = fields[2]
# Method for determining bottom detection, e.g. amplitude or phase.
# 0 = no valid detection; 1 = amplitude detection; 2 = phase detection; 3-15 for future use.
dg['detectionMethod'] = fields[3]
# For Kongsberg use.
dg['rejectionInfo1'] = fields[4]
# For Kongsberg use.
dg['rejectionInfo2'] = fields[5]
# For Kongsberg use.
dg['postProcessingInfo'] = fields[6]
# Only used by extra detections. Detection class based on detected range.
# Detection class 1 to 7 corresponds to value 0 to 6. If the value is between 100 and 106,
# the class is disabled by the operator. If the value is 107, the detections are outside the threshold limits.
dg['detectionClass'] = fields[7]
# Detection confidence level.
dg['detectionConfidenceLevel'] = fields[8]
# Byte alignment.
dg['padding'] = fields[9]
# Unit %. rangeFactor = 100 if main detection.
dg['rangeFactor'] = fields[10]
# Estimated standard deviation as % of the detected depth. Quality Factor (QF) is
# calculated from the IFREMER Quality Factor (IFQ): QF = Est(dz)/z = 100 * 10^-IFQ
dg['qualityFactor'] = fields[11]
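# Illustrative example: IFQ = 2 gives QF = 100 * 10**-2 = 1, i.e. an estimated depth error of 1 % of the depth.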
# Vertical uncertainty, based on quality factor (QF, qualityFactor).
dg['detectionUncertaintyVer_m'] = fields[12]
# Horizontal uncertainty, based on quality factor (QF, qualityFactor).
dg['detectionUncertaintyHor_m'] = fields[13]
# Detection window length. Unit second. Sample data range used in final detection.
dg['detectionWindowLength_sec'] = fields[14]
# Measured echo length. Unit second.
dg['echoLength_sec'] = fields[15]
# # # # # W A T E R C O L U M N P A R A M E T E R S # # # # #
# Water column beam number. Info for plotting soundings together with water column data.
dg['WCBeamNumb'] = fields[16]
# Water column range. Range of bottom detection, in samples.
dg['WCrange_samples'] = fields[17]
# Water column nominal beam angle across. Re vertical.
dg['WCNomBeamAngleAcross_deg'] = fields[18]
# # # # # REFLECTIVITY DATA (BACKSCATTER (BS) DATA) # # # # #
# Mean absorption coefficient, alfa. Used for TVG calculations. Value as used. Unit dB/km.
dg['meanAbsCoeff_dbPerkm'] = fields[19]
# Beam intensity, using the traditional KM special TVG.
dg['reflectivity1_dB'] = fields[20]
# Beam intensity (BS), using TVG = X log(R) + 2 alpha R. X (operator selected) is common to all beams in
# datagram. Alpha (variable meanAbsCoeff_dBPerkm) is given for each beam (current struct).
# BS = EL - SL - M + TVG + BScorr, where EL= detected echo level (not recorded in datagram),
# and the rest of the parameters are found below.
dg['reflectivity2_dB'] = fields[21]
# Receiver sensitivity (M), in dB, compensated for RX beampattern
# at actual transmit frequency at current vessel attitude.
dg['receiverSensitivityApplied_dB'] = fields[22]
# Source level (SL) applied (dB): SL = SLnom + SLcorr, where SLnom = Nominal maximum SL,
# recorded per TX sector (variable txNominalSourceLevel_dB in struct EMdgmMRZ_txSectorInfo_def) and
# SLcorr = SL correction relative to nominal TX power based on measured high voltage power level and
# any use of digital power control. SL is corrected for TX beampattern along and across at actual transmit
# frequency at current vessel attitude.
dg['sourceLevelApplied_dB'] = fields[23]
# Backscatter (BScorr) calibration offset applied (default = 0 dB).
dg['BScalibration_dB'] = fields[24]
# Time Varying Gain (TVG) used when correcting reflectivity.
dg['TVG_dB'] = fields[25]
# # # # # R A N G E A N D A N G L E D A T A # # # # #
# Angle relative to the RX transducer array, except for ME70,
# where the angles are relative to the horizontal plane.
dg['beamAngleReRx_deg'] = fields[26]
# Applied beam pointing angle correction.
dg['beamAngleCorrection_deg'] = fields[27]
# Two way travel time (also called range). Unit second.
dg['twoWayTravelTime_sec'] = fields[28]
# Applied two way travel time correction. Unit second.
dg['twoWayTravelTimeCorrection_sec'] = fields[29]
# # # # # G E O R E F E R E N C E D D E P T H P O I N T S # # # # #
# Distance from vessel reference point at time of first tx pulse in ping, to depth point.
# Measured in the surface coordinate system (SCS), see Coordinate systems for definition. Unit decimal degrees.
dg['deltaLatitude_deg'] = fields[30]
# Distance from vessel reference point at time of first tx pulse in ping, to depth point.
# Measured in the surface coordinate system (SCS), see Coordinate systems for definition. Unit decimal degrees.
dg['deltaLongitude_deg'] = fields[31]
# Vertical distance z. Distance from vessel reference point at time of first tx pulse in ping, to depth point.
# Measured in the surface coordinate system (SCS), see Coordinate systems for definition.
dg['z_reRefPoint_m'] = fields[32]
# Horizontal distance y. Distance from vessel reference point at time of first tx pulse in ping, to depth point.
# Measured in the surface coordinate system (SCS), see Coordinate systems for definition.
dg['y_reRefPoint_m'] = fields[33]
# Horizontal distance x. Distance from vessel reference point at time of first tx pulse in ping, to depth point.
# Measured in the surface coordinate system (SCS), see Coordinate systems for definition.
dg['x_reRefPoint_m'] = fields[34]
# Beam incidence angle adjustment (IBA) unit degree.
dg['beamIncAngleAdj_deg'] = fields[35]
# For future use.
dg['realTimeCleanInfo'] = fields[36]
# # # # # S E A B E D I M A G E # # # # #
# Seabed image start range, in sample number from transducer. Valid only for the current beam.
dg['SIstartRange_samples'] = fields[37]
# Seabed image. Number of the centre seabed image sample for the current beam.
dg['SIcentreSample'] = fields[38]
# Seabed image. Number of range samples from the current beam, used to form the seabed image.
dg['SInumSamples'] = fields[39]
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmMRZ(self):
"""
A method to read a full #MRZ datagram.
Kongsberg documentation: "The datagram also contains seabed image data. Depths points (x,y,z) are calculated
in meters, georeferred to the position of the vessel reference point at the time of the first transmitted pulse
of the ping. The depth point coordinates x and y are in the surface coordinate system (SCS), and are also given
as delta latitude and delta longitude, referred to origo of the VCS/SCS, at the time of the midpoint of the
first transmitted pulse of the ping (equals time used in the datagram header timestamp). See Coordinate systems
for introduction to spatial reference points and coordinate systems. Reference points are also described in
Reference points and offsets."
:return: A dictionary containing the full MRZ datagram: EMdgmHeader ('header'), EMdgmMpartition ('partition'),
EMdgmMbody ('cmnPart'), EMdgmMRZ_pingInfo ('pingInfo'), EMdgmMRZ_txSectorInfo ('txSectorInfo'),
EMdgmMRZ_rxInfo ('rxInfo'), EMdgmMRZ_extraDetClassInfo ('extraDetClassInfo'), EMdgmMRZ_sounding ('sounding'),
and the seabed image samples ('SIsample_desidB').
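Example (illustrative): after dg = km.read_EMdgmMRZ(), per-sounding depths are available as the list
dg['sounding']['z_reRefPoint_m'].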
"""
# LMD tested.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['partition'] = self.read_EMdgmMpartition()
dg['cmnPart'] = self.read_EMdgmMbody()
dg['pingInfo'] = self.read_EMdgmMRZ_pingInfo()
# Read TX sector info for each sector
txSectorInfo = []
for sector in range(dg['pingInfo']['numTxSectors']):
txSectorInfo.append(self.read_EMdgmMRZ_txSectorInfo())
dg['txSectorInfo'] = self.listofdicts2dictoflists(txSectorInfo)
# Read rxInfo
dg['rxInfo'] = self.read_EMdgmMRZ_rxInfo()
# Read extra detect metadata if they exist.
extraDetClassInfo = []
for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):
extraDetClassInfo.append(self.read_EMdgmMRZ_extraDetClassInfo())
dg['extraDetClassInfo'] = self.listofdicts2dictoflists(extraDetClassInfo)
# Read the sounding data.
soundings = []
Nseabedimage_samples = 0
for record in range(dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain']):
soundings.append(self.read_EMdgmMRZ_sounding())
Nseabedimage_samples += soundings[record]['SInumSamples']
dg['sounding'] = self.listofdicts2dictoflists(soundings)
# Read the seabed imagery.
# Seabed image sample amplitude, in 0.1 dB. Actual number of seabed image samples (SIsample_desidB) to be found
# by summing parameter SInumSamples in struct EMdgmMRZ_sounding_def for all beams. Seabed image data are raw
# beam sample data taken from the RX beams. The data samples are selected based on the bottom detection ranges.
# First sample for each beam is the one with the lowest range. The centre sample from each beam is geo
# referenced (x, y, z data from the detections). The BS corrections applied at the centre sample are the same
# as used for reflectivity2_dB (struct EMdgmMRZ_sounding_def).
format_to_unpack = str(Nseabedimage_samples) + "h"
dg['SIsample_desidB'] = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmMWCtxInfo(self):
"""
Read #MWC - data block 1: transmit sectors, general info for all sectors.
:return: A dictionary containing EMdgmMWCtxInfo.
"""
# LMD added, tested.
dg = {}
format_to_unpack = "3H1h1f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Number of bytes in current struct.
dg['numBytesTxInfo'] = fields[0]
# Number of transmitting sectors (Ntx). Denotes the number of times
# the struct EMdgmMWCtxSectorData is repeated in the datagram.
dg['numTxSectors'] = fields[1]
# Number of bytes in EMdgmMWCtxSectorData.
dg['numBytesPerTxSector'] = fields[2]
# Byte alignment.
dg['padding'] = fields[3]
# Heave at vessel reference point, at time of ping, i.e. at midpoint of first tx pulse in rxfan.
dg['heave_m'] = fields[4]
# Skip unknown fields.
self.FID.seek(dg['numBytesTxInfo'] - struct.Struct(format_to_unpack).size, 1)
'''
if self.verbose > 2:
self.print_datagram(dg)
'''
return dg
def read_EMdgmMWCtxSectorData(self):
"""
Read #MWC - data block 1: transmit sector data, loop for all i = numTxSectors.
:return: A dictionary containing EMdgmMWCtxSectorData
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# LMD added, tested.
dg = {}
format_to_unpack = "3f1H1h"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Along ship steering angle of the TX beam (main lobe of transmitted pulse), angle referred to transducer face.
# Angle as used by beamformer (includes stabilisation). Unit degree.
dg['tiltAngleReTx_deg'] = fields[0]
# Centre frequency of current sector. Unit hertz.
dg['centreFreq_Hz'] = fields[1]
# Corrected for frequency, sound velocity and tilt angle. Unit degree.
dg['txBeamWidthAlong_deg'] = fields[2]
# Transmitting sector number.
dg['txSectorNum'] = fields[3]
# Byte alignment.
dg['padding'] = fields[4]
'''
if self.verbose > 2:
self.print_datagram(dg)
'''
return dg
def read_EMdgmMWCrxInfo(self):
"""
Read #MWC - data block 2: receiver, general info.
:return: A dictionary containing EMdgmMWCrxInfo.
"""
# LMD added, tested.
dg = {}
format_to_unpack = "2H3B1b2f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Number of bytes in current struct.
dg['numBytesRxInfo'] = fields[0]
# Number of beams in this datagram (Nrx).
dg['numBeams'] = fields[1]
# Bytes in EMdgmMWCrxBeamData struct, excluding sample amplitudes (which have varying lengths).
dg['numBytesPerBeamEntry'] = fields[2]
# 0 = off; 1 = low resolution; 2 = high resolution.
dg['phaseFlag'] = fields[3]
# Time Varying Gain function applied (X). X log R + 2 Alpha R + OFS + C, where X and C are documented
# in the #MWC datagram. OFS is gain offset to compensate for TX source level, receiver sensitivity etc.
dg['TVGfunctionApplied'] = fields[4]
# Time Varying Gain offset used (OFS), unit dB. X log R + 2 Alpha R + OFS + C, where X and C are documented
# in the #MWC datagram. OFS is gain offset to compensate for TX source level, receiver sensitivity etc.
dg['TVGoffset_dB'] = fields[5]
# The sample rate is normally decimated to be approximately the same as the bandwidth of the transmitted pulse.
# Unit hertz.
dg['sampleFreq_Hz'] = fields[6]
# Sound speed at transducer, unit m/s.
dg['soundVelocity_mPerSec'] = fields[7]
# Skip unknown fields.
self.FID.seek(dg['numBytesRxInfo'] - struct.Struct(format_to_unpack).size, 1)
'''
if self.verbose > 2:
self.print_datagram(dg)
'''
return dg
def read_EMdgmMWCrxBeamData(self):
"""
Read #MWC - data block 2: receiver, specific info for each beam.
:return: A dictionary containing EMdgmMWCrxBeamData.
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# LMD added, partially tested.
# TODO: Test with water column data, phaseFlag = 1 and phaseFlag = 2 to ensure this continues to function properly.
dg = {}
format_to_unpack = "1f4H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['beamPointAngReVertical_deg'] = fields[0]
dg['startRangeSampleNum'] = fields[1]
# Two way range in samples. Approximation to calculated distance from tx to bottom detection
# [meters] = soundVelocity_mPerSec * detectedRangeInSamples / (sampleFreq_Hz * 2).
# The detected range is set to zero when the beam has no bottom detection.
dg['detectedRangeInSamples'] = fields[2]
dg['beamTxSectorNum'] = fields[3]
# Number of sample data for current beam. Also denoted Ns.
dg['numSampleData'] = fields[4]
# Pointer to start of array with Water Column data. Length of array = numSampleData.
# Sample amplitudes in 0.5 dB resolution. Size of array is numSampleData * int8_t.
# Amplitude array is followed by phase information if phaseFlag >0.
# Use (numSampleData * int8_t) to jump to next beam, or to start of phase info for this beam, if phase flag > 0.
format_to_unpack = str(dg['numSampleData']) + "b"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['sampleAmplitude05dB_p'] = fields
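# Illustrative conversion: amplitudes are stored in 0.5 dB steps, so amplitude_dB = 0.5 * sample_value.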
'''
if self.verbose > 2:
self.print_datagram(dg)
'''
return dg
def read_EMdgmMWCrxBeamPhase1(self, numSampleData):
"""
Read #MWC - Beam sample phase info, specific for each beam and water column sample.
numBeams * numSampleData = (Nrx * Ns) entries. Only added to datagram if phaseFlag = 1.
Total size of phase block is numSampleData * int8_t.
:return: A dictionary containing EMdgmCrxBeamPhase1.
"""
# LMD added, untested.
# TODO: Test with water column data, phaseFlag = 1 to complete/test this function.
# print("WARNING: You are using an incomplete, untested function: read_EMdgmMWCrxBeamPhase1.")
dg = {}
format_to_unpack = str(numSampleData) + "b"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Rx beam phase in 180/128 degree resolution.
dg['rxBeamPhase'] = fields
return dg
def read_EMdgmMWCrxBeamPhase2(self, numSampleData):
"""
Read #MWC - Beam sample phase info, specific for each beam and water column sample.
numBeams * numSampleData = (Nrx * Ns) entries. Only added to datagram if phaseFlag = 2.
Total size of phase block is numSampleData * int16_t.
:return: A dictionary containing EMdgmCrxBeamPhase2.
"""
# LMD added, untested.
# TODO: Test with water column data, phaseFlag = 2 to complete/test this function.
# print("WARNING: You are using an incomplete, untested function: read_EMdgmMWCrxBeamPhase2.")
dg = {}
format_to_unpack = str(numSampleData) + "h"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Rx beam phase in 0.01 degree resolution.
dg['rxBeamPhase'] = fields
return dg
def read_EMdgmMWC(self):
"""
Read #MWC - Multibeam Water Column Datagram. Entire datagram containing several sub structs.
:return: A dictionary containing EMdgmMWC.
"""
# LMD added, partially tested.
# NOTE: Tested with phaseFlag = 0.
# TODO: Test with water column data, phaseFlag = 1 and phaseFlag = 2 to fully complete/test this function.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['partition'] = self.read_EMdgmMpartition()
dg['cmnPart'] = self.read_EMdgmMbody()
dg['txInfo'] = self.read_EMdgmMWCtxInfo()
# Read TX sector info for each sector
txSectorData = []
for sector in range(dg['txInfo']['numTxSectors']):
txSectorData.append(self.read_EMdgmMWCtxSectorData())
dg['sectorData'] = self.listofdicts2dictoflists(txSectorData)
dg['rxInfo'] = self.read_EMdgmMWCrxInfo()
# Pointer to beam related information. Struct defines information about data for a beam. Beam information is
# followed by sample amplitudes in 0.5 dB resolution . Amplitude array is followed by phase information if
# phaseFlag >0. These data defined by struct EMdgmMWCrxBeamPhase1_def (int8_t) or struct
# EMdgmMWCrxBeamPhase2_def (int16_t) if indicated in the field phaseFlag in struct EMdgmMWCrxInfo_def.
# Length of data block for each beam depends on the operators choice of phase information (see table):
'''
phaseFlag: Beam Block Size:
0 numBytesPerBeamEntry + numSampleData * size(sampleAmplitude05dB_p)
1 numBytesPerBeamEntry + numSampleData * size(sampleAmplitude05dB_p)
+ numSampleData * size(EMdgmMWCrxBeamPhase1_def)
2 numBytesPerBeamEntry + numSampleData * size(sampleAmplitude05dB_p)
+ numSampleData * size(EMdgmMWCrxBeamPhase2_def)
'''
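# Illustrative example: with phaseFlag = 1 and numSampleData = 100 for a beam, that beam's block occupies
# numBytesPerBeamEntry + 100 bytes of int8 amplitude samples + 100 bytes of int8 phase samples.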
rxBeamData = []
rxPhaseInfo = []
for idx in range(dg['rxInfo']['numBeams']):
rxBeamData.append(self.read_EMdgmMWCrxBeamData())
if dg['rxInfo']['phaseFlag'] == 0:
pass
elif dg['rxInfo']['phaseFlag'] == 1:
# TODO: Test with water column data, phaseFlag = 1 to complete/test this function.
rxPhaseInfo.append(self.read_EMdgmMWCrxBeamPhase1(rxBeamData[idx]['numSampleData']))
elif dg['rxInfo']['phaseFlag'] == 2:
# TODO: Test with water column data, phaseFlag = 2 to complete/test this function.
rxPhaseInfo.append(self.read_EMdgmMWCrxBeamPhase2(rxBeamData[idx]['numSampleData']))
else:
print("ERROR: phaseFlag error in read_EMdgmMWC function.")
dg['beamData'] = self.listofdicts2dictoflists(rxBeamData)
# TODO: Should this be handled in a different way? By this method, number of fields in dg is variable.
if dg['rxInfo']['phaseFlag'] == 1 or dg['rxInfo']['phaseFlag'] == 2:
dg['phaseInfo'] = self.listofdicts2dictoflists(rxPhaseInfo)
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmScommon(self):
"""
Read sensor (S) output datagram - common part for all external sensors.
:return: A dictionary containing EMdgmScommon ('cmnPart').
"""
# LMD added, tested.
dg = {}
format_to_unpack = "4H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Size in bytes of current struct. Used for denoting size of rest of
# datagram in cases where only one datablock is attached.
dg['numBytesCmnPart'] = fields[0]
# Sensor system number, as indicated when setting up the system in K-Controller installation menu. E.g.
# position system 0 refers to system POSI_1 in installation datagram #IIP. Check if this sensor system is
# active by using #IIP datagram. #SCL - clock datagram:
'''
Bit: Sensor system:
0          Time synchronisation from clock data
1          Time synchronisation from active position data
2 1 PPS is used
'''
dg['sensorSystem'] = fields[1]
# Sensor status. To indicate quality of sensor data is valid or invalid. Quality may be invalid even if sensor
# is active and the PU receives data. Bit code vary according to type of sensor.
# Bits 0 -7 common to all sensors and #MRZ sensor status:
'''
Bit: Sensor data:
0 0 = Data OK; 1 = Data OK and sensor is chosen as active;
#SCL only: 1 = Valid data and 1PPS OK
1 0
2 0 = Data OK; 1 = Reduced performance;
#SCL only: 1 = Reduced performance, no time synchronisation of PU
3 0
4 0 = Data OK; 1 = Invalid data
5 0
6 0 = Velocity from sensor; 1 = Velocity calculated by PU
7 0
'''
# For #SPO (position) and CPO (position compatibility) datagrams, bit 8 - 15:
'''
Bit: Sensor data:
8 0
9 0 = Time from PU used (system); 1 = Time from datagram used (e.g. from GGA telegram)
10 0 = No motion correction; 1 = With motion correction
11 0 = Normal quality check; 1 = Operator quality check. Data always valid.
12 0
13 0
14 0
15 0
'''
dg['sensorStatus'] = fields[2]
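# Illustrative check based on the bit table above (assuming bit 0 is the least significant bit):
# bool(dg['sensorStatus'] & (1 << 4)) is True when the sensor data is flagged invalid.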
dg['padding'] = fields[3]
# Skip unknown fields.
#self.FID.seek(dg['numBytesCmnPart'] - struct.Struct(format_to_unpack).size, 1)
return dg
def read_EMdgmSPOdataBlock(self, length):
"""
Read #SPO - Sensor position data block. Data from active sensor is corrected data for position system
installation parameters. Data is also corrected for motion (roll and pitch only) if enabled by K-Controller
operator. Data given both decoded and corrected (active sensors), and raw as received from sensor in text
string.
:return: A dictionary containing EMdgmSPOdataBlock ('sensorData').
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# LMD added, tested.
dg = {}
format_to_unpack = "2I1f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# UTC time from position sensor. Unit seconds. Epoch 1970-01-01. Nanosec part to be added for more exact time.
dg['timeFromSensor_sec'] = fields[0]
# UTC time from position sensor. Unit nano seconds remainder.
dg['timeFromSensor_nanosec'] = fields[1]
dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['timeFromSensor_sec']
+ dg['timeFromSensor_nanosec'] / 1.0E9)
# Only if available as input from sensor. Calculation according to format.
dg['posFixQuality_m'] = fields[2]
# The unpack is split in two because struct uses native alignment by default; combining the format strings
# would insert padding bytes before the doubles and misalign the remaining fields.
format_to_unpack = "2d3f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Motion corrected (if enabled in K-Controller) data as used in depth calculations. Referred to vessel
# reference point. Unit decimal degree. Parameter is set to define UNAVAILABLE_LATITUDE if sensor inactive.
dg['correctedLat_deg'] = fields[0]
# Motion corrected (if enabled in K-Controller) data as used in depth calculations. Referred to vessel
# reference point. Unit decimal degree. Parameter is set to define UNAVAILABLE_LONGITUDE if sensor inactive.
dg['correctedLong_deg'] = fields[1]
# Speed over ground. Unit m/s. Motion corrected (if enabled in K-Controller) data as used in depth calculations.
# If unavailable or from inactive sensor, value set to define UNAVAILABLE_SPEED.
dg['speedOverGround_mPerSec'] = fields[2]
# Course over ground. Unit degree. Motion corrected (if enabled in K-Controller) data as used in depth
# calculations. If unavailable or from inactive sensor, value set to define UNAVAILABLE_COURSE.
dg['courseOverGround_deg'] = fields[3]
# Height of vessel reference point above the ellipsoid. Unit meter.
# Motion corrected (if enabled in K-Controller) data as used in depth calculations.
# If unavailable or from inactive sensor, value set to define UNAVAILABLE_ELLIPSOIDHEIGHT.
dg['ellipsoidHeightReRefPoint_m'] = fields[4]
# TODO: This is an array of (max?) length MAX_SPO_DATALENGTH; do something else here?
# TODO: Get MAX_SPO_DATALENGTH from datagram instead of hard-coding in format_to_unpack.
# TODO: This works for now, but maybe there is a smarter way?
# MB: changed to calculate the remaining bytes from what has already been read.
# Using MAX_SPO_DATALENGTH was over-reading when SPO packets were close to the
# end of the file. Calculating the remainder works and reads the full raw string.
# Position data as received from sensor, i.e. uncorrected for motion etc.
pos_data_len = length - struct.Struct("2I1f2d3f").size
format_to_unpack = "%ds" % pos_data_len
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
#tmp = fields[5]
#dg['posDataFromSensor'] = tmp[0:tmp.find(b'\r\n')]
dg['posDataFromSensor'] = fields[0]
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmSPO(self):
"""
Read #SPO - Struct of position sensor datagram. Data from the active sensor will be motion corrected if
indicated by operator. Motion correction is applied to latitude, longitude, speed, course and ellipsoidal
height. If the sensor is inactive, the fields will be marked as unavailable, defined by the parameters define
UNAVAILABLE_LATITUDE etc.
:return: A dictionary of dictionaries, including EMdgmHeader ('header'), EMdgmScommon ('cmnPart'), and
EMdgmSPOdataBlock ('sensorData').
"""
# LMD added, tested.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['cmnPart'] = self.read_EMdgmScommon()
## Data block length is balance of datagram
data_block_len = dg['header']['numBytesDgm'] - (self.FID.tell()-start)
dg['sensorData'] = self.read_EMdgmSPOdataBlock(data_block_len)
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmSKMinfo(self):
"""
Read sensor (S) output datagram - info of KMB datagrams.
:return: A dictionary containing EMdgmSKMinfo ('infoPart').
"""
# LMD tested.
dg = {}
format_to_unpack = "1H2B4H"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Size in bytes of current struct. Used for denoting size of rest of datagram
# in cases where only one datablock is attached.
dg['numBytesInfoPart'] = fields[0]
# Attitude system number, as numbered in installation parameters.
# E.g. system 0 refers to system ATTI_1 in installation datagram #IIP.
dg['sensorSystem'] = fields[1]
# Sensor status. Summarise the status fields of all KM binary samples added in this datagram (status in struct
# KMbinary_def). Only available data from input sensor format is summarised. Available data found in
# sensorDataContents. Bits 0 -7 common to all sensors and #MRZ sensor status:
'''
Sensor Status:
Bit: 0 0 Data OK, 1 Data OK and Sensor is active
Bit: 1 0
Bit: 2 0 Data OK, 1 Data Reduced Performance
Bit: 3 0
Bit: 4 0 Data OK, 1 Invalid Data
Bit: 5 0
Bit: 6 0 Velocity from Sensor, 1 Velocity from PU
'''
dg['sensorStatus'] = fields[2]
# Format of raw data from input sensor, given in numerical code according to table below.
'''
Code: Sensor Format:
1: KM Binary Sensor Format
2: EM 3000 data
3: Sagem
4: Seapath binary 11
5: Seapath binary 23
6: Seapath binary 26
7: POS/MV Group 102/103
8: Coda Octopus MCOM
'''
dg['sensorInputFormat'] = fields[3]
# Number of KM binary sensor samples added in this datagram.
dg['numSamplesArray'] = fields[4]
# Length in bytes of one whole KM binary sensor sample.
dg['numBytesPerSample'] = fields[5]
# Field to indicate which information is available from the input sensor, at the given sensor format.
# 0 = not available; 1 = data is available
# The bit pattern is used to determine sensorStatus from the status field in #KMB samples. Only data available from
# the sensor is checked against invalid/reduced performance in status, and summarised in sensorStatus.
# E.g. the binary 23 format does not contain delayed heave. This is indicated by setting bit 6 in
# sensorDataContents to 0. In each sample in #KMB output from PU, the status field (struct KMbinary_def) for
# INVALID delayed heave (bit 6) is set to 1. The summarised sensorStatus in struct EMdgmSKMinfo_def will then
# be set to 0 if all available data is ok. Expected data field in sensor input:
'''
Indicates what data is available in the given sensor format
Bit: Sensor Data:
0          Horizontal position and velocity
1 Roll and pitch
2 Heading
3 Heave and vertical velocity
4 Acceleration
5 Error fields
6 Delayed Heave
'''
dg['sensorDataContents'] = fields[6]
# Skip unknown fields.
self.FID.seek(dg['numBytesInfoPart'] - struct.Struct(format_to_unpack).size, 1)
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_KMdelayedHeave(self):
"""
Read #SKM - delayed heave. Included if available from sensor.
:return: A dictionary containing KMdelayedHeave.
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# LMD tested with 'empty' delayed heave fields.
# TODO: Test with data containing delayed heave.
dg = {}
format_to_unpack = "2I1f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['time_sec'] = fields[0]
dg['time_nanosec'] = fields[1]
dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['time_sec'] + dg['time_nanosec'] / 1.0E9)
# Delayed heave. Unit meter.
dg['delayedHeave_m'] = fields[2]
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_KMbinary(self):
"""
Read #SKM - sensor attitude data block. Data given timestamped, not corrected.
See Coordinate Systems for definition of positive angles and axis.
:return: A dictionary containing KMbinary.
"""
# LMD tested.
dg = {}
format_to_unpack = "4B"
fields = self.FID.read(struct.Struct(format_to_unpack).size)
# KMB
dg['dgmType'] = fields.decode('utf-8')
format_to_unpack = "2H3I"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Datagram length in bytes. The length field at the start (4 bytes)
# and end of the datagram (4 bytes) are included in the length count.
dg['numBytesDgm'] = fields[0]
# Datagram version.
dg['dgmVersion'] = fields[1]
# UTC time from inside KM sensor data. Unit second. Epoch 1970-01-01 time.
# Nanosec part to be added for more exact time.
dg['time_sec'] = fields[2]
# Nano seconds remainder. Nanosec part to be added to time_sec for more exact time.
# If time is unavailable from attitude sensor input, time of reception on serial port is added to this field.
dg['time_nanosec'] = fields[3]
dg['dgtime'] = dg['time_sec'] + dg['time_nanosec'] / 1.0E9
dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['dgtime'])
# Bit pattern for indicating validity of sensor data, and reduced performance.
# The status word consists of 32 single bit flags numbered from 0 to 31, where 0 is the least significant bit.
# Bit number 0-7 indicate if from a sensor data is invalid: 0 = valid data, 1 = invalid data.
# Bit number 16-> indicate if data from sensor has reduced performance: 0 = valid data, 1 = reduced performance.
'''
Invalid data: | Reduced performance:
Bit: Sensor data: | Bit: Sensor data:
0 Horizontal position and velocity | 16 Horizontal position and velocity
1 Roll and pitch | 17 Roll and pitch
2 Heading | 18 Heading
3 Heave and vertical velocity | 19 Heave and vertical velocity
4 Acceleration | 20 Acceleration
5 Error fields | 21 Error fields
6 Delayed heave | 22 Delayed heave
'''
dg['status'] = fields[4]
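# Illustrative check based on the table above: bool(dg['status'] & (1 << 1)) is True when roll and pitch
# data are flagged invalid.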
format_to_unpack = "2d"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# # # # # P O S I T I O N # # # # #
# Position in decimal degrees.
dg['latitude_deg'] = fields[0]
# Position in decimal degrees.
dg['longitude_deg'] = fields[1]
format_to_unpack = "21f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['ellipsoidHeight_m'] = fields[0]
# # # # # A T T I T U D E # # # # #
dg['roll_deg'] = fields[1]
dg['pitch_deg'] = fields[2]
dg['heading_deg'] = fields[3]
dg['heave_m'] = fields[4]
# # # # # R A T E S # # # # #
dg['rollRate'] = fields[5]
dg['pitchRate'] = fields[6]
dg['yawRate'] = fields[7]
# # # # # V E L O C I T I E S # # # # #
dg['velNorth'] = fields[8]
dg['velEast'] = fields[9]
dg['velDown'] = fields[10]
# # # # # ERRORS IN DATA. SENSOR DATA QUALITY, AS STANDARD DEVIATIONS # # # # #
dg['latitudeError_m'] = fields[11]
dg['longitudeError_m'] = fields[12]
dg['ellipsoidalHeightError_m'] = fields[13]
dg['rollError_deg'] = fields[14]
dg['pitchError_deg'] = fields[15]
dg['headingError_deg'] = fields[16]
dg['heaveError_m'] = fields[17]
# # # # # A C C E L E R A T I O N # # # # #
dg['northAcceleration'] = fields[18]
dg['eastAcceleration'] = fields[19]
dg['downAcceleration'] = fields[20]
# In testing, it appears 'numBytesDgm' = KMbinary + KMdelayedHeave.
# We will run into errors here if we use this method to skip unknown fields.
# Skip unknown fields
# self.FID.seek(dg['numBytesDgm'] - struct.Struct(format_to_unpack).size, 1)
if self.verbose > 2:
self.print_datagram(dg)
return dg
def read_EMdgmSKMsample(self, dgInfo):
"""
Read #SKM - all available data. An implementation of the KM Binary sensor input format.
:param dgInfo: A dictionary containing EMdgmSKMinfo (output of function read_EMdgmSKMinfo).
:return: A dictionary of lists, containing EMdgmSKMsample ('sample').
This includes keys 'KMdefault' and 'delayedHeave'.
"""
# LMD tested.
# TODO: Can add code to omit delayed heave if it is not included.
dg = {}
km_binary_data = []
km_heave_data = []
for idx in range(dgInfo['numSamplesArray']):
km_binary_data.append(self.read_KMbinary())
km_heave_data.append(self.read_KMdelayedHeave())
# Convert list of dictionaries to dictionary of lists.
dg['KMdefault'] = self.listofdicts2dictoflists(km_binary_data)
dg['delayedHeave'] = self.listofdicts2dictoflists(km_heave_data)
return dg
def read_EMdgmSKM(self):
"""
Read #SKM - data from attitude and attitude velocity sensors. Datagram may contain several sensor measurements.
The number of samples in datagram is listed in numSamplesArray in the struct EMdgmSKMinfo_def. Time given in
datagram header, is time of arrival of data on serial line or on network. Time inside #KMB sample is time from
the sensors data. If input is other than KM binary sensor input format, the data are converted to the KM binary
format by the PU. All parameters are uncorrected. For processing of data, installation offsets, installation
angles and attitude values are needed to correct the data for motion.
:return: A dictionary containing EMdgmSKM.
"""
# LMD tested.
start = self.FID.tell()
# LMD implementation:
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['infoPart'] = self.read_EMdgmSKMinfo()
dg['sample'] = self.read_EMdgmSKMsample(dg['infoPart'])
# VES implementation:
'''
dgH = self.read_EMdgmHeader()
dgInfo = self.read_EMdgmSKMinfo()
dgSamples = self.read_EMdgmSKMsample(dgInfo)
dg = {**dgH, **dgInfo, **dgSamples}
'''
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmSVPpoint(self):
"""
Read #SVP - Sound Velocity Profile. Data from one depth point contains information specified in this struct.
:return: A dictionary containing EMdgmSVPpoint.
"""
# NOTE: There is no field for the number of bytes in this record. Odd.
# LMD added, tested.
dg = {}
format_to_unpack = "2f1I2f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Depth at which measurement is taken. Unit m. Valid range from 0.00 m to 12000 m.
dg['depth_m'] = fields[0]
# Measured sound velocity from profile. Unit m/s. For a CTD profile, this will be the calculated sound velocity.
dg['soundVelocity_mPerSec'] = fields[1]
# Former absorption coefficient. Voided.
dg['padding'] = fields[2]
# Water temperature at given depth. Unit Celsius. For a Sound velocity profile (S00), this will be set to 0.00.
dg['temp_C'] = fields[3]
# Salinity of water at given depth. For a Sound velocity profile (S00), this will be set to 0.00.
dg['salinity'] = fields[4]
return dg
def read_EMdgmSVP(self):
"""
Read #SVP - Sound Velocity Profile. Data from sound velocity profile or from CTD profile.
Sound velocity is measured directly or estimated, respectively.
:return: A dictionary containing EMdgmSVP.
"""
# LMD added, tested.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
format_to_unpack = "2H4s1I"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Size in bytes of body part struct. Used for denoting size of rest of datagram.
dg['numBytesCmnPart'] = fields[0]
# Number of sound velocity samples.
dg['numSamples'] = fields[1]
# Sound velocity profile format:
'''
'S00' = sound velocity profile
'S01' = CTD profile
'''
dg['sensorFormat'] = fields[2]
# Time extracted from the Sound Velocity Profile. Parameter is set to zero if not found.
dg['time_sec'] = fields[3]
dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['time_sec'])
format_to_unpack = "2d"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Latitude in degrees. Negative if southern hemisphere. Position extracted from the Sound Velocity Profile.
# Parameter is set to define UNAVAILABLE_LATITUDE if not available.
dg['latitude_deg'] = fields[0]
# Longitude in degrees. Negative if western hemisphere. Position extracted from the Sound Velocity Profile.
# Parameter is set to define UNAVAILABLE_LONGITUDE if not available.
dg['longitude_deg'] = fields[1]
# SVP point samples, repeated numSamples times.
sensorData = []
for record in range(dg['numSamples']):
sensorData.append(self.read_EMdgmSVPpoint())
dg['sensorData'] = self.listofdicts2dictoflists(sensorData)
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
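# Usage sketch (editorial addition, hedged): pulling a depth vs. sound-speed
# profile out of a parsed #SVP record. Assumes `k.FID` is already positioned at
# the byte offset of an '#SVP' datagram (e.g. taken from the file index).
#
#   dg = k.read_EMdgmSVP()
#   depths = dg['sensorData']['depth_m']
#   speeds = dg['sensorData']['soundVelocity_mPerSec']
#   for d, c in zip(depths, speeds):
#       print("%8.2f m  %7.2f m/s" % (d, c))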
def read_EMdgmSVTinfo(self):
"""
Read part of Sound Velocity at Transducer datagram.
:return: A dictionary containing EMdgmSVTinfo.
"""
# LMD added, tested.
dg = {}
format_to_unpack = "6H2f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Size in bytes of current struct. Used for denoting size of rest of datagram in cases where only one
# datablock is attached.
dg['numBytesInfoPart'] = fields[0]
# Sensor status. To indicate quality of sensor data is valid or invalid. Quality may be invalid even if sensor
# is active and the PU receives data. Bit code vary according to type of sensor.
# Bits 0-7 common to all sensors and #MRZ sensor status:
'''
Bit: Sensor data:
0 0 Data OK; 1 Data OK and sensor chosen is active
1 0
2 0 Data OK; 1 Reduced Performance
3 0
4 0 Data OK; 1 Invalid Data
5 0
6 0
'''
dg['sensorStatus'] = fields[1]
# Format of raw data from input sensor, given in numerical code according to table below.
'''
Code: Sensor format:
1 AML NMEA
2 AML SV
3 AML SVT
4 AML SVP
5 Micro SV
6 Micro SVT
7 Micro SVP
8 Valeport MiniSVS
9 KSSIS 80
10 KSSIS 43
'''
dg['sensorInputFormat'] = fields[2]
# Number of sensor samples added in this datagram.
dg['numSamplesArray'] = fields[3]
# Length in bytes of one whole SVT sensor sample.
dg['numBytesPerSample'] = fields[4]
# Field to indicate which information is available from the input sensor, at the given sensor format.
# 0 = not available; 1 = data is available
# Expected data field in sensor input:
'''
Bit: Sensor data:
0 Sound Velocity
1 Temperature
2 Pressure
3 Salinity
'''
dg['sensorDataContents'] = fields[5]
# Time parameter for moving median filter. Unit seconds.
dg['filterTime_sec'] = fields[6]
# Offset for measured sound velocity set in K-Controller. Unit m/s.
dg['soundVelocity_mPerSec_offset'] = fields[7]
# Skip unknown fields.
self.FID.seek(dg['numBytesInfoPart'] - struct.Struct(format_to_unpack).size, 1)
return dg
def read_EMdgmSVTsample(self):
"""
Read #SVT - Sound Velocity at Transducer. Data sample.
:return: A dictionary containing EMdgmSVTsample.
"""
# NOTE: There are no fields for the number of bytes in this record. Odd.
# LMD added, tested.
dg = {}
format_to_unpack = "2I4f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Time in second. Epoch 1970-01-01. time_nanosec part to be added for more exact time.
dg['time_sec'] = fields[0]
# Nano seconds remainder. time_nanosec part to be added to time_sec for more exact time.
dg['time_nanosec'] = fields[1]
dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['time_sec'] + dg['time_nanosec'] / 1.0E9)
# Measured sound velocity from sound velocity probe. Unit m/s.
dg['soundVelocity_mPerSec'] = fields[2]
# Water temperature from sound velocity probe. Unit Celsius.
dg['temp_C'] = fields[3]
# Pressure. Unit Pascal.
dg['pressure_Pa'] = fields[4]
# Salinity of water. Measured in g salt/kg sea water.
dg['salinity'] = fields[5]
return dg
def read_EMdgmSVT(self):
"""
Read #SVT - Sound Velocity at Transducer. Data for sound velocity and temperature are measured directly
on the sound velocity probe.
:return: A dictionary containing EMdgmSVT.
"""
# LMD added, tested.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['infoPart'] = self.read_EMdgmSVTinfo()
sensorData = []
for record in range(dg['infoPart']['numSamplesArray']):
sensorData.append(self.read_EMdgmSVTsample())
dg['sensorData'] = self.listofdicts2dictoflists(sensorData)
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmSCLdataFromSensor(self):
"""
Read part of clock datagram giving offsets and the raw input in text format.
:return: A dictionary containing EMdgmSCLdataFromSensor.
"""
# NOTE: There are no fields for the number of bytes in this record. Odd.
# LMD tested.
dg = {}
format_to_unpack = "1f1i64s"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# Offset in seconds from K-Controller operator input.
dg['offset_sec'] = fields[0]
# Clock deviation from PU. Difference between time stamp at receive of sensor data and time in the clock
# source. Unit nanoseconds. Difference smaller than +/- 1 second if 1PPS is active and sync from ZDA.
dg['clockDevPU_nanosec'] = fields[1]
# TODO: This is an array of (max?) length MAX_SCL_DATALENGTH; do something else here?
# TODO: Get MAX_SCL_DATALENGTH from datagram instead of hard-coding in format_to_unpack.
# TODO: This works for now, but maybe there is a smarter way?
# Clock data as received from sensor, in text format (uncorrected for offsets).
tmp = fields[2]
dg['dataFromSensor'] = tmp[0:tmp.find(b'\x00\x00L')]
return dg
def read_EMdgmSCL(self):
"""
Read #SCL - Clock datagram.
:return: A dictionary containing EMdgmSCL.
"""
# LMD tested.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['cmnPart'] = self.read_EMdgmScommon()
dg['sensData'] = self.read_EMdgmSCLdataFromSensor()
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmSDEdataFromSensor(self):
"""
# WARNING: INCOMPLETE
Read part of depth datagram giving depth as used, offsets,
scale factor and data as received from sensor (uncorrected).
:return: A dictionary containing EMdgmSDEdataFromSensor
"""
# NOTE: There are no fields for the number of bytes in this record. Odd.
# LMD added, untested.
# TODO: Test with depth data to complete this function!
print("WARNING: You are using an incomplete, untested function: read_EMdgmSDEdataFromSensor.")
dg = {}
format_to_unpack = "3f2d32s"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['depthUsed_m'] = fields[0]
dg['offset'] = fields[1]
dg['scale'] = fields[2]
dg['latitude_deg'] = fields[3]
dg['longitude_deg'] = fields[4]
# TODO: This is an array of (max?) length MAX_SDE_DATALENGTH; do something else here?
# TODO: Get MAX_SDE_DATALENGTH from datagram instead of hard-coding in format_to_unpack.
# TODO: Test with depth data to complete this function!
tmp = fields[5]
# dg['dataFromSensor'] = ...
return dg
def read_EMdgmSDE(self):
"""
Read #SDE - Depth datagram.
:return: A dictionary containing EMdgmSDE.
"""
# LMD added, untested.
# TODO: Test with depth data!
print("WARNING: You are using an incomplete, untested function: read_EMdgmSDE.")
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['cmnPart'] = self.read_EMdgmScommon()
dg['sensorData'] = self.read_EMdgmSDEdataFromSensor()
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmSHIdataFromSensor(self):
"""
# WARNING: INCOMPLETE
Read part of Height datagram, giving corrected and uncorrected data as received from sensor.
:return: A dictionary containing EMdgmSHIdataFromSensor.
"""
# NOTE: There are no fields for the number of bytes in this record. Odd.
# LMD added, untested.
# TODO: Test with height data to complete this function!
print("WARNING: You are using an incomplete, untested function: read_EMdgmSHIdataFromSensor.")
dg = {}
format_to_unpack = "1H1f32s"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['sensorType'] = fields[0]
dg['heightUsed_m'] = fields[1]
# TODO: This is an array of (max?) length MAX_SHI_DATALENGTH; do something else here?
# TODO: Get MAX_SHI_DATALENGTH from datagram instead of hard-coding in format_to_unpack.
# TODO: Test with height data to complete this function!
tmp = fields[2]
# dg['dataFromSensor'] = ...
print("DG: ", dg)
return dg
def read_EMdgmSHI(self):
"""
Read #SHI - Height datagram.
:return: A dictionary containing EMdgmSHI.
"""
# LMD added, untested.
# TODO: Test with height data!
print("WARNING: You are using an incomplete, untested function: read_EMdgmSHI.")
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['cmnPart'] = self.read_EMdgmScommon()
dg['sensData'] = self.read_EMdgmSHIdataFromSensor()
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmCPOdataBlock(self, length):
"""
Read #CPO - Compatibility sensor position compatibility data block. Data from active sensor is referenced to
position at antenna footprint at water level. Data is corrected for motion (roll and pitch only) if enabled
by K-Controller operator. Data given both decoded and corrected (active sensors), and raw as received from
sensor in text string.
:return: A dictionary containing EMdgmCPOdataBlock.
"""
# NOTE: There are no fields for the number of bytes in this record. Odd.
# LMD tested.
dg = {}
format_to_unpack = "2I1f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['timeFromSensor_sec'] = fields[0]
dg['timeFromSensor_nanosec'] = fields[1]
dg['datetime'] = datetime.datetime.utcfromtimestamp(dg['timeFromSensor_sec']
+ dg['timeFromSensor_nanosec'] / 1.0E9)
dg['posFixQuality'] = fields[2]
# For some reason this fails when unpacked in a single struct call, so it is split into two steps.
format_to_unpack = "2d3f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['correctedLat_deg'] = fields[0]
dg['correctedLong_deg'] = fields[1]
dg['speedOverGround_mPerSec'] = fields[2]
dg['courseOverGround_deg'] = fields[3]
dg['ellipsoidHeightReRefPoint_m'] = fields[4]
pos_data_len = length - struct.Struct("2I1f2d3f").size
format_to_unpack = "%ds" % pos_data_len
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
# TODO: This is an array of (max?) length MAX_CPO_DATALENGTH; do something else here?
# TODO: Get MAX_CPO_DATALENGTH from datagram instead of hard-coding in format_to_unpack.
# TODO: This works for now, but maybe there is a smarter way?
dg['posDataFromSensor'] = fields[0]
return dg
def read_EMdgmCPO(self):
"""
Read #CPO - Struct of compatibility position sensor datagram. Data from active sensor will be motion corrected
if indicated by operator. Motion correction is applied to latitude, longitude, speed, course and ellipsoidal
height. If the sensor is inactive, the fields will be marked as unavailable, defined by the parameters
define UNAVAILABLE_LATITUDE etc.
:return: A dictionary containing EMdgmCPO.
"""
# LMD tested.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['cmnPart'] = self.read_EMdgmScommon()
## Data block length is balance of datagram
data_block_len = dg['header']['numBytesDgm'] - (self.FID.tell()-start)
dg['sensorData'] = self.read_EMdgmCPOdataBlock( data_block_len )
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmCHEdata(self):
"""
Read #CHE - Heave compatibility data part. Heave reference point is at transducer instead of at vessel
reference point.
:return: A dictionary containing EMdgmCHEdata.
"""
# NOTE: There are no fields for the number of bytes in this record. Odd.
# LMD added, tested.
dg = {}
format_to_unpack = "1f"
fields = struct.unpack(format_to_unpack, self.FID.read(struct.Struct(format_to_unpack).size))
dg['heave_m'] = fields[0]
return dg
def read_EMdgmCHE(self):
"""
Read #CHE - Struct of compatibility heave sensor datagram. Used for backward compatibility with .all datagram
format. Sent before #MWC (water column datagram) datagram if compatibility mode is enabled. The multibeam
datagram body is common with the #MWC datagram.
:return: A dictionary containing EMdgmCHE.
"""
# LMD added, tested.
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['cmnPart'] = self.read_EMdgmMbody()
dg['data'] = self.read_EMdgmCHEdata()
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
###########################################################
# Writing datagrams
###########################################################
def write_EMdgmMRZ(self, dg):
''' A method to write an MRZ datagram back to disk.'''
# Force the header type to be MRZ, just in case
# the datagram is converted from another type and
# the old type is still set.
dg['header']['dgmType'] = b'#MRZ'
self.write_EMdgmHeader(dg['header'])
self.write_EMdgmMpartition(dg['partition'])
self.write_EMdgmMbody(dg['cmnPart'])
self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])
for sector in range(dg['pingInfo']['numTxSectors']):
self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)
self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])
for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):
self.write_EMdgmMRZ_extraDetClassInfo(dg['extraDetClassInfo'], detclass)
Nseabedimage_samples = 0
for record in range(dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain']):
self.write_EMdgmMRZ_sounding(dg['sounding'], record)
Nseabedimage_samples += dg['sounding']['SInumSamples'][record]
if Nseabedimage_samples > 0:
if 'SIsample_desidB' not in dg:
print(
"Warning, no Imagery data to write, although the field SInumSamples in the sounding datagram is non-zero.")
print("This will produce an unreadable file.")
# FIX: Should throw an error here.
else:
self.write_EMdgmMRZ_seabedImagery(dg, Nseabedimage_samples)
self.FID.write(struct.pack("I", dg['header']['numBytesDgm']))
def write_EMdgmMRZ_woImagery(self, dg):
''' A method to write an MRZ datagram back to disk, but omitting the imagery data.'''
# First we need to see how much space the imagery data will take.
Nseabedimage_samples = 0
for record in range(dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain']):
Nseabedimage_samples += dg['sounding']['SInumSamples'][record]
imageryBytes = Nseabedimage_samples * 2
# Now we need to reset the total packet size.
dg['header']['numBytesDgm'] -= imageryBytes
# Now write the packet, just leave out the imagery
# data and set Nsamples to 0.
self.write_EMdgmHeader(dg['header'])
self.write_EMdgmMpartition(dg['partition'])
self.write_EMdgmMbody(dg['cmnPart'])
self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])
for sector in range(dg['pingInfo']['numTxSectors']):
self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)
self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])
for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):
self.write_EMdgmMRZ_extraDetClassInfo(dg['extraDetClassInfo'], detclass)
Nseabedimage_samples = 0
for record in range(dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain']):
# Zero out the number of imagery samples for each sounding.
dg['sounding']['SInumSamples'][record] = 0
self.write_EMdgmMRZ_sounding(dg['sounding'], record)
Nseabedimage_samples += dg['sounding']['SInumSamples'][record]
# Don't write the imagery data.
# write_EMdgmMRZ_seabedImagery(FID, dg, Nseabedimage_samples)
self.FID.write(struct.pack("I", dg['header']['numBytesDgm']))
def write_EMdgmHeader(self, dg):
''' Method to write the datagram header.
write_EMdgmHeader(FID, dg['header'])
'''
format_to_pack = "<1I4s2B1H2I"
dg_seconds = int(dg['dgtime'])
dg_nanoseconds = int((dg['dgtime'] - dg_seconds) * 1e9)
self.FID.write(struct.pack(format_to_pack,
dg['numBytesDgm'],
dg['dgmType'],
dg['dgmVersion'],
dg['systemID'],
dg['echoSounderID'],
dg_seconds,
dg_nanoseconds))
def write_EMdgmMpartition(self, dg):
''' A method to write the Partition Information
write_EMdgmMpartition(FID, dg['partition'])
'''
format_to_pack = "<2H"
self.FID.write(struct.pack(format_to_pack,
dg['numOfDgms'],
dg['dgmNum']))
def write_EMdgmMbody(self, dg):
''' A method to write the datagram body information
write_EMdgmMbody(FID, dg['cmnPart'])
'''
format_to_pack = "<2H8B"
self.FID.write(struct.pack(format_to_pack,
dg['numBytesCmnPart'],
dg['pingCnt'],
dg['rxFansPerPing'],
dg['rxFanIndex'],
dg['swathsPerPing'],
dg['swathAlongPosition'],
dg['txTransducerInd'],
dg['rxTransducerInd'],
dg['numRxTransducers'],
dg['algorithmType']))
def write_EMdgmMRZ_pingInfo(self, dg):
'''A method to write MRZ ping info.
write_EMdgmMRZ_pingInfo(FID, dg['pinginfo'])
'''
format_to_pack_a = "<2H1f6B1H11f2h2B1H1I3f2H1f2H6f4B"
self.FID.write(struct.pack(format_to_pack_a,
dg['numBytesInfoData'],
dg['padding0'],
dg['pingRate_Hz'],
dg['beamSpacing'],
dg['depthMode'],
dg['subDepthMode'],
dg['distanceBtwSwath'],
dg['detectionMode'],
dg['pulseForm'],
dg['padding1'],
dg['frequencyMode_Hz'],
dg['freqRangeLowLim_Hz'],
dg['freqRangeHighLim_Hz'],
dg['maxTotalTxPulseLength_sec'],
dg['maxEffTxPulseLength_sec'],
dg['maxEffTxBandWidth_Hz'],
dg['absCoeff_dBPerkm'],
dg['portSectorEdge_deg'],
dg['starbSectorEdge_deg'],
dg['portMeanCov_deg'],
dg['stbdMeanCov_deg'],
dg['portMeanCov_m'],
dg['starbMeanCov_m'],
dg['modeAndStabilisation'],
dg['runtimeFilter1'],
dg['runtimeFilter2'],
dg['pipeTrackingStatus'],
dg['transmitArraySizeUsed_deg'],
dg['receiveArraySizeUsed_deg'],
dg['transmitPower_dB'],
dg['SLrampUpTimeRemaining'],
dg['padding2'],
dg['yawAngle_deg'],
dg['numTxSectors'],
dg['numBytesPerTxSector'],
dg['headingVessel_deg'],
dg['soundSpeedAtTxDepth_mPerSec'],
dg['txTransducerDepth_m'],
dg['z_waterLevelReRefPoint_m'],
dg['x_kmallToall_m'],
dg['y_kmallToall_m'],
dg['latLongInfo'],
dg['posSensorStatus'],
dg['attitudeSensorStatus'],
dg['padding3']))
# For some reason this fails when packed in a single struct call, so it is split into two steps.
format_to_pack_b = "<2d1f"
self.FID.write(struct.pack(format_to_pack_b,
dg['latitude_deg'],
dg['longitude_deg'],
dg['ellipsoidHeightReRefPoint_m']))
def write_EMdgmMRZ_txSectorInfo(self, dg, sector):
''' Write MRZ txSectorInfo for single index "sector".
write_EMdgmMRZ_txSectorInfo(FID, dg['txSectorInfo'], sector)
'''
format_to_pack = "4B7f2B1H"
self.FID.write(struct.pack(format_to_pack,
dg['txSectorNumb'][sector],
dg['txArrNumber'][sector],
dg['txSubArray'][sector],
dg['padding0'][sector],
dg['sectorTransmitDelay_sec'][sector],
dg['tiltAngleReTx_deg'][sector],
dg['txNominalSourceLevel_dB'][sector],
dg['txFocusRange_m'][sector],
dg['centreFreq_Hz'][sector],
dg['signalBandWidth_Hz'][sector],
dg['totalSignalLength_sec'][sector],
dg['pulseShading'][sector],
dg['signalWaveForm'][sector],
dg['padding1'][sector]))
def write_EMdgmMRZ_rxInfo(self, dg):
''' Write MRZ rxInfo datagram.
write_EMdgmMRZ_rxInfo(FID, dg['rxInfo'])
'''
format_to_pack = "4H4f4H"
self.FID.write(struct.pack(format_to_pack,
dg['numBytesRxInfo'],
dg['numSoundingsMaxMain'],
dg['numSoundingsValidMain'],
dg['numBytesPerSounding'],
dg['WCSampleRate'],
dg['seabedImageSampleRate'],
dg['BSnormal_dB'],
dg['BSoblique_dB'],
dg['extraDetectionAlarmFlag'],
dg['numExtraDetections'],
dg['numExtraDetectionClasses'],
dg['numBytesPerClass']))
def write_EMdgmMRZ_extraDetClassInfo(self, dg, detclass):
''' Write the MRZ sounding extra Detection Class information.
write_EMdgmMRZ_extraDetClassInfo(FID,dg['extraDetClassInfo'],detclass)
'''
format_to_pack = "1H1b1B"
self.FID.write(struct.pack(format_to_pack,
dg['numExtraDetInClass'][detclass],
dg['padding'][detclass],
dg['alarmFlag'][detclass]))
def write_EMdgmMRZ_sounding(self, dg, record):
''' Write MRZ soundings records.
write_EMdgmMRZ_sounding(FID, dg['sounding'], record)
'''
format_to_pack = "1H8B1H6f2H18f4H"
self.FID.write(struct.pack(format_to_pack,
dg['soundingIndex'][record],
dg['txSectorNumb'][record],
dg['detectionType'][record],
dg['detectionMethod'][record],
dg['rejectionInfo1'][record],
dg['rejectionInfo2'][record],
dg['postProcessingInfo'][record],
dg['detectionClass'][record],
dg['detectionConfidenceLevel'][record],
dg['padding'][record],
dg['rangeFactor'][record],
dg['qualityFactor'][record],
dg['detectionUncertaintyVer_m'][record],
dg['detectionUncertaintyHor_m'][record],
dg['detectionWindowLength_sec'][record],
dg['echoLength_sec'][record],
dg['WCBeamNumb'][record],
dg['WCrange_samples'][record],
dg['WCNomBeamAngleAcross_deg'][record],
dg['meanAbsCoeff_dbPerkm'][record],
dg['reflectivity1_dB'][record],
dg['reflectivity2_dB'][record],
dg['receiverSensitivityApplied_dB'][record],
dg['sourceLevelApplied_dB'][record],
dg['BScalibration_dB'][record],
dg['TVG_dB'][record],
dg['beamAngleReRx_deg'][record],
dg['beamAngleCorrection_deg'][record],
dg['twoWayTravelTime_sec'][record],
dg['twoWayTravelTimeCorrection_sec'][record],
dg['deltaLatitude_deg'][record],
dg['deltaLongitude_deg'][record],
dg['z_reRefPoint_m'][record],
dg['y_reRefPoint_m'][record],
dg['x_reRefPoint_m'][record],
dg['beamIncAngleAdj_deg'][record],
dg['realTimeCleanInfo'][record],
dg['SIstartRange_samples'][record],
dg['SIcentreSample'][record],
dg['SInumSamples'][record]))
def write_EMdgmMRZ_seabedImagery(self, dg, Nseabedimage_samples):
''' Write the MRZ seabedImagery datagram
write_EMdgmMRZ_seabedImagery(FID, dg, Nseabedimage_samples)
'''
format_to_pack = str(Nseabedimage_samples) + "h"
self.FID.write(struct.pack(format_to_pack,
*dg['SIsample_desidB']))
###############################################################
# Routines for writing and reading custom compressed packets
###############################################################
def compressSoundings(self, dg):
''' A method to compress the soundings table by column rather than by row.'''
record = len(dg['soundingIndex'])
format_to_pack = "1H8B1H6f2H18f4H"
buffer = struct.pack(str(record) + "H", *dg['soundingIndex'])
buffer += struct.pack(str(record) + "B", *dg['txSectorNumb'])
buffer += struct.pack(str(record) + "B", *dg['detectionType'])
buffer += struct.pack(str(record) + "B", *dg['detectionMethod'])
buffer += struct.pack(str(record) + "B", *dg['rejectionInfo1'])
buffer += struct.pack(str(record) + "B", *dg['rejectionInfo2'])
buffer += struct.pack(str(record) + "B", *dg['postProcessingInfo'])
buffer += struct.pack(str(record) + "B", *dg['detectionClass'])
buffer += struct.pack(str(record) + "B", *dg['detectionConfidenceLevel'])
buffer += struct.pack(str(record) + "H", *dg['padding'])
buffer += struct.pack(str(record) + "f", *dg['rangeFactor'])
buffer += struct.pack(str(record) + "f", *dg['qualityFactor'])
buffer += struct.pack(str(record) + "f", *dg['detectionUncertaintyVer_m'])
buffer += struct.pack(str(record) + "f", *dg['detectionUncertaintyHor_m'])
buffer += struct.pack(str(record) + "f", *dg['detectionWindowLength_sec'])
buffer += struct.pack(str(record) + "f", *dg['echoLength_sec'])
buffer += struct.pack(str(record) + "H", *dg['WCBeamNumb'])
buffer += struct.pack(str(record) + "H", *dg['WCrange_samples'])
buffer += struct.pack(str(record) + "f", *dg['WCNomBeamAngleAcross_deg'])
buffer += struct.pack(str(record) + "f", *dg['meanAbsCoeff_dbPerkm'])
buffer += struct.pack(str(record) + "f", *dg['reflectivity1_dB'])
buffer += struct.pack(str(record) + "f", *dg['reflectivity2_dB'])
buffer += struct.pack(str(record) + "f", *dg['receiverSensitivityApplied_dB'])
buffer += struct.pack(str(record) + "f", *dg['sourceLevelApplied_dB'])
buffer += struct.pack(str(record) + "f", *dg['BScalibration_dB'])
buffer += struct.pack(str(record) + "f", *dg['TVG_dB'])
buffer += struct.pack(str(record) + "f", *dg['beamAngleReRx_deg'])
buffer += struct.pack(str(record) + "f", *dg['beamAngleCorrection_deg'])
buffer += struct.pack(str(record) + "f", *dg['twoWayTravelTime_sec'])
buffer += struct.pack(str(record) + "f", *dg['twoWayTravelTimeCorrection_sec'])
buffer += struct.pack(str(record) + "f", *dg['deltaLatitude_deg'])
buffer += struct.pack(str(record) + "f", *dg['deltaLongitude_deg'])
buffer += struct.pack(str(record) + "f", *dg['z_reRefPoint_m'])
buffer += struct.pack(str(record) + "f", *dg['y_reRefPoint_m'])
buffer += struct.pack(str(record) + "f", *dg['x_reRefPoint_m'])
buffer += struct.pack(str(record) + "f", *dg['beamIncAngleAdj_deg'])
buffer += struct.pack(str(record) + "H", *dg['realTimeCleanInfo'])
buffer += struct.pack(str(record) + "H", *dg['SIstartRange_samples'])
buffer += struct.pack(str(record) + "H", *dg['SIcentreSample'])
buffer += struct.pack(str(record) + "H", *dg['SInumSamples'])
return bz2.compress(buffer)
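# Size-check sketch (editorial addition, hedged): comparing the bz2-compressed,
# column-ordered soundings block against the raw row-ordered struct size.
# Assumes `dg` is a parsed #MRZ datagram; struct and bz2 are imported at module
# level as elsewhere in this file.
#
#   nrec = len(dg['sounding']['soundingIndex'])
#   raw_bytes = struct.calcsize("1H8B1H6f2H18f4H") * nrec
#   packed = k.compressSoundings(dg['sounding'])
#   print("compressed %d -> %d bytes" % (raw_bytes, len(packed)))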
def encodeArrayIntoUintX(self, A, res):
''' Differential encoding of an array of values into a byte array
A: An array of values
res: Desired resolution. This determines whether the encoding is
in an 8-bit or 16-bit array. Details provided below.
returns: bytes buffer containing packed values and metadata to unpack it.
The data is differentially encoded, meaning that the difference
in sequential values is calculated, then the minimum differential value
is subtracted off the array before scaling each value by max_bits / (max-min).
max_bits is 255 for uint8 encoding and 65535 for uint16 encoding. To
determine the encoding, (max-min) / max_bits is compared to the desired
resolution to ensure the minimum increment falls below it. uint8 is checked
first, if it fails, uint16 is checked. If it also fails, uint32 is
used and no actual compression is achieved.
A buffer is created from the result containing everything needed to
decipher it. Specifically:
The first value of the original array as a 4-byte float
The minimum encoded value (or difference) as a 4-byte float.
The maximum encoded value (or difference) as a 4-byte float.
The number of encoded values (len(A)-1) as a 4-byte signed int,
negated when the values are differentially encoded (see below).
The number of bits used in the encoding (8, 16 or 32) as a uint8.
The array of scaled values cast to unsigned integers of that width.
'''
if isinstance(A, list):
A = np.array(A)
# There are two strategies taken here. Sometimes the
# data varies smoothly but over a large range, and it
# is more efficient to encode the data's sequential
# differences, since they are small in amplitude.
# But sometimes the data is very stochastic and the
# first range of differences are large relative to
# the maximum and minimum values in the data. For
# example consider the sequence [0 2 0]. The range
# of the values is 2, but the range of the first
# differences is 4 (+2 - -2). In this case, it is
# more efficient to encode the values themselves.
valuesToEncode = np.diff(A.flatten())
maxv = np.max(valuesToEncode)
minv = np.min(valuesToEncode)
maxA = np.max(A)
minA = np.min(A)
# print("maxvaluesToEncode:%f, minvaluesToEncode:%f" % (maxv,minv))
# print("maxA:%f, minA:%f" % (maxA,minA))
differentialEncode = True
if (maxA - minA) < (maxv - minv):
differentialEncode = False
maxv = maxA
minv = minA
valuesToEncode = A[1:]
# print("Encoding: %s" % differentialEncode)
if ((maxv - minv) / 255.0) < res:
bits = 8
elif ((maxv - minv) / 65535.0) < res:
bits = 16
else:
bits = 32
# print("CANNOT Maintain Resolution - Loss of Data!")
# print("max diff: %f, min diff: %f, res: %f" % (maxv, minv, res))
# bits = 16
# return None
# print(bits)
if maxv == minv:
# Value is constant.
scaleFactor = 1.0
else:
if bits == 8:
scaleFactor = 255.0 / (maxv - minv)
elif bits == 16:
scaleFactor = 65535.0 / (maxv - minv)
else:
scaleFactor = 4294967295.0 / (maxv - minv)
tmp = (((valuesToEncode - minv) * scaleFactor)).astype(int)
# This works around an apparent quirk in the struct module when packing numpy scalar types.
if isinstance(A[0], np.ndarray):
tmp2 = A[0].tolist()
else:
tmp2 = A[0]
if isinstance(tmp2, np.int64) or isinstance(tmp2, np.float64):
buffer = struct.pack('f', tmp2)
else:
buffer = struct.pack('f', tmp2[0])
# buffer = struct.pack('f',float(A[0][0]))
N = len(tmp)
buffer += struct.pack('f', minv)
buffer += struct.pack('f', maxv)
# Set a marker by recording the number of points
# to encode as a negative number to indicate that
# the fields have been differentially encoded.
if differentialEncode:
buffer += struct.pack('i', -N)
else:
buffer += struct.pack('i', N)
buffer += struct.pack('B', bits)
if bits == 8:
buffer += struct.pack(str(N) + 'B', *tmp)
if bits == 16:
buffer += struct.pack(str(N) + 'H', *tmp)
if bits == 32:
buffer += struct.pack(str(N) + 'I', *tmp)
return buffer
def decodeUintXintoArray(self, buffer):
''' Decodes differential-encoded data from X-bit unsigned integers into a float array.
See encodeArrayIntoUintX().
'''
fields = struct.unpack('fffiB', buffer[0:17])
A0 = fields[0]
minv = fields[1]
maxv = fields[2]
N = fields[3]
differentialDecode = False
if N < 0:
differentialDecode = True
N = -N
bits = fields[4]
if bits == 8:
dA = struct.unpack(str(N) + 'B', buffer[17:(17 + N)])
bytesDecoded = 17 + N
elif bits == 16:
dA = struct.unpack(str(N) + 'H', buffer[17:(17 + N * 2)])
bytesDecoded = 17 + (N * 2)
elif bits == 32:
dA = struct.unpack(str(N) + 'I', buffer[17:(17 + N * 4)])
bytesDecoded = 17 + (N * 4)
if differentialDecode:
if bits == 8:
orig = np.cumsum(
[A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 255.0) + minv)).tolist()
elif bits == 16:
orig = np.cumsum(
[A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 65535.0) + minv)).tolist()
else:
orig = np.cumsum(
[A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 4294967295.0) + minv)).tolist()
else:
if bits == 8:
orig = [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 255.0) + minv)
elif bits == 16:
orig = [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 65535.0) + minv)
else:
orig = [A0] + list((np.array([float(x) for x in dA]) * (maxv - minv) / 4294967295.0) + minv)
# print(A0)
# print(minv)
# print(maxv)
# print(N)
# print(bits)
return (orig, bytesDecoded)
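# Round-trip sketch (editorial addition, hedged): encode an array with
# encodeArrayIntoUintX() and decode it again, checking the number of bytes
# consumed and the worst-case error. Because differences are quantized and then
# cumulatively summed, the error can exceed `res`; the values below are
# illustrative only.
#
#   import numpy as np
#   vals = 100.0 + 0.01 * np.cumsum(np.random.randn(200))
#   buf = k.encodeArrayIntoUintX(vals, 0.001)
#   decoded, nbytes = k.decodeUintXintoArray(buf)
#   assert nbytes == len(buf)
#   print("max abs error:", np.max(np.abs(np.array(decoded) - vals)))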
def encodeAndCompressSoundings(self, dg):
''' A method to differential-encode and compress the soundings table.
Float values are differentially encoded; see encodeArrayIntoUintX() for the details.
Some attempt is made to minimize the impact of
non-float fields in the original datagram too.
A note about the "res" or resolution argument to
encodeArrayIntoUintX(): This field attempts to be
the maximum error one can expect between the original
value and the final decoded value after encoding.
But because it is the first differences of the values that
are actually encoded, errors accumulate in the
decoding process as the decoded differences are cumulatively
summed, and the resulting error can be larger than the
"res" value. Some experimentation is required to ensure
sufficient bits are used to keep the error within the desired bound.
'''
record = len(dg['soundingIndex'])
buffer = struct.pack(str(record) + "H", *dg['soundingIndex'])
## The following optimization has almost no effect
## because of the compression applied to the
## sounding buffer:
# Valid values for txSectorNumber are 0-7 (probably)
# Valid values for detectionType are 0-2
# Valid values for detectionMethod are 0-15.
# But detectionMethod > 2 have been reserved for
# future use as long as any one can remember. Under
# the assumption that Kongsberg won't record more
# than 9 detection methods or have more than 9
# transmit sectors, these values can be packed
# into a single 8-bit value.
tmp = (np.array(dg['detectionType']) * 100. +
np.array(dg['detectionMethod']) * 10. +
np.array(dg['txSectorNumb'])).astype(int)
buffer += struct.pack(str(record) + "B", *tmp)
# There is no unambiguous way to tell when decoding
# whether they were packed or not. For example,
# if there were just one tx sector, and only normal type
# detections of using amplitude method, the values would
# all be 1, which is a valid tx sector value. So I'll leave
# these commented out.
# else:
# buffer += struct.pack(str(record)+"B", *dg['txSectorNumb'])
# buffer += struct.pack(str(record)+"B", *dg['detectionType'])
# buffer += struct.pack(str(record)+"B", *dg['detectionMethod'])
buffer += struct.pack(str(record) + "B", *dg['rejectionInfo1'])
buffer += struct.pack(str(record) + "B", *dg['rejectionInfo2'])
buffer += struct.pack(str(record) + "B", *dg['postProcessingInfo'])
buffer += struct.pack(str(record) + "B", *dg['detectionClass'])
buffer += struct.pack(str(record) + "B", *dg['detectionConfidenceLevel'])
# No point in carrying along the padding field. It's for byte alignment
# but we've already reorganized the data. so we can omit it
# and recreate it on the other side.
buffer += self.encodeArrayIntoUintX(dg['rangeFactor'], 1)
buffer += self.encodeArrayIntoUintX(dg['qualityFactor'], .01)
buffer += self.encodeArrayIntoUintX(dg['detectionUncertaintyVer_m'], .01)
buffer += self.encodeArrayIntoUintX(dg['detectionUncertaintyHor_m'], .1)
buffer += self.encodeArrayIntoUintX(dg['detectionWindowLength_sec'], .001)
buffer += self.encodeArrayIntoUintX(dg['echoLength_sec'], .001)
buffer += struct.pack(str(record) + "H", *dg['WCBeamNumb'])
buffer += struct.pack(str(record) + "H", *dg['WCrange_samples'])
buffer += self.encodeArrayIntoUintX(dg['WCNomBeamAngleAcross_deg'], .001)
# meanAbsCoeff_dbPerkm is a single value per transmit sector. No point in
# encoding them all. np.unique() below returns the index of the first
# occurrence of each sector; these are used to capture one dB/km value per sector.
_, idx = np.unique(dg['txSectorNumb'], return_index=True)
# Encode as unsigned shorts in units of 0.01 dB.
vals = np.round(np.array(dg['meanAbsCoeff_dbPerkm'])[np.sort(idx)] * 100).astype(int)
buffer += struct.pack(str(len(idx)) + "H", *vals)
# Reflectivity1_dB values get -100 when the detect is invalid
# and reflectivity2_dB gets any of several values that are
# also nonsensical. Because they are never near the mean of
# the valid data, the differential encoding scheme used
# here becomes very inefficient. So we will set them to
# the mode of the data to optimize the encoding and set them
# back to their original values on decoding.
# The values are rounded to 2 decimal places first because
# they are floats and the chances that any two floats are
# the same is quite small.
dg['reflectivity1_dB'] = np.round(dg['reflectivity1_dB'], decimals=2)
# This wizardry calculates the mode (most frequent value)
# of the reflectivity values associated with valid detects.
reflectivity_mode = stats.mode([y for x, y in
zip(dg['detectionMethod'], dg['reflectivity1_dB'])
if x != 0])[0][0]
# Replace all the non-detects with the mode.
dg['reflectivity1_dB'] = [y if x != 0 else reflectivity_mode
for x, y in
zip(dg['detectionMethod'], dg['reflectivity1_dB'])]
# Do the same with reflectivity2.
dg['reflectivity2_dB'] = np.round(dg['reflectivity2_dB'], decimals=2)
reflectivity_mode = stats.mode([y for x, y in
zip(dg['detectionMethod'], dg['reflectivity2_dB'])
if x != 0])[0][0]
# Replace all the non-detects with the mode.
dg['reflectivity2_dB'] = [y if x != 0 else reflectivity_mode
for x, y in
zip(dg['detectionMethod'], dg['reflectivity2_dB'])]
buffer += self.encodeArrayIntoUintX(dg['reflectivity1_dB'], .1)
buffer += self.encodeArrayIntoUintX(dg['reflectivity2_dB'], .001)
buffer += self.encodeArrayIntoUintX(dg['receiverSensitivityApplied_dB'], .001)
buffer += self.encodeArrayIntoUintX(dg['sourceLevelApplied_dB'], .001)
buffer += self.encodeArrayIntoUintX(dg['BScalibration_dB'], .001)
buffer += self.encodeArrayIntoUintX(dg['TVG_dB'], .001)
buffer += self.encodeArrayIntoUintX(dg['beamAngleReRx_deg'], .001)
buffer += self.encodeArrayIntoUintX(dg['beamAngleCorrection_deg'], .001)
buffer += self.encodeArrayIntoUintX(dg['twoWayTravelTime_sec'], .000001)
buffer += self.encodeArrayIntoUintX(dg['twoWayTravelTimeCorrection_sec'], .0000001)
buffer += self.encodeArrayIntoUintX(dg['deltaLatitude_deg'], .0000001)
buffer += self.encodeArrayIntoUintX(dg['deltaLongitude_deg'], .0000001)
buffer += self.encodeArrayIntoUintX(dg['z_reRefPoint_m'], .001)
buffer += self.encodeArrayIntoUintX(dg['y_reRefPoint_m'], .001)
buffer += self.encodeArrayIntoUintX(dg['x_reRefPoint_m'], .001)
buffer += self.encodeArrayIntoUintX(dg['beamIncAngleAdj_deg'], .001)
# realTimeCleanInfo is for future use. So we can omit it for now.
# buffer += struct.pack(str(record)+"H", *dg['realTimeCleanInfo'])
buffer += struct.pack(str(record) + "H", *dg['SIstartRange_samples'])
buffer += struct.pack(str(record) + "H", *dg['SIcentreSample'])
buffer += struct.pack(str(record) + "H", *dg['SInumSamples'])
return bz2.compress(buffer)
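# Round-trip sketch (editorial addition, hedged): compress and re-expand a
# soundings table. Note that encodeAndCompressSoundings() modifies the
# reflectivity fields of `dg['sounding']` in place, and that 'padding' and
# 'realTimeCleanInfo' come back as zeros; field names are taken from this class.
#
#   nrec = len(dg['sounding']['soundingIndex'])
#   buf = k.encodeAndCompressSoundings(dg['sounding'])
#   back = k.expandAndDecodeSoundings(buf, nrec)
#   print(back['z_reRefPoint_m'][:5])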
def expandAndDecodeSoundings(self, buffer, records):
''' When the soundings datagram is differential-encoded and compressed, this method reverses it on reading.
buffer: bytes object containing the compressed data.
records: Number of soundings encoded in the block.
returns: dg['sounding'] containing dictionary of lists of sounding record fields.
'''
buffer = bz2.decompress(buffer)
dg = {}
ptr = 0
dg['soundingIndex'] = struct.unpack(str(records) + "H", buffer[0:(records * 2)])
ptr += (records * 2)
tmp = np.array(struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)]))
ptr += records
dg['detectionType'] = np.round(tmp / 100.).astype(int)
dg['detectionMethod'] = np.round((tmp - dg['detectionType'] * 100) / 10.).astype(int)
dg['txSectorNumb'] = np.round((tmp - dg['detectionType'] * 100 - dg['detectionMethod'] * 10)).astype(int)
dg['detectionType'] = dg['detectionType'].tolist()
dg['detectionMethod'] = dg['detectionMethod'].tolist()
dg['txSectorNumb'] = dg['txSectorNumb'].tolist()
# dg['txSectorNumb'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records )])
# ptr += records
# dg['detectionType'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)])
# ptr += records
# dg['detectionMethod'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)])
# ptr += records
dg['rejectionInfo1'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)])
ptr += records
dg['rejectionInfo2'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)])
ptr += records
dg['postProcessingInfo'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)])
ptr += records
dg['detectionClass'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)])
ptr += records
dg['detectionConfidenceLevel'] = struct.unpack(str(records) + "B", buffer[ptr:(ptr + records)])
ptr += records
# The padding data is not encoded, so we just generate 0's for it here.
dg['padding'] = list(np.zeros(shape=len(dg['soundingIndex'])).astype(int))
dg['rangeFactor'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['qualityFactor'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['detectionUncertaintyVer_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['detectionUncertaintyHor_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['detectionWindowLength_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['echoLength_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['WCBeamNumb'] = struct.unpack(str(records) + "H", buffer[ptr:(ptr + (records * 2))])
ptr += (records * 2)
dg['WCrange_samples'] = struct.unpack(str(records) + "H", buffer[ptr:(ptr + (records * 2))])
ptr += (records * 2)
dg['WCNomBeamAngleAcross_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
# meanAbsCoeff_dbPerkm is a single value for each transmit sector.
# We encoded only one value per sector, as unsigned shorts in units of 0.01 dB.
# So we extract these.
Nsectors = len(np.unique(dg['txSectorNumb']))
values = np.array(struct.unpack(str(Nsectors) + "H", buffer[ptr:(ptr + (Nsectors * 2))])) / 100.0
ptr += (Nsectors * 2)
# Then assign them to each sector. (txSectorNumb is a Python list at this
# point, so convert it to an array for the boolean mask to work.)
txsec = np.array(dg['txSectorNumb'])
tmp = np.zeros(shape=len(dg['soundingIndex']))
for sectoridx in np.unique(txsec):
tmp[txsec == sectoridx] = values[sectoridx]
dg['meanAbsCoeff_dbPerkm'] = tmp.tolist()
dg['reflectivity1_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
# Reset values for no-detect values that were modified to
# improve compression.
dg['reflectivity1_dB'] = [-100. if x == 0 else y
for x, y in
zip(dg['detectionMethod'], dg['reflectivity1_dB'])]
dg['reflectivity2_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
# Reset values for no-detect values that were modified to
# improve compression. Note this makes a subtle if inconsequential
# change to the file, as the values in reflectivity2_dB for
# failed detections are not -100. They are not uniform in value
# and so cannot be replaced exactly here. But since these
# are for non-detects it should not matter to anyone. (I hope)
dg['reflectivity2_dB'] = [-100. if x == 0 else y
for x, y in
zip(dg['detectionMethod'], dg['reflectivity2_dB'])]
dg['receiverSensitivityApplied_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['sourceLevelApplied_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['BScalibration_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['TVG_dB'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['beamAngleReRx_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['beamAngleCorrection_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['twoWayTravelTime_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['twoWayTravelTimeCorrection_sec'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['deltaLatitude_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['deltaLongitude_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['z_reRefPoint_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['y_reRefPoint_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['x_reRefPoint_m'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
dg['beamIncAngleAdj_deg'], bytesDecoded = self.decodeUintXintoArray(buffer[ptr:])
ptr += bytesDecoded
# dg['realTimeCleanInfo'] = struct.unpack(str(records) + "H", buffer[ptr:(ptr + (records * 2))])
# ptr += (records * 2)
dg['realTimeCleanInfo'] = list(np.zeros(shape=len(dg['soundingIndex'])).astype(int))
dg['SIstartRange_samples'] = struct.unpack(str(records) + "H", buffer[ptr:(ptr + (records * 2))])
ptr += (records * 2)
dg['SIcentreSample'] = struct.unpack(str(records) + "H", buffer[ptr:(ptr + (records * 2))])
ptr += (records * 2)
dg['SInumSamples'] = struct.unpack(str(records) + "H", buffer[ptr:(ptr + (records * 2))])
ptr += (records * 2)
return dg
def write_EncodedCompressedSoundings(self, buffer):
''' Write the encoded, compressed soundings buffer, preceded by its length.
write_EncodedCompressedSoundings(buffer)
'''
self.FID.write(struct.pack('I', len(buffer)))
self.FID.write(buffer)
return
def encodeAndCompressImagery(self, dg):
''' A method to encode and compress the imagery data.'''
buffer = self.encodeArrayIntoUintX(np.array(dg['SIsample_desidB']), .1)
return bz2.compress(buffer)
def decodeAndDecompresssImagery(self, buffer, Nseabedimage_samples):
format_to_unpack = str(Nseabedimage_samples) + "h"
return self.decodeUintXintoArray(bz2.decompress(buffer))
def write_EncodedCompressedImagery(self, buffer):
''' A method to write the encoded compressed imagery'''
self.FID.write(struct.pack("I", len(buffer)))
self.FID.write(buffer)
def write_EMdgmCZ0(self, dg):
''' A method to write an MRZ datagram back to disk as a #CZ0 datagram, with the soundings and imagery data encoded and compressed.'''
# First we need to see how much space the imagery data will take.
# And set the number of imagery samples per sounding field to zero.
Nseabedimage_samples = 0
for record in range(dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain']):
Nseabedimage_samples += dg['sounding']['SInumSamples'][record]
# dg['sounding']['SInumSamples'][record] = 0
imageryBytes = Nseabedimage_samples * 2
# Now we need to reset the total packet size.
# dg['header']['numBytesDgm'] -= imageryBytes
# And we need to create a new MRZ packet type to hold compressed data.
dg['header']['dgmType'] = b'#CZ0'
imageryBuffer = self.encodeAndCompressImagery(dg)
soundingsBuffer = self.encodeAndCompressSoundings(dg['sounding'])
# Reduce the datagram size by the difference in size of the
# original and compressed sounding data, including the size
# of the soundings buffer, whose length is written as a 4-byte int.
Nsoundings = (dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain'])
dg['header']['numBytesDgm'] -= (Nsoundings * 120
- (len(soundingsBuffer) + 4))
# Reduce the datagram size by the difference in size of the
# original and encoded, compressed imagery data.
dg['header']['numBytesDgm'] -= (imageryBytes - (len(imageryBuffer) + 4))
# Now write the packet, with the soundings and imagery
# written as encoded, compressed blocks.
self.write_EMdgmHeader(dg['header'])
self.write_EMdgmMpartition(dg['partition'])
self.write_EMdgmMbody(dg['cmnPart'])
self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])
for sector in range(dg['pingInfo']['numTxSectors']):
self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)
self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])
for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):
self.write_EMdgmMRZ_extraDetClassInfo(dg['extraDetClassInfo'], detclass)
self.write_EncodedCompressedSoundings(soundingsBuffer)
self.write_EncodedCompressedImagery(imageryBuffer)
self.FID.write(struct.pack("I", dg['header']['numBytesDgm']))
def write_EMdgmCZ1(self, dg):
''' A method to write a new datagram compressing the soundings and
omitting the imagery data.'''
# First we need to see how much space the imagery data will take.
# And set the number of imagery samples per sounding field to zero.
Nseabedimage_samples = 0
for record in range(dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain']):
Nseabedimage_samples += dg['sounding']['SInumSamples'][record]
dg['sounding']['SInumSamples'][record] = 0
imageryBytes = Nseabedimage_samples * 2
# Now we need to reset the total packet size.
dg['header']['numBytesDgm'] -= imageryBytes
# And we need to create a new MRZ packet type to hold compressed data.
dg['header']['dgmType'] = b'#CZ1'
soundingsBuffer = self.encodeAndCompressSoundings(dg['sounding'])
# Reduce the datagram size by the difference in size of the
# original and compressed sounding data, including the size
# of the soundings buffer, whose length is also written as a 4-byte int.
Nsoundings = (dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain'])
dg['header']['numBytesDgm'] -= (Nsoundings * 120
- (len(soundingsBuffer) + 4))
# Now write the packet, just leave out the imagery
# data and set Nsamples to 0.
self.write_EMdgmHeader(dg['header'])
self.write_EMdgmMpartition(dg['partition'])
self.write_EMdgmMbody(dg['cmnPart'])
self.write_EMdgmMRZ_pingInfo(dg['pingInfo'])
for sector in range(dg['pingInfo']['numTxSectors']):
self.write_EMdgmMRZ_txSectorInfo(dg['txSectorInfo'], sector)
self.write_EMdgmMRZ_rxInfo(dg['rxInfo'])
for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):
self.write_EMdgmMRZ_extraDetClassInfo(dg['extraDetClassInfo'], detclass)
self.write_EncodedCompressedSoundings(soundingsBuffer)
# write_EncodedCompressedImagery(FID,imageryBuffer)
# Don't write the imagery data.
# write_EMdgmMRZ_seabedImagery(FID, dg, Nseabedimage_samples)
self.FID.write(struct.pack("I", dg['header']['numBytesDgm']))
def read_EMdgmCZ0(self):
"""
The #CZ0 datagram is a custom datagram in which the sounding data
and imagery data are encoded and compressed.
The format of this datagram will evolve as better methods are devised.
Therefore, files compressed in this way should only be used in a
temporary way for passing data over telemetry links. Files left
compressed are in danger of being unreadable in future releases.
"""
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['partition'] = self.read_EMdgmMpartition()
dg['cmnPart'] = self.read_EMdgmMbody()
dg['pingInfo'] = self.read_EMdgmMRZ_pingInfo()
# Read TX sector info for each sector
txSectorInfo = []
for sector in range(dg['pingInfo']['numTxSectors']):
txSectorInfo.append(self.read_EMdgmMRZ_txSectorInfo())
dg['txSectorInfo'] = self.listofdicts2dictoflists(txSectorInfo)
# Read rxInfo
dg['rxInfo'] = self.read_EMdgmMRZ_rxInfo()
# Read extra detect metadata if they exist.
extraDetClassInfo = []
for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):
extraDetClassInfo.append(self.read_EMdgmMRZ_extraDetClassInfo())
dg['extraDetClassInfo'] = self.listofdicts2dictoflists(extraDetClassInfo)
# Read the sounding data.
Nseabedimage_samples = 0
soundingsBuffer = self.read_EncodedCompressedSoundingsBlock()
Nsoundings = (dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain'])
dg['sounding'] = self.expandAndDecodeSoundings(soundingsBuffer,
Nsoundings)
for record in range(Nsoundings):
Nseabedimage_samples += dg['sounding']['SInumSamples'][record]
# Read the seabed imagery.
# Seabed image sample amplitude, in 0.1 dB. Actual number of
# seabed image samples (SIsample_desidB) to be found
# by summing parameter SInumSamples in struct EMdgmMRZ_sounding_def
# for all beams. Seabed image data are raw beam sample data
# taken from the RX beams. The data samples are selected
# based on the bottom detection ranges. First sample for
# each beam is the one with the lowest range. The centre
# sample from each beam is georeferenced (x, y, z data from
# the detections). The BS corrections applied at the centre
# sample are the same as used for reflectivity2_dB
# (struct EMdgmMRZ_sounding_def).
imageryBuffer = self.read_EncodedCompressedImageryBlock()
dg['SIsample_desidB'], bytesDecoded = self.decodeAndDecompresssImagery(imageryBuffer,
Nseabedimage_samples)
dg['SIsample_desidB'] = np.array(dg['SIsample_desidB'], dtype=int)
# Increase the reported size of the packet by the increase
# in the size of the decoded soundings block. There are 120
# bytes per sounding. And the size of the soundings buffer
# is also recorded, as a 4-byte int.
dg['header']['numBytesDgm'] += (Nsoundings * 120 -
(len(soundingsBuffer) + 4))
# Same for compressed imagery.
dg['header']['numBytesDgm'] += (Nseabedimage_samples * 2 -
(len(imageryBuffer) + 4))
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
def read_EMdgmCZ1(self):
"""
The #CZ1 datagram is a custom datagram in which the sounding data
are encoded and compressed and imagery is omitted.
The format of this datagram will evolve as better methods are devised.
Therefore, files compressed in this way should only be used in a
temporary way for passing data over telemetry links. Files left
compressed are in danger of being unreadable in future releases.
"""
start = self.FID.tell()
dg = {}
dg['header'] = self.read_EMdgmHeader()
dg['partition'] = self.read_EMdgmMpartition()
dg['cmnPart'] = self.read_EMdgmMbody()
dg['pingInfo'] = self.read_EMdgmMRZ_pingInfo()
# Read TX sector info for each sector
txSectorInfo = []
for sector in range(dg['pingInfo']['numTxSectors']):
txSectorInfo.append(self.read_EMdgmMRZ_txSectorInfo())
dg['txSectorInfo'] = self.listofdicts2dictoflists(txSectorInfo)
# Read rxInfo
dg['rxInfo'] = self.read_EMdgmMRZ_rxInfo()
# Read extra detect metadata if they exist.
extraDetClassInfo = []
for detclass in range(dg['rxInfo']['numExtraDetectionClasses']):
extraDetClassInfo.append(self.read_EMdgmMRZ_extraDetClassInfo())
dg['extraDetClassInfo'] = self.listofdicts2dictoflists(extraDetClassInfo)
# Read the sounding data.
Nseabedimage_samples = 0
soundingsBuffer = self.read_EncodedCompressedSoundingsBlock()
Nsoundings = (dg['rxInfo']['numExtraDetections'] +
dg['rxInfo']['numSoundingsMaxMain'])
dg['sounding'] = self.expandAndDecodeSoundings(soundingsBuffer, Nsoundings)
# Increase the reported size of the packet by the increase
# in the size of the decoded soundings block. There are 120
# bytes per sounding. And the size of the soundings buffer
# is also recorded, as a 4-byte int.
dg['header']['numBytesDgm'] += (Nsoundings * 120 -
(len(soundingsBuffer) + 4))
# Skip the imagery data...
# Seek to end of the packet.
self.FID.seek(start + dg['header']['numBytesDgm'], 0)
return dg
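# Conversion sketch (editorial addition, hedged): re-writing a file with #MRZ
# records converted to compressed #CZ1 records, passing all other datagrams
# through unchanged. Assumes read_EMdgmMRZ() exists earlier in this class and
# that the constructor accepts a file name; file names are illustrative.
#
#   src = kmall('in.kmall');  src.index_file();  src.OpenFiletoRead()
#   dst = kmall('out_cz1.kmall'); dst.OpenFiletoWrite()
#   for i, (offset, msgtype) in enumerate(zip(src.msgoffset, src.msgtype)):
#       src.FID.seek(offset, 0)
#       if msgtype == "b'#MRZ'":
#           dst.write_EMdgmCZ1(src.read_EMdgmMRZ())
#       else:
#           dst.FID.write(src.FID.read(int(src.msgsize[i])))
#   dst.closeFile()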
def read_EncodedCompressedSoundingsBlock(self):
''' Read the compressed soundings block'''
bytestoread = struct.unpack('I', self.FID.read(4))
buffer = self.FID.read(bytestoread[0])
return buffer
def read_EncodedCompressedImageryBlock(self):
''' Read the compressed imagery block.'''
bytestoread = struct.unpack('I', self.FID.read(4))
buffer = self.FID.read(bytestoread[0])
return buffer
###########################################################
# Utilities
###########################################################
def OpenFiletoRead(self, inputfilename=None):
""" Open a KMALL data file for reading."""
if self.filename is None:
if inputfilename is None:
print("No file name specified")
sys.exit(1)
else:
filetoopen = inputfilename
else:
filetoopen = self.filename
if self.verbose >= 1:
print("Opening: %s to read" % filetoopen)
self.FID = open(filetoopen, "rb")
def OpenFiletoWrite(self, inputfilename=None):
""" Open a KMALL data file for reading."""
if self.filename is None:
if inputfilename is None:
print("No file name specified")
sys.exit(1)
else:
filetoopen = inputfilename
else:
filetoopen = self.filename
if self.verbose >= 1:
print("Opening: %s to write" % filetoopen)
self.FID = open(filetoopen, "wb")
def closeFile(self):
""" Close a file."""
if self.FID is not None:
self.FID.close()
def print_datagram(self, dg):
""" A utility function to print the fields of a parsed datagram. """
print("\n")
for k, v in dg.items():
print("%s:\t\t\t%s\n" % (k, str(v)))
def index_file(self):
""" Index a KMALL file - message type, time, size, byte offset. """
if self.FID is None:
self.OpenFiletoRead()
else:
self.closeFile() # forces flushing.
self.OpenFiletoRead()
# Get size of the file.
self.FID.seek(0, 2)
self.file_size = self.FID.tell()
self.FID.seek(0, 0)
if (self.verbose == 1):
print("Filesize: %d" % self.file_size)
self.msgoffset = []
self.msgsize = []
self.msgtime = []
self.msgtype = []
self.pktcnt = 0
while self.FID.tell() < self.file_size:
try:
# Get the byte offset.
self.msgoffset.append(self.FID.tell())
# Read the first four bytes to get the datagram size.
msgsize = struct.unpack("I", self.FID.read(4))
self.msgsize.append(msgsize[0])
# Read the datagram.
msg_buffer = self.FID.read(int(self.msgsize[self.pktcnt]) - 4)
except:
print("Error indexing file: %s" % self.filename)
self.msgoffset = self.msgoffset[:-1]
self.msgsize = self.msgsize[:-1]
continue
# Interpret the header.
header_without_length = struct.Struct('ccccBBHII')
(dgm_type0, dgm_type1, dgm_type2, dgm_type3, dgm_version,
sysid, emid,
sec,
nsec) = header_without_length.unpack_from(msg_buffer, 0)
dgm_type = dgm_type0 + dgm_type1 + dgm_type2 + dgm_type3
self.msgtype.append(str(dgm_type))
# Decode time
# osec = sec
# osec *= 1E9
# osec += nsec
# lisec = nanosec
# lisec /= 1E6
# Capture the datagram header timestamp.
self.msgtime.append(sec + nsec / 1.0E9)
if self.verbose:
print("MSG_TYPE: %s,\tOFFSET:%0.0f,\tSIZE: %0.0f,\tTIME: %0.3f" %
(dgm_type,
self.msgoffset[self.pktcnt],
self.msgsize[self.pktcnt],
self.msgtime[self.pktcnt]))
self.pktcnt += 1
self.msgoffset = np.array(self.msgoffset)
self.msgsize = np.array(self.msgsize)
self.msgtime = np.array(self.msgtime)
self.Index = pd.DataFrame({'Time': self.msgtime,
'ByteOffset': self.msgoffset,
'MessageSize': self.msgsize,
'MessageType': self.msgtype})
self.Index.set_index('Time', inplace=True)
self.Index['MessageType'] = self.Index.MessageType.astype('category')
if self.verbose >= 2:
print(self.Index)
def extract_nav(self):
''' Extract navigation data.
        Currently this only works when the data has been interpreted into the
        KMbinary record.'''
self.extract_attitude()
def extract_attitude(self):
''' Extract all raw attitude data from data file into self.att
FIX: This method needs to be much more robust. It currently only
handles our situation in which we are providing POS/MV Group 102
messages, and these, it appears, are being interpreted into the
KMbinary datagram. But it does not handle 1) multiple navigation
inputs, 2) multiple navigation input types, 3) there are no checks to
see that the data is valid. etc.
'''
if self.Index is None:
self.index_file()
if self.FID is None:
self.OpenFiletoRead()
# Get offsets for 'SKM' attitude datagrams.
SKMOffsets = [x for x, y in zip(self.msgoffset, self.msgtype)
if y == "b'#SKM'"]
attitudeDatagrams = list()
for offset in SKMOffsets:
self.FID.seek(offset, 0)
dg = self.read_EMdgmSKM()
attitudeDatagrams.append(dg['sample']['KMdefault'])
# Convert list of dictionaries to dictionary of lists.
self.att = self.listofdicts2dictoflists(attitudeDatagrams)
self.FID.seek(0, 0)
return
def listofdicts2dictoflists(self, listofdicts):
""" A utility to convert a list of dicts to a dict of lists."""
# dg = {}
#
# # This is done in two steps, handling both dictionary items that are
# # lists and scalars separately. As long as no item combines both lists
# # and scalars the method works.
# #
# # There is some mechanism to handle this in a single list
# # comprehension statement, checking for types on the fly, but I cannot
# # find any syntax that returns the proper result.
# if len(listofdicts) == 0:
# return None
#
# for k, v in listofdicts[0].items():
# dg[k] = [item for dictitem in listofdicts
# if isinstance(dictitem[k], list)
# for item in dictitem[k]]
# scalartmp = [dictitem[k] for dictitem in listofdicts
# if not isinstance(dictitem[k], list)]
# if len(dg[k]) == 0:
# dg[k] = scalartmp
#
# return dg
if listofdicts:
needs_flattening = [k for (k,v) in listofdicts[0].items() if isinstance(v, list)]
d_of_l = {k: [dic[k] for dic in listofdicts] for k in listofdicts[0]}
if needs_flattening:
# print('flattening {}'.format(needs_flattening))
for nf in needs_flattening:
d_of_l[nf] = [item for sublist in d_of_l[nf] for item in sublist]
return d_of_l
else:
return None
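    # Illustrative example (not part of the original source) of the
    # transformation performed by listofdicts2dictoflists, assuming a
    # hypothetical pair of input records:
    #   [{'t': 1, 'beams': [10, 11]}, {'t': 2, 'beams': [12]}]
    # becomes
    #   {'t': [1, 2], 'beams': [10, 11, 12]}
    # i.e. scalar fields are collected into lists and list-valued fields are
    # flattened across records.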
def extract_xyz(self):
pass
def check_ping_count(self):
""" A method to check to see that all required MRZ datagrams exist """
if self.Index is None:
self.index_file()
if self.FID is None:
self.OpenFiletoRead()
# M = map( lambda x: x=="b'#MRZ'", self.msgtype)
# MRZOffsets = self.msgoffset[list(M)]
# Get the file byte count offset for each MRZ datagram.
MRZOffsets = [x for x, y in zip(self.msgoffset, self.msgtype) if y == "b'#MRZ'"]
self.pingcnt = []
self.rxFans = []
self.rxFanIndex = []
# Skip through the file capturing the ping count information:
# The ping count values
# The number of receive fans specified for each ping
# The receive fan index for each received MRZ record.
#
        # Notes: A ping can span more than 1 MRZ datagram. This happens when
# 1 MRZ datagram exists for each receive "fan"
# In dual swath mode, at least two receive fans are generated.
# The ping counter will not change for the second MRZ packet.
for offset in MRZOffsets:
self.FID.seek(offset, 0)
dg = self.read_EMdgmHeader()
dg = self.read_EMdgmMpartition()
dg = self.read_EMdgmMbody()
self.pingcnt.append(dg['pingCnt'])
self.rxFans.append(dg['rxFansPerPing'])
self.rxFanIndex.append(dg['rxFanIndex'])
self.pingcnt = np.array(self.pingcnt)
self.rxFans = np.array(self.rxFans)
self.rxFanIndex = np.array(self.rxFanIndex)
# Things to check:
# Is the total sum of rxFans equal to the number of MRZ packets?
# Are the unique ping counter values sequential?
# The number of multiple ping counter values has to be larger than the
# number of rx fans and packets.
# Sorting by ping count and then calculating the difference in
# successive values allows one to check to see that at least one
# packet exists for each ping (which may have more than one).
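        # Illustrative example (not from the original source): ping counters
        # [5, 5, 6, 8] give sorted unique values [5, 6, 8], successive
        # differences [1, 2], and therefore one missing ping
        # (the gap of 2 contributes 2 - 1 = 1 to NpingsMissed below).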
if len(self.pingcnt) > 0:
PingCounterRange = max(self.pingcnt) - min(self.pingcnt)
dpu = np.diff(np.sort(np.unique(self.pingcnt)))
NpingsMissed = sum((dpu[dpu > 1] - 1))
NpingsSeen = len(np.unique(self.pingcnt))
# MaxDiscontinuity = max(abs(dpu))
if self.verbose > 1:
print("File: %s\n\tPing Counter Range: %d:%d N=%d" %
(self.filename, min(self.pingcnt), max(self.pingcnt), PingCounterRange))
print("\tNumbr of pings missing: %d of %d" % (NpingsMissed, NpingsMissed + NpingsSeen))
else:
PingCounterRange = 0
NpingsSeen = 0
NpingsMissed = 0
if self.verbose > 1:
print("No pings in file.")
# print("\tNumbr of pings seen: %d" % NpingsSeen)
# print('File: %s\n\tNumber of missed full pings: %d of %d' %
# (self.filename, PingCounterRange - NpingsSeen, PingCounterRange ))
# dp = np.diff(self.pingcnt)
# FirstPingInSeries = np.array([x==0 for x in dp])
HaveAllMRZ = True
MissingMRZCount = 0
# Go through every "ping" these may span multiple packets...
for idx in range(len(self.pingcnt)):
# Side note: This method is going to produce a warning multiple
# times for each ping series that fails the test. Sloppy.
# Capture how many rx fans there should be for this ping.
N_RxFansforSeries = self.rxFans[idx]
# Get the rxFan indices associated with this ping record.
PingsInThisSeriesMask = np.array([x == self.pingcnt[idx] for x in self.pingcnt])
rxFanIndicesforThisSeries = self.rxFanIndex[PingsInThisSeriesMask]
# Check to see that number of records equals the total.
if len(rxFanIndicesforThisSeries) != N_RxFansforSeries:
if HaveAllMRZ:
if self.verbose > 1:
print("\tDetected missing MRZ records!")
if self.verbose > 1:
print('\tNot enough rxFan (MRZ) records for ping: %d: Indices %s of [0:%d] found' %
(self.pingcnt[idx],
",".join(str(x) for x in rxFanIndicesforThisSeries),
N_RxFansforSeries - 1))
HaveAllMRZ = False
MissingMRZCount = MissingMRZCount + 1
# Shamelessly creating a data frame just to get a pretty table.
#res = pd.DataFrame([["File", "NpingsTotal", "Pings Missed", "MissingMRZRecords"],
# [self.filename, NpingsMissed + NpingsSeen, NpingsMissed, MissingMRZCount]])
#print(res.to_string(index=False, header=False))
if HaveAllMRZ:
if self.verbose > 1:
print("\tNumber of MRZ records equals number required for each ping.")
return (self.filename, NpingsMissed + NpingsSeen, NpingsMissed, MissingMRZCount)
def report_packet_types(self):
""" A method to report datagram packet count and size in a file. """
if self.Index is None:
self.index_file()
# Get a list of packet types seen.
types = list(set(self.msgtype))
pktcount = {}
pktSize = {}
pktMinSize = {}
pktMaxSize = {}
# Calculate some stats.
for type in types:
M = np.array(list(map(lambda x: x == type, self.msgtype)))
pktcount[type] = sum(M)
pktSize[type] = sum(self.msgsize[M])
pktMinSize[type] = min(self.msgsize[M])
pktMaxSize[type] = max(self.msgsize[M])
# print(self.Index.groupby("MessageType").describe().reset_index())
msg_type_group = self.Index.groupby("MessageType")
summary = {"Count": msg_type_group["MessageType"].count(),
"Size:": msg_type_group["MessageSize"].sum(),
"Min Size": msg_type_group["MessageSize"].min(),
"Max Size": msg_type_group["MessageSize"].max()}
IndexSummary = pd.DataFrame(summary)
print(IndexSummary)
def _initialize_sequential_read(self, start_ptr, end_ptr):
"""
sequential_read_records gives you the ability to just read a chunk of a file, starting at start_ptr, ending
at end_ptr. This method sets up this functionality by figuring out the length of the chunk and the max length
of the file.
"""
self.eof = False
if end_ptr:
filelen = int(end_ptr - start_ptr)
else:
self.FID.seek(-start_ptr, 2)
filelen = self.FID.tell()
self.FID.seek(0, 2)
self.file_size = self.FID.tell()
self.FID.seek(start_ptr, 0)
return filelen
def _build_startbytesearch(self):
"""
Build the regular expression we are going to use to find the next startbyte, if necessary.
"""
# we search for the pound sign as a first step, use this compiled expression for the second tier, ensuring
# the pound sign actually indicates the record identifier
# went through and found the possible letters for all the records we care about
# have to be explicit, as there are datagrams within datagrams, see read_EMdgmSKMinfo
search_exp = b'#[CIMS][CDHIKOPRVWZ][CEILMOPTZ01]'
compiled_expr = re.compile(search_exp)
return compiled_expr
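    # Illustrative note (not part of the original source): the expression above
    # matches record identifiers such as b'#MRZ', b'#SKM', b'#IIP', b'#IOP' and
    # b'#SVP', while rejecting a bare b'#' that merely happens to occur in the
    # binary data.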
def seek_next_startbyte(self, file_length, start_ptr=0):
"""
Determines if current pointer is at the start of a record. If not, finds the next valid one.
"""
# check is to continue on until you find the pound sign, which might indicate the record identifier,
# can't just search for # though, have to use regex to ensure the 3 capital letter identifier comes after.
at_the_right_byte = False
while not at_the_right_byte:
cur_ptr = self.FID.tell()
if cur_ptr >= start_ptr + file_length:
# at the end of file, return False to stop searching
return False
# consider start bytes right at the end of the given filelength as valid, even if they extend
# over to the next chunk
srchdat = self.FID.read(min(20, (start_ptr + file_length) - cur_ptr))
stx_idx = srchdat.find(b'#')
if stx_idx >= 0:
possible_start = cur_ptr + stx_idx
self.FID.seek(possible_start)
datchk = self.FID.read(4)
m = self.datagram_ident_search.search(datchk, 0)
if m:
self.FID.seek(possible_start - 4)
return True
def _divide_rec(self, rec):
"""
MRZ comes in from sequential read by time/ping. Each ping may have multiple sectors to it which we want
to treat as separate pings. Do this by generating a new record for each sector in the ping. When rec is MRZ,
        the return is a list of rec split by sector. Otherwise returns the original rec as the only element in a list.
        returns: list of records (one per sector for multi-sector MRZ datagrams, otherwise just the original record)
"""
if self.datagram_ident != 'MRZ':
return [rec]
elif rec['pingInfo']['numTxSectors'] == 1:
return [rec]
else:
totalrecs = []
pingtime = rec['header']['dgtime']
for sec in rec['txSectorInfo']['txSectorNumb']:
split_rec = copy.copy(rec)
split_rec['txSectorInfo'] = {k: v[sec] for (k,v) in rec['txSectorInfo'].items()}
rx_index = np.where(np.array(rec['sounding']['txSectorNumb']) == sec)
split_rec['sounding'] = {k: np.array(v)[rx_index] for (k,v) in rec['sounding'].items()}
# ping time equals datagram time plus sector transmit delay
split_rec['header']['dgtime'] = pingtime + split_rec['txSectorInfo']['sectorTransmitDelay_sec']
totalrecs.append(split_rec)
return totalrecs
def _pad_to_dense(self, arr, padval=999.0, maxlen=500, override_type=None, detectioninfo=False):
"""
        Appends the minimal required amount of pad values at the end of each array in the jagged array `arr`, such that
        the result is no longer ragged.
A required operation for our sector-wise read. Each sector has a varying amount of beams over time, so the
resulting number of values per ping (beam pointing angle for example) will differ between pings. Here we make
these ragged arrays square, by using the padval to fill in the holes.
A padval of 999 is arbitrary, but we use that nodatavalue in kluster to reform pings and do processing, so
leave at 999 for Kluster. maxlen is the max number of expected beams per sector.
returns: Z, square array padded with padval where arr is ragged
"""
# override the dynamic length of beams across records by applying static length limit.
# ideally this should cover all cases
if override_type is not None:
typ = override_type
else:
typ = arr[0].dtype
Z = np.full((len(arr), maxlen), padval, dtype=typ)
for enu, row in enumerate(arr):
# some records being read have NaNs in them unexpectedly, like part of the record isn't being read
row[np.isnan(row)] = 0
if detectioninfo:
Z[enu, :len(row)] = self.translate_detectioninfo(row)
else:
Z[enu, :len(row)] = row
return Z
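    # Illustrative example (not part of the original source), assuming a call
    # with maxlen=4 and the default padval of 999.0:
    #   input:  [np.array([1., 2.]), np.array([3., 4., 5.])]
    #   output: np.array([[  1.,   2., 999., 999.],
    #                     [  3.,   4.,   5., 999.]])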
def _build_sequential_read_categories(self):
"""
sequential_read_records will go through the file and build a dictionary of the desired records. Specify those
records that you want here, in recs_categories. I use a dot notation to access the correct attribute, see
below.
"""
recs_categories = {'SKM': ['sample.KMdefault.dgtime', 'sample.KMdefault.roll_deg', 'sample.KMdefault.pitch_deg',
'sample.KMdefault.heave_m', 'sample.KMdefault.heading_deg',
'sample.KMdefault.latitude_deg', 'sample.KMdefault.longitude_deg',
'sample.KMdefault.ellipsoidHeight_m'],
'IIP': ['header.dgtime', 'install_txt'],
'MRZ': ['header.dgtime', 'cmnPart.pingCnt', 'cmnPart.rxTransducerInd',
'pingInfo.soundSpeedAtTxDepth_mPerSec', 'pingInfo.numTxSectors', 'header.systemID',
'txSectorInfo.txSectorNumb', 'txSectorInfo.tiltAngleReTx_deg',
'txSectorInfo.sectorTransmitDelay_sec', 'txSectorInfo.centreFreq_Hz',
'sounding.beamAngleReRx_deg', 'sounding.txSectorNumb', 'sounding.detectionType',
'sounding.qualityFactor', 'sounding.twoWayTravelTime_sec',
'pingInfo.modeAndStabilisation', 'pingInfo.pulseForm', 'pingInfo.depthMode'],
'IOP': ['header.dgtime', 'runtime_txt'],
'SVP': ['time_sec', 'sensorData.depth_m', 'sensorData.soundVelocity_mPerSec']}
recs_categories_translator = {'SKM': {'sample.KMdefault.dgtime': [['attitude', 'time'], ['navigation', 'time']],
'sample.KMdefault.roll_deg': [['attitude', 'roll']],
'sample.KMdefault.pitch_deg': [['attitude', 'pitch']],
'sample.KMdefault.heave_m': [['attitude', 'heave']],
'sample.KMdefault.heading_deg': [['attitude', 'heading']],
'sample.KMdefault.latitude_deg': [['navigation', 'latitude']],
'sample.KMdefault.longitude_deg': [['navigation', 'longitude']],
'sample.KMdefault.ellipsoidHeight_m': [['navigation', 'altitude']]},
'MRZ': {'header.dgtime': [['ping', 'time']],
'cmnPart.pingCnt': [['ping', 'counter']],
'cmnPart.rxTransducerInd': [['ping', 'rxid']],
'pingInfo.soundSpeedAtTxDepth_mPerSec': [['ping', 'soundspeed']],
'pingInfo.numTxSectors': [['ping', 'ntx']],
'header.systemID': [['ping', 'serial_num']],
'txSectorInfo.txSectorNumb': [['ping', 'txsectorid']],
'txSectorInfo.tiltAngleReTx_deg': [['ping', 'tiltangle']],
'txSectorInfo.sectorTransmitDelay_sec': [['ping', 'delay']],
'txSectorInfo.centreFreq_Hz': [['ping', 'frequency']],
'sounding.beamAngleReRx_deg': [['ping', 'beampointingangle']],
'sounding.txSectorNumb': [['ping', 'txsector_beam']],
'sounding.detectionType': [['ping', 'detectioninfo']],
'sounding.qualityFactor': [['ping', 'qualityfactor_percent']],
'sounding.twoWayTravelTime_sec': [['ping', 'traveltime']],
'pingInfo.modeAndStabilisation': [['ping', 'yawpitchstab']],
'pingInfo.pulseForm': [['ping', 'mode']],
'pingInfo.depthMode': [['ping', 'modetwo']]},
'IIP': {'header.dgtime': [['installation_params', 'time']],
'install_txt': [['installation_params', 'installation_settings']]},
'IOP': {'header.dgtime': [['runtime_params', 'time']],
'runtime_txt': [['runtime_params', 'runtime_settings']]},
'SVP': {'time_sec': [['profile', 'time']],
'sensorData.depth_m': [['profile', 'depth']],
'sensorData.soundVelocity_mPerSec': [['profile', 'soundspeed']]}}
recs_categories_result = {
'attitude': {'time': None, 'roll': None, 'pitch': None, 'heave': None, 'heading': None},
'installation_params': {'time': None, 'serial_one': None, 'serial_two': None,
'installation_settings': None},
'ping': {'time': None, 'counter': None, 'rxid': None, 'soundspeed': None, 'ntx': None,
'serial_num': None, 'txsectorid': None, 'tiltangle': None, 'delay': None,
'frequency': None, 'beampointingangle': None, 'txsector_beam': None,
'detectioninfo': None, 'qualityfactor_percent': None, 'traveltime': None, 'mode': None,
'modetwo': None, 'yawpitchstab': None},
'runtime_params': {'time': None, 'runtime_settings': None},
'profile': {'time': None, 'depth': None, 'soundspeed': None},
'navigation': {'time': None, 'latitude': None, 'longitude': None, 'altitude': None}}
return recs_categories, recs_categories_translator, recs_categories_result
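    # Illustrative note (not part of the original source): an entry such as
    # 'sample.KMdefault.dgtime' in recs_categories refers to the nested record
    # field rec['sample']['KMdefault']['dgtime'], and recs_categories_translator
    # maps it onto the output paths ['attitude', 'time'] and
    # ['navigation', 'time'] in recs_categories_result.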
def _finalize_records(self, recs_to_read, recs_count):
"""
Take output from sequential_read_records and alter the type/size/translate as needed for Kluster to read and
convert to xarray. Major steps include
- adding empty arrays so that concatenation later on will work
- pad_to_dense to convert the ragged sector-wise arrays into square numpy arrays
- translate the runtime parameters from integer/binary codes to string identifiers for easy reading (and to
allow comparing results between different file types)
returns: recs_to_read, dict of dicts finalized
"""
# drop the delay array and txsector_beam array since we've already used it for adjusting ping time and building
# sector masks
recs_to_read['ping'].pop('delay')
recs_to_read['ping'].pop('txsector_beam')
        # need to force in the serial number, it's not in the header anymore with these kmall files...
if recs_to_read['installation_params']['installation_settings'] is not None:
inst_params = recs_to_read['installation_params']['installation_settings'][0]
if inst_params is not None:
recs_to_read['installation_params']['serial_one'] = np.array([int(inst_params['pu_serial_number'])])
# currently nothing in the record for identifying the second system in a dual head
recs_to_read['installation_params']['serial_two'] = np.array([0])
for rec in recs_to_read:
for dgram in recs_to_read[rec]:
if recs_count[rec] == 0:
if rec != 'runtime_params' or dgram == 'time':
# found no records, empty array
recs_to_read[rec][dgram] = | np.zeros(0) | numpy.zeros |
import typing as tp
import pathlib
import dataclasses
import collections
import itertools
import enum
import cv2
import imutils
import numpy as np
import shapely.geometry
import matplotlib.pyplot as plt
class colors(enum.Enum):
BLUE = (255, 0, 0)
GREEN = (0, 255, 0)
RED = (0, 0, 255)
@property
def bgr(self):
return self.value
def dist(self, other) -> float:
return np.linalg.norm(np.array(self.value) - other)
@dataclasses.dataclass
class Line:
"""Oriented Lines in R^2. """
a: float
b: float
c: float
def __post_init__(self):
if self.a == self.b == 0:
raise ValueError("Parameters 'a' and 'b' cannot both be zero.")
# Normalize -- Two lines are the same if parameters are the same up to
# a _positive_ scaling factor, left side is positive
norm = np.linalg.norm(dataclasses.astuple(self)[:2])
self.a = self.a / norm
self.b = self.b / norm
self.c = self.c / norm
def __call__(self, point) -> float:
x, y = point
return self.a * x + self.b * y + self.c
def __neg__(self) -> "Line":
return self.__class__(-self.a, -self.b, -self.c)
def parallel_line(self, point) -> "Line":
"""Returns the parallel line passing through (x, y). """
x, y = point
a, b, c = dataclasses.astuple(self)
# Orientation consistent with cross-product with pos. z-axis
return self.__class__(a, b, -a * x - b * y)
def orthogonal_line(self, point) -> "Line":
"""Returns the orthogonal line passing through (x, y). """
x, y = point
a, b, c = dataclasses.astuple(self)
# Orientation consistent with cross-product with pos. z-axis
return self.__class__(-b, a, -a * y + b * x)
def project_point(self, point):
orth = self.orthogonal_line(point)
return self ^ orth
def get_line_segment(self, image):
""" Returns the line segment that intersects the image. """
# TODO: Fix for when line more closely aligns with y-axis
m, n = image.shape[:2]
a, b, c = dataclasses.astuple(self)
x0, y0 = (0, int(-c/b))
x1, y1 = (n, int(-(a*n + c)/b))
return ((x0, y0), (x1, y1))
def __add__(self, other):
a, b, c = dataclasses.astuple(self)
return self.__class__(a, b, c + float(other))
def __sub__(self, other):
a, b, c = dataclasses.astuple(self)
return self.__class__(a, b, c - float(other))
def __ge__(self, point):
return self(point) <= 0
def __le__(self, point):
return self(point) >= 0
def __gt__(self, point):
return self(point) < 0
def __lt__(self, point):
return self(point) > 0
def __xor__(self, other):
# Intersect
A = np.vstack((dataclasses.astuple(self),
dataclasses.astuple(other)))
x = np.linalg.solve(A[:, :2], -A[:, 2])
return x
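# A short, hedged usage sketch of the Line class above (illustrative only,
# not part of the original module). Lines are stored as a*x + b*y + c = 0
# with (a, b) normalized to unit length in __post_init__.
#
#   x_axis = Line(0.0, 1.0, 0.0)            # the line y = 0
#   x_axis((2.0, 3.0))                      # evaluates to 3.0 (signed offset)
#   x_axis.orthogonal_line((2.0, 3.0))      # vertical line x = 2 through (2, 3)
#   x_axis ^ Line(1.0, 0.0, -2.0)           # intersection -> array([2., 0.])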
@dataclasses.dataclass
class Reader:
color_to_grayscale: int = cv2.COLOR_BGR2GRAY
grayscale_to_color: int = cv2.COLOR_GRAY2BGR
blur_kernel_size: tp.Tuple[int, int] = (3, 3)
blur_dist: int = 0
morph_kernel = np.ones((3, 3))
morph_num_iter = 1
thresh_val = 130
thresh_maxval = 255
    contour_mode: int = cv2.RETR_EXTERNAL # Retrieve only the external contours
contour_method: int = cv2.CHAIN_APPROX_TC89_L1 # Apply a flavor of the Teh Chin chain approx algo
draw_binary: bool = True
draw_contours: bool = True
draw_axis: bool = True
marker_min_aspect = 0.5
marker_min_area = 100
marker_max_value = 100
rectangle_approx_tol = 0.04
resample_x_max = 1.8
resample_y_max = 0.8
resample_step_x = 1/1000
resample_step_y = 1/1000
def get_image_moment(self, order: int = 1):
image = self.image
if len(image.shape) > 2:
image = np.mean(image, axis=2)
m, n = image.shape
v = np.mean(image)
x0 = np.mean(image * np.arange(n)**order) / v
y0 = np.mean(image * np.arange(m)[:, np.newaxis]**order) / v
return (x0, y0)
def get_axis(self, rectangles: tp.List[np.ndarray]):
rectangles = [r.reshape(-1, 2) for r in rectangles]
# First get best line fitting the marker centres
centres = [r.mean(axis=0) for r in rectangles]
A = np.ones((len(rectangles), 3))
for i, c in enumerate(centres):
A[i, 1:] = c
        # Best line coefficients from the approximate null-vector
svd = np.linalg.svd(A)
c, a, b = svd[-1][-1, :]
centreline = Line(a, b, c)
# Obtain a better fit using all four vertices of the rectangles
B = np.zeros((4*len(rectangles), 4))
for (i, rect) in enumerate(rectangles):
# Insert rectangle vertices
B[4 * i: 4*i + 4, 2:] = rect
for j, pt in enumerate(rect):
# Constant coefficients
B[4*i + j, int(centreline <= pt)] = 1
svd = np.linalg.svd(B)
c, d, a, b = svd[-1][-1, :]
# Equation for x-axis -- best fit for centreline
# x_axis = Line(a, b, (c + d)/2)
x_axis = Line(a, b, c) # Why not (c + d) / 2
        # Get the image centroid and orient the line
image_centre = self.get_image_moment(order=1)
if x_axis >= image_centre:
x_axis = -x_axis
# Place a preliminary y-axis
y_axis_prelim = x_axis.orthogonal_line(image_centre)
# Place origin on the first marker along the oriented x-axis
origin = sorted(centres, key=y_axis_prelim)[0]
y_axis = x_axis.orthogonal_line(origin)
self.axis = (x_axis, y_axis)
self.scale = | np.linalg.norm(centres[1] - centres[0]) | numpy.linalg.norm |
"""
Copyright (C) 2016-2019 <NAME>
Licensed under Illinois Open Source License (see the file LICENSE). For more information
about the license, see http://otm.illinois.edu/disclose-protect/illinois-open-source-license.
Implement ProSRS algorithm.
"""
import numpy as np
import os, sys, pickle, shutil, warnings
import multiprocessing as mp
from pathos.multiprocessing import ProcessingPool as Pool
from timeit import default_timer
from pyDOE import lhs
from scipy.spatial.distance import cdist
from ..utility.constants import STATE_NPZ_FILE_TEMP, STATE_PKL_FILE_TEMP, STATE_NPZ_TEMP_FILE_TEMP, STATE_PKL_TEMP_FILE_TEMP
from ..utility.classes import std_out_logger, std_err_logger
from ..utility.functions import eval_func, put_back_box, scale_one_zero, scale_zero_one, eff_npt, boxify, domain_intersect, print_table
from .surrogate import RBF_reg
try:
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
except:
pass
class Optimizer:
"""
A class that handles optimization using ProSRS algorithm.
"""
def __init__(self, prob, n_worker, n_iter=None, n_iter_doe=None, n_cycle=2, resume=False,
seed=1, seed_func=None, parallel_training=False, out_dir='out'):
"""
Constructor.
Args:
prob (`prosrs.Problem` type): Optimization problem.
n_worker (int): Number of workers for the optimization.
This also determines the number of proposed or evaluated
points per iteration.
n_iter (int or None, optional): Total number of iterations for the optimization.
If ``int``, the optimization will terminate upon finishing running `n_iter` iterations.
If None, then we use number of optimization cycles `n_cycle` as the termination condition.
n_iter_doe (int or None, optional): Number of iterations for DOE.
If None, then we use default value.
n_cycle (int, optional): Total number of optimization sycles.
This parameter takes effect only when `n_iter` is None, and
the optimization will terminate upon finishing `n_cycle` cycles.
resume (bool, optional): Whether to resume from the last run.
The information of the last run will be read from the directory `out_dir`.
seed (int or None, optional): Random seed for the optimizer.
If None, then we do not set random seed for the optimization.
seed_func (callable or None, optional): User-specified function for setting the random seed for evaluations.
If None, then we use ``numpy.random.seed(seed)`` method to set random seed.
If callable, it is a function taking ``seed`` as an argument.
                Note: using ``numpy.random.seed`` may not always guarantee
                reproducibility. Here we provide an option for users to specify their own routines.
parallel_training (bool, optional): Whether to train a RBF surrogate in parallel.
If True, then we will use all the available processes (cores) during training.
We use `pathos.multiprocessing` module for parallelism. Our tests have
shown that depending on the machine and the optimization function,
sometimes parallelism may cause memory issues. So we disable it by default.
out_dir (str, optional): Output directory.
All the output files (e.g., optimization status file) will be saved to
`out_dir` directory.
"""
# class members (convention: variables wih prefix '_' are constant parameters during optimization).
self._prob = prob # optimization problem
        self._dim = prob.dim # dimension of the optimization problem.
self._n_worker = n_worker # number of workers.
self._n_iter = n_iter # total number of iterations.
self._n_cycle = n_cycle # total number of optimization cycles.
self._resume = resume # whether to resume from the last run.
self._seed = seed # random seed for the optimizer.
self._seed_func = seed_func # function for setting random seed for evaluations.
self._parallel_training = parallel_training # whether to use parallelism for training RBF models.
self._out_dir = out_dir # output directory.
self._n_cand_fact = 1000 # number of candidate = self._n_cand_fact * self._dim.
self._wgt_pat_bd = [0.3, 1.] # the bound of the weight pattern in the SRS method.
self._normalize_data = True # whether to normalize data when training RBF regression model.
self._init_gamma = 0. # initial weight exponent in the SRS method (>=0). If zero, then disable weighting in RBF regression.
self._delta_gamma = 2. # amount of decrease of weight exponent whenever failure occurs.
self._init_sigma = 0.1 # initial sigma value in the SRS method (controls initial spread of Type II candidate points).
self._max_n_reduce_sigma = 2 # max number of times of halving self.sigma before zooming in/restart. Critical sigma value = self._init_sigma * 0.5**self._max_n_reduce_sigma.
self._rho = 0.4 # zoom-in factor. Must be in (0, 1).
self._init_p = 1. # initial p value in the SRS method (controls initial proportion of Type I candidate points).
self._init_beta = 0.02 # initial beta value (= initial probability of zooming out).
self._min_beta = 0.01 # minimum beta value (= minimum probability of zooming out).
self._alpha = 1. # parameter controlling decreasing rate of p value: p = p*n_{eff}**(-self._alpha/self.dim).
self._lambda_range = [1e-7, 1e4] # range of regularization parameter in RBF regression, over which we determine regularization constant in L2 regularization.
self._rbf_kernel = 'multiquadric' # RBF kernel. See `scipy.interpolate.Rbf <https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.interpolate.Rbf.html>`.
self._rbf_poly_deg = 0 # degree of RBF polynomial tail (either 0 or 1). If zero, then no polynomial tail.
self._n_fold = 5 # number of folds for the cross validation in training a RBF model.
self._resol = 0.01 # resolution parameter for determining whether to restart.
self._use_eff_npt = True # whether to use effective number of points for the dynamics of p value.
self._max_C_fail = max(2, int(np.ceil(self._dim/float(self._n_worker)))) # maximum number of consecutive failures before halving self.sigma value.
self._n_iter_doe = int(np.ceil(3/float(self._n_worker))) if n_iter_doe is None else n_iter_doe # number of iterations for DOE.
self._n_iter_doe = min(self._n_iter, self._n_iter_doe) if self._n_iter is not None else self._n_iter_doe # adjusted for the fact that self._n_iter_doe <= self._n_iter.
self._n_doe_samp = self._n_iter_doe * self._n_worker # number of DOE samples
self._n_cand = self._n_cand_fact * self._dim # number of candidate points in SRS method
self._pool_rbf = Pool(nodes=mp.cpu_count()) if self._parallel_training else None # parallel pool used for RBF regression
self._state_npz_file = os.path.join(self._out_dir, STATE_NPZ_FILE_TEMP % self._prob.name) # file that saves optimizer state (needed for resume)
self._state_pkl_file = os.path.join(self._out_dir, STATE_PKL_FILE_TEMP % self._prob.name) # file that saves optimizer state (needed for resume)
self._state_npz_lock_file = self._state_npz_file+'.lock' # a lock file that may be generated in some system, which prevents reading data from `self._state_npz_file`.
self._state_pkl_lock_file = self._state_pkl_file+'.lock' # a lock file that may be generated in some system, which prevents reading data from `self._state_pkl_file`.
self._state_npz_temp_file = os.path.join(self._out_dir, STATE_NPZ_TEMP_FILE_TEMP % self._prob.name) # a temporary file that holds data for `self._state_npz_file`.
self._state_pkl_temp_file = os.path.join(self._out_dir, STATE_PKL_TEMP_FILE_TEMP % self._prob.name) # a temporary file that holds data for `self._state_pkl_file`.
self._verbose_dot_len = 10 # number of dots to display for verbose messages.
# sanity check
assert(type(self._n_worker) is int and self._n_worker > 0)
assert(0 <= self._wgt_pat_bd[0] <= self._wgt_pat_bd[1] <= 1 and len(self._wgt_pat_bd) == 2)
assert(self._delta_gamma >= 0)
assert(type(self._max_n_reduce_sigma) is int and self._max_n_reduce_sigma >= 0)
assert(0 < self._rho < 1)
assert(0 <= self._init_p <= 1)
assert(0 <= self._min_beta <= self._init_beta <= 1)
assert(self._alpha > 0)
assert(0 < self._lambda_range[0] <= self._lambda_range[1] and len(self._lambda_range) == 2)
assert(type(self._rbf_poly_deg) is int and self._rbf_poly_deg in [0, 1])
assert(type(self._n_fold) is int and self._n_fold > 1)
assert(0 < self._resol < 1)
assert(type(self._max_C_fail) is int and self._max_C_fail > 0)
assert(type(self._n_iter_doe) is int and self._n_iter_doe > 0)
assert(type(self._n_cand) is int and self._n_cand >= self._n_worker)
if type(self._n_iter) is int:
assert(self._n_iter >= 0)
else:
assert(self._n_iter is None), 'n_iter is either an integer or None.'
assert(type(self._n_cycle) is int and self._n_cycle >= 0)
# create output directory, if not existent
if not os.path.isdir(self._out_dir):
os.makedirs(self._out_dir)
# initialize the state of the optimizer
if not self._resume:
np.random.seed(self._seed) # set random seed.
self.i_iter = 0 # iteration index (how many iterations have been run)
self.i_cycle = 0 # number of optimization cycles that have been completed
self.doe_samp = self.doe() # DOE samples
self.i_iter_doe = 0 # iteration index during DOE phase (how many DOE iterations have been run in current cycle)
            self.t_build_arr = np.zeros(0) # time of building a RBF model for each iteration. If an iteration is DOE, the value is NaN.
            self.t_srs_arr = np.zeros(0) # time of running the SRS method for each iteration. If an iteration is DOE, the value is NaN.
self.t_prop_arr = np.zeros(0) # time of proposing new points for each iteration.
self.t_eval_arr = np.zeros(0) # time of evaluating proposed points for each iteration.
self.t_update_arr = np.zeros(0) # time of updating the optimizer state for each iteration.
            self.gSRS_pct_arr = np.zeros(0) # percentage of global SRS (= percentage of Type I candidates = floor(10*p_val)/10.) for each iteration.
self.zoom_lv_arr = np.zeros(0) # zoom level at the time of proposing new points for each iteration.
self.x_tree = np.zeros((0, self._dim)) # all the evaluated (proposed) points so far in a tree.
self.y_tree = np.zeros(0) # (noisy) y values of `self.x_tree`.
self.x_all = np.zeros((0, self._dim)) # all the evaluated (proposed) points so far in the course of optimization.
self.y_all = np.zeros(0) # (noisy) y values of `self.x_all`.
self.seed_all = np.zeros(0) # random seeds for points in `self.x_all`.
self.best_x = np.ones(self._dim)*np.nan # best point so far.
self.best_y = np.nan # (noisy) y value of the best point `self.best_x`.
self.zoom_lv = 0 # zoom level (zero-based).
self.act_node_ix = 0 # index of the activate node for the zoom level `self.zoom_lv` (zero-based).
self.srs_wgt_pat = np.linspace(self._wgt_pat_bd[0], self._wgt_pat_bd[1], self._n_worker) # weight pattern in the SRS method.
self.tree = self.init_tree() # initialize optimization tree
self.eval_seeds = self._seed+1+np.arange(self.i_iter*self._n_worker, (self.i_iter+1)*self._n_worker, dtype=int) # random seeds for parallel evaluations
else:
# load optimizer state from the last run
self.load_state()
def show(self, select=['problem', 'config', 'status', 'result', 'post_result'], n_top=1):
"""
Display the optimizer info.
Args:
select (list or tuple, optional): Select which info will be displayed.
Possible values and their meanings:
'problem': optimization problem to be solved.
'config': optimization configuration.
'status': optimization status.
'result': optimization results.
'post_result': posterior evaluation results.
            n_top (int, optional): Number of top points to be displayed for optimization results/posterior evaluation results.
This parameter takes effects only when `select` contains 'result' or 'post_result'.
"""
select_possible_vals = ['problem', 'config', 'status', 'result', 'post_result']
assert(type(select) in [list, tuple] and set(select)<=set(select_possible_vals)), 'invalid select value'
if 'problem' in select:
print(self._prob)
if 'config' in select:
print('Optimization configuration:')
print('- Number of workers: %d' % self._n_worker)
if self._n_iter is None:
print('- Termination criterion: stop after completing %d optimization cycles' % self._n_cycle)
else:
print('- Termination criterion: stop after completing %d iterations' % self._n_iter)
is_resume = 'Yes' if self._resume else 'No'
print('- Resuming from the last run? %s' % is_resume)
print("- Output directory: '%s'" % self._out_dir)
if 'status' in select:
print('Optimization status:')
print('- Number of iterations completed: %d' % self.i_iter)
print('- Number of cycles completed: %d' % self.i_cycle)
print('- Zoom level of current node: %d' % self.zoom_lv) # note: zoom level of root node is zero
node_domain = self.tree[self.zoom_lv][self.act_node_ix]['domain']
print('- Domain of current node: %s' % ('{'+', '.join(["'%s': %s" % (x, str(tuple(v))) for x, v in \
zip(self._prob.x_var, node_domain)])+'}'))
if 'result' in select:
assert(type(n_top) is int and n_top > 0)
top_pt, top_val, n_top = self.top_pts(n_top=n_top)
print('Optimization result:')
print('- Best point:')
print(' '+', '.join(['%s = %g' % (x, v) for x, v in zip(self._prob.x_var, self.best_x)]))
print('- Best (noisy) value:')
print(' %s = %g' % (self._prob.y_var, self.best_y))
if n_top > 1:
# display top points
print('- Top %d points sorted by (noisy) function values (Column %d):' % (n_top, self._dim+1))
table_data = np.hstack((top_pt, top_val.reshape((-1, 1))))
col_name = self._prob.x_var+[self._prob.y_var]
print_table(table_data, col_name=col_name)
if 'post_result' in select:
assert(type(n_top) is int and n_top > 0)
top_pt, top_mean_val, top_std_val, n_top = self.post_top_pts(n_top=n_top)
# display posterior best point and its value
print('Posterior evaluation results:')
print('- Condition: run ProSRS algorithm for %d iterations, then run posterior evaluations with %d Monte Carlo repeats'
% (self.i_iter_posterior_eval, self.posterior_eval_y.shape[1]))
print('- Best point:')
print(' '+', '.join(['%s = %g' % (x, v) for x, v in zip(self._prob.x_var, self.post_best_x)]))
print('- Best (mean) value:')
print(' %s = %g' % (self._prob.y_var, self.post_best_y))
if n_top > 1:
# display top points
print('- Top %d points sorted by Monte Carlo mean estimates (Column %d):' % (n_top, self._dim+1))
table_data = np.hstack((top_pt, top_mean_val.reshape((-1, 1)), top_std_val.reshape((-1, 1))))
col_name = self._prob.x_var+['mean of '+self._prob.y_var, 'std of '+self._prob.y_var]
print_table(table_data, col_name=col_name)
def run(self, std_out_file=None, std_err_file=None, verbosity=1):
"""
Run ProSRS algorithm.
Args:
std_out_file (str or None, optional): Standard output file path.
If ``str``, then standard outputs will be directed to the file `std_out_file`.
If None, then standard outputs will not be saved to a file.
std_err_file (str or None, optional): Standard error file path.
If ``str``, then standard errors will be directed to the file `std_err_file`.
If None, then standard errors will not be saved to a file.
verbosity (int, optional): Level of verbosity (0-2) while running the algorithm.
If zero, then no verbose. The larger this value is, the more information
will be displayed.
"""
assert(0 <= verbosity <= 2)
# log standard outputs and standard errors.
# here we write to a new file if we do not resume. Otherwise, we append to the old file.
if std_out_file is not None:
orig_std_out = sys.stdout
if not self._resume:
if os.path.isfile(std_out_file):
os.remove(std_out_file)
sys.stdout = std_out_logger(std_out_file)
if std_err_file is not None:
orig_std_err = sys.stderr
if not self._resume:
if os.path.isfile(std_err_file):
os.remove(std_err_file)
sys.stderr = std_err_logger(std_err_file)
if verbosity == 2:
print('')
self.show(select=['problem']) # show optimization problem
print('')
self.show(select=['config']) # show optimization configuration
if verbosity > 0:
print('\nStart running ProSRS algorithm ...')
# main loop
while not self.is_done():
if verbosity > 0:
print('\nIteration %d:' % (self.i_iter+1))
# propose new points
new_pt = self.propose(verbose=verbosity>0)
# evaluate proposed points
new_val = self.eval_pt(new_pt, verbose=verbosity>0)
# update optimizer state with the new evaluations
self.update(new_pt, new_val, verbose=verbosity>0)
if verbosity == 2:
self.show(select=['status']) # show optimization status
# flush standard outputs and standard errors to files
if std_out_file is not None:
sys.stdout.terminal.flush()
sys.stdout.log.flush()
if std_err_file is not None:
sys.stderr.terminal.flush()
sys.stderr.log.flush()
if verbosity > 0:
print('\nFinished running ProSRS algorithm.')
if verbosity == 2:
print('')
self.show(select=['result']) # show optimization result
# reset stdout and stderr
if std_out_file is not None:
sys.stdout = orig_std_out
if std_err_file is not None:
sys.stderr = orig_std_err
def doe(self, criterion='maximin'):
"""
Design of experiments using Latin Hypercube Sampling (LHS).
Args:
criterion (str, optional): Sampling criterion for LHS.
For details, see `pyDOE documentation <https://pythonhosted.org/pyDOE/randomized.html>`.
Returns:
samp (2d array): LHS samples. Each row is one sample.
"""
unit_X = lhs(self._dim, samples=self._n_doe_samp, criterion=criterion) # unit_X: 2d array in unit cube
samp = np.zeros_like(unit_X)
for i in range(self._dim):
samp[:, i] = unit_X[:, i]*(self._prob.domain[i][1]-self._prob.domain[i][0])+self._prob.domain[i][0] # scale and shift
return samp
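    # Illustrative note (not from the original source): for a 1-D problem with
    # domain [(2.0, 6.0)], a unit LHS value of 0.25 is scaled above to
    # 0.25*(6.0-2.0)+2.0 = 3.0.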
def init_tree(self):
"""
Initialize an optimization tree.
Returns:
tree (dict): Initial tree.
"""
        tree = {self.zoom_lv: [{'ix': np.arange(self._n_doe_samp, dtype=int), # indices of data for the tree node (w.r.t. `self.x_tree` or `self.y_tree`).
'domain': self._prob.domain, # domain of the tree node.
'parent_ix': None, # parent node index for the upper zoom level (zero-based). If None, there's no parent.
'beta': self._init_beta, # zoom-out probability.
'state': self.init_node_state() # state of the tree node.
}]}
return tree
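    # Illustrative note (not from the original source): after initialization
    # the tree is {0: [root_node]}; zooming in later appends child nodes under
    # keys 1, 2, ..., and each child records the index of its parent node via
    # 'parent_ix'.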
def init_node_state(self):
"""
Initialize the state of a node of the optimization tree.
Returns:
state (dict): Values of state variables.
"""
state = {'p': self._init_p, # p value in the SRS method (controls proportion of Type I candidate points).
'Cr': 0, # counter that counts number of times of reducing the sigma value of local SRS.
'Cf': 0, # counter that counts number of consecutive failures.
'gamma': self._init_gamma # weight exponent parameter of weighted RBF
}
return state
def propose(self, verbose=True):
"""
Propose new points for the next iteration.
Args:
verbose (bool, optional): Whether to verbose about proposing new points.
Returns:
new_pt (2d array): Proposed new points. Each row is one point.
"""
tt1 = default_timer()
if self.i_iter_doe < self._n_iter_doe:
if verbose:
sys.stdout.write('Proposing new points (DOE) '+'.'*self._verbose_dot_len)
t1 = default_timer()
# i.e., current iteration is in DOE phase
new_pt = self.doe_samp[self.i_iter_doe*self._n_worker:(self.i_iter_doe+1)*self._n_worker]
# the following variables will be saved in the `self.update` method
# so we need to set them appropriately
self.gSRS_pct = np.nan
self.t_build = np.nan
self.t_srs = np.nan
t2 = default_timer()
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % (t2-t1))
else:
# i.e., current iteration is in the optimization phase (not in the DOE phase)
########### Get activated node ##############
self.act_node = self.tree[self.zoom_lv][self.act_node_ix]
self.x_node = self.x_tree[self.act_node['ix']] # get X data of the node
self.y_node = self.y_tree[self.act_node['ix']] # get Y data of the node
self.p_val = self.act_node['state']['p']
self.n_reduce_sigma = self.act_node['state']['Cr']
self.n_fail = self.act_node['state']['Cf']
self.gamma = self.act_node['state']['gamma']
            self.gSRS_pct = np.floor(10*self.p_val)/10. # percentage of global SRS (= percentage of Type I candidates)
self.sigma = self._init_sigma*0.5**self.n_reduce_sigma
########### Build RBF surrogate model ##############
if verbose:
sys.stdout.write('Building RBF regression model '+'.'*self._verbose_dot_len)
t1 = default_timer()
self.rbf_mod, _, _, _ = RBF_reg(self.x_node, self.y_node, self._lambda_range,
normalize_data=self._normalize_data, wgt_expon=self.gamma,
n_fold=self._n_fold, kernel=self._rbf_kernel,
poly_deg=self._rbf_poly_deg, pool=self._pool_rbf)
t2 = default_timer()
self.t_build = t2-t1
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % self.t_build)
########### Propose new points using SRS method ##############
if verbose:
sys.stdout.write('Proposing new points '+'.'*self._verbose_dot_len)
t1 = default_timer()
new_pt = self.SRS()
t2 = default_timer()
self.t_srs = t2-t1
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % self.t_srs)
tt2 = default_timer()
self.t_prop = tt2-tt1
return new_pt
def SRS(self):
"""
Propose new points using SRS method.
Returns:
new_pt (2d array): Proposed points.
"""
# generate candidate points
if self.gSRS_pct == 1:
# generate candidate points uniformly (global SRS)
cand_pt = np.zeros((self._n_cand, self._dim))
for d, bd in enumerate(self.act_node['domain']):
cand_pt[:, d] = np.random.uniform(low=bd[0], high=bd[1], size=self._n_cand)
else:
n_cand_gSRS = int(np.round(self._n_cand*self.gSRS_pct)) # number of candidate points for global SRS
n_cand_lSRS = self._n_cand-n_cand_gSRS # number of candidate points for local SRS
assert(n_cand_lSRS > 0) # sanity check
# generate candidate points uniformly (global SRS)
cand_pt_gSRS = np.zeros((n_cand_gSRS, self._dim))
if n_cand_gSRS > 0:
for d, bd in enumerate(self.act_node['domain']):
cand_pt_gSRS[:, d] = np.random.uniform(low=bd[0], high=bd[1], size=n_cand_gSRS)
# find x_star
Y_fit = self.rbf_mod(self.x_node)
min_ix = np.argmin(Y_fit)
x_star = self.x_node[min_ix]
assert(np.all([bd[0] <= x_star[j] <= bd[1] for j,bd in enumerate(self.act_node['domain'])])) # sanity check
# find step size (i.e. std) for each coordinate of `x_star`
step_size_arr = np.array([self.sigma*(bd[1]-bd[0]) for bd in self.act_node['domain']])
assert(np.min(step_size_arr) > 0) # sanity check
# generate candidate points (Gaussian about x_star, local SRS)
cand_pt_lSRS = np.random.multivariate_normal(x_star, np.diag(step_size_arr**2), n_cand_lSRS)
# combine two types of candidate points
comb_cand_pt = np.vstack((cand_pt_gSRS, cand_pt_lSRS))
# put candidate points back to the domain, if there's any outside
uniq_cand_pt, raw_cand_pt = put_back_box(comb_cand_pt, self.act_node['domain'])
# get candidate points (``len(uniq_cand_pt) < n_worker`` is pathological case, almost never encountered in practice)
cand_pt = uniq_cand_pt if len(uniq_cand_pt) >= self._n_worker else raw_cand_pt
# select new points from candidate points
n_cand = len(cand_pt)
assert(n_cand >= self._n_worker)
resp_cand = self.rbf_mod(cand_pt)
resp_score = scale_zero_one(resp_cand) # response score
# initializations
new_pt = np.zeros((self._n_worker, self._dim))
refer_pt = self.x_node.copy() # reference points based on which we compute distance scores
# select points sequentially
for j in range(self._n_worker):
wt = self.srs_wgt_pat[j]
if len(refer_pt) > 0:
if j == 0:
# distance matrix for `refer_pt` and `cand_pt`
dist_mat = cdist(cand_pt, refer_pt)
dist_cand = np.amin(dist_mat, axis=1)
else:
# distance to the previously proposed point
dist_prop_pt = cdist(cand_pt, new_pt[j-1].reshape((1, -1))).flatten()
dist_cand = np.minimum(dist_cand, dist_prop_pt)
dist_score = scale_one_zero(dist_cand) # distance score
else:
# pathological case
dist_score = np.zeros(n_cand) # distance score
cand_score = resp_score*wt+(1-wt)*dist_score # candidate score
assert (np.amax(cand_score)<=1 and np.amin(cand_score)>=0) # sanity check
# select the best one based on the score
min_ix = np.argmin(cand_score)
new_pt[j] = cand_pt[min_ix]
# update variables
refer_pt = np.vstack((refer_pt, new_pt[j].reshape((1, -1))))
dist_cand = np.delete(dist_cand, min_ix)
resp_score = np.delete(resp_score, min_ix)
cand_pt = np.delete(cand_pt, min_ix, axis=0)
n_cand -= 1
return new_pt
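    # Illustrative note (not from the original source): because points are
    # chosen by minimizing cand_score = wt*resp_score + (1-wt)*dist_score with
    # wt drawn from srs_wgt_pat = linspace(0.3, 1.0, n_worker), the first
    # worker weighs the distance score heavily (exploration) while the last
    # worker uses the response score alone (exploitation).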
def eval_pt(self, x, verbose=True):
"""
Evaluate proposed points.
Args:
x (2d array): Points to be evaluated. Each row is one point.
verbose (bool, optional): Whether to verbose about the evaluation.
Returns:
y (1d array): Evaluations of points in `x`.
"""
if verbose:
sys.stdout.write('Evaluating proposed points '+'.'*self._verbose_dot_len)
t1 = default_timer()
assert(callable(self._prob.f)), 'Error! Unable to perform evaluations. Please first define the (noisy) optimization function ``f`` in the ``Problem`` object.'
y = eval_func(self._prob.f, x, n_proc=self._n_worker, seeds=self.eval_seeds.tolist(),
seed_func=self._seed_func)
t2 = default_timer()
self.t_eval = t2-t1
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % self.t_eval)
return y
def update(self, new_x, new_y, verbose=True):
"""
Update the state of the optimizer.
Args:
new_x (2d array): Proposed new points. Each row is one point.
new_y (1d array): (Noisy) values of the points in `new_x`.
verbose (bool, optional): Whether to verbose about updating the state of the optimizer.
"""
if verbose:
sys.stdout.write('Updating optimizer state '+'.'*self._verbose_dot_len)
specific_msg = '' # specific message indicating the action
t1 = default_timer()
self.i_iter += 1
self.eval_seeds = self._seed+1+np.arange(self.i_iter*self._n_worker, (self.i_iter+1)*self._n_worker, dtype=int)
self.x_tree = np.vstack((self.x_tree, new_x))
self.y_tree = np.append(self.y_tree, new_y)
self.x_all = np.vstack((self.x_all, new_x))
self.y_all = np.append(self.y_all, new_y)
self.seed_all = np.append(self.seed_all, self.eval_seeds)
min_ix = np.argmin(self.y_all)
self.best_x = self.x_all[min_ix]
self.best_y = self.y_all[min_ix]
self.t_build_arr = np.append(self.t_build_arr, self.t_build)
self.t_srs_arr = np.append(self.t_srs_arr, self.t_srs)
self.t_prop_arr = np.append(self.t_prop_arr, self.t_prop)
try:
self.t_eval_arr = np.append(self.t_eval_arr, self.t_eval)
except:
# i.e., self.t_eval is not defined. This could happen when one uses customized evaluation function.
self.t_eval_arr = np.append(self.t_eval_arr, np.nan)
self.gSRS_pct_arr = np.append(self.gSRS_pct_arr, self.gSRS_pct)
self.zoom_lv_arr = np.append(self.zoom_lv_arr, self.zoom_lv)
if self.i_iter_doe < self._n_iter_doe: # i.e., current iteration is in DOE phase
self.i_iter_doe += 1
else:
# update weight pattern in SRS method
if self._n_worker == 1:
self.srs_wgt_pat = np.array([self._wgt_pat_bd[0]]) if self.srs_wgt_pat[0] == self._wgt_pat_bd[1] \
else np.array([self._wgt_pat_bd[1]]) # alternating weights
# update tree node
npt = len(self.x_tree)
self.act_node['ix'] = np.append(self.act_node['ix'], np.arange(npt-self._n_worker, npt, dtype=int))
if self._n_worker > 1 or (self._n_worker == 1 and self.srs_wgt_pat[0] == self._wgt_pat_bd[0]):
if self.p_val >= 0.1:
# compute p_val
if self._use_eff_npt:
eff_n = eff_npt(self.x_tree[self.act_node['ix']], self.act_node['domain'])
else:
eff_n = len(self.x_tree[self.act_node['ix']])
self.p_val = self.p_val*eff_n**(-self._alpha/float(self._dim))
if self.gSRS_pct == 0: # i.e. pure local SRS
best_Y_prev = np.min(self.y_node)
best_Y_new = np.min(new_y) # minimum of Y values of newly proposed points
if best_Y_prev <= best_Y_new: # failure
self.n_fail += 1 # count failure
else:
self.n_fail = 0
if self.n_fail == self._max_C_fail:
self.n_fail = 0
self.gamma -= self._delta_gamma
self.n_reduce_sigma += 1 # update counter
self.act_node['state']['p'] = self.p_val
self.act_node['state']['Cr'] = self.n_reduce_sigma
self.act_node['state']['Cf'] = self.n_fail
self.act_node['state']['gamma'] = self.gamma
if self.n_reduce_sigma > self._max_n_reduce_sigma:
# then we either restart or zoom-in (i.e., critical state is reached)
Y_fit = self.rbf_mod(self.x_tree[self.act_node['ix']])
min_ix = np.argmin(Y_fit)
x_star = self.x_tree[self.act_node['ix']][min_ix]
# suppose we're going to zoom in
child_node_ix = self.get_child_node(x_star)
if child_node_ix is None:
# then we create a new child (if zoom in)
domain_lb, domain_ub = zip(*self.act_node['domain'])
blen = np.array(domain_ub)-np.array(domain_lb) # bound length for each dimension
assert(np.min(blen)>0)
domain_lb = np.maximum(x_star-self._rho/2.*blen, domain_lb)
domain_ub = np.minimum(x_star+self._rho/2.*blen, domain_ub)
domain = list(zip(domain_lb, domain_ub)) # the list function is used to ensure compatibility of python3
child_node = {'ix': np.nonzero(boxify(self.x_tree, domain)[0])[0],
'domain': domain,
'parent_ix': self.act_node_ix,
'beta': self._init_beta,
'state': self.init_node_state()}
else:
# then we activate an existing child node (if zoom in)
child_node = self.tree[self.zoom_lv+1][child_node_ix]
child_node['ix'] = np.nonzero(boxify(self.x_tree, child_node['domain'])[0])[0]
child_npt = len(child_node['ix'])
domain_lb, domain_ub = zip(*child_node['domain'])
blen = np.array(domain_ub)-np.array(domain_lb) # bound length for each dimension
assert(np.min(blen)>0)
if np.all(blen*child_npt**(-1./self._dim) < (self._prob.domain_ub-self._prob.domain_lb)*self._resol): # resolution condition
# then we restart
if verbose:
specific_msg += 'Restart for the next iteration!\n'
self.i_iter_doe = 0
self.doe_samp = self.doe()
self.i_cycle += 1
self.zoom_lv = 0
self.act_node_ix = 0
self.x_tree = np.zeros((0, self._dim))
self.y_tree = np.zeros(0)
self.tree = self.init_tree()
else:
# then we zoom in
self.act_node['state'] = self.init_node_state() # reset the state of the current node
self.zoom_lv += 1
if child_node_ix is None:
# then we create a new child
if self.zoom_lv not in self.tree.keys():
self.act_node_ix = 0
self.tree[self.zoom_lv] = [child_node]
else:
self.act_node_ix = len(self.tree[self.zoom_lv])
self.tree[self.zoom_lv].append(child_node)
if verbose:
specific_msg += 'Zoom in (created a new child node)!\n'
else:
# then activate existing child node
self.act_node_ix = child_node_ix
# reduce zoom-out probability
child_node['beta'] = max(self._min_beta, child_node['beta']/2.)
if verbose:
specific_msg += 'Zoom in (activated an existing child node)!\n'
if self._n_worker > 1 or (self._n_worker == 1 and self.srs_wgt_pat[0] == self._wgt_pat_bd[0]):
if np.random.uniform() < self.tree[self.zoom_lv][self.act_node_ix]['beta'] and self.zoom_lv > 0 and self.i_iter_doe >= self._n_iter_doe:
# then we zoom out
child_node = self.tree[self.zoom_lv][self.act_node_ix]
self.act_node_ix = child_node['parent_ix']
self.zoom_lv -= 1
assert(self.act_node_ix is not None)
# check that the node after zooming out will contain the current node
assert(domain_intersect(self.tree[self.zoom_lv][self.act_node_ix]['domain'], child_node['domain']) == child_node['domain'])
if verbose:
specific_msg += 'Zoom out!\n'
t2 = default_timer()
t_update = t2-t1
self.t_update_arr = np.append(self.t_update_arr, t_update)
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % t_update+specific_msg)
self.save_state()
def is_done(self):
"""
Check whether we are done with the optimization.
Returns:
done (bool): Indicator.
"""
if self._n_iter is None:
assert(self.i_cycle <= self._n_cycle)
done = self.i_cycle == self._n_cycle
else:
assert(self.i_iter <= self._n_iter)
done = self.i_iter == self._n_iter
return done
def get_child_node(self, x_star):
"""
Get the child node for the optimization tree.
Args:
x_star (1d array): Focal point of zoom-in.
Returns:
child_ix (int or None): Selected child node index.
If None, then we need to create a new child.
"""
assert(x_star.ndim == 1)
# get zoom level of a child node
n_zoom_child = self.zoom_lv+1
if n_zoom_child not in self.tree.keys():
child_ix = None
else:
            # the indices of candidate child nodes
child_node_ix_list = [i for i, c in enumerate(self.tree[n_zoom_child]) if all(boxify(x_star.reshape((1, -1)), c['domain'])[0])]
if len(child_node_ix_list) == 0:
child_ix = None
else:
# find the child node among the candidates, of which the center of the domain is closest to x_star
                dist_list = [np.linalg.norm(np.mean(self.tree[n_zoom_child][i]['domain'], axis=1)-x_star) for i in child_node_ix_list]
child_ix = child_node_ix_list[np.argmin(dist_list)]
return child_ix
def save_state(self):
"""
Save the state of the optimizer to files.
"""
# save state to pickle file
with open(self._state_pkl_temp_file, 'wb') as f:
            pickle.dump(np.random.get_state(), f) # first save to temporary file, preventing data loss due to termination during execution of `pickle.dump`
shutil.copy2(self._state_pkl_temp_file, self._state_pkl_file) # create a new or overwrite the old `self._state_pkl_file`
os.remove(self._state_pkl_temp_file) # remove temporary file
# save state to npz file
np.savez(self._state_npz_temp_file,
# constant parameters
_dim=self._dim, _n_worker=self._n_worker, _n_iter=self._n_iter, _n_cycle = self._n_cycle,
_resume=self._resume, _seed=self._seed, _parallel_training=self._parallel_training, _out_dir=self._out_dir,
_n_cand_fact=self._n_cand_fact, _wgt_pat_bd=self._wgt_pat_bd, _normalize_data=self._normalize_data,
_init_gamma=self._init_gamma, _delta_gamma=self._delta_gamma, _init_sigma=self._init_sigma,
_max_n_reduce_sigma=self._max_n_reduce_sigma, _rho=self._rho, _init_p=self._init_p,
_init_beta=self._init_beta, _min_beta=self._min_beta, _alpha=self._alpha, _lambda_range=self._lambda_range,
_rbf_kernel=self._rbf_kernel, _rbf_poly_deg=self._rbf_poly_deg, _n_fold=self._n_fold, _resol=self._resol,
_use_eff_npt=self._use_eff_npt, _max_C_fail=self._max_C_fail, _n_iter_doe=self._n_iter_doe,
_n_doe_samp=self._n_doe_samp, _n_cand=self._n_cand, _state_npz_file=self._state_npz_file,
_state_pkl_file=self._state_pkl_file, _state_npz_lock_file=self._state_npz_lock_file,
_state_pkl_lock_file=self._state_pkl_lock_file, _state_npz_temp_file=self._state_npz_temp_file,
_state_pkl_temp_file=self._state_pkl_temp_file, _verbose_dot_len=self._verbose_dot_len,
# state variables
i_iter=self.i_iter, i_cycle=self.i_cycle, doe_samp=self.doe_samp, i_iter_doe=self.i_iter_doe,
t_build_arr=self.t_build_arr, t_srs_arr=self.t_srs_arr, t_prop_arr=self.t_prop_arr,
t_eval_arr=self.t_eval_arr, t_update_arr=self.t_update_arr, gSRS_pct_arr=self.gSRS_pct_arr,
zoom_lv_arr=self.zoom_lv_arr, x_tree=self.x_tree, y_tree=self.y_tree, x_all=self.x_all,
y_all=self.y_all, seed_all=self.seed_all, best_x=self.best_x, best_y=self.best_y,
zoom_lv=self.zoom_lv, act_node_ix=self.act_node_ix, srs_wgt_pat=self.srs_wgt_pat, tree=self.tree,
eval_seeds=self.eval_seeds, py_version=sys.version_info[0])
shutil.copy2(self._state_npz_temp_file, self._state_npz_file)
os.remove(self._state_npz_temp_file) # remove temporary file
def load_state(self):
"""
Load the state of the optimizer from files.
"""
# load state data from pkl file
if os.path.isfile(self._state_pkl_lock_file):
os.remove(self._state_pkl_lock_file) # remove lock file, if there's any
try:
with open(self._state_pkl_file, 'rb') as f:
np.random.set_state(pickle.load(f))
except Exception as e:
alter_py = 3 if sys.version_info[0] == 2 else 2 # alternative python version
sys.exit("Error! Unable to load random state from the file: '%s'. \nError message: %s. \nExplanation: this issue was probably caused by inconsistent Python versions (e.g., first ran optimization with Python%d and then resumed the optimization with Python%d). \nSolution: now try to run the code again with Python%d." \
% (self._state_pkl_file, e, alter_py, sys.version_info[0], alter_py))
# load state data from npz file
if os.path.isfile(self._state_npz_lock_file):
os.remove(self._state_npz_lock_file) # remove lock file, if there's any
try:
data = np.load(self._state_npz_file)
except Exception as e:
alter_py = 3 if sys.version_info[0] == 2 else 2 # alternative python version
sys.exit("Error! Unable to load data from the file: '%s'. \nError message: %s. \nExplanation: this issue was probably caused by inconsistent Python versions (e.g., first ran optimization with Python%d and then resumed the optimization with Python%d). \nSolution: now try to run the code again with Python%d." \
% (self._state_npz_file, e, alter_py, sys.version_info[0], alter_py))
# check consistency
assert(self._dim==data['_dim'] and self._n_worker==data['_n_worker'] and self._n_cand_fact==data['_n_cand_fact']
and np.all(self._wgt_pat_bd==data['_wgt_pat_bd']))
#!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
from rdkit import Chem
from rdkit import rdBase
from rdkit.Chem import AllChem
from rdkit import DataStructs
from sklearn import svm
import time
import pickle
import re
import threading
import pexpect
rdBase.DisableLog('rdApp.error')
"""Scoring function should be a class where some tasks that are shared for every call
can be reallocated to the __init__, and has a __call__ method which takes a single SMILES of
argument and returns a float. A multiprocessing class will then spawn workers and divide the
list of SMILES given between them.
Passing *args and **kwargs through a subprocess call is slightly tricky because we need to know
their types - everything will be a string once we have passed it. Therefore, we instead use class
attributes which we can modify in place before any subprocess is created. Any **kwarg left over in
the call to get_scoring_function will be checked against a list of (allowed) kwargs for the class
and if a match is found the value of the item will be the new value for the class.
If num_processes == 0, the scoring function will be run in the main process. Depending on how
demanding the scoring function is and how well the OS handles the multiprocessing, this might
be faster than multiprocessing in some cases."""
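# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): the docstring
# above describes passing **kwargs by overwriting class attributes before any
# subprocess is spawned. A minimal version of that pattern could look like the
# commented helper below; the registry name `scoring_function_classes` and the
# attribute names are assumptions.
#
#   def get_scoring_function(name, **kwargs):
#       cls = scoring_function_classes[name]      # e.g. {'activity_model': ActivityModel, ...}
#       for key, value in kwargs.items():
#           if hasattr(cls, key):                 # only (allowed) kwargs matching a class attribute
#               setattr(cls, key, value)          # are written in place before workers are created
#       return cls()
# ---------------------------------------------------------------------------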
import os
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import time
import numpy as np
import gc
import sys
sys.setrecursionlimit(50000)
import pickle
import random
# torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.nn.Module.dump_patches = True
import copy
import pandas as pd
# then import my own modules
from timeit import default_timer as timer
from AttentiveFP.featurizing import graph_dict as graph
from AttentiveFP.AttentiveLayers import Fingerprint, graph_dataset, null_collate, Graph, Logger, time_to_str
SEED = 168
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
from rdkit import Chem
# from rdkit.Chem import AllChem
from rdkit.Chem import QED
from rdkit.Chem import rdMolDescriptors, MolSurf
from rdkit.Chem.Draw import SimilarityMaps
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from numpy.polynomial.polynomial import polyfit
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import seaborn as sns
from utils import Param
sns.set()
from IPython.display import SVG, display
#import sascorer
class Attentivefp(object):
def __init__(self, filename, **kwargs):
self.batch_size = 50
self.epochs = 200
self.p_dropout = 0.2
self.fingerprint_dim = 128
self.weight_decay = 5 # also known as l2_regularization_lambda
self.learning_rate = 3.5
self.K = 2
self.T = 2
self.param = None
self.data_df = None
self.label_class = None
self.need_gpu = True
self.param = Param(filename,'data/tang')
self.predict_path = 'best'
self.weighted = 'mean'
self.gpu = 'cpu'
for key, value in kwargs.items():
if hasattr(self,key):
setattr(self,key,value)
if self.gpu == 'gpu':
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# cuda_aviable = torch.cuda.is_available()
# device = torch.device(0)
@staticmethod
def pre_data(smiles_list):
#print("number of all smiles: ", len(smiles_list))
atom_num_dist = []
remained_smiles = []
canonical_smiles_list = []
del_smiles_list = []
for smiles in smiles_list:
try:
mol = Chem.MolFromSmiles(smiles)
atom_num_dist.append(len(mol.GetAtoms()))
Chem.SanitizeMol(mol)
Chem.DetectBondStereochemistry(mol, -1)
Chem.AssignStereochemistry(mol, flagPossibleStereoCenters=True, force=True)
Chem.AssignAtomChiralTagsFromStructure(mol, -1)
remained_smiles.append(smiles)
canonical_smiles_list.append(Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True))
except:
print('cannot convert this SMILES: {}'.format(smiles))
del_smiles_list.append(smiles)
#print("number of successfully processed smiles: ", len(remained_smiles))
return del_smiles_list
@staticmethod
def run_data(data_df,name):
smiles_list = data_df.SMILES.values
del_smiles_list = Attentivefp.pre_data(smiles_list) #TODO: changed need debug
data_df = data_df[~data_df.SMILES.isin(del_smiles_list)]
smiles_list = data_df.SMILES.values
label_list = data_df.label.values
graph_dict = graph(smiles_list, label_list, name)
test_df = data_df.sample(frac=0.1, random_state=SEED)
test_smiles = test_df.SMILES.values
training_df = data_df.drop(test_df.index)
training_smiles = training_df.SMILES.values
print('train smiles:{} test smiles:{}'.format(len(training_smiles), len(test_smiles)))
return training_smiles,test_smiles,graph_dict
def val(self, smiles_list, graph_dict, model):
eval_loss_list = []
eval_loader = DataLoader(graph_dataset(smiles_list, graph_dict), self.batch_size, collate_fn=null_collate, num_workers=8,
pin_memory=True, shuffle=False, worker_init_fn=np.random.seed(SEED))
for b, (smiles, atom, bond, bond_index, mol_index, label) in enumerate(eval_loader):
atom = atom.cuda()
bond = bond.cuda()
bond_index = bond_index.cuda()
mol_index = mol_index.cuda()
label = label.cuda()
# if self.param.normalization:
# label = (label - mean_list[0]) / std_list[0]
input = model(atom, bond, bond_index, mol_index)
# if param.multi_task:
# loss_ = MultiLoss()
# loss = loss_(input, label.view(-1, param.task_num))
#
# else:
if self.param.type == 'regression':
loss = F.l1_loss(input, label.view(-1, self.param.output_units_num), reduction='mean')
else:
loss = F.cross_entropy(input, label.squeeze().long(), reduction='mean')
loss = loss.cpu().detach().numpy()
eval_loss_list.extend([loss])
loss = np.array(eval_loss_list).mean()
return loss #if not self.param.normalization else np.array(eval_loss_list) * std_list[0]
def evaluate(self):
data_df, label_class = self.param.get_data()
_ ,test_smiles, graph_dict = Attentivefp.run_data(data_df,self.param.name)
fold = 5
model_list = []
predict_list = []
label_list = []
for i in range(5):
for save_time in [
'2019112710', '2019112712', '2019112713', '2019112808', '2019112810', '2019112811',
'2019112813', '2019112814', '2019112815', '2019112816', '2019112817', '2019112818',
'2019112820','2019112821', '2019112900', '2019120506',
'2019120408',
]:
try:
model_list.append(
torch.load('saved_models/{}/fold_{}_{}_best.pt'.format(self.param.name, str(i), save_time)))
break
except FileNotFoundError:
pass
predict_list.append([])
label_list.append([])
if len(model_list) != 5:
raise FileNotFoundError('not enough model')
eval_loader = DataLoader(graph_dataset(test_smiles, graph_dict), self.batch_size, collate_fn=null_collate,
num_workers=8,
pin_memory=True, shuffle=False, worker_init_fn=np.random.seed(SEED))
for num, model in enumerate(model_list):
model.eval()
for b, (smiles, atom, bond, bond_index, mol_index, label) in enumerate(eval_loader):
atom = atom.cuda()
bond = bond.cuda()
bond_index = bond_index.cuda()
mol_index = mol_index.cuda()
label = label.cuda()
mol_prediction = model(atom, bond, bond_index, mol_index)
predict_list[num].extend(mol_prediction.squeeze(dim=1).detach().cpu().numpy())
label_list[num].extend(label.squeeze(dim=1).detach().cpu().numpy())
# print(predict.list)
label = np.array(label_list).sum(axis=0) / fold
from sklearn.linear_model import Ridge, LogisticRegression
if self.param.type == 'regression':
predict_mean = np.array(predict_list)
import nltk
import numpy as np
import tensorflow as tf
from nltk.stem import WordNetLemmatizer
import json
from google.colab import files
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dropout, Activation, Dense, Flatten
import pickle
from nltk import punkt
import random
#print(pickle.format_version)
nltk.download('punkt')
nltk.download('wordnet')
#uploaded=files.upload()
with open('chatbot_intents.json') as file:
data=json.load(file,strict=False)
print(data['intents'])
lemm=WordNetLemmatizer()
words=[]
labels=[]
x=[]
y=[]
for intent in data['intents']:
for pattern in intent['patterns']:
w=nltk.word_tokenize(pattern)
words.extend(w)
x.append((w,intent['tag']))
if intent['tag'] not in labels:
labels.append(intent['tag'])
words = [lemm.lemmatize(i.lower()) for i in words if i != '?']
words=sorted(list(set(words)))
labels=sorted(list(set(labels)))
print(len(words))
print(len(labels))
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(labels,open('labels.pkl','wb'))
train=[]
output=[0]*len(labels)
print(len(x))
for doc in x:
bag=[]
pattern_w=doc[0]
pattern_w=[lemm.lemmatize(w.lower()) for w in pattern_w]
for w in words:
if w in pattern_w:
bag.append(1)
else:
bag.append(0)
output_row=list(output)
output_row[labels.index(doc[1])]=1
train.append((bag,output_row))
random.shuffle(train)
train=np.array(train)
train_x=list(train[:,0])
train_y=list(train[:,1])
train=np.array(train)
output=np.array(output)
model=Sequential()
model.add(Dense(64,input_shape=(len(train_x[0]),),activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.35))
model.add(Dense(len(train_y[0]),activation='softmax'))
model.compile(optimizer='adam',metrics=['accuracy'],loss='categorical_crossentropy')
train_x=np.asarray(train_x)
train_y=np.asarray(train_y)
"""
Functions are useful statistical untilities for data processing in the ANN
Notes
-----
Author : <NAME>
Date : 15 July 2020
Usage
-----
[1] rmse(a,b)
[2] pickSmileModels(data,modelGCMs,pickSMILE)
[3] remove_annual_mean(data,data_obs,lats,lons,lats_obs,lons_obs)
[4] remove_merid_mean(data,data_obs)
[5] remove_observations_mean(data,data_obs,lats,lons)
[6] calculate_anomalies(data,data_obs,lats,lons,baseline,yearsall)
[7] remove_ensemble_mean(data,ravel_modelens,ravelmodeltime,rm_standard_dev,numOfEns)
[8] remove_ocean(data,data_obs)
[9] remove_land(data,data_obs)
[10] standardize_data(Xtrain,Xtest)
[11] standardize_dataSEPARATE(Xtrain,Xtest):
[12] rm_standard_dev(var,window,ravelmodeltime,numOfEns)
[13] rm_variance_dev(var,window)
[14] addNoiseTwinSingle(data,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)
[15] smoothedEnsembles(data,lat_bounds,lon_bounds)
"""
def rmse(a,b):
"""
Calculates the root mean squared error
takes two variables, a and b, and returns value
"""
### Import modules
import numpy as np
### Calculate RMSE
rmse_stat = np.sqrt(np.mean((a - b)**2))
return rmse_stat
###############################################################################
def pickSmileModels(data,modelGCMs,pickSMILE):
"""
Select models to analyze if using a subset
"""
### Pick return indices of models
lenOfPicks = len(pickSMILE)
indModels = [i for i, item in enumerate(modelGCMs) if item in pickSMILE]
### Slice data
if data.shape[0] == len(modelGCMs):
if len(indModels) == lenOfPicks:
modelSelected = data[indModels]
else:
print(ValueError('Something is wrong with the indexing of the models!'))
else:
print(ValueError('Something is wrong with the order of the data!'))
return modelSelected
###############################################################################
def remove_annual_mean(data,data_obs,lats,lons,lats_obs,lons_obs):
"""
Removes annual mean from data set
"""
### Import modules
import numpy as np
import calc_Utilities as UT
### Create 2d grid
lons2,lats2 = np.meshgrid(lons,lats)
lons2_obs,lats2_obs = np.meshgrid(lons_obs,lats_obs)
### Calculate weighted average and remove mean
data = data - UT.calc_weightedAve(data,lats2)[:,:,:,np.newaxis,np.newaxis]
data_obs = data_obs - UT.calc_weightedAve(data_obs,lats2_obs)[:,np.newaxis,np.newaxis]
return data,data_obs
###############################################################################
def remove_merid_mean(data,data_obs,lats,lons,lats_obs,lons_obs):
"""
Removes meridional mean from data set
"""
### Import modules
import numpy as np
### Remove mean of latitude
data = data - np.nanmean(data,axis=3)[:,:,:,np.newaxis,:]
data_obs = data_obs - np.nanmean(data_obs,axis=1)[:,np.newaxis,:]
return data,data_obs
###############################################################################
def remove_observations_mean(data,data_obs,lats,lons):
"""
Removes observations to calculate model biases
"""
### Import modules
import numpy as np
### Remove observational data
databias = data - data_obs[np.newaxis,np.newaxis,:,:,:]
return databias
###############################################################################
def calculate_anomalies(data,data_obs,lats,lons,baseline,yearsall):
"""
Calculates anomalies for each model and observational data set. Note that
it currently assumes the years given by yearsall
"""
### Import modules
import numpy as np
### Select years to slice
minyr = baseline.min()
maxyr = baseline.max()
yearq = np.where((yearsall >= minyr) & (yearsall <= maxyr))[0]
if data.ndim == 5:
### Slice years
modelnew = data[:,:,yearq,:,:]
obsnew = data_obs[yearq,:,:]
### Average climatology
meanmodel = np.nanmean(modelnew[:,:,:,:,:],axis=2)
meanobs = np.nanmean(obsnew,axis=0)
### Calculate anomalies
modelanom = data[:,:,:,:,:] - meanmodel[:,:,np.newaxis,:,:]
obsanom = data_obs[:,:,:] - meanobs[:,:]
else:
obsnew = data_obs[yearq,:,:]
### Average climatology
meanobs = np.nanmean(obsnew,axis=0)
### Calculate anomalies
obsanom = data_obs[:,:,:] - meanobs[:,:]
modelanom = np.nan
print('NO MODEL ANOMALIES DUE TO SHAPE SIZE!!!')
return modelanom,obsanom
###############################################################################
def remove_ensemble_mean(data,ravel_modelens,ravelmodeltime,rm_standard_dev,numOfEns):
"""
Removes ensemble mean
"""
### Import modules
import numpy as np
### Remove ensemble mean
if data.ndim == 4:
datameangoneq = data - np.nanmean(data,axis=0)
elif data.ndim == 5:
ensmeanmodel = np.nanmean(data,axis=1)
datameangoneq = np.empty((data.shape))
for i in range(data.shape[0]):
datameangoneq[i,:,:,:,:] = data[i,:,:,:,:] - ensmeanmodel[i,:,:,:]
print('Completed: Ensemble mean removed for model %s!' % (i+1))
if ravel_modelens == True:
datameangone = np.reshape(datameangoneq,(datameangoneq.shape[0]*datameangoneq.shape[1],
datameangoneq.shape[2],
datameangoneq.shape[3],
datameangoneq.shape[4]))
else:
datameangone = datameangoneq
if rm_standard_dev == False:
if ravelmodeltime == True:
datameangone = np.reshape(datameangoneq,(datameangoneq.shape[0]*datameangoneq.shape[1]*datameangoneq.shape[2],
datameangoneq.shape[3],
datameangoneq.shape[4]))
else:
datameangone = datameangoneq
return datameangone
###############################################################################
def remove_ocean(data,data_obs,lat_bounds,lon_bounds):
"""
Masks out the ocean for land_only == True
"""
### Import modules
import numpy as np
from netCDF4 import Dataset
import calc_dataFunctions as df
### Read in land mask
directorydata = '/Users/zlabe/Data/masks/'
filename = 'lsmask_19x25.nc'
datafile = Dataset(directorydata + filename)
maskq = datafile.variables['nmask'][:]
lats = datafile.variables['latitude'][:]
lons = datafile.variables['longitude'][:]
datafile.close()
mask,lats,lons = df.getRegion(maskq,lats,lons,lat_bounds,lon_bounds)
### Mask out model and observations
datamask = data * mask
data_obsmask = data_obs * mask
### Check for floats
datamask[np.where(datamask==0.)] = 0
data_obsmask[np.where(data_obsmask==0.)] = 0
return datamask, data_obsmask
###############################################################################
def remove_land(data,data_obs,lat_bounds,lon_bounds):
"""
Masks out the ocean for ocean_only == True
"""
### Import modules
import numpy as np
from netCDF4 import Dataset
import calc_dataFunctions as df
### Read in ocean mask
directorydata = '/Users/zlabe/Data/masks/'
filename = 'ocmask_19x25.nc'
datafile = Dataset(directorydata + filename)
maskq = datafile.variables['nmask'][:]
lats = datafile.variables['latitude'][:]
lons = datafile.variables['longitude'][:]
datafile.close()
mask,lats,lons = df.getRegion(maskq,lats,lons,lat_bounds,lon_bounds)
### Mask out model and observations
datamask = data * mask
data_obsmask = data_obs * mask
### Check for floats
datamask[np.where(datamask==0.)] = 0
data_obsmask[np.where(data_obsmask==0.)] = 0
return datamask, data_obsmask
###############################################################################
def standardize_data(Xtrain,Xtest):
"""
Standardizes training and testing data
"""
### Import modules
import numpy as np
Xmean = np.mean(Xtrain,axis=0)
Xstd = np.std(Xtrain,axis=0)
Xtest = (Xtest - Xmean)/Xstd
Xtrain = (Xtrain - Xmean)/Xstd
stdVals = (Xmean,Xstd)
stdVals = stdVals[:]
### If there is a nan (like for land/ocean masks)
if np.isnan(np.min(Xtrain)) == True:
Xtrain[np.isnan(Xtrain)] = 0
Xtest[np.isnan(Xtest)] = 0
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
__author__ = 'ernesto'
# whether to use latex or mathtext
rc('text', usetex=False)
rc('mathtext', fontset='cm')
# auxiliary function for plotting ticks of equal length on the x and y axes despite their different scales.
def convert_display_to_data_coordinates(transData, length=10):
# create a transform which will take from display to data coordinates
inv = transData.inverted()
# transform from display coordinates to data coordinates in x axis
data_coords = inv.transform([(0, 0), (length, 0)])
# get the length of the segment in data units
yticks_len = data_coords[1, 0] - data_coords[0, 0]
# transform from display coordinates to data coordinates in y axis
data_coords = inv.transform([(0, 0), (0, length)])
# get the length of the segment in data units
xticks_len = data_coords[1, 1] - data_coords[0, 1]
return xticks_len, yticks_len
#####################################
# PARAMETERS - This can be modified #
#####################################
# upper limit of the uniform density
beta = 10
# number of samples
N = 100
# number of experiments
M = 10000
#####################
# END OF PARAMETERS #
#####################
estimaciones1 = np.zeros((M, ))
estimaciones2 = np.zeros((M, ))
np.random.seed(4)
for i in np.arange(M):
x = np.random.uniform(low=0, high=beta, size=(N, ))
estimaciones1[i] = (N+1) / (2 * N) * np.amax(x)
estimaciones2[i] = np.mean(x)
# abscissa values
dbeta = 1
xmin = beta / 2 - dbeta
xmax = beta / 2 + dbeta
bin_edges = np.linspace(xmin, xmax, 60)
hist1, bins = np.histogram(estimaciones1, bin_edges, density=False)
hist2, bins = np.histogram(estimaciones2, bin_edges, density=False)
width = 0.9 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
# equal scales in both plots
fs = np.amax(hist1)/np.amax(hist2)
print(np.amax(hist2))
# axis parameters
dx = 0.1
xmin_ax = xmin - dx
xmax_ax = xmax + dx
dy = 1000
ymax_ax = np.amax(hist1) + dy
ymin_ax = -dy
# length of the ticks for all subplot (6 pixels)
display_length = 6 # in pixels
# x ticks labels margin
xtm = -1000
ytm = 0.03
# font size
fontsize = 12
fig = plt.figure(0, figsize=(9, 5), frameon=False)
# PLOT OF P_ww
ax = fig.add_subplot(211)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(xmin, ymin_ax), xycoords='data', xy=(xmin, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.bar(center, hist1, align='center', width=width, ec='k', fc='b')
# xlabels and xtickslabels
plt.text(xmax_ax, xtm, '$\hat{\\theta}_1$', fontsize=fontsize, ha='left', va='baseline')
xts = np.arange(4.25, 6.1, 0.25)
for xt in xts:
plt.plot([xt, xt], [0, xtl], 'k')
plt.text(xt, xtm, '${:.2f}$'.format(xt), fontsize=fontsize, ha='center', va='baseline')
yts = np.arange(1000, 5001, 1000)
import numpy as np
def solve_power(self, LUT, Rs):
"""Solve EEC to achieve given input / output power with respect to voltage and current constraints
Parameters
----------
self : ElecLUTdq
a ElecLUTdq object
LUT : LUTdq
Calculated look-up table
Rs: float
Stator phase resistance [Ohm]
Returns
----------
out_dict: dict
Dict containing all output quantities
"""
# Get output, machine and OP
output = self.parent.parent
machine = output.simu.machine
OP = output.elec.OP
# Maximum voltage
Urms_max = self.Urms_max
# Maximum current
Irms_max = self.Irms_max
# Electrical frequency
felec = OP.get_felec()
# Electrical pulsation
ws = 2 * np.pi * felec
# Stator winding number of phases
qs = machine.stator.winding.qs
# Check if there is a loss model
is_loss_model = LUT.simu.loss is not None
# iteration until convergence is reached, and max number of iterations on EEC
delta_Pem = 1e10
delta_Pem_max = 0.1
Nmax = 20
niter_Pem = 1
Id_min = self.Id_min
Id_max = self.Id_max
Iq_min = self.Iq_min
Iq_max = self.Iq_max
Nd = (
self.n_Id
if self.n_Id == 1
else int(self.n_Id * self.n_interp / (self.n_Id + self.n_Iq))
)
Nq = (
self.n_Iq
if self.n_Iq == 1
else int(self.n_Iq * self.n_interp / (self.n_Id + self.n_Iq))
)
while abs(delta_Pem) > delta_Pem_max and niter_Pem < Nmax:
# Refine Id/Iq mesh
Id_vect = np.linspace(Id_min, Id_max, Nd)
#encoding=utf-8
import numpy as np
from utils import cpm_utils
import cv2
import time
import math
import sys
import os
import imageio
import tensorflow as tf
from models.nets import cpm_body
import warnings
warnings.filterwarnings("ignore")
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
"""Parameters
"""
base_path = "demo"
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('DEMO_TYPE',
default_value='HM',
# default_value='test_imgs/single_gym.mp4',
# default_value='SINGLE',
docstring='MULTI: show multiple stage,'
'SINGLE: only last stage,'
'HM: show last stage heatmap,')
tf.app.flags.DEFINE_string('img_path',
default_value='test_imgs/golf.jpg',
docstring="Image to test")
tf.app.flags.DEFINE_string('model_path',
default_value='models/weights/cpm_body.pkl',
docstring='Your model')
tf.app.flags.DEFINE_integer('input_size',
default_value=368,
docstring='Input image size')
tf.app.flags.DEFINE_integer('hmap_size',
default_value=46,
docstring='Output heatmap size')
tf.app.flags.DEFINE_integer('cmap_radius',
default_value=40,
docstring='Center map gaussian variance')
tf.app.flags.DEFINE_integer('joints',
# default_value=14,
default_value=15,
docstring='Number of joints')
tf.app.flags.DEFINE_integer('stages',
default_value=6,
docstring='How many CPM stages')
# Set color for each finger
joint_color_code = [[139, 53, 255],
[0, 56, 255],
[43, 140, 237],
[37, 168, 36],
[147, 147, 0],
[70, 17, 145]]
limbs = [[0, 1],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[8, 9],
[9, 10],
[11, 12],
[12, 13]]
def mgray(test_img_resize, test_img):
test_img_resize = np.dot(test_img_resize[..., :3], [0.299, 0.587, 0.114]).reshape(
(FLAGS.input_size, FLAGS.input_size, 1))
# cv2.imshow('color', test_img.astype(np.uint8))
# cv2.imshow('gray', test_img_resize.astype(np.uint8))
cv2.imwrite(os.path.join(base_path, "color_.jpg"), test_img.astype(np.uint8))
cv2.imwrite(os.path.join(base_path, "gray_.jpg"), test_img_resize.astype(np.uint8))
cv2.waitKey(1)
return test_img_resize
def main(argv):
# tf_device = '/gpu:0'
tf_device = '/cpu:0'
with tf.device(tf_device):
"""Build graph
"""
input_data = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.input_size, FLAGS.input_size, 3], name='input_image')
center_map = tf.placeholder(dtype=tf.float32,
shape=[None, FLAGS.input_size, FLAGS.input_size, 1], # the center map is also 368x368
name='center_map')
# model = cpm_body.CPM_Model(FLAGS.stages, FLAGS.joints + 1)
model = cpm_body.CPM_Model(FLAGS.stages, FLAGS.joints) # stages and joints are parameters passed in; joints should match the number of keypoints in the training set
model.build_model(input_data, center_map, 1)
saver = tf.train.Saver()
"""Create session and restore weights
"""
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if FLAGS.model_path.endswith('pkl'):
model.load_weights_from_file(FLAGS.model_path, sess, False)
else:
saver.restore(sess, FLAGS.model_path)
test_center_map = cpm_utils.gaussian_img(FLAGS.input_size, FLAGS.input_size, FLAGS.input_size/2, # at prediction time the person must be placed at the image center; the center map here is built around the image center
FLAGS.input_size/2, FLAGS.cmap_radius) # if the person is not centered, problems will appear
test_center_map = np.reshape(test_center_map, [1, FLAGS.input_size, FLAGS.input_size, 1]) # add batch and channel dims of size 1; center map is now 1x368x368x1
# Check weights
for variable in tf.trainable_variables():
with tf.variable_scope('', reuse=True):
var = tf.get_variable(variable.name.split(':0')[0])
print(variable.name, np.mean(sess.run(var))) # this loop prints the model parameters, including convolution kernel and bias values
with tf.device(tf_device):
# while True:
test_img_t = time.time()
test_img = cpm_utils.read_image(FLAGS.img_path, [], FLAGS.input_size, 'IMAGE') # build the prediction input; [] is the video output, unused here; test_img is 368x368x3
test_img_resize = cv2.resize(test_img, (FLAGS.input_size, FLAGS.input_size)) # could be removed, not needed
print('img read time %f' % (time.time() - test_img_t))
test_img_input = test_img_resize / 256.0 - 0.5 # normalize to [-0.5, 0.5]
test_img_input = np.expand_dims(test_img_input, axis=0) # add a batch dim of 1; test_img_input is now 1x368x368x3
if FLAGS.DEMO_TYPE == 'MULTI':
# Inference
fps_t = time.time()
predict_heatmap, stage_heatmap_np = sess.run([model.current_heatmap,
model.stage_heatmap,
],
feed_dict={'input_image:0': test_img_input,
'center_map:0': test_center_map})
# Show visualized image
demo_img = visualize_result(test_img, FLAGS, stage_heatmap_np, None)
# cv2.imshow('demo_img', demo_img.astype(np.uint8))
cv2.imwrite(os.path.join(base_path, str(time.time())+"_.jpg"), demo_img.astype(np.uint8))
# if cv2.waitKey(1) == ord('q'): break
print('fps: %.2f' % (1 / (time.time() - fps_t)))
elif FLAGS.DEMO_TYPE == 'SINGLE':
# Inference
fps_t = time.time()
stage_heatmap_np = sess.run([model.stage_heatmap[5]],
feed_dict={'input_image:0': test_img_input,
'center_map:0': test_center_map})
# Show visualized image
demo_img = visualize_result(test_img, FLAGS, stage_heatmap_np, None)
# cv2.imshow('current heatmap', (demo_img).astype(np.uint8))
cv2.imwrite(os.path.join(base_path, "current_heatmap.jpg"), (demo_img).astype(np.uint8))
# if cv2.waitKey(1) == ord('q'): break
print('fps: %.2f' % (1 / (time.time() - fps_t)))
elif FLAGS.DEMO_TYPE == 'HM':
# Inference
fps_t = time.time()
stage_heatmap_np = sess.run([model.stage_heatmap[FLAGS.stages - 1]], # heatmap of the last stage; the list has length 1 because there is only one person
feed_dict={'input_image:0': test_img_input, # stage_heatmap_np[0] has shape [1, 46, 46, 15]
'center_map:0': test_center_map})
print('fps: %.2f' % (1 / (time.time() - fps_t)))
# demo_stage_heatmap = stage_heatmap_np[len(stage_heatmap_np) - 1][0, :, :, 0:FLAGS.joints].reshape(
# (FLAGS.hmap_size, FLAGS.hmap_size, FLAGS.joints))
demo_stage_heatmap = stage_heatmap_np[-1][0, :, :, 0:FLAGS.joints].reshape( # slice out the valid heatmaps and drop the background; result: 46x46x14
(FLAGS.hmap_size, FLAGS.hmap_size, FLAGS.joints))
demo_stage_heatmap = cv2.resize(demo_stage_heatmap, (FLAGS.input_size, FLAGS.input_size)) # the heatmap becomes 368x368x14
vertical_imgs = [] # below, the heatmap display image is stitched together
tmp_img = None
joint_coord_set = np.zeros((FLAGS.joints, 2))
for joint_num in range(FLAGS.joints):
# Concat until 4 img
if (joint_num % 4) == 0 and joint_num != 0:
vertical_imgs.append(tmp_img)
tmp_img = None
demo_stage_heatmap[:, :, joint_num] *= (255 / np.max(demo_stage_heatmap[:, :, joint_num])) # scale each heatmap channel so its maximum becomes 255; 14 channels in total
# Plot color joints
if np.min(demo_stage_heatmap[:, :, joint_num])
#**************Importing Required Libraries*************
import numpy as np
import pandas as pd
from numpy.linalg import inv
import matplotlib.pyplot as plt
#*************Declare Variables**************************
#Read Input File
measurements = pd.read_csv('obj_pose-laser-radar-synthetic-input.txt', header=None, delim_whitespace = True, skiprows=1)
# Manually copy initial readings from the first row of the input file.
prv_time = 1477010443000000/1000000.0
x = np.array([
[0.312242],
[0.5803398],
[0],
[0]
])
#Initialize variables to store ground truth and RMSE values
ground_truth = np.zeros([4, 1])
rmse = np.zeros([4, 1])
#Initialize matrices P and A
P = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1000, 0],
[0, 0, 0, 1000]
])
A = np.array([
[1.0, 0, 1.0, 0],
[0, 1.0, 0, 1.0],
[0, 0, 1.0, 0],
[0, 0, 0, 1.0]
])
H = np.array([
[1.0, 0, 0, 0],
[0, 1.0, 0, 0]
])
I = np.identity(4)
z_lidar = np.zeros([2, 1])
R = np.array([
[0.0225, 0],
[0, 0.0225]
])
noise_ax = 5
noise_ay = 5
Q = np.zeros([4, 4])
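#Sketch of the standard linear Kalman filter cycle that the matrices above feed
#(illustrative only, not part of the original script; the layout of each
#measurement row and the loop structure are assumptions):
#
#   for each lidar measurement z_lidar (shape 2x1):
#       # predict
#       x = A @ x
#       P = A @ P @ A.T + Q
#       # update
#       y = z_lidar - H @ x           # innovation
#       S = H @ P @ H.T + R
#       K = P @ H.T @ inv(S)          # Kalman gain
#       x = x + K @ y
#       P = (I - K @ H) @ P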
"""{{ cookiecutter.project }} PSII analysis."""
# %% Setup
# Export .png to outdir from LemnaBase using LT-db_extractor.py
from plantcv import plantcv as pcv
import cppcpyutils as cppc
import importlib
import os
import cv2 as cv2
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import warnings
import importlib
from skimage import filters
from skimage import morphology
from skimage import segmentation
warnings.filterwarnings("ignore", module="matplotlib")
warnings.filterwarnings("ignore", module='plotnine')
# %% io directories
indir = os.path.join('data', 'psII')
# snapshotdir = indir
outdir = os.path.join('output', 'psII')
debugdir = os.path.join('debug', 'psII')
maskdir = os.path.join(outdir, 'masks')
fluordir = os.path.join(outdir, 'fluorescence')
os.makedirs(outdir, exist_ok=True)
os.makedirs(debugdir, exist_ok=True)
os.makedirs(maskdir, exist_ok=True)
outfile = os.path.join(outdir,'output_psII_level0.csv')
# %% pixel resolution
# mm (this is approx and should only be used for scalebar)
cppc.pixelresolution = 0.3
# %% Import tif file information based on the filenames. If extract_frames=True it will save each frame from the multiframe TIF to a separate file in data/pimframes/ with a numeric suffix
fdf = cppc.io.import_snapshots(indir, camera='psii')
# %% Define the frames from the PSII measurements and merge this information with the filename information
pimframes = pd.read_csv(os.path.join('data', 'pimframes_map.csv'),
skipinitialspace=True)
# this eliminates weird whitespace around any of the character fields
fdf_dark = (pd.merge(fdf.reset_index(), pimframes, on=['frameid'],
how='right'))
# %% remove absorptivity measurements which are blank images
# also remove Ft_FRon measurements. THere is no Far Red light.
df = (fdf_dark.query(
'~parameter.str.contains("Abs") and ~parameter.str.contains("FRon")',
engine='python'))
# %% remove the duplicate Fm and Fo frames where frame = Fmp and Fp from frameid 5,6
df = (df.query(
'(parameter!="FvFm") or (parameter=="FvFm" and (frame=="Fo" or frame=="Fm") )'
))
# %% Arrange dataframe of metadata so Fv/Fm comes first
param_order = pimframes.parameter.unique()
df['parameter'] = pd.Categorical(df.parameter,
categories=param_order,
ordered=True)
# %% Check for existing output file and only analyze new files
newheader = True
defaultcols = df.columns.values.tolist()
if os.path.exists(outfile):
# reading existing results file
existingdf = pd.read_csv(outfile)
# format dates consistently and NOT in datetime format because pandas doesn't handle datetimes well in merges(!?)...
existingdf.jobdate = pd.to_datetime(existingdf.jobdate).dt.strftime('%Y-%m-%d')
df.loc[:,'jobdate'] = df.jobdate.dt.strftime('%Y-%m-%d')
# set common index
mergecols = ['plantbarcode','jobdate','frame','frameid','parameter']
df = df.set_index(mergecols)
existingdf = existingdf.set_index(mergecols)
# filter and sort df
df = df[~df.index.isin(existingdf.index)].reset_index()
df.jobdate = pd.to_datetime(df.jobdate) #compatiability later
df = df[defaultcols]
newheader = False
# %% Setup Debug parameters
# pcv.params.debug can be 'plot', 'print', or 'None'. 'plot' is useful if you are testing your pipeline over a few samples so you can see each step.
pcv.params.debug = 'plot' # 'print' #'plot', 'None'
# Figures will show 9x9inches which fits my monitor well.
plt.rcParams["figure.figsize"] = (9, 9)
# plt.rcParams["font.family"] = "Arial" # All text is Arial
ilegend = 1
# %% The main analysis function
# This function takes a dataframe of metadata that was created above. We loop through each pair of images to compute photosynthetic parameters
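# A sketch of how this function is typically applied (illustrative only; the
# exact grouping columns are an assumption based on the metadata built above):
#   df.groupby(['plantbarcode', 'jobdate', 'parameter']).apply(image_avg)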
def image_avg(fundf):
# don't understand why the import suddenly needs to be inside the function
# import cv2 as cv2
# import numpy as np
# import pandas as pd
# import os
# from matplotlib import pyplot as plt
# from skimage import filters
# from skimage import morphology
# from skimage import segmentation
# Predefine some variables
global c, h, roi_c, roi_h, ilegend, mask_Fm, fn_Fm
# Get the filename for minimum and maximum fluoresence
fn_min = fundf.query('frame == "Fo" or frame == "Fp"').filename.values[0]
fn_max = fundf.query('frame == "Fm" or frame == "Fmp"').filename.values[0]
# Get the parameter name that links these 2 frames
param_name = fundf['parameter'].iloc[0]
# Create a new output filename that combines existing filename with parameter
outfn = os.path.splitext(os.path.basename(fn_max))[0]
outfn_split = outfn.split('-')
# outfn_split[2] = datetime.strptime(fundf.jobdate.values[0],'%Y-%m-%d').strftime('%Y%m%d')
outfn_split[2] = fundf.jobdate.dt.strftime('%Y%m%d').values[0]
basefn = "-".join(outfn_split[0:-1])
outfn_split[-1] = param_name
outfn = "-".join(outfn_split)
print(outfn)
# Make some directories based on sample id to keep output organized
plantbarcode = outfn_split[0]
fmaxdir = os.path.join(fluordir, plantbarcode)
os.makedirs(fmaxdir, exist_ok=True)
# If debug mode is 'print', create a specific debug dir for each pim file
if pcv.params.debug == 'print':
debug_outdir = os.path.join(debugdir, outfn)
os.makedirs(debug_outdir, exist_ok=True)
pcv.params.debug_outdir = debug_outdir
# read images and create mask from max fluorescence
# read image as is. only gray values in PSII images
imgmin, _, _ = pcv.readimage(fn_min)
img, _, _ = pcv.readimage(fn_max)
fdark = np.zeros_like(img)
out_flt = fdark.astype('float32') # <- needs to be float32 for imwrite
if param_name == 'FvFm':
# save max fluorescence filename
fn_Fm = fn_max
# create mask
# #create black mask over lower half of image to threshold upper plant only
# img_half, _, _, _ = pcv.rectangle_mask(img, p1=(0,321), p2=(480,640))
# # mask1 = pcv.threshold.otsu(img_half,255)
# algaethresh = filters.threshold_otsu(image=img_half)
# mask0 = pcv.threshold.binary(img_half, algaethresh, 255, 'light')
# # create black mask over upper half of image to threshold lower plant only
# img_half, _, _, _ = pcv.rectangle_mask(img, p1=(0, 0), p2=(480, 319), color='black')
# # mask0 = pcv.threshold.otsu(img_half,255)
# algaethresh = filters.threshold_otsu(image=img_half)
# mask1 = pcv.threshold.binary(img_half, algaethresh, 255, 'light')
# mask = pcv.logical_xor(mask0, mask1)
# # mask = pcv.dilate(mask, 2, 1)
# mask = pcv.fill(mask, 350)
# mask = pcv.erode(mask, 2, 2)
# mask = pcv.erode(mask, 2, 1)
# mask = pcv.fill(mask, 100)
# otsuT = filters.threshold_otsu(img)
# # sigma=(k-1)/6. This is because the length for 99 percentile of gaussian pdf is 6sigma.
# k = int(2 * np.ceil(3 * otsuT) + 1)
# gb = pcv.gaussian_blur(img, ksize = (k,k), sigma_x = otsuT)
# mask = img >= gb + 10
# pcv.plot_image(mask)
# local_otsu = filters.rank.otsu(img, pcv.get_kernel((9,9), 'rectangle'))#morphology.disk(2))
# thresh_image = img >= local_otsu
# #_------> start of mask
# elevation_map = filters.sobel(img)
# # pcv.plot_image(elevation_map)
thresh = filters.threshold_otsu(image=img)
# # thresh = 50
# markers = np.zeros_like(img, dtype='uint8')
# markers[img > thresh + 8] = 2
# markers[img <= thresh + 8] = 1
# # pcv.plot_image(markers,cmap=plt.cm.nipy_spectral)
# mask = segmentation.watershed(elevation_map, markers)
# mask = mask.astype(np.uint8)
# # pcv.plot_image(mask)
# mask[mask == 1] = 0
# mask[mask == 2] = 1
# # pcv.plot_image(mask, cmap=plt.cm.nipy_spectral)
# mask = pcv.erode(mask, 2, 1)
mask = pcv.threshold.binary(img, thresh, 255)
if len(np.unique(mask))!=1:
mask = pcv.fill(mask, 100)
# pcv.plot_image(mask, cmap=plt.cm.nipy_spectral)
# <----------- end of masking
# roi needs to be defined regardless of mask
roi_c, roi_h = pcv.roi.multi(img,
coord=(250, 200),
radius=70,
spacing=(0, 220),
ncols=1,
nrows=2)
if len(np.unique(mask)) == 1:
c = []
YII = mask
NPQ = mask
newmask = mask
else:
# find objects and setup roi
c, h = pcv.find_objects(img, mask)
# setup individual roi plant masks
newmask = np.zeros_like(mask)
# compute fv/fm and save to file
YII, hist_fvfm = pcv.photosynthesis.analyze_fvfm(fdark=fdark,
fmin=imgmin,
fmax=img,
mask=mask,
bins=128)
# YII = np.divide(Fv,
# img,
# out=out_flt.copy(),
# where=np.logical_and(mask > 0, img > 0))
# NPQ is 0
NPQ = np.zeros_like(YII)
# cv2.imwrite(os.path.join(fmaxdir, outfn + '-fvfm.tif'), YII)
# print Fm - will need this later
# cv2.imwrite(os.path.join(fmaxdir, outfn + '-fmax.tif'), img)
# NPQ will always be an array of 0s
else: # compute YII and NPQ if parameter is other than FvFm
newmask = mask_Fm
# use cv2 to read the image because pcv.readimage would save input_image.png, overwriting img
# newmask = cv2.imread(os.path.join(maskdir, basefn + '-FvFm-mask.png'),-1)
if len(np.unique(newmask))
import numpy as np
import random
def create_field(x = 5, y = 5):
return np.zeros((y,x), dtype= int)
def rand_array(a, num = 6):
i = j = n = 0
x = a.shape[0]
y = a.shape[1]
row = random.sample(range(1, x*y), num)
for i in range (x):
for j in range(y):
n += 1
if n in row:
a[i,j] = 9
def modify (a):
x = a.shape[0]
y = a.shape[1]
#insert row
row = np.zeros(y,dtype= int)
a = np.insert(a,x,[row],axis = 0)
a = np.insert(a,0,[row],axis = 0)
# rotate
a = np.rot90(a,1)
#insert row
x = a.shape[0]
y = a.shape[1]
row = np.zeros(y,dtype= int)
a = np.insert(a,x,[row],axis = 0)
a = np.insert(a,0,[row],axis = 0)
a = np.rot90(a,3)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import numpy as np
from NumPyNet.utils import check_is_fitted
from NumPyNet.layers.base import BaseLayer
__author__ = ['<NAME>', '<NAME>']
__email__ = ['<EMAIL>', '<EMAIL>']
class BatchNorm_layer(BaseLayer):
'''
BatchNormalization Layer
It performs a Normalization over the Batch axis of the Input.
Both scales and bias are trainable weights.
Equation:
output = scales * input_normalized + bias
Parameters
----------
scales : array-like (default=None)
Starting scale to be multiplied to the normalized input, array-like of shape (w, h, c).
If None, the array will be initialized with ones.
bias : array-like (default=None)
Bias to be added to the multiplication of scale and normalized input of shape (w, h, c).
If None, the array will be initialized with zeros.
input_shape : tuple (default=None)
Shape of the input in the format (batch, w, h, c), None is used when the layer is part of a Network model.
Example
-------
>>> import os
>>>
>>> import pylab as plt
>>> from PIL import Image
>>>
>>> img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
>>> float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
>>>
>>> # I need to load at least two images, or make a copy of it
>>> filename = os.path.join(os.path.dirname('__file__'), '..', '..', 'data', 'dog.jpg')
>>> inpt = np.asarray(Image.open(filename), dtype=float)
>>> inpt.setflags(write=1)
>>> w, h, c = inpt.shape
>>>
>>> batch_size = 5
>>>
>>> np.random.seed(123) # set seed to have fixed bias and scales
>>>
>>> # create a pseudo-input with batch_size images with a random offset from the original image
>>> rng = np.random.uniform(low=0., high=100., size=(batch_size, w, h, c))
>>> inpt = np.concatenate([np.expand_dims(inpt, axis=0) + r for r in rng], axis=0) # create a set of image
>>>
>>> # img_to_float of input, to work with numbers btween 0. and 1.
>>> inpt = np.asarray([img_2_float(x) for x in inpt ])
>>>
>>> b, w, h, c = inpt.shape # needed for initializations of bias and scales
>>>
>>> bias = np.random.uniform(0., 1., size=(w, h, c)) # random biases
>>> scales = np.random.uniform(0., 1., size=(w, h, c)) # random scales
>>>
>>> bias = np.zeros(shape=(w, h, c), dtype=float)
>>> scales = np.ones(shape=(w, h, c), dtype=float)
>>>
>>> # Model Initialization
>>> layer = BatchNorm_layer(input_shape=inpt.shape, scales=scales, bias=bias)
>>>
>>> # FORWARD
>>>
>>> layer.forward(inpt)
>>> forward_out = layer.output
>>> print(layer)
>>>
>>> # BACKWARD
>>>
>>> layer.delta = np.random.uniform(low=0., high=100., size=layer.out_shape)
>>> delta = np.ones(shape=inpt.shape, dtype=float) # delta same shape as the Input
>>> layer.backward(delta)
>>>
>>> # Visualizations
>>>
>>> fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=2, figsize=(10, 5))
>>> fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
>>>
>>> fig.suptitle('BatchNormalization Layer')
>>>
>>> ax1[0].imshow(float_2_img(inpt[0]))
>>> ax1[0].set_title('Original image')
>>> ax1[0].axis('off')
>>>
>>> ax1[1].imshow(float_2_img(layer.mean))
>>> ax1[1].set_title("Mean Image")
>>> ax1[1].axis("off")
>>>
>>> ax2[0].imshow(float_2_img(forward_out[0]))
>>> ax2[0].set_title('Forward')
>>> ax2[0].axis('off')
>>>
>>> ax2[1].imshow(float_2_img(delta[0]))
>>> ax2[1].set_title('Backward')
>>> ax2[1].axis('off')
>>>
>>> fig.tight_layout()
>>> plt.show()
References
----------
- https://arxiv.org/abs/1502.03167
'''
epsil = 1e-8
def __init__(self, scales=None, bias=None, input_shape=None, **kwargs):
self.scales = scales
self.bias = bias
# Updates
self.scales_update, self.bias_update = (None, None)
self.optimizer = None
super(BatchNorm_layer, self).__init__(input_shape=input_shape)
def __str__(self):
'''
PRINTER
'''
return 'batchnorm {0:4d} x{1:4d} x{2:4d} image'.format(*self.out_shape[1:])
def load_weights(self, chunck_weights, pos=0):
'''
Load weights from full array of model weights
Parameters
----------
chunck_weights : array-like
Model weights and bias
pos : int (default=0)
Current position of the array
Returns
----------
pos : int
Updated stream position.
'''
outputs = np.prod(self.out_shape)
self.bias = chunck_weights[pos: pos + outputs]
self.bias = self.bias.reshape(self.out_shape)
pos += outputs
self.scales = chunck_weights[pos: pos + outputs]
self.scales = self.scales.reshape(self.out_shape)
pos += outputs
return pos
def save_weights(self):
'''
Return the biases and weights in a single ravel fmt to save in binary file
'''
return np.concatenate([self.bias.ravel(), self.scales.ravel()], axis=0).tolist()
def forward(self, inpt):
'''
Forward function of the BatchNormalization layer. It computes the output of
the layer, the formula is:
output = scale * input_norm + bias
Where input_norm is:
input_norm = (input - mean) / sqrt(var + epsil)
where mean and var are the mean and the variance of the input batch of
images computed over the first axis (batch)
Parameters
----------
inpt : array-like
Input batch of images in format (batch, in_w, in_h, in _c)
Returns
-------
self
'''
self._check_dims(shape=self.input_shape, arr=inpt, func='Forward')
# Copy input, compute mean and inverse variance with respect the batch axis
self.x = inpt.copy()
self.mean = self.x.mean(axis=0) # shape = (w, h, c)
self.var = 1. / np.sqrt((self.x.var(axis=0)) + self.epsil) # shape = (w, h, c)
# epsil is used to avoid divisions by zero
# Compute the normalized input
self.x_norm = (self.x - self.mean) * self.var # shape (batch, w, h, c)
self.output = self.x_norm.copy() # make a copy to store x_norm, used in backward
# Init scales and bias if they are not initialized (ones and zeros)
if self.scales is None:
self.scales = np.ones(shape=self.out_shape[1:])
if self.bias is None:
self.bias = np.zeros(shape=self.out_shape[1:])
# Output = scale * x_norm + bias
self.output = self.output * self.scales + self.bias
# output_shape = (batch, w, h, c)
self.delta = np.zeros(shape=self.out_shape, dtype=float)
return self
def backward(self, delta=None):
'''
BackPropagation function of the BatchNormalization layer. Every formula is a derivative
computed by the chain rule: dbeta = derivative of the loss w.r.t. bias, dgamma = derivative of
the loss w.r.t. scales, etc.
Parameters
----------
delta : array-like
delta array of shape (batch, w, h, c). Global delta to be backpropagated.
Returns
-------
self
'''
check_is_fitted(self, 'delta')
self._check_dims(shape=self.input_shape, arr=delta, func='Forward')
invN = 1. / np.prod(self.mean.shape)
# These are the explicit computations of every derivative involved in the backpropagation
# of the batchnorm layer, where dbeta = dL/dbeta, dgamma = dL/dgamma, etc.
self.bias_update = self.delta.sum(axis=0) # dbeta
self.scales_update = (self.delta * self.x_norm).sum(axis=0) # dgamma
self.delta *= self.scales # self.delta = dx_norm from now on
self.mean_delta = (self.delta * (-self.var)).mean(axis=0) # dmu
self.var_delta = ((self.delta * (self.x - self.mean)).sum(axis=0) *
(-.5 * self.var * self.var * self.var)) # dvar
# Here, delta is the derivative of the output w.r.t. input
self.delta = (self.delta * self.var +
self.var_delta * 2 * (self.x - self.mean) * invN +
self.mean_delta * invN)
if delta is not None:
delta[:] += self.delta
return self
def update(self):
'''
Update function for the batch-normalization layer.
Optimizer must be assigned externally as an optimizer object.
Returns
-------
self
'''
check_is_fitted(self, 'delta')
self.bias, self.scales = self.optimizer.update(params=[self.bias, self.scales],
gradients=[self.bias_update, self.scales_update]
)
return self
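# For reference, the standard batch-normalization gradients obtained by the
# chain rule (sums run over the batch axis, N is the batch size, eps = epsil;
# notation only, intended as a reading aid for the backward() method above):
#
#   x_hat  = (x - mu) / sqrt(var + eps)
#   dbeta  = sum(delta)
#   dgamma = sum(delta * x_hat)
#   dx_hat = delta * gamma
#   dvar   = sum(dx_hat * (x - mu)) * (-1/2) * (var + eps)**(-3/2)
#   dmu    = sum(dx_hat) * (-1 / sqrt(var + eps))
#   dx     = dx_hat / sqrt(var + eps) + dvar * 2*(x - mu)/N + dmu/N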
if __name__ == '__main__':
import os
import pylab as plt
from PIL import Image
img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
# I need to load at least two images, or make a copy of it
filename = os.path.join(os.path.dirname('__file__'), '..', '..', 'data', 'dog.jpg')
inpt = np.asarray(Image.open(filename), dtype=float)
inpt.setflags(write=1)
w, h, c = inpt.shape
batch_size = 5
np.random.seed(123) # set seed to have fixed bias and scales
# create a pseudo-input with batch_size images with a random offset from the original image
rng = np.random.uniform(low=0., high=100., size=(batch_size, w, h, c))
inpt = np.concatenate([np.expand_dims(inpt, axis=0) + r for r in rng], axis=0) # create a set of images