| prompt (string, 15–655k chars) | completion (string, 3–32.4k chars) | api (string, 8–52 chars) |
|---|---|---|
"""
Original code from UPSILoN (Kim & Bailer-Jones 2016).
https://github.com/dwkim78/upsilon
"""
import warnings
import multiprocessing
import numpy as np
import scipy.stats as ss
from collections import OrderedDict
from scipy.optimize import leastsq
from upsilont.features.period_LS_pyfftw import fasper, significance
def get_train_feature_name():
"""
    Return a list of feature names.

    These are the feature names used to train a model and to predict a class,
    sorted alphabetically.

    Returns
    -------
    feature_names : list
        A list of feature names.
"""
features = ['amplitude', 'hl_amp_ratio', 'kurtosis', 'period',
'phase_cusum', 'phase_eta', 'phi21', 'phi31', 'quartile31',
'r21', 'r31', 'shapiro_w', 'skewness', 'slope_per10',
'slope_per90', 'stetson_k']
features.sort()
return features
def get_all_feature_name():
"""
    Return a list of all feature names.

    This set covers every feature, regardless of whether it is used to
    train a model or to predict a class.

    Returns
    -------
    feature_names : list
        A list of feature names.
"""
features = get_train_feature_name()
features.append('cusum')
features.append('eta')
features.append('n_points')
features.append('period_SNR')
features.append('period_log10FAP')
features.append('period_uncertainty')
features.append('weighted_mean')
features.append('weighted_std')
features.sort()
return features
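# Editorial usage sketch (hypothetical helper, not part of the original
# UPSILoN module): the training features are a subset of the full feature
# set, so the two lists can be cross-checked like this.
def _check_feature_name_consistency():
    train_names = get_train_feature_name()
    all_names = get_all_feature_name()
    # Every training feature must also appear in the full feature list.
    assert set(train_names).issubset(set(all_names))
    return len(train_names), len(all_names)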
class VariabilityFeatures:
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
"""
Extract variability features of a light curve.
Parameters
----------
        date : array_like
            An array of observation dates, in days.
        mag : array_like
            An array of observed magnitudes.
        err : array_like, optional
            An array of magnitude errors. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
        # Variables to calculate.
self.n_points = None
self.weight = None
self.weighted_mean = None
self.weighted_std = None
self.weighted_sum = None
self.mean = None
self.median = None
self.std = None
self.skewness = None
self.kurtosis = None
self.shapiro_w = None
self.quartile31 = None
self.stetson_k = None
self.hl_amp_ratio = None
self.cusum = None
self.eta = None
self.phase_eta = None
self.slope_per10 = None
self.slope_per90 = None
self.phase_cusum = None
self.f = None
self.period = None
self.period_uncertainty = None
self.period_log10FAP = None
self.period_SNR = None
self.amplitude = None
self.r21 = None
self.r31 = None
self.f_phase = None
self.phi21 = None
self.phi31 = None
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
            raise RuntimeError('The length of date, mag, and err must be the same.')
        # Warn if the number of data points is too small.
        min_n_data = 80
        if len(self.date) < min_n_data:
            warnings.warn('The number of data points is less than %d.'
                          % min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
# Extract features.
self.run()
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err == 0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
        # Normality test: Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) - \
np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
        # Ratio of amplitudes above and below the average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
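        # Editorial note (worked example, not in the original code): the
        # weighted statistics above use inverse-error weights w_i = 1 / err_i:
        #   weighted_mean = sum(w_i * m_i) / sum(w_i)
        #   weighted_std  = sqrt(sum(w_i * (m_i - weighted_mean)**2) / sum(w_i))
        # For instance, mag = [10.0, 10.2] with err = [0.1, 0.2] gives weights
        # [10, 5] and a weighted mean of (100 + 51) / 15 = 10.067.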
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
        # Features based on a phase-folded light curve,
        # such as Eta, slope-percentile, etc.
        # Should be called after get_period_LS() has been called.
        # Create a phase-folded light curve.
        # We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
        Derive the period and, using it, calculate various other
        period-based features.
Parameters
----------
        date : array_like
            An array of observation dates, in days.
        mag : array_like
            An array of observed magnitudes.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(significance(fx, fy, nout, oversampling)[jmax])
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# Derive Fourier features for the first period.
# <NAME>., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 =
|
np.arctan(-p1[5] / p1[6])
|
numpy.arctan
|
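A brief standalone sketch (editorial addition; the coefficient values are made up for illustration) of the Fourier-feature step in the UPSILoN snippet above: after fitting a third-order Fourier series, the amplitude ratios and phase differences are read off the fitted coefficients.

import numpy as np

# Hypothetical fitted coefficients p1 = [offset, a1, b1, a2, b2, a3, b3].
p1 = np.array([15.0, 0.30, 0.10, 0.06, 0.02, 0.015, 0.005])

amplitude = np.sqrt(p1[1]**2 + p1[2]**2)           # first-harmonic amplitude
r21 = np.sqrt(p1[3]**2 + p1[4]**2) / amplitude     # amplitude ratio A2/A1
r31 = np.sqrt(p1[5]**2 + p1[6]**2) / amplitude     # amplitude ratio A3/A1
f_phase = np.arctan(-p1[1] / p1[2])                # first-harmonic phase
phi21 = np.arctan(-p1[3] / p1[4]) - 2. * f_phase   # phase difference phi_21
phi31 = np.arctan(-p1[5] / p1[6]) - 3. * f_phase   # phase difference phi_31
print(amplitude, r21, r31, phi21, phi31)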
# -*- coding: utf-8 -*-
"""Module containing utility bda for BDA simulation steps."""
import numpy as np
import os
import shutil
def adev(data, dt, tau):
"""Evaluate the Allan deviation of a time series.
Args:
data (np.array): Time series data.
dt (float): Time series data increment, in seconds.
tau (float): Averaging period, in seconds.
Returns:
        The Allan deviation of the series and the error on the deviation.
"""
rate = 1. / dt
m = int(rate * tau)
# Truncate to an even multiple of this tau value
freq = data[0:len(data) - int(np.remainder(len(data), m))]
f = np.reshape(freq, (m, -1), order='F')
fa =
|
np.mean(f, 0)
|
numpy.mean
|
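A minimal self-contained sketch (editorial addition, assuming the standard non-overlapping Allan deviation; the original BDA continuation is not shown in the snippet above) of how the bin averages fa are typically turned into a deviation estimate:

import numpy as np

def adev_sketch(data, dt, tau):
    """Non-overlapping Allan deviation of a time series (illustrative only)."""
    rate = 1. / dt
    m = int(rate * tau)                          # samples per averaging bin
    # Truncate to an integer multiple of m samples, then average each bin.
    freq = data[:len(data) - int(np.remainder(len(data), m))]
    f = np.reshape(freq, (m, -1), order='F')
    fa = np.mean(f, 0)                           # bin averages
    diff = np.diff(fa)                           # differences of adjacent bins
    avar = 0.5 * np.mean(diff ** 2)              # Allan variance
    adev_val = np.sqrt(avar)
    err = adev_val / np.sqrt(len(diff))          # rough error estimate
    return adev_val, err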
import numpy as np
import cv2
def region_of_interest(image):
height = image.shape[0]
polygons = np.array([[(200, height), (1100, height), (550, 250)]]) # Triangle polygon because cv2.fillPoly expects an array of polygons.
mask =
|
np.zeros_like(image)
|
numpy.zeros_like
|
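A short sketch (editorial addition, following the common OpenCV lane-detection pattern rather than the original file's continuation) of how the zero mask is usually combined with the triangle polygon, assuming a single-channel (e.g. Canny edge) image:

import numpy as np
import cv2

def region_of_interest_sketch(image):
    height = image.shape[0]
    # Cast to int32, which cv2.fillPoly expects for point coordinates.
    polygons = np.array([[(200, height), (1100, height), (550, 250)]], dtype=np.int32)
    mask = np.zeros_like(image)           # black mask, same shape as the image
    cv2.fillPoly(mask, polygons, 255)     # paint the triangle white
    return cv2.bitwise_and(image, mask)   # keep only pixels inside the triangle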
import pandas as pd
import numpy as np
import scipy as sp
import sys
import warnings
import copy
from slicer.interpretapi.explanation import AttributionExplanation
from slicer import Slicer
# from ._order import Order
from .utils._general import OpChain
# slicer confuses pylint...
# pylint: disable=no-member
op_chain_root = OpChain()
class MetaExplanation(type):
""" This metaclass exposes the Explanation object's methods for creating template op chains.
"""
def __getitem__(cls, item):
return op_chain_root.__getitem__(item)
@property
def abs(cls):
return op_chain_root.abs
@property
def argsort(cls):
return op_chain_root.argsort
@property
def sum(cls):
return op_chain_root.sum
@property
def max(cls):
return op_chain_root.max
@property
def min(cls):
return op_chain_root.min
@property
def mean(cls):
return op_chain_root.mean
@property
def sample(cls):
return op_chain_root.sample
@property
def hclust(cls):
return op_chain_root.hclust
class Explanation(AttributionExplanation, metaclass=MetaExplanation):
""" This is currently an experimental feature don't depend on this object yet! :)
"""
def __init__(
self,
values,
expected_value = None,
data = None,
output_shape = tuple(),
interaction_order = 0,
instance_names = None,
input_names = None,
output_names = None,
output_indexes = None,
feature_types = None,
lower_bounds = None,
upper_bounds = None,
main_effects = None,
hierarchical_values = None,
original_rows = None,
clustering = None
):
self.transform_history = []
if issubclass(type(values), Explanation):
e = values
values = e.values
expected_value = e.expected_value
data = e.data
# TODO: better cloning :)
if data is not None:
input_shape = _compute_shape(data)
else:
input_shape = _compute_shape(values)
# trim any trailing None shapes since we don't want slicer to try and use those
if len(input_shape) > 0 and input_shape[-1] is None:
input_shape = input_shape[:-1]
values_dims = list(
range(len(input_shape) + interaction_order + len(output_shape))
)
output_dims = range(len(input_shape) + interaction_order, values_dims[-1])
#main_effects_inds = values_dims[0:len(input_shape)] + values_dims[len(input_shape) + interaction_order:]
        self.output_names = output_names # TODO: still needs to be tracked after slicing
kwargs_dict = {}
if lower_bounds is not None:
kwargs_dict["lower_bounds"] = (values_dims, Slicer(lower_bounds))
if upper_bounds is not None:
kwargs_dict["upper_bounds"] = (values_dims, Slicer(upper_bounds))
if main_effects is not None:
kwargs_dict["main_effects"] = (values_dims, Slicer(main_effects))
if output_indexes is not None:
kwargs_dict["output_indexes"] = (output_dims, Slicer(output_indexes))
if output_names is not None:
kwargs_dict["output_names"] = (output_dims, Slicer(output_names))
if hierarchical_values is not None:
kwargs_dict["hierarchical_values"] = (values_dims, Slicer(hierarchical_values))
if input_names is not None:
if not is_1d(input_names):
input_name_dims = values_dims
else:
input_name_dims = values_dims[1:]
kwargs_dict["input_names"] = (input_name_dims, Slicer(input_names))
else:
self.input_names = None
if original_rows is not None:
kwargs_dict["original_rows"] = (values_dims[1:], Slicer(original_rows))
if clustering is not None:
kwargs_dict["clustering"] = ([0], Slicer(clustering))
if expected_value is not None:
ndims = len(getattr(expected_value, "shape", []))
if ndims == len(values_dims):
kwargs_dict["expected_value"] = (values_dims, Slicer(expected_value))
elif ndims == len(values_dims)-1:
kwargs_dict["expected_value"] = (values_dims[1:], Slicer(expected_value))
else:
raise Exception("The shape of the passed expected_value does not match the shape of the passed values!")
else:
self.expected_value = None
# if clustering is not None:
# self.clustering = clustering
super().__init__(
data,
values,
input_shape,
output_shape,
expected_value,
interaction_order,
instance_names,
input_names,
feature_types,
**kwargs_dict
)
def get_shape(self):
return _compute_shape(self.values)
shape = property(get_shape)
# def get_expected_value(self):
# return self.expected_value
# expected_value = property(get_expected_value)
def __repr__(self):
out = ".values =\n"+self.values.__repr__()
if self.expected_value is not None:
out += "\n\n.expected_value =\n"+self.expected_value.__repr__()
if self.data is not None:
out += "\n\n.data =\n"+self.data.__repr__()
return out
def __getitem__(self, item):
""" This adds support for magic string indexes like "rank(0)".
"""
if not isinstance(item, tuple):
item = (item,)
# convert any magic strings
for i,t in enumerate(item):
if issubclass(type(t), OpChain):
tmp = list(item)
tmp[i] = t.apply(self)
if issubclass(type(tmp[i]), (np.int64, np.int32)): # because slicer does not like numpy indexes
tmp[i] = int(tmp[i])
elif issubclass(type(tmp[i]), np.ndarray):
tmp[i] = [int(v) for v in tmp[i]] # slicer wants lists not numpy arrays for indexing
item = tuple(tmp)
elif type(t) is str:
if is_1d(self.input_names):
ind = np.where(
|
np.array(self.input_names)
|
numpy.array
|
import tensorflow as tf
import numpy as np
from inspect import signature
from functools import wraps
import heapq
import itertools
import time
def activation_function(act,act_input):
act_func = None
if act == "sigmoid":
act_func = tf.nn.sigmoid(act_input)
elif act == "tanh":
act_func = tf.nn.tanh(act_input)
elif act == "relu":
act_func = tf.nn.relu(act_input)
elif act == "elu":
act_func = tf.nn.elu(act_input)
elif act == "identity":
act_func = tf.identity(act_input)
elif act == "softmax":
act_func = tf.nn.softmax(act_input)
elif act == "selu":
act_func = tf.nn.selu(act_input)
else:
raise NotImplementedError("ERROR")
return act_func
def get_data_format(data_format):
if data_format == "UIRT":
columns = ["user", "item", "rating", "time"]
elif data_format == "UIR":
columns = ["user", "item", "rating"]
elif data_format == "UIT":
columns = ["user", "item", "time"]
elif data_format == "UI":
columns = ["user", "item"]
else:
raise ValueError("please choose a correct data format. ")
return columns
def csr_to_user_dict(train_matrix):
"""convert a scipy.sparse.csr_matrix to a dict,
where the key is row number, and value is the
non-empty index in each row.
"""
train_dict = {}
for idx, value in enumerate(train_matrix):
        if value.indices.size > 0:  # row has at least one non-zero entry
train_dict[idx] = value.indices.copy().tolist()
return train_dict
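# Editorial usage sketch (not part of the original module): converting a tiny
# interaction matrix, where each row is a user and non-zero columns are items.
#   import scipy.sparse as sp
#   mat = sp.csr_matrix([[0, 1, 1], [0, 0, 1]])
#   csr_to_user_dict(mat)  # -> {0: [1, 2], 1: [2]}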
def csr_to_user_dict_bytime(time_matrix,train_matrix):
train_dict = {}
time_matrix = time_matrix
user_pos_items = csr_to_user_dict(train_matrix)
for u, items in user_pos_items.items():
sorted_items = sorted(items, key=lambda x: time_matrix[u,x])
train_dict[u] =
|
np.array(sorted_items, dtype=np.int32)
|
numpy.array
|
from pypet import Environment, Parameter, cartesian_product, progressbar, Parameter
import numpy as np
import csv
import os
import copy
import pickle
import logging
from datetime import date
import time
def add_parameters(traj):
"""
add parameters to the trajectory with descriptions and default values
Parameters:
traj: pypet.trajectory.Trajectory
trajectory container, which manages the parameters
Returns:
None
Dependencies:
from pypet import Parameter
import numpy as np
"""
traj.par.N_pop = Parameter('N_pop', 10**5, 'population size')
traj.par.N_site = Parameter('N_site', 20, 'sequence length')
traj.par.N_state = Parameter('N_state', 2, 'number of states per site')
traj.par.mu = Parameter('mu', 10**(-4), 'mutation prob. per site per time step')
traj.par.sigma_h = Parameter('sigma_h', 1, 'host fitness coefficient')
traj.par.D0 = Parameter('D0', 5, 'cross-immunity distance')
traj.par.h_0 = Parameter('h_0', -7, 'typical single mutation fitness cost')
traj.par.J_0 = Parameter('J_0', 0, 'typical mutation coupling coefficient')
traj.par.hJ_coeffs = Parameter('hJ_coeffs', 'p24',
'fitness coefficients')
# traj.par.seed = Parameter('seed', 123456, 'RNG seed')
# randomly choose rng seed and save it as parameter
seed = np.random.randint(10**6)
traj.par.seed = Parameter('seed', seed, 'RNG seed')
traj.par.N_simu = Parameter('N_simu', 200, 'number of time steps to simulate')
def fitness_coeff_constant(N_site,N_state,h_0,J_0):
"""
creating the mutational fitness coefficients for the simulated sequences
in the case of constant fields and constant couplings
Parameters:
N_site: int
sequence length
N_state: int
number of states per site
h_0: int or float
single-mutation fitness cost
J_0: int or float
fitness coupling coefficient for double mutations
Returns:
h_list: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_list: numpy.ndarray
added fitness change due to couplings of
two specific mutations to each state at each site
Dependencies:
import numpy as np
"""
numparam_J=int(N_site*(N_site-1)/2)
J_list=np.ones((numparam_J, N_state-1, N_state-1))*J_0
h_list=np.ones((N_site, N_state-1))*h_0
return h_list, J_list
def fitness_coeff_p24(N_site, N_state, filepath='C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape/NewApproachFromMarch2021/'
'InfluenzaFitnessInference/code/notebooks/fitnessinference/p24-B-S0.90-Ising-reduced-out-learn.j', seed=12345, h_min=-9., h_max=-0.5, J_min=-2., J_max=3.):
"""
creating the mutational fitness coefficients for the simulated sequences in the case
of fitness coeffs sampled from coefficients inferred for the HIV protein p24
Parameters:
N_site: int
sequence length (<=105)
N_state: int
number of states per site
filepath (optional): str
filepath to a .j file that was created by the ACE inference of
p24 fitness coefficients
seed (optional): int
        seed for random sampling from the given coefficients
h_min, h_max, J_min, J_max (optional): float
maximum and minimum mutational fitness coefficients
(fixed for various sequence lengths)
Returns:
h_list: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_list: numpy.ndarray
added fitness change due to couplings of
a mutation to a specific state at any site i
with a mutation to a specific state at any other site j
Dependencies:
import os
import numpy as np
import csv
"""
filepath = os.path.normpath(filepath)
if not os.path.exists(filepath):
filepath = os.path.join(os.getcwd(), 'code', 'notebooks', 'fitnessinference', 'p24-B-S0.90-Ising-reduced-out-learn.j')
# np.random.seed(seed)
# get coefficients from file
with open(filepath) as f:
reader = csv.reader(f, delimiter = '\t')
param_list = list(reader)
# calculate sequence length from the coeff data
seq_length = int((np.sqrt(1 + 8 * len(param_list)) - 1) / 2)
# separate h and J list
h_list = [[float(param_list[i][j]) for j in range(len(param_list[i]))]
for i in range(seq_length)]
J_list = [[float(param_list[i][j]) for j in range(len(param_list[i]))]
for i in range(seq_length, len(param_list))]
# calculate matrix from J_list
k=0
J_mat=[[[] for j in range(seq_length)] for i in range(seq_length)]
for i in range(seq_length):
for j in range(i):
J_mat[i][j]=J_list[k]
J_mat[j][i]=J_list[k]
k+=1
# reduce J and h lists to sequence of length N_site
J_list_red = []
for i in range(N_site):
for j in range(i):
J_list_red.append(J_mat[i][j])
h_list_red = h_list[:N_site]
# sample from h and J parameters to get coefficient lists for only N_state states at each site
J_list_final = np.array([np.random.choice(J_list_red[i], size=(N_state-1, N_state-1))
for i in range(len(J_list_red))])
h_list_final = np.array([np.random.choice(h_list_red[i], size=N_state-1)
for i in range(len(h_list_red))])
# # replace max and min of coefficients by specific value, comment out if no modification to sampled coefficients
# J_list_final = np.where(J_list_final==np.max(J_list_final), J_max, J_list_final)
# J_list_final = np.where(J_list_final==np.min(J_list_final), J_min, J_list_final)
# h_list_final = np.where(h_list_final==np.max(h_list_final), h_max, h_list_final)
# h_list_final = np.where(h_list_final==np.min(h_list_final), h_min, h_list_final)
return h_list_final, J_list_final
def mutate_seqs(seqs, N_state, mu):
"""
mutate list of sequences according to given mutation probability and number of states,
Parameters:
seqs: numpy.ndarray
list of sequences
N_state: int
number of states per site
mu: float
probability to mutate from the current state to any one of the other states <<1
Returns:
seqs_m: numpy.ndarray
list of mutated sequences
Dependencies:
import numpy as np
"""
# first choose randomly how far in the state space each site is shifted
shift_ind = np.random.choice(N_state, size=seqs.shape, replace=True, p=[1-mu*(N_state-1)]+[mu]*(N_state-1))
# from this calculate the new state index (which can be negative)
new_ind = np.array(- N_state + shift_ind + seqs, dtype=int)
# set the new state
state_list = np.arange(N_state)
seqs_m = state_list[new_ind]
return seqs_m
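# Editorial worked example (not in the original code) of the shift trick above:
# with N_state = 3 and a site currently in state 1, a drawn shift of 0 gives
# new_ind = -3 + 0 + 1 = -2, so state_list[-2] = 1 and the site is unchanged,
# while a shift of 2 gives new_ind = -3 + 2 + 1 = 0, so the site moves to
# state_list[0] = 0; shift 0 is drawn with probability 1 - mu*(N_state-1).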
def fitness_int(seq, N_state, h_model, J_model, statevec_list):
"""
calculate the intrinsic fitness for one sequence
Parameters:
seq: numpy.ndarray
sequence
N_state: int
number of states per site
h_model: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_model: numpy.ndarray
added fitness change due to couplings of
two specific mutations to each state at each site
statevec_list: numpy.ndarray
list of vectors that represent the states of a sequence site
Returns:
f_int: float
intrinsic fitness for the sequence
Dependencies:
import numpy as np
"""
f_int = 0
k = 0
for i in range(len(seq)): # for each state 1
# state at site i
s1 = statevec_list[seq[i]]
# fitness contribution from state at i
f_int += np.dot(s1, h_model[i])
for j in range(i): # for each state 2<state 1
# state at other site j
s2 = statevec_list[seq[j]]
# fitness contribution from coupling of state at i with state at j
f_int += np.matmul(np.matmul(s1, J_model[k]), s2.T)
k += 1
return f_int
def fitness_int_list(strain_current, N_state, h_model, J_model):
"""
calculate the intrinsic fitness for each current strain
Parameters:
strain_current: numpy.ndarray
list of current strains (=unique sequences)
N_state: int
number of states per site
h_model: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_model: numpy.ndarray
added fitness change due to couplings of
two specific mutations to each state at each site
Returns:
f_int_list: numpy.ndarray
intrinsic fitness for each strain
Dependencies:
import numpy as np
"""
statevec_list=np.array([[int(i==j) for j in range(1,N_state)]
for i in range(N_state)])
f_int_list = np.array([fitness_int(seq, N_state, h_model, J_model, statevec_list)
for seq in strain_current])
return f_int_list
def fitness_host(seq, st_yearly, st_freq_yearly, sigma_h, D0):
"""
calculate the host population-dependent fitness contribution for one sequence
at the current time
Parameters:
seq: numpy.ndarray
sequence
st_yearly: list
list of strains for each time step up to t-1
st_freq_yearly: list
list of strain frequencies for each time step up to t-1
sigma_h: float
coefficient modulating f_host
D0: float
cross-immunity distance
Returns:
f_host: float
host-dependent fitness for the sequence at the current time
Dependencies:
import numpy as np
"""
f_host_noSig = 0 # initialize host fitness without sigma_h factor
for t in range(len(st_yearly)): # iterate through all prev. time steps
strains = st_yearly[t]
# create array of same dimension as strain list at t
seq_arr = np.repeat([seq], len(strains), axis=0)
# calculate mutational distances between seq_arr and strains
mut_dist = np.sum(seq_arr!=strains, axis=1)
f_host_noSig += -np.dot(st_freq_yearly[t], np.exp(-mut_dist/D0))
f_host = sigma_h*f_host_noSig
return f_host
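# Editorial note (not in the original code): the quantity computed above is
#   f_host = -sigma_h * sum_t sum_s x_s(t) * exp(-d(seq, s) / D0),
# where x_s(t) is the frequency of strain s at past time t and d is the
# Hamming distance, so past strains close to seq (small d) lower its fitness.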
def fitness_host_list(strain_current, st_yearly, st_freq_yearly, sigma_h, D0):
"""
calculate the host population-dependent fitness contribution for all strains
at the current time
Parameters:
strain_current: numpy.ndarray
list of current strains (=unique sequences)
st_yearly: list
list of strains for each time step up to t-1
st_freq_yearly: list
list of strain frequencies for each time step up to t-1
sigma_h: float
coefficient modulating f_host
D0: float
cross-immunity distance
Returns:
f_host_list: numpy.ndarray
host-dependent fitness for each strain at the current time
Dependencies:
import numpy as np
"""
f_host_list = np.array([fitness_host(seq, st_yearly, st_freq_yearly, sigma_h, D0)
for seq in strain_current])
return f_host_list
def flu_antigen_simulation(traj, filepath, varied_simu_params):
"""
simulate the evolution of flu-like antigenic sequences
Parameters:
traj: pypet.trajectory.Trajectory
trajectory container, which manages the parameters
filepath: str
path to folder
where results should be stored
varied_simu_params: list
list of names of parameters that are varied
in the parameter sweep
Results:
strain_yearly: list
[[list of unique sequences (strains)]
for each time step]
with strains from most to least prevalent at each time
strain_frequency_yearly: list
[[list of frequencies of strains]
for each time step]
in same order as in strain_yearly
        pickled .data files with intermediate simulation results (updated at each simulated time step)
Returns:
run_name: str
name of file without path or extension
in which the results of the single run are saved
Dependencies:
other functions in this module
import numpy as np
from pypet import Environment, Parameter
import os
import pickle
import copy
"""
# initializations:
# set RNG seed:
np.random.seed(traj.seed)
# current sequences, numpy array, initialized with all zeros
seqs =
|
np.zeros((traj.N_pop, traj.N_site))
|
numpy.zeros
|
#! /usr/bin/env python
"""
Forward model matched filter relying on either KLIP (Soummer et al. 2012;
Pueyo 2016) or LOCI (Lafreniere et al. 2007b) for the PSF reference
approximation. The original concept of a matched filter applied to KLIP was
first proposed in Ruffio et al. (2019) and then adapted in Dahlqvist et al.
(2021) to use the LOCI framework. For both PSF-subtraction techniques, a
forward model of the PSF is computed for each pixel contained in the field of
view and each frame to account for the over-subtraction and self-subtraction
of potential planetary signal due to the reference PSF subtraction. The
obtained model is then compared to the pixel intensities within each frame of
the residual cube. The SNR associated with each pixel contained in the field
of view, as well as its estimated contrast, is then obtained via a
Gaussian maximum likelihood approach.
"""
__author__ = '<NAME>'
__all__ = ['fmmf']
from multiprocessing import cpu_count
import numpy as np
import numpy.linalg as la
from skimage.draw import disk
from ..var import get_annulus_segments, frame_center
from ..preproc import frame_crop, cube_crop_frames, cube_derotate
from ..config.utils_conf import pool_map, iterable
from ..config import time_ini, timing
from ..fm import cube_inject_companions
from ..preproc.derotation import _find_indices_adi
def fmmf(cube, pa, psf, fwhm, min_r=None, max_r=None, model='KLIP', var='FR',
param={'ncomp': 20, 'tolerance': 5e-3, 'delta_rot': 0.5}, crop=5,
imlib='vip-fft', interpolation='lanczos4', nproc=1, verbose=True):
"""
Forward model matched filter generating SNR map and contrast map, using
either KLIP or LOCI as PSF subtraction techniques.
Parameters
----------
cube : numpy ndarray, 3d
Input cube (ADI sequences), Dim 1 = temporal axis, Dim 2-3 =
spatial axis
pa : numpy ndarray, 1d
Parallactic angles for each frame of the ADI sequences.
psf : numpy ndarray 2d
2d array with the normalized PSF template, with an odd shape.
The PSF image must be centered wrt to the array! Therefore, it is
recommended to run the function ``normalize_psf`` to generate a
centered and flux-normalized PSF template.
fwhm: int
Full width at half maximum for the instrument PSF
    min_r : int, optional
        Center radius of the first annulus considered in the FMMF detection
        map estimation. The radius should be larger than half
        the value of the 'crop' parameter. Default is None, which
        corresponds to one FWHM.
    max_r : int, optional
        Center radius of the last annulus considered in the FMMF detection
        map estimation. The radius should be smaller than or equal to half the
        size of the image minus half the value of the 'crop' parameter.
        Default is None, which corresponds to half the size of the image
        minus half the value of the 'crop' parameter.
model: string, optional
Selected PSF-subtraction technique for the computation of the FMMF
        detection map. FMMF works with either KLIP or LOCI. Default is 'KLIP'.
var: str, optional
Model used for the residual noise variance estimation used in the
matched filtering (maximum likelihood estimation of the flux and SNR).
Three different approaches are proposed: 'FR', 'FM', and 'TE'.
* 'FR': consider the pixels in the selected annulus with a width equal
to asize but separately for every frame.
* 'FM': consider the pixels in the selected annulus with a width
equal to asize but separately for every frame. Apply a mask one FWHM
on the selected pixel and its surrounding.
        * 'TE': rely on the method developed in PACO to estimate the
        residual noise variance (take the pixels in a region of one FWHM
        around the selected pixel, considering every frame in the
derotated cube of residuals except for the selected frame)
param: dict, optional
        Dictionary grouping the parameters used by the KLIP (ncomp and
delta_rot) or LOCI (tolerance and delta_rot) PSF-subtraction
technique.
* ncomp : int, optional. Number of components used for the low-rank
approximation of the speckle field. Default is 20.
* tolerance: float, optional. Tolerance level for the approximation of
the speckle field via a linear combination of the reference images in
the LOCI algorithm. Default is 5e-3.
        * delta_rot : float, optional. Factor for tuning the parallactic angle
        threshold, expressed in FWHM. Default is 0.5 (excludes 0.5xFWHM on each
        side of the considered frame).
crop: int, optional
Part of the PSF template considered in the estimation of the FMMF
detection map. Default is 5.
imlib : str, optional
Parameter used for the derotation of the residual cube. See the
documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
Parameter used for the derotation of the residual cube. See the
documentation of the ``vip_hci.preproc.frame_rotate`` function.
nproc : int or None, optional
Number of processes for parallel computing. By default ('nproc=1')
the algorithm works in single-process mode. If set to None, nproc
is automatically set to half the number of available CPUs.
verbose: bool, optional
If True provide a message each time an annulus has been treated.
Default True.
Returns
-------
flux_matrix : 2d ndarray
Maximum likelihood estimate of the contrast for each pixel in the field
of view
snr_matrix : 2d ndarray
Signal to noise ratio map (defined as the estimated contrast divided by
the estimated standard deviation of the contrast).
"""
start_time = time_ini(verbose)
if crop >= 2*round(fwhm)+1:
raise ValueError("Maximum cropsize should be lower or equal to two" +
" FWHM,please change accordingly the value of 'crop'")
if min_r is None:
min_r = int(round(fwhm))
if max_r is None:
max_r = cube.shape[-1]//2-(crop//2+1)
if nproc is None:
nproc = cpu_count()//2
res_full = pool_map(nproc, _snr_contrast_esti, iterable(range(min_r, max_r)),
cube, pa, psf, fwhm, model, var, param, crop, imlib,
interpolation, verbose)
flux_matrix = np.zeros((cube.shape[1], cube.shape[2]))
snr_matrix = np.zeros((cube.shape[1], cube.shape[2]))
for res_temp in res_full:
indices = get_annulus_segments(cube[0], res_temp[2], 1)
flux_matrix[indices[0][0], indices[0][1]] = res_temp[0]
snr_matrix[indices[0][0], indices[0][1]] = res_temp[1]
if verbose:
timing(start_time)
return flux_matrix, snr_matrix
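# Editorial usage sketch with hypothetical inputs (not part of the original
# module); a real call needs a centred, flux-normalised PSF from normalize_psf.
#   import numpy as np
#   cube = np.random.randn(30, 101, 101)   # 30-frame ADI sequence
#   pa = np.linspace(0, 60, 30)            # parallactic angles in degrees
#   psf = np.ones((11, 11)) / 121.         # stand-in PSF template
#   flux_map, snr_map = fmmf(cube, pa, psf, fwhm=4, model='KLIP', nproc=1)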
def _snr_contrast_esti(ann_center, cube, pa, psf, fwhm, model, var, param, crop,
imlib, interpolation, verbose):
"""
    Computation of the SNR and contrast associated with the pixels contained
    in a given annulus via the forward model matched filter.
"""
n, y, x = cube.shape
evals_matrix = []
evecs_matrix = []
KL_basis_matrix = []
refs_mean_sub_matrix = []
sci_mean_sub_matrix = []
resicube_klip = None
ind_ref_list = None
coef_list = None
ncomp = param['ncomp']
tolerance = param['tolerance']
delta_rot = param['delta_rot']
# Computation of the reference PSF, and the matrices
# required for the computation of the PSF forward models
pa_threshold = np.rad2deg(
2 * np.arctan(delta_rot * fwhm / (2 * (ann_center))))
mid_range = np.abs(np.amax(pa) - np.amin(pa)) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
if model == 'KLIP':
resicube_klip = np.zeros_like(cube)
indices = get_annulus_segments(cube[0], ann_center-int(round(fwhm)/2),
int(round(fwhm)), 1)
for k in range(0, cube.shape[0]):
res_temp = KLIP_patch(k, cube[:, indices[0][0], indices[0][1]],
ncomp, pa, int(round(fwhm)), pa_threshold,
ann_center)
evals_temp = res_temp[0]
evecs_temp = res_temp[1]
KL_basis_temp = res_temp[2]
sub_img_rows_temp = res_temp[3]
refs_mean_sub_temp = res_temp[4]
sci_mean_sub_temp = res_temp[5]
resicube_klip[k, indices[0][0], indices[0][1]] = sub_img_rows_temp
evals_matrix.append(evals_temp)
evecs_matrix.append(evecs_temp)
KL_basis_matrix.append(KL_basis_temp)
refs_mean_sub_matrix.append(refs_mean_sub_temp)
sci_mean_sub_matrix.append(sci_mean_sub_temp)
mcube = cube_derotate(resicube_klip, pa, imlib=imlib,
interpolation=interpolation)
elif model == 'LOCI':
resicube, ind_ref_list, coef_list = LOCI_FM(cube, psf, ann_center, pa,
int(round(fwhm)), fwhm,
tolerance, delta_rot,
pa_threshold)
mcube = cube_derotate(resicube, pa, imlib=imlib,
interpolation=interpolation)
ceny, cenx = frame_center(cube[0])
indices = get_annulus_segments(mcube[0], ann_center, 1, 1)
indicesy = indices[0][0]
indicesx = indices[0][1]
flux_esti = np.zeros_like(indicesy)
prob_esti = np.zeros_like(indicesy)
var_f = _var_esti(mcube, pa, var, crop, ann_center)
for i in range(0, len(indicesy)):
psfm_temp = None
poscenty = indicesy[i]
poscentx = indicesx[i]
indices = get_annulus_segments(cube[0], ann_center-int(round(fwhm)/2),
int(round(fwhm)), 1)
an_dist = np.sqrt((poscenty-ceny)**2 + (poscentx-cenx)**2)
theta = np.degrees(np.arctan2(poscenty-ceny, poscentx-cenx))
model_matrix = cube_inject_companions(np.zeros_like(cube), psf, pa,
flevel=1, rad_dists=an_dist,
theta=theta, n_branches=1,
verbose=False, imlib=imlib,
interpolation=interpolation)
# PSF forward model computation for KLIP
if model == 'KLIP':
psf_map = np.zeros_like(model_matrix)
for b in range(0, n):
psf_map_temp = _perturb(b, model_matrix[:, indices[0][0],
indices[0][1]],
ncomp, evals_matrix, evecs_matrix,
KL_basis_matrix,
sci_mean_sub_matrix,
refs_mean_sub_matrix, pa, fwhm,
pa_threshold, ann_center)
psf_map[b, indices[0][0], indices[0][1]] = psf_map_temp
                psf_map[b, indices[0][0], indices[0][1]] -= np.mean(psf_map_temp)
psf_map_der = cube_derotate(psf_map, pa, imlib=imlib,
interpolation=interpolation)
psfm_temp = cube_crop_frames(psf_map_der, int(2*round(fwhm)+1),
xy=(poscentx, poscenty), verbose=False)
# PSF forward model computation for LOCI
if model == 'LOCI':
values_fc = model_matrix[:, indices[0][0], indices[0][1]]
cube_res_fc = np.zeros_like(model_matrix)
matrix_res_fc = np.zeros((values_fc.shape[0],
indices[0][0].shape[0]))
for e in range(values_fc.shape[0]):
recon_fc = np.dot(coef_list[e], values_fc[ind_ref_list[e]])
matrix_res_fc[e] = values_fc[e] - recon_fc
cube_res_fc[:, indices[0][0], indices[0][1]] = matrix_res_fc
cube_der_fc = cube_derotate(cube_res_fc-np.mean(cube_res_fc),
pa, imlib=imlib,
interpolation=interpolation)
psfm_temp = cube_crop_frames(cube_der_fc, int(2*round(fwhm)+1),
xy=(poscentx, poscenty), verbose=False)
num = []
denom = []
# Matched Filter
for j in range(n):
if var == 'FR':
svar = var_f[j]
elif var == 'FM':
svar = var_f[i, j]
elif var == 'TE':
svar = var_f[i, j]
if psfm_temp.shape[1] == crop:
psfm = psfm_temp[j]
else:
psfm = frame_crop(psfm_temp[j],
crop, cenxy=[int(psfm_temp.shape[-1]/2),
int(psfm_temp.shape[-1]/2)],
verbose=False)
num.append(np.multiply(frame_crop(mcube[j], crop,
cenxy=[poscentx, poscenty],
verbose=False), psfm).sum()/svar)
denom.append(np.multiply(psfm, psfm).sum()/svar)
flux_esti[i] = sum(num)/np.sqrt(sum(denom))
prob_esti[i] = sum(num)/sum(denom)
    if verbose:
print("Radial distance "+"{}".format(ann_center)+" done!")
return prob_esti, flux_esti, ann_center
def _var_esti(mcube, pa, var, crop, ann_center):
"""
Computation of the residual noise variance
"""
n, y, x = mcube.shape
if var == 'FR':
var_f = np.zeros(n)
indices = get_annulus_segments(
mcube[0], ann_center-int(crop/2), crop, 1)
poscentx = indices[0][1]
poscenty = indices[0][0]
for a in range(n):
var_f[a] = np.var(mcube[a, poscenty, poscentx])
elif var == 'FM':
indices = get_annulus_segments(mcube[0], ann_center, 1, 1)
indicesy = indices[0][0]
indicesx = indices[0][1]
var_f = np.zeros((len(indicesy), n))
indices = get_annulus_segments(
mcube[0], ann_center-int(crop/2), crop, 1)
for a in range(len(indicesy)):
indc = disk((indicesy[a], indicesx[a]), 3)
positionx = []
positiony = []
for k in range(0, len(indices[0][1])):
cond1 = set(np.where(indices[0][1][k] == indc[1])[0])
cond2 = set(np.where(indices[0][0][k] == indc[0])[0])
if len(cond1 & cond2) == 0:
positionx.append(indices[0][1][k])
positiony.append(indices[0][0][k])
for b in range((n)):
var_f[a, b] = np.var(mcube[b, positiony, positionx])
elif var == 'TE':
indices = get_annulus_segments(mcube[0], ann_center, 1, 1)
indicesy = indices[0][0]
indicesx = indices[0][1]
var_f = np.zeros((len(indicesy), n))
mcube_derot = cube_derotate(mcube, -pa)
for a in range(0, len(indicesy)):
            radist = np.sqrt((indicesx[a]-int(x/2))**2
                             + (indicesy[a]-int(y/2))**2)
if (indicesy[a]-int(y/2)) >= 0:
ang_s = np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
else:
ang_s = 360-np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
for b in range(n):
twopi = 2*np.pi
sigposy = int(y/2 + np.sin((ang_s-pa[b])/360*twopi)*radist)
sigposx = int(x/2 + np.cos((ang_s-pa[b])/360*twopi)*radist)
y0 = int(sigposy - int(crop/2))
                y1 = int(sigposy + int(crop/2)+1)  # +1 because the endpoint
                # is excluded when slicing
x0 = int(sigposx - int(crop/2))
x1 = int(sigposx + int(crop/2)+1)
mask = np.ones(mcube_derot.shape[0], dtype=bool)
mask[b] = False
mcube_sel = mcube_derot[mask, y0:y1, x0:x1]
var_f[a, b] = np.var(np.asarray(mcube_sel))
return var_f
def _perturb(frame, model_matrix, numbasis, evals_matrix, evecs_matrix,
KL_basis_matrix, sci_mean_sub_matrix, refs_mean_sub_matrix,
angle_list, fwhm, pa_threshold, ann_center):
"""
Function allowing the estimation of the PSF forward model when relying on
    KLIP for the computation of the speckle field. The code is based on the
    PyKLIP library, considering only the ADI case with a single number of
    principal components. For more details about the code, consider
the PyKLIP library or the original articles (Pueyo, L. 2016, ApJ, 824, 117
or <NAME>., <NAME>., <NAME>., & Pueyo, L. 2017, ApJ, 842)
"""
# Selection of the reference library based on the given parallactic angle
# threshold
if pa_threshold != 0:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
models_ref = model_matrix[indices_left]
else:
models_ref = model_matrix
# Computation of the self-subtraction and over-subtraction for the current
# frame
model_sci = model_matrix[frame]
KL_basis = KL_basis_matrix[frame]
sci_mean_sub = sci_mean_sub_matrix[frame]
refs_mean_sub = refs_mean_sub_matrix[frame]
evals = evals_matrix[frame]
evecs = evecs_matrix[frame]
max_basis = KL_basis.shape[0]
N_pix = KL_basis.shape[1]
models_msub = models_ref - np.nanmean(models_ref, axis=1)[:, None]
models_msub[np.where(np.isnan(models_msub))] = 0
model_sci_msub = model_sci - np.nanmean(model_sci)
model_sci_msub[np.where(np.isnan(model_sci_msub))] = 0
model_sci_msub_rows = np.reshape(model_sci_msub, (1, N_pix))
sci_mean_sub_rows = np.reshape(sci_mean_sub, (1, N_pix))
delta_KL = np.zeros([max_basis, N_pix])
proj_models_T = models_msub.dot(refs_mean_sub.transpose())
for k in range(max_basis):
Zk = np.reshape(KL_basis[k, :], (1, KL_basis[k, :].size))
Vk = (evecs[:, k])[:, None]
diagVk_T = (Vk.T).dot(proj_models_T)
proj_models_Vk = proj_models_T.dot(Vk)
fac = -(1/(2*np.sqrt(evals[k])))
term1 = (diagVk_T.dot(Vk) + ((Vk.T).dot(proj_models_Vk))).dot(Zk)
term2 = (Vk.T).dot(models_msub)
DeltaZk = fac*term1 + term2
for j in range(k):
Zj = KL_basis[j, :][None, :]
Vj = evecs[:, j][:, None]
fac = np.sqrt(evals[j])/(evals[k]-evals[j])
t1 = diagVk_T.dot(Vj)
t2 = (Vj.T).dot(proj_models_Vk)
DeltaZk += fac*(t1 + t2).dot(Zj)
for j in range(k+1, max_basis):
Zj = KL_basis[j, :][None, :]
Vj = evecs[:, j][:, None]
fac = np.sqrt(evals[j])/(evals[k]-evals[j])
t1 = diagVk_T.dot(Vj)
t2 = (Vj.T).dot(proj_models_Vk)
DeltaZk += fac*(t1 + t2).dot(Zj)
delta_KL[k] = DeltaZk/
|
np.sqrt(evals[k])
|
numpy.sqrt
|
import unittest
import numpy as np
from nptest import nptest
class HistogramTests(unittest.TestCase):
#region bincount
def test_bincount_1(self):
x = np.arange(5)
a = np.bincount(x)
print(a)
x = np.array([0, 1, 1, 3, 2, 1, 7])
a = np.bincount(x)
print(a)
x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
a = np.bincount(x)
print(a)
print(a.size == np.amax(x)+1)
def test_bincount_2(self):
x = np.arange(5, dtype=np.int64)
a = np.bincount(x)
print(a)
x = np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.int16)
a = np.bincount(x)
print(a)
x = np.array([0, 1, 1, 3, 2, 1, 7, 23], dtype=np.int8)
a = np.bincount(x)
print(a)
print(a.size == np.amax(x)+1)
def test_bincount_3(self):
w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
x = np.arange(6, dtype=np.int64)
a = np.bincount(x, weights=w)
print(a)
x = np.array([0, 1, 3, 2, 1, 7], dtype=np.int16)
a = np.bincount(x,weights=w)
print(a)
x = np.array([0, 1, 3, 2, 1, 7], dtype=np.int8)
a = np.bincount(x, weights=w)
print(a)
def test_bincount_4(self):
x =
|
np.arange(5, dtype=np.int64)
|
numpy.arange
|
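A short worked example (editorial addition) of the weighted bincount exercised in test_bincount_3 above: each output bin sums the weights of the input entries that fall into it.

import numpy as np

w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6])
x = np.array([0, 1, 3, 2, 1, 7])
# Bin 1 collects w[1] + w[4] = 0.5 + 1.0 = 1.5; empty bins stay 0.
print(np.bincount(x, weights=w))
# [ 0.3  1.5  0.7  0.2  0.   0.   0.  -0.6]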
import unittest
from inferelator.single_cell_workflow import SingleCellWorkflow
from inferelator.preprocessing import single_cell, metadata_parser
from inferelator.tests.artifacts.test_stubs import TestDataSingleCellLike, create_puppet_workflow, TEST_DATA
from inferelator import default
from inferelator.utils import InferelatorData
import numpy as np
import pandas as pd
import os
my_dir = os.path.dirname(__file__)
test_count_data = pd.DataFrame([[0, 0, 0], [10, 0, 10], [4, 0, 5], [0, 0, 0]])
test_meta_data = metadata_parser.MetadataParserBranching.create_default_meta_data(test_count_data.index)
class SingleCellTestCase(unittest.TestCase):
def setUp(self):
self.data = TEST_DATA.copy()
self.prior = TestDataSingleCellLike.priors_data
self.gold_standard = self.prior.copy()
self.tf_names = TestDataSingleCellLike.tf_names
self.workflow = create_puppet_workflow(base_class=SingleCellWorkflow)(self.data, self.prior, self.gold_standard)
self.gene_data = TestDataSingleCellLike.gene_metadata
self.gene_list_index = TestDataSingleCellLike.gene_list_index
class SingleCellPreprocessTest(SingleCellTestCase):
def test_count_filter(self):
expr_filtered_1 = self.data.copy()
single_cell.filter_genes_for_count(expr_filtered_1)
self.assertEqual(expr_filtered_1.gene_names.tolist(), ["gene1", "gene2", "gene4", "gene6"])
expr_filtered_2 = self.data.copy()
single_cell.filter_genes_for_count(expr_filtered_2, count_minimum=4)
self.assertEqual(expr_filtered_2.gene_names.tolist(), ["gene1", "gene2", "gene4"])
expr_filtered_3 = self.data.copy()
single_cell.filter_genes_for_count(expr_filtered_3, count_minimum=20)
self.assertEqual(expr_filtered_3.gene_names.tolist(), ["gene2"])
with self.assertRaises(ValueError):
self.data.subtract(3)
single_cell.filter_genes_for_count(self.data, count_minimum=1)
def test_library_to_one_norm(self):
single_cell.normalize_expression_to_one(self.data)
np.testing.assert_almost_equal(self.data.expression_data.sum(axis=1).tolist(), [1] * 10)
def test_median_scaling_norm(self):
data = self.data.copy()
single_cell.normalize_medians_for_batch(data, batch_factor_column="Condition")
data.meta_data['umi'] = data.expression_data.sum(axis=1)
np.testing.assert_almost_equal(data.meta_data.groupby("Condition")['umi'].median().tolist(), [45, 45, 45])
data = self.data.copy()
single_cell.normalize_medians_for_batch(data, batch_factor_column="Genotype")
data.meta_data['umi'] = data.expression_data.sum(axis=1)
np.testing.assert_almost_equal(data.meta_data.groupby("Genotype")['umi'].median().tolist(), [45])
def test_size_factor_scaling_norm(self):
single_cell.normalize_sizes_within_batch(self.data, batch_factor_column="Condition")
test_umi = pd.Series({"A": 45.0, "B": 36.0, "C": 58.5})
meta_data1 = self.data.meta_data
meta_data1['umi'] = np.sum(self.data.expression_data, axis=1)
for group in meta_data1['Condition'].unique():
idx = meta_data1['Condition'] == group
np.testing.assert_almost_equal(meta_data1.loc[idx, 'umi'].tolist(), [test_umi[group]] * idx.sum(),
decimal=4)
def test_log_scaling(self):
data = self.data.copy()
single_cell.log10_data(data)
np.testing.assert_almost_equal(np.log10(self.data.expression_data + 1), data.expression_data)
data = self.data.copy()
single_cell.log2_data(data)
np.testing.assert_almost_equal(np.log2(self.data.expression_data + 1), data.expression_data)
data = self.data.copy()
single_cell.ln_data(data)
np.testing.assert_almost_equal(np.log(self.data.expression_data + 1), data.expression_data)
data = self.data.copy()
single_cell.tf_sqrt_data(data)
np.testing.assert_almost_equal(
|
np.sqrt(self.data.expression_data + 1)
|
numpy.sqrt
|
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH#1211; smoketest unrelated to the rest of this test
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame(
[[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
class TestGetitemListLike:
def test_getitem_list_missing_key(self):
# GH#13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
df.columns = ["x", "x", "z"]
# Check that we get the correct value in the KeyError
with pytest.raises(KeyError, match=r"\['y'\] not in index"):
df[["x", "y", "z"]]
class TestGetitemCallable:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, "a"]
tm.assert_frame_equal(result, expected)
class TestGetitemBooleanMask:
def test_getitem_bool_mask_categorical_index(self):
df3 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=True),
name="B",
),
)
df4 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=False),
name="B",
),
)
result = df3[df3.index == "a"]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == "a"]
expected = df4.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name='B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name='B')
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
df4[df4.index < 2]
with pytest.raises(TypeError, match=msg):
df4[df4.index > 1]
@pytest.mark.parametrize(
"data1,data2,expected_data",
(
(
[[1, 2], [3, 4]],
[[0.5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [np.nan, 7.0], [6.0, 8.0]],
),
(
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [5, 7], [6, 8]],
),
),
)
def test_getitem_bool_mask_duplicate_columns_mixed_dtypes(
self,
data1,
data2,
expected_data,
):
# GH#31954
df1 = DataFrame(np.array(data1))
df2 = DataFrame(np.array(data2))
df = concat([df1, df2], axis=1)
result = df[df > 2]
exdict = {i: np.array(col) for i, col in enumerate(expected_data)}
expected = DataFrame(exdict).rename(columns={2: 0, 3: 1})
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_dup_cols(self):
dups = ["A", "A", "C", "D"]
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
return df
def test_getitem_boolean_frame_unaligned_with_duplicate_columns(self, df_dup_cols):
# `df.A > 6` is a DataFrame with a different shape from df
# boolean with the duplicate raises
df = df_dup_cols
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df[df.A > 6]
def test_getitem_boolean_series_with_duplicate_columns(self, df_dup_cols):
# boolean indexing
# GH#4879
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df.C > 6]
expected.columns = df_dup_cols.columns
df = df_dup_cols
result = df[df.C > 6]
tm.assert_frame_equal(result, expected)
result.dtypes
str(result)
def test_getitem_boolean_frame_with_duplicate_columns(self, df_dup_cols):
# where
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
# `df > 6` is a DataFrame with the same shape+alignment as df
expected = df[df > 6]
expected.columns = df_dup_cols.columns
df = df_dup_cols
result = df[df > 6]
tm.assert_frame_equal(result, expected)
result.dtypes
str(result)
class TestGetitemSlice:
def test_getitem_slice_float64(self, frame_or_series):
values = np.arange(10.0, 50.0, 2)
index = Index(values)
start, end = values[[5, 15]]
data =
|
np.random.randn(20, 3)
|
numpy.random.randn
|
#%matplotlib inline
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as nc
from scipy.interpolate import interp1d
import matplotlib.cm as cm
from salishsea_tools import (nc_tools, gsw_calls, geo_tools, viz_tools)
import seabird
import cmocean as cmo
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LightSource
import matplotlib as mpl
print("The Modules were imported successfully")
bathy = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc')
mesh_mask = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/mesh_mask.nc')
mbathy = mesh_mask['mbathy'][0,...]
Z = bathy.variables['Bathymetry'][:]
y_wcvi_slice = np.arange(180,350)
x_wcvi_slice = np.arange(480,650)
zlevels = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc').variables['deptht']
lon = bathy['nav_lon'][...]
lat = bathy['nav_lat'][...]
lon_wcvi = lon[180:350,480:650]
lat_wcvi = lat[180:350,480:650]
NEP_aug = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/NEP36_T_S_Spice_aug_larger_offshore_rho_correct.nc')
sal_aug = NEP_aug.variables['vosaline']
temp_aug = NEP_aug.variables['votemper']
spic_aug = NEP_aug.variables['spiciness']
rho_aug = NEP_aug.variables['density']
zlevels = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc').variables['deptht']
NEP_jul = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/NEP36_T_S_Spice_july_larger_offshore_rho_correct.nc')
sal_jul = NEP_jul.variables['vosaline']
temp_jul = NEP_jul.variables['votemper']
spic_jul = NEP_jul.variables['spiciness']
rho_jul = NEP_jul.variables['density']
NEP_jun = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/NEP36_T_S_Spice_june_larger_offshore_rho_correct.nc')
sal_jun = NEP_jun.variables['vosaline']
temp_jun = NEP_jun.variables['votemper']
spic_jun = NEP_jun.variables['spiciness']
rho_jun = NEP_jun.variables['density']
# NEP_iso_jul = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/short_slice_NEP36_jul_along_isopycnal_larger_offshore.nc')
# spic_iso_jul = NEP_iso_jul.variables['spiciness']
# iso_t = NEP_iso_jul.variables['isot']
short_NEP_iso_jul = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/short_slice_NEP36_jul_along_isopycnal_larger_offshore_rho_correct.nc')
short_spic_iso_jul = short_NEP_iso_jul.variables['spiciness']
short_iso_t = short_NEP_iso_jul.variables['isot']
#short_NEP_iso_aug = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/short_NEP36_aug_along_isopycnal_larger_offshore_rho_correct.nc')
#short_spic_iso_aug = short_NEP_iso_aug.variables['spiciness']
#short_iso_t = short_NEP_iso_aug.variables['isot']
short_NEP_iso_jun = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/short_NEP36_june_along_isopycnal_larger_offshore_rho_correct.nc')
short_spic_iso_jun = short_NEP_iso_jun.variables['spiciness']
short_iso_t = short_NEP_iso_jun.variables['isot']
print("The Data has been extracted from the respective files")
def plot_iso_den(t, rho_0, month):
if month == 'June':
depth_rho_0 =
|
np.zeros_like(sal_jun[0,0,...])
|
numpy.zeros_like
|
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = ['STHeiti']  # to display Chinese labels correctly
# plt.rcParams['font.size'] = 14  # to display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # to display minus signs correctly
datasets = [("../data/n10-1682-X-small.npy", "../data/n10-1682-y-small.npy"),
("../data/n10-3127-X-small.npy", "../data/n10-3127-y-small.npy"),
("../data/n10-6797-X-small.npy", "../data/n10-6797-y-small.npy"),
("../data/n20-6174-X-small.npy", "../data/n20-6174-y-small.npy")]
dataset_n = 4
def four_cdfs():
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(6, 6))
flat_axes = [ax for ax_row in axes for ax in ax_row]
for i in range(dataset_n):
X = np.load(datasets[i][0])
y = np.load(datasets[i][1]).astype(np.float64)
y /= np.max(y)
flat_axes[i].plot(X, y)
flat_axes[i].set_title("数据集D%i" % (i+1))
axes[1][0].set_xlabel("搜索键")
axes[1][1].set_xlabel("搜索键")
axes[0][0].set_ylabel("累积分布函数")
axes[1][0].set_ylabel("累积分布函数")
fig.tight_layout()
fig.savefig("four-cdfs.pdf")
plt.close(fig)
def dispatch():
X = np.load("../data/dispatch.npy")
y = np.linspace(0, 1, len(X))
mean = 2000
fig, ax = plt.subplots(figsize=(4, 3))
ax.plot(X, y, label="实际分配情况")
ax.vlines(mean, 0, 1, label="理想均匀分配情况")
ax.set_xlabel("递归索引模型最后级机器学习模型训练集数据量")
ax.set_ylabel("对数正态分布下递归索引模型\n数据分配的累积分布函数")
ax.set_xlim(left=-100, right=20000)
ax.legend(loc=4)
fig.tight_layout()
fig.savefig("dispatch.pdf")
plt.close(fig)
def osm_cdfs():
X = np.load("../data/osm-sizes-X-small.npy")
y = np.load("../data/osm-sizes-y-small.npy").astype(np.float64)
y /= np.max(y)
fig, ax = plt.subplots(figsize=(4, 3))
ax.plot(X, y)
ax.set_xlabel("搜索键")
ax.set_ylabel("OSM数据集的累积分布函数")
fig.tight_layout()
fig.savefig("osm-cdf.pdf")
plt.close(fig)
def stretching():
hot_data_start = 0.4
hot_data_ratio = 0.3
hot_query_ratio = 0.7
fig, axes = plt.subplots(ncols=2, figsize=(6, 3))
X = np.load("../data/n10-1682-X-small.npy")
y = np.load("../data/n10-1682-y-small.npy").astype(np.float64)
y /= np.max(y)
axes[0].set_xlabel("搜索键")
axes[0].set_ylabel("累积分布函数")
axes[0].plot(X[:int(len(X) * hot_data_start)],
y[:int(len(X) * hot_data_start)],
"C0")
axes[0].plot(X[int(len(X) * hot_data_start):
int(len(X) * (hot_data_start + hot_data_ratio))],
y[int(len(X) * hot_data_start):
int(len(X) * (hot_data_start + hot_data_ratio))],
"C1")
axes[0].plot(X[int(len(X) * (hot_data_start + hot_data_ratio)):],
y[int(len(X) * (hot_data_start + hot_data_ratio)):],
"C0")
accum_freq_pivot_1 = (1 - hot_query_ratio) * \
hot_data_start / (1 - hot_data_ratio)
accum_freq_pivot_2 = accum_freq_pivot_1 + hot_query_ratio
accum_freq_1 = np.linspace(0, accum_freq_pivot_1,
int(len(X) * hot_data_start))
accum_freq_2 = np.linspace(accum_freq_pivot_1, accum_freq_pivot_2,
int(len(X) * (hot_data_start + hot_data_ratio)) - int(len(X) * hot_data_start))
accum_freq_3 = np.linspace(accum_freq_pivot_2, 1,
len(X) - int(len(X) * (hot_data_start + hot_data_ratio)))
axes[1].plot(X[:int(len(X) * hot_data_start)],
accum_freq_1, "C0")
axes[1].plot(X[int(len(X) * hot_data_start):
int(len(X) * (hot_data_start + hot_data_ratio))],
accum_freq_2, "C1")
axes[1].plot(X[int(len(X) * (hot_data_start + hot_data_ratio)):],
accum_freq_3, "C0")
axes[1].set_xlabel("搜索键")
axes[1].set_ylabel("“拉伸”后的累积分布函数")
fig.tight_layout()
fig.savefig("stretching.pdf")
plt.close(fig)
def stretching_result():
original_lat = np.asarray((270.84, 260.47, 288.27))
stretching_lat = np.asarray((236.70, 222.96, 156.98))
original = 1000 / original_lat
stretching = 1000 / stretching_lat
ind = np.arange(len(original)) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots(figsize=(4, 3))
ax.bar(ind - width/2, original, width, label='原始方法')
ax.bar(ind + width/2, stretching, width, label='数据拉伸')
ax.set_ylabel('学习索引结构吞吐量')
ax.set_xticks(ind)
ax.set_xticklabels(('Skewed 1', 'Skewed 2', 'Skewed 3'))
ax.legend()
fig.tight_layout()
fig.savefig("stretching-result.pdf")
plt.close(fig)
def analyzer():
fig, ax = plt.subplots(figsize=(4, 3))
X = np.load("../data/osm-sizes-X-small.npy")
y = np.load("../data/osm-sizes-y-small.npy").astype(np.float64)
y /= np.max(y)
K = 50
sample_X = []
sample_y = []
max_X = np.max(X)
min_X = np.min(X)
import ephem
import scipy as sp
import numpy as np
import pandas as pd
import pickle
from scipy import signal,stats
from datetime import datetime
from datetime import timezone
from maria import tools
#import tools
from importlib import resources
import weathergen
print('local objects')
# An array object describes the array of detectors; all of the arguments are dimensionful arrays of the same length.
default_atmosphere_config = {'n_layers' : 16, # how many layers to simulate, based on the integrated atmospheric model
'min_depth' : 50, # the height of the first layer
'max_depth' : 5000, #
'rel_atm_rms' : 1e-1,
'turbulence_model' : 'scale_invariant',
'outer_scale' : 500}
default_site_config = {'site' : 'ACT',
'time' : datetime.now(timezone.utc).timestamp(),
'weather_gen_method' : 'random',
'region' : 'atacama' }
default_array_config = {'shape' : 'hex',
'n' : 600, # maximum number of detectors
'fov' : 10, # maximum span of the array
'nom_bands' : 1.5e11,
'white_noise' : 0}
default_beams_config = {'optical_type' : 'diff_lim',
'beam_model' : 'top_hat',
'primary_size' : 5.5,
'min_beam_res' : .5 }
default_pointing_config = {'scan_type' : 'CES',
'duration' : 600,'samp_freq' : 20,
'center_azim' : 0, 'center_elev' : 90,
'az_throw' : 0, 'az_speed' : 1.5,
'el_throw' : 0, 'el_speed' : 1.5}
def validate_config(args,needed_args,name=''):
for arg in needed_args:
assert arg in args, f'Error: {name} config missing argument: {arg}'
class atmosphere():
def __init__(self, config=None):
if config==None:
print('No atm config specified, using default layers.')
self.config = default_atmosphere_config.copy()
else:
self.config = config.copy()
use_auto_depths = np.all(np.isin(['min_depth','max_depth','n_layers'],list(self.config)))
use_manual_depths = np.all(np.isin(['depths'],list(self.config)))
if use_manual_depths:
self.depths = self.config['depths']
if isinstance(self.depths, np.ndarray):
self.config['min_depth'] = self.depths.min()
self.config['max_depth'] = self.depths.max()
self.config['n_layers'] = len(self.depths)
else:
raise Exception('\'depths\' parameter must be a numpy array.')
if not (use_auto_depths or use_manual_depths):
for arg in ['min_depth','max_depth','n_layers']:
self.config[arg] = default_atmosphere_config[arg]
use_auto_depths = True
if use_auto_depths:
#self.depths = np.linspace(self.config['min_depth'], self.config['max_depth'], self.config['n_layers'])
self.depths = np.geomspace(self.config['min_depth'], self.config['max_depth'], self.config['n_layers'])
self.thicks = np.gradient(self.depths)
#raise Exception('Could not build atmospheric layers. Please specify the \'min_depth\', \'max_depth\', and \'n_layers\' parameters, or else enter an array of heights.')
necessary_args = ['turbulence_model','outer_scale','rel_atm_rms']
for arg in necessary_args:
if not arg in list(self.config):
self.config[arg] = default_atmosphere_config[arg]
if self.config['turbulence_model'] == 'scale_invariant':
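# Matérn-type correlation function: 2^(1-nu) / Gamma(nu) * (r/r0)^nu * K_nu(r/r0);
# the 1e-10 offset keeps the modified Bessel function finite at r = 0.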
self.matern = lambda r,r0,nu : 2**(1-nu)/sp.special.gamma(nu)*sp.special.kv(nu,r/r0+1e-10)*(r/r0+1e-10)**nu
with resources.path('maria','am_dict.npy') as handle:
self.spectra_dict = np.load(handle,allow_pickle=True)[()]
class site():
def __init__(self, config=None):
if config==None:
print('No site specified, using the ACT site.')
self.config = default_site_config.copy()
else:
self.config = config.copy()
if 'site' in list(self.config):
with resources.path("maria", "site_info.csv") as f:
self.site_df = pd.read_csv(f, index_col=0)
site_list = '\n\nsite' + 5*' ' + 'region' + 7*' ' + 'weather' + 3*' ' + 'longitude' + 3*' ' + 'latitude' + 2*' ' + 'height'
site_list += '\n' + (len(site_list)-2)*'#'
for sitename in list(self.site_df.index):
name,sup,loc,lon,lat,hgt = [self.site_df.loc[sitename,key] for key in ['longname','supported','region','longitude','latitude','altitude']]
lon_name = f'{np.round(np.abs(lon),3):>8}°' + ['W','E'][int(lon>0)]
lat_name = f'{np.round(np.abs(lat),3):>8}°' + ['S','N'][int(lat>0)]
site_list += f'\n{sitename:<8} {loc:<12} {sup:<8} {lon_name} {lat_name} {hgt:>6.0f}m'
if not self.config['site'] in self.site_df.index:
raise Exception('\'' + self.config['site'] + '\' is not a supported site! Supported sites are:' + site_list)
site_info = self.site_df.loc[self.config['site']]
region = site_info['region']
latitude = site_info['latitude']
longitude = site_info['longitude']
altitude = site_info['altitude']
else:
parameters = ['time','location','latitude','longitude','altitude']
if not np.all(np.isin(parameters, list(self.config))):
par_error = 'Please supply '
for par in parameters:
par_error += f'\'{par}\''
raise Exception(par_error)
else:
region = self.config['region']
latitude = self.config['latitude']
longitude = self.config['longitude']
altitude = self.config['altitude']
self.observer = ephem.Observer()
self.observer.lat, self.observer.lon, self.observer.elevation = str(latitude), str(longitude), altitude
self.region = region
self.timestamp = self.config['time']
self.observer.date = datetime.fromtimestamp(self.timestamp)
if 'weather_gen_method' in list(self.config):
self.weather = weathergen.generate(region=self.region,
time=self.timestamp,
method=self.config['weather_gen_method'])
print(self.weather['water_density'][0])
if 'pwv' in list(self.config):
self.weather['pwv'] = self.config['pwv']
class array():
def __init__(self, config=None):
if config==None:
print('No array specified, using the ACT array.')
self.config = default_array_config.copy()
else:
self.config = config.copy()
if 'shape' in list(self.config):
validate_config(list(self.config),['shape','fov','n'],name='')
self.config['offset_x'], self.config['offset_y'] = tools.make_array(self.config['shape'],
self.config['fov'],
self.config['n'])
self.z = np.radians(self.config['offset_x']) + 1j*np.radians(self.config['offset_y']); self.z -= self.z.mean()
self.x = np.real(self.z)
self.y = np.imag(self.z)
self.n = len(self.z)
# if no band is specified, then use the default setting
if not 'nom_bands' in list(self.config):
self.config['nom_bands'] = default_array_config['nom_bands']
if not 'bandwidths' in list(self.config):
self.config['bandwidths'] = 1e-1 * self.config['nom_bands']
#if type(self.config['bands']) in [float,int]:
#self.bands = self.config['bands'] * np.ones((self.n))
#self.band_errs = self.config['band_errs'] * np.ones((self.n))
self.nom_bands = self.config['nom_bands'] * np.ones((self.n))
self.bandwidths = self.config['bandwidths'] * np.ones((self.n))
self.nom_band_list, ui = np.unique(self.nom_bands,return_index=True)
self.bandwidth_list = self.bandwidths[ui[np.argsort(self.nom_band_list)]]
self.nom_band_list = np.sort(self.nom_band_list)
self.band_freq_list = np.c_[[mean + width*np.linspace(-.6,.6,121) for mean, width in zip(self.nom_band_list,
self.bandwidth_list)]]
#
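# Band-pass profile per band: the hard top-hat on the next line is immediately
# overridden by a smooth super-Gaussian approximation of the same width.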
flat_bp = lambda nu, band, width : (np.abs(nu-band) < .5*width).astype(float)
flat_bp = lambda nu, band, width : np.exp(np.log(.5)*(np.abs(nu-band)/(.5*width+1e-16))**8)
self.band_pass_list = np.c_[[flat_bp(freq,mean,width) for freq,mean,width in zip(self.band_freq_list,
self.nom_band_list,
self.bandwidth_list)]]
#self.band_field = np.sort(np.unique(np.r_[[mean + np.sqrt(2*np.log(2))*sigma*np.linspace(-1,1,9) for mean, sigma
# in zip(self.bands,self.band_errs)]]))
#self.n_band = len(self.ubands)
#self.n_band_field = len(self.band_field)
#ratio_f = 1.03
#n_bands = int(np.ceil(np.log(self.band_field.max()/self.band_field.min()) / np.log(ratio_f)))
#if n_bands < len(self.band_field):
# self.band_field = np.geomspace(self.band_field.min(),self.band_field.max(),n_bands)
#self.band_assoc = self.ubands[np.abs(np.subtract.outer(self.ubands,self.band_field)).argmin(axis=0)]
#gaussian_bp = lambda nu, band, band_sig : np.exp(-.5*((nu-band)/(band_sig+1e-16))**8)
#self.band_weights = flat_bp(self.band_field[None,:],self.nom_bands[:,None],.5*self.bandwidths[:,None])
#self.band_weights[self.band_weights < 1e-4] = 0
#self.unit_band_weights = self.band_weights.copy()
#self.unit_band_weights[self.unit_band_weights==0] = np.nan
#self.band_weights /= np.nansum(self.band_weights,axis=1)[:,None]
#self.ubands = np.unique(self.bands)
self.white_noise = self.config['white_noise'] * np.ones((self.n))
class pointing():
def __init__(self, config=None):
if config==None:
print('No pointing specified, defaulting to a 10-minute zenith stare at 20 Hz.')
self.config = default_pointing_config
else:
self.config = config.copy()
if 'scan_type' in list(self.config):
self.duration = self.config['duration']
self.dt = 1 / self.config['samp_freq']
self.time = np.arange(0, self.duration, self.dt)
self.nt = len(self.time)
self.f_ = np.fft.fftfreq(self.nt,self.dt)
self.center_azim, self.center_elev = np.radians(self.config['center_azim']), np.radians(self.config['center_elev'])
if self.config['scan_type']=='CES':
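# Constant-elevation scan: a triangle wave in azimuth; one full period covers
# 4 x az_throw of travel, hence scan_freq = az_speed / (4 * az_throw).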
self.scan_freq = self.config['az_speed'] / (4*self.config['az_throw']+1e-16)
self.focal_azim = (self.center_azim + np.radians(self.config['az_throw'])*sp.signal.sawtooth(np.pi/2 + 2*np.pi*self.scan_freq*self.time,width=.5)) % (2*np.pi)
self.focal_elev = self.center_elev + np.zeros(self.nt)
if self.config['scan_type']=='lissajous_box':
focal_x = np.radians(self.config['x_throw']) * np.sin(2*np.pi*self.time/self.config['x_period'])
focal_y = np.radians(self.config['y_throw']) * np.sin(2*np.pi*self.time/self.config['y_period'])
self.focal_azim, self.focal_elev = tools.from_xy(focal_x,focal_y,self.center_azim,self.center_elev)
if self.config['scan_type']=='lissajous_daisy':
focal_r = np.radians(self.config['throw']) * np.sin(2*np.pi*self.time/self.config['r_period'])
focal_p = 2*np.pi*self.time/self.config['p_period']
focal_x, focal_y = focal_r * np.cos(focal_p), focal_r * np.sin(focal_p)
self.focal_azim, self.focal_elev = tools.from_xy(focal_x,focal_y,self.center_azim,self.center_elev)
else:
self.focal_azim = self.config['focal_azim']
self.focal_elev = self.config['focal_elev']
self.time = self.config['time']
self.duration = self.time.max() - self.time.min()
self.dt = np.gradient(self.time).mean()
self.nt = len(self.time)
self.f_ = np.fft.fftfreq(self.nt,self.dt)
class beams():
def __init__(self, config=None):
if config==None:
print('No beams specified, defaulting to ACT beams.')
self.config = default_beams_config
else:
self.config = config.copy()
for arg in list(default_beams_config):
if not arg in list(self.config):
self.config[arg] = default_beams_config[arg]
if self.config['optical_type'] == 'diff_lim':
self.aperture = self.config['primary_size']
self.min_beam_res = self.config['min_beam_res']
#self.n_bf = int(1.5*np.ceil(self.min_beam_res))
# we define the waist as the FWHM of the beam cross-section
#gauss_half_waist = lambda z, w_0, f : .5 * w_0 * np.sqrt(1 + np.square(2.998e8 * z) / np.square(f * np.pi * np.square(.5 * w_0)))
#sharp_half_waist = lambda z, w_0, f : .5 * np.maximum(w_0,1.27324 * 2.998e8 * z / (w_0 * f))
self.get_waist = lambda z, w_0, f : np.maximum(w_0,1.27324 * 2.998e8 * z / (w_0 * f))
if self.config['beam_model'] == 'top_hat':
# NOTE: the source is truncated here; the completion below is assumed, mirroring the
# super-Gaussian flat_bp profile used in the array class.
self.get_window = lambda r, hwhm : np.exp(np.log(.5) * (r / (hwhm + 1e-16))**8)
# -*- encoding: utf-8 -*-
import numpy as np
import pytest
import os
import typhon
import scipy as sp
try:
from typhon.arts.workspace import Workspace, arts_agenda
from typhon.arts.workspace.variables import WorkspaceVariable
except:
skip_arts_tests = True
else:
skip_arts_tests = False
from typhon.arts.catalogues import Sparse
def agenda(ws):
ws.Print(ws.y, 0)
@pytest.mark.skipif(skip_arts_tests, reason='ARTS library not available')
class TestWorkspace:
def setup_method(self):
"""This ensures a new Workspace for every test."""
self.dir = os.path.dirname(os.path.realpath(__file__))
self.ws = Workspace()
self.setup_workspace()
def setup_workspace(self):
ws = self.ws
ws.atmosphere_dim = 1
ws.p_grid = np.linspace(1e5, 1e3, 21)
ws.Touch(ws.lat_grid)
ws.Touch(ws.lon_grid)
ws.f_grid = 183.0e9 * np.ones(1)
ws.stokes_dim = 1
ws.sensor_los = 180.0 * np.ones((1, 1))
ws.sensor_pos = 830e3 * np.ones((1, 1))
ws.sensorOff()
def test_index_transfer(self):
self.ws.IndexCreate("index_variable")
i = np.random.randint(0, 100)
self.ws.index_variable = i
assert self.ws.index_variable.value == i
def test_array_of_index_transfer(self):
self.ws.ArrayOfIndexCreate("array_of_index_variable")
i = [np.random.randint(0, 100) for j in range(10)]
self.ws.array_of_index_variable = i
assert self.ws.array_of_index_variable.value == i
self.ws.array_of_index_variable = []
assert self.ws.array_of_index_variable.value == []
def test_array_of_vector_transfer(self):
self.ws.ArrayOfVectorCreate("array_of_vector_variable")
aov = typhon.arts.xml.load(os.path.join(self.dir,
"xml/reference/arrayofvector.xml"))
self.ws.array_of_vector_variable = aov
assert self.ws.array_of_vector_variable.value == aov
def test_string_transfer(self):
self.ws.StringCreate("string_variable")
s = "some random string."
self.ws.string_variable = s
assert self.ws.string_variable.value == s
def test_vector_transfer(self):
self.ws.VectorCreate("vector_variable")
v = np.random.rand(10)
self.ws.vector_variable = v
assert all(self.ws.vector_variable.value == v)
def test_matrix_transfer(self):
self.ws.MatrixCreate("matrix_variable")
m = np.random.rand(10, 10)
self.ws.matrix_variable = m
assert all(self.ws.matrix_variable.value.ravel() == m.ravel())
def test_sparse_transfer(self):
sparse_formats = ["csc", "csr", "bsr", "lil", "dok", "coo", "dia"]
for f in sparse_formats:
n = 100
d2 = np.ones(n - 2)
d1 = np.ones(n - 1)
d = np.ones(n)
m = sp.sparse.diags(diagonals = [d2, d1, d, d1, d2],
offsets = [2, 1, 0, -1, -2])#
#format = "coo")
self.ws.sensor_response = m
print(self.ws.sensor_response.value)
assert np.all(m.todense() == self.ws.sensor_response.value.todense())
def test_supergeneric_overload_resolution(self):
self.ws.ArrayOfIndexCreate("array_of_index")
self.ws.ArrayOfArrayOfIndexCreate("array_of_array_of_index")
self.ws.array_of_index = [1, 2, 3]
self.ws.Append(self.ws.array_of_array_of_index, self.ws.array_of_index)
self.ws.Append(self.ws.array_of_array_of_index, self.ws.array_of_index)
def test_creation(self):
self.ws.ArrayOfIndexCreate("array_of_index")
self.ws.ArrayOfIndexCreate("array_of_index")
with pytest.raises(Exception):
self.ws.VectorCreate("array_of_index")
def test_wsm_error(self):
with pytest.raises(Exception):
self.ws.yCalc()
def test_doc(self):
repr(self.ws.yCalc)
def test_agenda(self):
self.ws.atmosphere_dim = 1
@arts_agenda
def add_1(ws):
ws.IndexAdd(ws.atmosphere_dim,
ws.atmosphere_dim,
1)
add_1.execute(self.ws)
assert self.ws.atmosphere_dim.value == 2
add_1.append(add_1)
add_1.execute(self.ws)
assert self.ws.atmosphere_dim.value == 4
args = [self.ws.atmosphere_dim, self.ws.atmosphere_dim, 1]
@arts_agenda
def add_2(ws):
ws.IndexAdd(*args)
add_2.execute(self.ws)
assert self.ws.atmosphere_dim.value == 5
def test_execute_controlfile(self):
dir = os.path.dirname(os.path.realpath(__file__))
test_dir = os.path.join(dir, "test_files")
self.ws.WriteXML("ascii", np.array([1.0]),
os.path.join(test_dir, "vector.xml"))
os.chdir(test_dir)
self.ws.execute_controlfile("controlfile.arts")
os.remove(os.path.join(test_dir, "vector.xml"))
def test_supergeneric_overload_failure(self):
with pytest.raises(Exception):
self.ws.NumericCreate("numeric_wsv")
self.ws.StringCreate("string_wsv")
self.ws.Copy(self.ws.string_wsv, self.ws.numeric_wsv)
def test_tensor_3(self):
t_0 = np.random.rand(*([3] * 3))
self.ws.Tensor3Create("tensor_3")
self.ws.tensor_3 = t_0
assert np.all(t_0 == self.ws.tensor_3.value)
def test_tensor_4(self):
t_0 = np.random.rand(*([3] * 4))
t_1 = self.ws.Tensor4Create("tensor_4")
self.ws.tensor_4 = t_0
assert np.all(t_0 == self.ws.tensor_4.value)
def test_tensor_5(self):
t_0 = np.random.rand(*([3] * 5))
t_1 = self.ws.Tensor5Create("tensor_5")
self.ws.tensor_5 = t_0
assert np.all(t_0 == self.ws.tensor_5.value)
def test_tensor_6(self):
t_0 = np.random.rand(*([3] * 6))
t_1 = self.ws.Tensor6Create("tensor_6")
self.ws.tensor_6 = t_0
assert np.all(t_0 == self.ws.tensor_6.value)
def test_tensor_7(self):
t_0 = np.random.rand(*([3] * 7))
self.ws.Tensor7Create("tensor_7")
self.ws.tensor_7 = t_0
assert np.all(t_0 == self.ws.tensor_7.value)
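# NOTE: the method below redefines test_execute_controlfile declared earlier in this
# class, so pytest only collects this second definition.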
def test_execute_controlfile(self):
dir = os.path.dirname(os.path.realpath(__file__))
test_dir = os.path.join(dir, "test_files")
self.ws.WriteXML("ascii", np.array([1.0]),
os.path.join(test_dir, "vector.xml"))
os.chdir(test_dir)
agenda = self.ws.execute_controlfile("controlfile.arts")
self.ws.foo = "not bar"
@arts_agenda
def execute(ws):
ws.FlagOff(ws.jacobian_do)
ws.StringSet(ws.foo, "still not bar")
INCLUDE("controlfile.arts")
INCLUDE(agenda)
self.ws.execute_agenda(execute)
assert self.ws.foo.value == "bar"
os.remove(os.path.join(test_dir, "vector.xml"))
def test_covariance_matrix(self):
ws = self.ws
ws.jacobianInit()
ws.jacobianAddAbsSpecies(species = "O3",
g1 = ws.p_grid,
g2 = ws.lat_grid,
g3 = ws.lon_grid)
ws.jacobianAddAbsSpecies(species = "H2O",
g1 = ws.p_grid,
g2 = ws.lat_grid,
g3 = ws.lon_grid)
ws.jacobianClose()
ws.covmatDiagonal(out = ws.covmat_block,
out_inverse = ws.covmat_block,
vars = 10.0 * np.ones(ws.p_grid.value.size))
ws.covmat_sxAddBlock(block = ws.covmat_block)
ws.covmatDiagonal(out = ws.covmat_block,
out_inverse = ws.covmat_block,
vars = 20.0 * np.ones(ws.p_grid.value.size))
ws.covmat_sxAddBlock(block = ws.covmat_block)
def test_variable_set_empty(self):
self.ws.f_grid = np.array([94e9])
self.ws.f_grid = []
assert self.ws.f_grid.value.size == 0
def test_variable_creation(self):
# Unnamed variable
wsv = self.ws.create_variable("Matrix", None)
self.ws.__setattr__(wsv.name, np.eye(5))
assert np.all(np.isclose(np.eye(5),
self.ws.__getattr__(wsv.name).value))
# Named variable
wsv = self.ws.create_variable("Matrix", "matrix_wsv")
self.ws.matrix_wsv = np.eye(5)
# import numpy as np
# from numpy import cos, sin, exp, pi
# from collections import deque
# # SPFPM library for fixed point python numbers
# from FixedPoint import FXnum, FXfamily
# # Import for plotting
# from pylab import plot, show, grid, xlabel, ylabel
"""Create plots of signals generated by chirp() and sweep_poly()."""
import numpy as np
from numpy import cos, sin, pi
from scipy.signal.waveforms import chirp, sweep_poly
from scipy.signal import hilbert, decimate
from scipy.fft import fft, ifft
from numpy import poly1d
# TODO remove pylab; no longer recommended
from pylab import figure, plot, show, xlabel, ylabel, subplot, grid, title, \
yscale, savefig, clf
import matplotlib.pyplot as plt
import math
import random
FIG_SIZE = (7.5, 3.75)
# f0 = start freq
# f1 = end freq
# t1 = time of sweep
# t_f = fraction to generate
# fs = sample freq
# osf = oversampling factor
def make_linear(f0, f1, t1, t_f, fs, tstart=0.0, osf=1, returnWaveform=False,
makeComplex=False, negateFrequency=False, filename=None, fig_size=FIG_SIZE):
ttot = t1 * t_f # total time
n_samps = int(ttot * fs * osf)
t_samps = np.linspace(tstart, ttot + tstart, n_samps)
f_t = np.linspace(0, ttot, n_samps) # overridden later - FIX
if not makeComplex:
w = np.linspace(0, ttot, n_samps) # overridden later - FIX
else:
w = [0+0j] * n_samps
c = (f1 - f0) / t1 # 'chirpyness' (aka alpha)
# e.g. (45 - 5) / t1 = 40 Hz/s for t1 = 1 s
# (-45 - (-5)) / t1 = -40 Hz/s
# (-5 - (-45)) / t1 = 40 Hz/s  <- when negating the frequency
# f(t) = c*t + f0
# ph(t) = ph0 + 2*pi*(c/2 * t^2 + f0*t)
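# (the phase is the integral of the instantaneous frequency:
#  ph(t) = ph0 + 2*pi * integral_0^t (c*u + f0) du)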
# Sample period
ts = 1 / fs
ph0 = 0 # start phase of the chirp
pi2 = 2*np.pi
for i in range(len(t_samps)):
# Not used
#r_t(i) = d0 + v0*t(i)
#td(i) = 2*r_t(i)/c
#Tx(i) = cos(2*pi*(fc*t(i) + slope*t(i)^2/2))
#w[i] = cos(2*pi*(fc*it(i) - td(i)) + i*\(t(i) - td(i))^2/2))
# ----------
t = i * ts / osf # time corresponding to index
# this should be in t_samps.. FIX
f_t[i] = c*t + f0 # frequency
if negateFrequency:
ph = ph0 + pi2 * (c/2 * t*t - f0 * t)
else:
ph = ph0 + pi2 * (c/2 * t*t + f0 * t)
ph_mod = math.fmod(ph, pi2) # phase modulo 2pi
if not makeComplex:
w[i] = cos(ph_mod)
import os
import glob
import torch
import random
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from PIL import Image
from SODData import SODData
import torch.optim as optim
from torchvision import models
import torch.nn.functional as F
import pydensecrf.densecrf as dcrf
from torchvision import transforms
from alisuretool.Tools import Tools
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, Dataset
from torchvision.models.resnet import BasicBlock as ResBlock
from pydensecrf.utils import unary_from_softmax, unary_from_labels
#######################################################################################################################
# 0 CRF
class CRFTool(object):
@staticmethod
def crf(img, annotation, t=5):  # img: [h, w, 3], annotation: [1, h, w]
img = np.ascontiguousarray(img)
annotation = np.concatenate([annotation, 1 - annotation], axis=0)
h, w = img.shape[:2]
d = dcrf.DenseCRF2D(w, h, 2)
unary = unary_from_softmax(annotation)
unary = np.ascontiguousarray(unary)
d.setUnaryEnergy(unary)
# DIAG_KERNEL CONST_KERNEL FULL_KERNEL
# NORMALIZE_BEFORE NORMALIZE_SYMMETRIC NO_NORMALIZATION NORMALIZE_AFTER
d.addPairwiseGaussian(sxy=3, compat=3, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)
d.addPairwiseBilateral(sxy=80, srgb=13, rgbim=np.copy(img), compat=10,
kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)
q = d.inference(t)
result = np.array(q).reshape((2, h, w))
return result[0]
@staticmethod
def crf_label(image, annotation, t=5, n_label=2):
image = np.ascontiguousarray(image)
h, w = image.shape[:2]
annotation = np.squeeze(np.array(annotation))
a, b = (0.8, 0.1)
if np.max(annotation) > 1:
a, b = a * 255, b * 255
pass
label_extend = np.zeros_like(annotation)
label_extend[annotation > a] = 2
label_extend[annotation < b] = 1
_, label = np.unique(label_extend, return_inverse=True)
d = dcrf.DenseCRF2D(w, h, n_label)
u = unary_from_labels(label, n_label, gt_prob=0.7, zero_unsure=True)
u = np.ascontiguousarray(u)
d.setUnaryEnergy(u)
d.addPairwiseGaussian(sxy=(3, 3), compat=3)
d.addPairwiseBilateral(sxy=(80, 80), srgb=(13, 13, 13), rgbim=image, compat=10)
q = d.inference(t)
map_result = np.argmax(q, axis=0)
result = map_result.reshape((h, w))
return result
@classmethod
def crf_torch(cls, img, annotation, t=5):
img_data = np.asarray(img, dtype=np.uint8)
annotation_data = np.asarray(annotation)
result = []
for img_data_one, annotation_data_one in zip(img_data, annotation_data):
img_data_one = np.transpose(img_data_one, axes=(1, 2, 0))
result_one = cls.crf(img_data_one, annotation_data_one, t=t)
# result_one = cls.crf_label(img_data_one, annotation_data_one, t=t)
result.append(np.expand_dims(result_one, axis=0))
import logging
import time
import warnings
from itertools import product, repeat
from multiprocessing import Pool, cpu_count
from pathlib import Path
from typing import List, Optional, Tuple
import importlib_resources
import numpy as np
import spiceypy as spice
from shapely.geometry import Point, Polygon, box
from tqdm import tqdm
from uvisaurorae.data_retrieval import make_metakernel
logger = logging.getLogger(__name__)
class UVISAuroralProjector(object):
"""
Class for managing auroral projections.
"""
def __init__(
self,
nbins_lon: int,
nbins_lat: int,
spice_dir: Path,
raise_spice_insufficient: bool = True,
):
"""
Constructor method.
:param nbins_lon: Number of projection bins in longitude (0...360 deg).
:param nbins_lat: Number of projection bins in latitude (-90...90 deg).
:param spice_dir: Root directory of SPICE kernels.
:param raise_spice_insufficient: Whether the projector should raise an exception if a record is not covered by
SPICE kernels. If `false`, will silently skip records which cannot be projected.
"""
# Set up binning
self.lon_bins = np.linspace(0, 360, num=nbins_lon + 1)
self.lat_bins = np.linspace(-90, 90, num=nbins_lat + 1)
self.lon_centers = self.lon_bins[:-1] + np.diff(self.lon_bins) / 2
self.lat_centers = self.lat_bins[:-1] + np.diff(self.lat_bins) / 2
# Determine bin-binning for polar pixels (combine bins near the pole for projection speed)
self.bin_map = np.full((len(self.lon_centers), len(self.lat_centers)), np.nan)
self.is_master_bin = np.zeros(self.bin_map.shape, dtype=bool)
bin_colat_start = 85
for lat_idx in np.where(np.abs(self.lat_centers) > bin_colat_start)[0]:
n_bins = (
(90 - np.abs(self.lat_centers[lat_idx])) / (90 - bin_colat_start)
) ** 2 * len(self.lon_centers)
self.bin_map[:, lat_idx] = (
np.digitize(self.lon_centers, np.linspace(0, 360, int(n_bins) + 1)) - 1
)
for i in np.unique(self.bin_map[:, lat_idx]):
tmp = np.where(self.bin_map[:, lat_idx] == i)[0]
self.is_master_bin[int(np.mean(tmp)), lat_idx] = True
self.spice_dir = spice_dir
self.metakernels: List[Path] = []
self.raise_spice_insufficient = raise_spice_insufficient
self.reset_spice()
def reset_spice(self) -> None:
"""
Clear SPICE kernel cache and reload all necessary kernels, needed for loading updated metakernels. Will create
a new unique metakernel and add its path to ``self.metakernels`` to make sure it can be deleted later.
:return: None
"""
# Clear SPICE kernels
spice.kclear()
# Load SPICE kernels
metakernel = make_metakernel(self.spice_dir)
time.sleep(0.5) # Need to wait for a bit here to avoid file reading errors
spice.furnsh(str(metakernel))
self.metakernels.append(metakernel)
# Load some additional SPICE kernels if not loaded already
# Note that "saturn1100.tpc" redefines the size of Saturn (may be needed to refresh the kernel pool before other
# calculations using this parameter are performed)
for kernel in ["naif0012.tls", "saturn1100.tpc", "frame_ksmag.tf"]:
k = importlib_resources.files("uvisaurorae.resources").joinpath(kernel) # type: ignore
try:
spice.kinfo(str(k))
except spice.stypes.SpiceyError:
spice.furnsh(str(k))
def remove_metakernels(self) -> None:
"""
Delete all metakernels which are listed in ``self.metakernels``.
:return: None
"""
for mk in self.metakernels:
if mk.exists():
mk.unlink()
self.metakernels = []
@staticmethod
def get_fov_vectors(
line_bin: int, ul_corner_line: int, lr_corner_line: int
) -> Tuple[np.ndarray, np.ndarray]:
"""
Return field of view vectors of a pixel binning in the SPICE UVIS frame.
:param line_bin: Number of neighboring pixels binned together.
:param ul_corner_line: First used pixel.
:param lr_corner_line: Last used pixel.
:return: Array with pixel center vectors of shape (# pixels, 3), array with pixel edge/corner vectors of shape
(# pixels, # edge/corner vectors / hardcoded 12, 3).
"""
# Calculate number of pixels along the sensor
npx = int(np.ceil((lr_corner_line + 1 - ul_corner_line) / line_bin))
# Get UVIS_FUV FOV (NAIF ID of UVIS_FUV -82840, max number of vectors returned)
_, _, boresight, _, boundvec = spice.getfov(-82840, 100)
# Get all angles between corner points of the field of view
all_angles = []
for iii in range(len(boundvec)):
for jjj in range(iii + 1, len(boundvec)):
all_angles.append(np.arccos(np.dot(boundvec[iii], boundvec[jjj])))
import pickle
import os
import sys
import numpy as np
import torch
import torch.utils.data as torch_data
from torch.utils.data import DataLoader
class ScannetDataset(torch_data.Dataset):
def __init__(self,
root= '/data/eva_share_users/zhaotianchen/scannet/raw/scannet_pickles',
npoints=10240,
split='train',
with_dropout=False,
with_norm=True,
with_rgb=True,
with_seg=False,
with_instance=False,
with_pred=False,
sample_rate=None):
super().__init__()
print(' ---- load data from', root)
self.NUM_LABELS = 20
self.NUM_IN_CHANNEL = 3
self.NEED_PRED_POSTPROCESSING = False
self.npoints = npoints
self.with_dropout = with_dropout
self.indices = [0, 1, 2]
if with_norm: self.indices += [3, 4, 5]
if with_rgb: self.indices += [6, 7, 8]
# assert only 1 of the with_instance/pred/seg is True
assert sum([with_instance, with_seg, with_pred is not None]) <= 1
self.with_aux = with_instance or with_seg or with_pred
print('load scannet dataset <{}> with npoint {}, indices: {}.'.format(split, npoints, self.indices))
# deprecated version of pickle load
# data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
# with open(data_filename, 'rb') as fp:
# self.scene_points_list = pickle.load(fp)
# self.semantic_labels_list = pickle.load(fp)
# # scene_points_id = pickle.load(fp)
# num_point_all = pickle.load(fp)
# TEST: newer loading of the pth file
data_filename = os.path.join(root, 'new_{}.pth'.format(split))
data_dict = torch.load(data_filename)
self.scene_points_list = data_dict['data']
self.semantic_labels_list = data_dict['label']
if self.with_aux:
if with_instance:
self.instance_label_list = data_dict['instance']
elif with_seg:
self.instance_label_list = data_dict['label']
elif with_pred:
self.instance_label_list = torch.load(os.path.join(with_pred, "{}_pred.pth".format(split)))['pred']
else:
pass
#scene_points_id = pickle.load(fp)
num_point_all = data_dict['npoints']
if split == 'train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
# self.labelweights = 1/np.log(1.2+labelweights)
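# class weights ~ (max class frequency / class frequency)^(1/3): rare classes are
# up-weighted, with the cube root damping the ratio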
self.labelweights = np.power(np.amax(labelweights[1:]) / labelweights, 1 / 3.0)
elif split == 'eval' or split == 'test' or split == 'debug':
self.labelweights = np.ones(21)
else:
raise ValueError('split must be train, eval, test or debug.')
# sample & repeat scenes, older version deprecated
if sample_rate is not None:
num_point = npoints
sample_prob = num_point_all / np.sum(num_point_all)
num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
room_idxs = []
for index in range(len(self.scene_points_list)):
repeat_times = round(sample_prob[index] * num_iter)
repeat_times = int(max(repeat_times, 1))
room_idxs.extend([index] * repeat_times)
self.room_idxs = np.array(room_idxs)
np.random.seed(123)
np.random.shuffle(self.room_idxs)
else:
self.room_idxs = np.arange(len(self.scene_points_list))
print("Totally {} samples in {} set.".format(len(self.room_idxs), split))
def __getitem__(self, index):
index = self.room_idxs[index]
data_set = self.scene_points_list[index]
point_set = data_set[:, :3]
if self.with_aux:
instance_set = self.instance_label_list[index]
semantic_seg = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set, axis=0)
coordmin = np.min(point_set, axis=0)
smpmin = np.maximum(coordmax-[2, 2, 3.0], coordmin)
smpmin[2] = coordmin[2]
smpsz = np.minimum(coordmax-smpmin,[2,2,3.0])
smpsz[2] = coordmax[2]-coordmin[2]
isvalid = False
# randomly choose a point as center point and sample <n_points> points in the box area of center-point
for i in range(10):
curcenter = point_set[np.random.choice(len(semantic_seg),1)[0],:]
curmin = curcenter - [1, 1, 1.5]
curmax = curcenter + [1, 1, 1.5]
curmin[2] = coordmin[2]
curmax[2] = coordmax[2]
curchoice = np.sum((point_set >= (curmin - 0.2)) * (point_set <= (curmax + 0.2)), axis=1) == 3
cur_point_set = point_set[curchoice, :]
cur_data_set = data_set[curchoice, :]
if self.with_aux:
try:
cur_instance_set = instance_set[curchoice]
except IndexError:
import ipdb; ipdb.set_trace()
cur_semantic_seg = semantic_seg[curchoice]
if len(cur_semantic_seg) == 0:
continue
mask = np.sum((cur_point_set >= (curmin - 0.01)) * (cur_point_set <= (curmax + 0.01)), axis=1) == 3
vidx = np.ceil((cur_point_set[mask, :] - curmin) / (curmax - curmin) * [31.0, 31.0, 62.0])
vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 + vidx[:, 2])
isvalid = np.sum(cur_semantic_seg > 0) / len(cur_semantic_seg) >= 0.7 and len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
if isvalid:
break
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
semantic_seg = cur_semantic_seg[choice]
if self.with_aux:
instance_seg = cur_instance_set[choice]
mask = mask[choice]
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask
selected_points = cur_data_set[choice, :] # np * 6, xyz + rgb
point_set = np.zeros((self.npoints, 9)) # xyz, norm_xyz, rgb
point_set[:, :3] = selected_points[:, :3] # xyz
for i in range(3): # normalized_xyz
point_set[:, 3 + i] = (selected_points[:, i] - coordmin[i]) / (coordmax[i] - coordmin[i])
point_set[:, 6:] = selected_points[:, 3:] / 255.0 # rgb
if self.with_dropout:
dropout_ratio = np.random.random() * 0.875 # 0 ~ 0.875
drop_idx = np.where(np.random.random((self.npoints)) <= dropout_ratio)[0]
point_set[drop_idx, :] = point_set[0, :]
semantic_seg[drop_idx] = semantic_seg[0]
sample_weight[drop_idx] *= 0
point_set = point_set[:, self.indices]
# WARNING: deprecated (failed) attempt at the instance_relation dict
# if self.with_instance:
# k = self.k
# idxes = [np.where(instance_seg == x)[0] for x in np.unique(instance_seg)]
# instance_relations = np.full([instance_seg.size,k], -1)
# for i, idx in enumerate(idxes):
# choices = np.random.choice(idxes[i], (idxes[i].size,k))
# instance_relations[idxes[i]] = choices
# instance_relations[:,0] = np.arange(instance_relations.shape[0])
# instance_relations = instance_relations.astype(int)
if self.with_aux:
return point_set, semantic_seg, sample_weight, instance_seg
else:
return point_set, semantic_seg, sample_weight
def __len__(self):
return len(self.room_idxs)
# return len(self.scene_points_list)
class ScannetDatasetWholeScene(torch_data.IterableDataset):
def __init__(self, root=None, npoints=10240, split='train', with_norm=True, with_rgb=True):
super().__init__()
print(' ---- load data from', root)
self.npoints = npoints
self.indices = [0, 1, 2]
if with_norm: self.indices += [3, 4, 5]
if with_rgb: self.indices += [6, 7, 8]
print('load scannet <whole scene> dataset <{}> with npoint {}, indices: {}.'.format(split, npoints, self.indices))
self.temp_data = []
self.temp_index = 0
self.now_index = 0
data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
with open(data_filename, 'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
if split == 'train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
# self.labelweights = 1 / np.log(1.2 + labelweights)
self.labelweights = np.power(np.amax(labelweights[1:]) / labelweights, 1 / 3.0)
elif split == 'eval' or split == 'test':
self.labelweights = np.ones(21)
def get_data(self):
idx = self.temp_index
self.temp_index += 1
return self.temp_data[idx]
def reset(self):
self.temp_data = []
self.temp_index = 0
self.now_index = 0
def __iter__(self):
self.reset()
return self
def __next__(self):
if self.now_index >= len(self.scene_points_list) and self.temp_index >= len(self.temp_data):
raise StopIteration()
if self.temp_index < len(self.temp_data):
return self.get_data()
index = self.now_index
self.now_index += 1
self.temp_data = []
self.temp_index = 0
# print(self.temp_index, self.now_index, len(self.scene_points_list))
data_set_ini = self.scene_points_list[index]
point_set_ini = data_set_ini[:,:3]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set_ini,axis=0)
coordmin = np.min(point_set_ini,axis=0)
grid_size=2
nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/grid_size).astype(np.int32)
nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/grid_size).astype(np.int32)
point_sets = list()
semantic_segs = list()
sample_weights = list()
isvalid = False
for i in range(nsubvolume_x):
for j in range(nsubvolume_y):
curmin = coordmin+[i*grid_size,j*grid_size,0]
curmax = coordmin+[(i+1)*grid_size,(j+1)*grid_size,coordmax[2]-coordmin[2]]
curchoice = np.sum((point_set_ini>=(curmin-0.2))*(point_set_ini<=(curmax+0.2)),axis=1)==3
cur_point_set = point_set_ini[curchoice,:]
cur_data_set = data_set_ini[curchoice,:]
cur_semantic_seg = semantic_seg_ini[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set >= (curmin - 0.001)) * (cur_point_set <= (curmax + 0.001)), axis=1) == 3
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=len(cur_semantic_seg) < self.npoints)
semantic_seg = cur_semantic_seg[choice] # N
mask = mask[choice]
if sum(mask) / float(len(mask)) < 0.01:
continue
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask # N
selected_points = cur_data_set[choice, :] # Nx6
point_set = np.zeros([self.npoints, 9])
point_set[:, :3] = selected_points[:, :3] # xyz
for k in range(3): # normalized_xyz
point_set[:, 3 + k] = (selected_points[:, k] - coordmin[k]) / (coordmax[k] - coordmin[k])
point_set[:, 6:] = selected_points[:, 3:] / 255.0 # rgb
point_set = point_set[:, self.indices]
self.temp_data.append((point_set, semantic_seg, sample_weight))
return self.get_data()
class ScannetDatasetWholeScene_evaluation(torch_data.IterableDataset):
# prepare to give a prediction for each point
def __init__(self, root=None, scene_list_dir=None, split='test', num_class=21, block_points=81932, with_norm=True, with_rgb=True, with_seg=False,with_instance=False, \
with_pred=None, delta=1.0):
super().__init__()
print(' ---- load data from', root)
self.NUM_LABELS = 20
self.NUM_IN_CHANNEL = 3
self.NEED_PRED_POSTPROCESSING = False
self.block_points = block_points
self.indices = [0, 1, 2]
if with_norm: self.indices += [3, 4, 5]
if with_rgb: self.indices += [6, 7, 8]
assert sum([with_instance, with_seg, with_pred is not None]) <= 1
self.with_aux = with_instance or with_seg or with_pred
print('load scannet <TEST> dataset <{}> with npoint {}, indices: {}.'.format(split, block_points, self.indices))
self.delta = delta
self.point_num = []
self.temp_data = []
self.temp_index = 0
self.now_index = 0
'''
the deprecated version of the pickle loading
data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
with open(data_filename, 'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
# self.scene_points_id = pickle.load(fp)
self.scene_points_num = pickle.load(fp)
file_path = os.path.join(scene_list_dir, 'scannetv2_{}.txt'.format(split))
'''
data_filename = os.path.join(root, 'new_{}.pth'.format(split))
data_dict = torch.load(data_filename)
self.scene_points_list = data_dict['data']
self.semantic_labels_list = data_dict['label']
# give the aux supervision, packed in self.instance_label_list
if self.with_aux:
if with_instance:
self.instance_label_list = data_dict['instance']
elif with_seg:
self.instance_label_list = data_dict['label']
elif with_pred:
self.instance_label_list = torch.load(os.path.join(with_pred, "{}_pred.pth".format(split)))['pred']
else:
pass
self.scene_points_num = data_dict['npoints']
file_path = os.path.join(scene_list_dir, 'scannetv2_{}.txt'.format(split))
num_class = 21
if split == 'test' or split == 'eval' or split == 'train' or split == 'debug':
self.labelweights = np.ones(num_class)
for seg in self.semantic_labels_list:
self.point_num.append(seg.shape[0])
with open(file_path) as fl:
self.scene_list = fl.read().splitlines()
else:
raise ValueError('split must be train, test, eval or debug, got {}'.format(split))
def reset(self):
self.temp_data = []
self.temp_index = 0
self.now_index = 0
def __iter__(self):
if self.now_index >= len(self.scene_points_list):
print(' ==== reset dataset index ==== ')
self.reset()
self.gen_batch_data()
return self
def chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def split_data(self, data, idx):
new_data = []
for i in range(len(idx)):
new_data += [data[idx[i]]]
return new_data
def nearest_dist(self, block_center, block_center_list):
num_blocks = len(block_center_list)
dist = np.zeros(num_blocks)
for i in range(num_blocks):
dist[i] = np.linalg.norm(block_center_list[i] - block_center, ord = 2) #i->j
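# index of the closest block center (equivalent to np.argmin(dist))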
return np.argsort(dist)[0]
def gen_batch_data(self):
index = self.now_index
self.now_index += 1
self.temp_data = []
self.temp_index = 0
print(' ==== generate batch data of {} ==== '.format(self.scene_list[index]))
delta = self.delta
# delta = 1.0
# delta = 4.0
# if self.with_rgb:
point_set_ini = self.scene_points_list[index]
# else:
# point_set_ini = self.scene_points_list[index][:, 0:3]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
if self.with_aux:
instance_seg_ini = self.instance_label_list[index].astype(np.int32)
coordmax = np.max(point_set_ini[:, 0:3],axis=0)
coordmin = np.min(point_set_ini[:, 0:3],axis=0)
from __future__ import division, print_function
import importlib
import numpy as np
EPSILON = 0.01
def constrain(x, a, b):
return np.minimum(np.maximum(x, a), b)
def not_zero(x):
if abs(x) > EPSILON:
return x
elif x > 0:
return EPSILON
else:
return -EPSILON
def wrap_to_pi(x):
return ((x+np.pi) % (2*np.pi)) - np.pi
def point_in_rectangle(point, rect_min, rect_max):
"""
Check if a point is inside a rectangle
:param point: a point (x, y)
:param rect_min: x_min, y_min
:param rect_max: x_max, y_max
"""
return rect_min[0] <= point[0] <= rect_max[0] and rect_min[1] <= point[1] <= rect_max[1]
def point_in_rotated_rectangle(point, center, length, width, angle):
"""
Check if a point is inside a rotated rectangle
:param point: a point
:param center: rectangle center
:param length: rectangle length
:param width: rectangle width
:param angle: rectangle angle [rad]
"""
c, s = np.cos(angle), np.sin(angle)
r = np.array([[c, -s], [s, c]])
ru = r.dot(point - center)
return point_in_rectangle(ru, [-length/2, -width/2], [length/2, width/2])
def point_in_ellipse(point, center, angle, length, width):
"""
Check if a point is inside an ellipse
:param point: a point
:param center: ellipse center
:param angle: ellipse main axis angle
:param length: ellipse big axis
:param width: ellipse small axis
"""
c, s = np.cos(angle), np.sin(angle)
r = np.array([[c, -s], [s, c]])
ru = r.dot(point - center)
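# in the ellipse frame, check (x/length)^2 + (y/width)^2 < 1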
return np.sum(np.square(ru / np.array([length, width]))) < 1
def rotated_rectangles_intersect(rect1, rect2):
"""
Do two rotated rectangles intersect?
:param rect1: (center, length, width, angle)
:param rect2: (center, length, width, angle)
"""
return has_corner_inside(rect1, rect2) or has_corner_inside(rect2, rect1)
def has_corner_inside(rect1, rect2):
"""
Check if rect1 has a corner inside rect2
:param rect1: (center, length, width, angle)
:param rect2: (center, length, width, angle)
"""
(c1, l1, w1, a1) = rect1
(c2, l2, w2, a2) = rect2
c1 = np.array(c1)
l1v = np.array([l1/2, 0])
w1v = np.array([0, w1/2])
r1_points = np.array([[0, 0],
- l1v, l1v, - w1v, w1v,
- l1v - w1v, - l1v + w1v, + l1v - w1v, + l1v + w1v])
c, s = np.cos(a1), np.sin(a1)
r = np.array([[c, -s], [s, c]])
rotated_r1_points = r.dot(r1_points.transpose()).transpose()
return any([point_in_rotated_rectangle(c1 + np.squeeze(p), c2, l2, w2, a2) for p in rotated_r1_points])
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""nn test cases"""
import paddle.fluid as fluid
import paddle
import numpy as np
import math
import tools
import platform
from paddle.fluid.dygraph.base import to_variable
def test_L1Loss():
"""
test L1 loss reduction=none
Returns:
None
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
l1_loss = paddle.nn.loss.L1Loss("none")
input = fluid.data(name="input", shape=[3, 3])
label = fluid.data(name="label", shape=[3, 3])
output = l1_loss(input, label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]).astype("float32")
label_data = np.array([[2, 3, 2], [3, 1, 2], [1, 1, 1]]).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input": input_data, "label": label_data},
fetch_list=[output],
return_numpy=True)
expect = np.array([[0, 1, 0], [1, 1, 0], [1, 1, 1]], dtype=np.float32)
tools.compare(output_data[0], expect)
def test_L1Loss1():
"""
test L1 loss reduction=sum
Returns:
None
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
l1_loss = paddle.nn.loss.L1Loss("sum")
input = fluid.data(name="input", shape=[3, 3])
label = fluid.data(name="label", shape=[3, 3])
output = l1_loss(input, label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]).astype("float32")
label_data = np.array([[2, 3, 2], [3, 1, 2], [1, 1, 1]]).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input": input_data, "label": label_data},
fetch_list=[output],
return_numpy=True)
expect = np.array([6], dtype=np.float32)
tools.compare(output_data[0], expect)
def test_L1Loss2():
"""
test L1 loss reduction=mean
Returns:
None
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
l1_loss = paddle.nn.loss.L1Loss("mean")
input = fluid.data(name="input", shape=[3, 3])
label = fluid.data(name="label", shape=[3, 3])
output = l1_loss(input, label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]).astype("float32")
label_data = np.array([[2, 3, 2], [3, 1, 2], [1, 1, 1]]).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input": input_data, "label": label_data},
fetch_list=[output],
return_numpy=True)
expect = np.array([0.66666666], dtype=np.float32)
tools.compare(output_data[0], expect)
def test_L1Loss3():
"""
test L1 loss type = int32
Returns:
None
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
l1_loss = paddle.nn.loss.L1Loss("sum")
input = fluid.data(name="input", shape=[3, 3], dtype="int32")
label = fluid.data(name="label", shape=[3, 3], dtype="int32")
output = l1_loss(input, label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]).astype("int32")
label_data = np.array([[2, 3, 2], [3, 1, 2], [1, 1, 1]]).astype("int32")
output_data = exe.run(fluid.default_main_program(),
feed={"input": input_data, "label": label_data},
fetch_list=[output],
return_numpy=True)
expect = np.array([6], dtype=np.int32)
tools.compare(output_data[0], expect)
def test_L1Loss4():
"""
test L1 loss type = int64
Returns:
None
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
l1_loss = paddle.nn.loss.L1Loss("mean")
input = fluid.data(name="input", shape=[3, 3], dtype="int64")
label = fluid.data(name="label", shape=[3, 3], dtype="int64")
output = l1_loss(input, label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]).astype("int64")
label_data = np.array([[2, 3, 2], [3, 1, 2], [1, 1, 1]]).astype("int64")
output_data = exe.run(fluid.default_main_program(),
feed={"input": input_data, "label": label_data},
fetch_list=[output],
return_numpy=True)
expect = np.array([0], dtype=np.int64)
tools.compare(output_data[0], expect)
def test_L1Loss5():
"""
test L1 loss type = float64
Returns:
None
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
l1_loss = paddle.nn.loss.L1Loss("mean")
input = fluid.data(name="input", shape=[3, 3], dtype="float64")
label = fluid.data(name="label", shape=[3, 3], dtype="float64")
output = l1_loss(input, label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]).astype("float64")
label_data = np.array([[2, 3, 2], [3, 1, 2], [1, 1, 1]]).astype("float64")
output_data = exe.run(fluid.default_main_program(),
feed={"input": input_data, "label": label_data},
fetch_list=[output],
return_numpy=True)
expect = np.array([0.66666666], dtype=np.float64)
tools.compare(output_data[0], expect)
def __allclose__(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
"""
compute allclose
Args:
input: input tensor
other: other tensor
rtol: relative tolerance
atol: absolute tolerance
equal_nan: if true ,two nans will be equal
name: name
Returns:
Boolean
"""
arr = abs(input - other) <= atol + rtol * abs(other)
if False in arr:
return [False]
else:
return [True]
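# Worked example with the defaults (rtol=1e-05, atol=1e-08):
# for input=[100, 200] and other=[100, 2000], |200 - 2000| = 1800 > 1e-08 + 1e-05*2000,
# so __allclose__ returns [False].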
def test_allclose():
"""
test allclose
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([100, 200]).astype("float32"))
other = to_variable(np.array([100, 2000]).astype("float32"))
res = paddle.tensor.logic.allclose(input, other)
expect = __allclose__(np.array([100, 200]).astype("float32"), np.array([100, 2000]).astype("float32"))
tools.compare(res.numpy(), expect)
def test_allclose1():
"""
test allclose
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([100, 200]).astype("float32"))
other = to_variable(np.array([100, 200]).astype("float32"))
res = paddle.tensor.logic.allclose(input, other)
expect = __allclose__(np.array([100, 200]).astype("float32"), np.array([100, 200]).astype("float32"))
tools.compare(res.numpy(), expect)
def test_allclose2():
"""
test allclose type=float64
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([100, 200]).astype("float64"))
other = to_variable(np.array([100, 2000]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other)
expect = __allclose__(np.array([100, 200]).astype("float64"), np.array([100, 2000]).astype("float64"))
tools.compare(res.numpy(), expect)
def test_allclose3():
"""
test allclose type=float64
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([100, 200]).astype("float64"))
other = to_variable(np.array([100, 200]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other)
expect = __allclose__(np.array([100, 200]).astype("float64"), np.array([100, 200]).astype("float64"))
tools.compare(res.numpy(), expect)
def test_allclose4():
"""
test allclose type=float64 rtol=0
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([100, 1e-7]).astype("float64"))
other = to_variable(np.array([100, 1e-8]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other, rtol=0)
expect = __allclose__(np.array([100, 1e-7]).astype("float64"), np.array([100, 1e-8]).astype("float64"),
rtol=0)
tools.compare(res.numpy(), expect)
def test_allclose5():
"""
test allclose type=float64 rtol=0
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([100, 1e-9]).astype("float64"))
other = to_variable(np.array([100, 1e-8]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other, rtol=0)
expect = __allclose__(np.array([100, 1e-9]).astype("float64"), np.array([100, 1e-8]).astype("float64"),
rtol=0)
tools.compare(res.numpy(), expect)
def test_allclose6():
"""
test allclose type=float64 atol=0
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([1+1e-8, 1e-8]).astype("float64"))
other = to_variable(np.array([1, 1e-8]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other, atol=0)
expect = __allclose__(np.array([1+1e-8, 1e-8]).astype("float64"), np.array([1, 1e-8]).astype("float64"),
atol=0)
tools.compare(res.numpy(), expect)
def test_allclose7():
"""
test allclose type=float64 atol=0
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([1+1e-4, 1e-8]).astype("float64"))
other = to_variable(np.array([1, 1e-8]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other, atol=0)
expect = __allclose__(np.array([1+1e-4, 1e-8]).astype("float64"), np.array([1, 1e-8]).astype("float64"),
atol=0)
tools.compare(res.numpy(), expect)
def test_allclose8():
"""
test allclose type=float64 equal_nan=False
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([math.nan, 1e-8]).astype("float64"))
other = to_variable(np.array([math.nan, 1e-8]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other, equal_nan=False)
tools.compare(res.numpy(), [False])
def test_allclose9():
"""
test allclose type=float64 equal_nan=True
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([math.nan, 1e-8]).astype("float64"))
other = to_variable(np.array([math.nan, 1e-8]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other, equal_nan=True)
tools.compare(res.numpy(), [True])
def test_allclose10():
"""
test allclose type=float64 name=ss
Returns:
None
"""
with fluid.dygraph.guard():
input = to_variable(np.array([math.nan, 1e-8]).astype("float64"))
other = to_variable(np.array([math.nan, 1e-8]).astype("float64"))
res = paddle.tensor.logic.allclose(input, other, equal_nan=True, name="ss")
tools.compare(res.numpy(), [True])
def test_dot():
"""
test dot
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.array([1, 2, 3, 4, 5]).astype(np.float64)
y = np.array([1, 2, 3, 4, 5]).astype(np.float64)
x = to_variable(x)
y = to_variable(y)
res = paddle.dot(x, y)
expect = [55]
tools.compare(res.numpy(), expect)
def test_dot1():
"""
test dot dtype=float32
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float32)
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float32)
x = to_variable(x)
y = to_variable(y)
res = paddle.dot(x, y)
expect = [55]
tools.compare(res.numpy(), expect)
def test_dot2():
"""
test dot dtype=int32
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.int32)
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.int32)
x = to_variable(x)
y = to_variable(y)
res = paddle.dot(x, y)
expect = [55.0]
tools.compare(res.numpy(), expect)
def test_dot3():
"""
test dot dtype=int64
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([1, 2, 3, 4, 5]).astype(np.int64)
x = to_variable(x)
y = to_variable(y)
res = paddle.dot(x, y)
expect = [55.0]
tools.compare(res.numpy(), expect)
def test_dot4():
"""
test dot dtype=int64 name=ss
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([1, 2, 3, 4, 5]).astype(np.int64)
x = to_variable(x)
y = to_variable(y)
res = paddle.dot(x, y, name="ss")
expect = [55.0]
tools.compare(res.numpy(), expect)
def math_logsumexp(data):
"""
    Compute logsumexp with Python's math module (reference implementation).
Args:
data: float array
Returns:
Float
"""
res = []
for i in data:
res.append(math.exp(i))
return math.log(sum(res))
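# A quick worked example of the helper above (values rounded): for data = [1, 2, 3],
# sum(exp(i)) ~= 2.7183 + 7.3891 + 20.0855 = 30.1929 and log(30.1929) ~= 3.4076,
# i.e. math_logsumexp([1, 2, 3]) ~= 3.4076.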
def test_logsumexp():
"""
test logsumexp
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([1, 2, 3, 4, 5]).astype(np.float32))
res = paddle.logsumexp(x).numpy()
expect = math_logsumexp([1, 2, 3, 4, 5])
tools.compare(res[0], expect, 1e-7)
def test_logsumexp1():
"""
test logsumexp dim=-1
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[2, 2], [3, 3]]).astype(np.float32))
res = paddle.logsumexp(x, dim=-1).numpy()
expect = [2.6931472, 3.6931472]
tools.compare(res, expect, 1e-7)
def test_logsumexp2():
"""
test logsumexp dim=0
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[2, 2], [3, 3]]).astype(np.float32))
res = paddle.logsumexp(x, dim=0).numpy()
expect = [3.3132617, 3.3132617]
tools.compare(res, expect, 1e-7)
def test_logsumexp3():
"""
test logsumexp dim=1
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[2, 2], [3, 3]]).astype(np.float32))
res = paddle.logsumexp(x, dim=1).numpy()
print(res)
expect = [2.6931472, 3.6931472]
tools.compare(res, expect, 1e-7)
def test_logsumexp4():
"""
test logsumexp keep_dim=True
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[2, 2], [3, 3]]).astype(np.float32))
res = paddle.logsumexp(x, dim=1, keepdim=True).numpy()
expect = [[2.6931472], [3.6931472]]
tools.compare(res, expect, 1e-7)
def test_logsumexp5():
"""
test logsumexp keep_dim=True
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[2, 2], [3, 3]]).astype(np.float32))
res = paddle.logsumexp(x, dim=None, keepdim=True).numpy()
expect = [[4.0064087]]
tools.compare(res, expect, 1e-7)
def test_logsumexp6():
"""
test logsumexp keep_dim=True largedim
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[[2, 2], [3, 3]], [[2, 2], [3, 3]]]).astype(np.float32))
res = paddle.logsumexp(x, dim=1, keepdim=True).numpy()
expect = [[[3.3132617, 3.3132617]], [[3.3132617, 3.3132617]]]
tools.compare(res, expect, 1e-7)
def test_logsumexp7():
"""
test logsumexp keep_dim=False largedim
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[[2, 2], [3, 3]], [[2, 2], [3, 3]]]).astype(np.float32))
res = paddle.logsumexp(x, dim=1, keepdim=False).numpy()
expect = [[3.3132617, 3.3132617], [3.3132617, 3.3132617]]
tools.compare(res, expect, 1e-7)
def test_logsumexp8():
"""
test logsumexp keep_dim=True largedim dim=[0, 1, 2]
Returns:
None
"""
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[[2, 2], [3, 3]], [[2, 2], [3, 3]]]).astype(np.float32))
res = paddle.logsumexp(x, dim=[1, 2], keepdim=False).numpy()
expect = [4.0064087, 4.0064087]
tools.compare(res, expect, 1e-7)
def test_full():
"""
test full default value
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.float32)
expect = np.ones(shape=[1000, 1000]).astype(np.float32) * 3.3
tools.compare(x.numpy(), expect)
def test_full1():
"""
test full different dtype
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.fill_constant(shape=[1000, 1000], value=3.3, dtype=np.float16)
print(x.numpy())
x1 = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.float16)
expect1 = (np.ones(shape=[1000, 1000]) * 3.3).astype(np.float16)
x2 = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.float64)
expect2 = (np.ones(shape=[1000, 1000]) * 3.3).astype(np.float64)
x3 = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.int32)
expect3 = (np.ones(shape=[1000, 1000]) * 3.3).astype(np.int32)
x4 = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.int64)
expect4 = (np.ones(shape=[1000, 1000]) * 3.3).astype(np.int64)
x5 = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.bool)
expect5 = (np.ones(shape=[1000, 1000]) * 3.3).astype(np.bool)
tools.compare(x1.numpy(), expect1, delta=0.01)
tools.compare(x2.numpy(), expect2)
tools.compare(x3.numpy(), expect3)
tools.compare(x4.numpy(), expect4)
tools.compare(x5.numpy(), expect5)
def test_full2():
"""
test full device = cpu
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.float32, device="cpu")
expect = np.ones(shape=[1000, 1000]).astype(np.float32) * 3.3
tools.compare(x.numpy(), expect)
def test_full3():
"""
test full name=ss
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.full(shape=[1000, 1000], fill_value=3.3, dtype=np.float32, name="ss")
expect = np.ones(shape=[1000, 1000]).astype(np.float32) * 3.3
tools.compare(x.numpy(), expect)
def test_full4():
"""
test full device = cpu out=a
Returns:
None
"""
with fluid.dygraph.guard():
a = to_variable(np.ones(shape=[1000, 1000]).astype(np.float32))
x = paddle.full(out=a, shape=[1000, 1000], fill_value=3.3, dtype=np.float32, device="cpu")
expect = np.ones(shape=[1000, 1000]).astype(np.float32) * 3.3
tools.compare(a.numpy(), expect)
def test_zeros_like():
"""
test zeros_like
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x)
expect = np.zeros(shape=[100, 100, 100])
tools.compare(res.numpy(), expect)
def test_zeros_like1():
"""
test zeros_like different dtype
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x, dtype=np.bool)
expect = np.zeros(shape=[100, 100, 100]).astype(np.bool)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x, dtype=np.float32)
expect = np.zeros(shape=[100, 100, 100]).astype(np.float32)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x, dtype=np.float64)
expect = np.zeros(shape=[100, 100, 100]).astype(np.float64)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x, dtype=np.int32)
expect = np.zeros(shape=[100, 100, 100]).astype(np.int32)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x, dtype=np.int64)
expect = np.zeros(shape=[100, 100, 100]).astype(np.int64)
tools.compare(res.numpy(), expect)
def test_zeros_like2():
"""
test zeros_like device=cpu
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x, device="cpu")
expect = np.zeros(shape=[100, 100, 100])
tools.compare(res.numpy(), expect)
def test_zeros_like3():
"""
test zeros_like device=cpu name=ss
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.zeros_like(x, device="cpu", name="ss")
expect = np.zeros(shape=[100, 100, 100])
tools.compare(res.numpy(), expect)
def test_ones_like():
"""
test ones_like
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x)
expect = np.ones(shape=[100, 100, 100])
tools.compare(res.numpy(), expect)
def test_ones_like1():
"""
test ones_like different dtype
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x, dtype=np.bool)
expect = np.ones(shape=[100, 100, 100]).astype(np.bool)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x, dtype=np.float32)
expect = np.ones(shape=[100, 100, 100]).astype(np.float32)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x, dtype=np.float64)
expect = np.ones(shape=[100, 100, 100]).astype(np.float64)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x, dtype=np.int32)
expect = np.ones(shape=[100, 100, 100]).astype(np.int32)
tools.compare(res.numpy(), expect)
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x, dtype=np.int64)
expect = np.ones(shape=[100, 100, 100]).astype(np.int64)
tools.compare(res.numpy(), expect)
def test_ones_like2():
"""
test ones_like device=cpu
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x, device="cpu")
expect = np.ones(shape=[100, 100, 100])
tools.compare(res.numpy(), expect)
def test_ones_like3():
"""
test ones_like device=cpu name=ss
Returns:
None
"""
with fluid.dygraph.guard():
x = to_variable(np.random.random(size=[100, 100, 100]))
res = paddle.ones_like(x, device="cpu", name="ss")
expect = np.ones(shape=[100, 100, 100])
tools.compare(res.numpy(), expect)
def test_elementwise_equal():
"""
test elementwise equal
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([1, 2, 3, 4, 5])
y = np.array([1, 2, 3, 4, 4])
paddle_x = to_variable(x)
paddle_y = to_variable(y)
res = paddle.elementwise_equal(paddle_x, paddle_y)
expect = [1, 1, 1, 1, 0]
tools.compare(res.numpy(), expect)
def test_elementwise_equal1():
"""
test elementwise equal 2 dig
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 1], [2, 2], [3, 3]])
y = np.array([[1, 1], [2, 1], [1, 3]])
paddle_x = to_variable(x)
paddle_y = to_variable(y)
res = paddle.elementwise_equal(paddle_x, paddle_y)
expect = [[1, 1], [1, 0], [0, 1]]
tools.compare(res.numpy(), expect)
def test_elementwise_equal2():
"""
test elementwise equal name = ss
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 1], [2, 2], [3, 3]])
y = np.array([[1, 1], [2, 1], [1, 3]])
paddle_x = to_variable(x)
paddle_y = to_variable(y)
res = paddle.elementwise_equal(paddle_x, paddle_y, name="ss")
expect = [[1, 1], [1, 0], [0, 1]]
tools.compare(res.numpy(), expect)
def test_randint():
"""
test randint
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = paddle.tensor.randint(low=1, high=5, shape=[3, 3], seed=33)
if platform.system() == "Darwin":
expect = [[3, 4, 1], [2, 4, 1], [1, 2, 3]]
elif platform.system() == "Linux":
expect = [[1, 4, 4], [2, 4, 2], [4, 1, 3]]
else:
expect = [[3, 2, 1], [2, 1, 2], [3, 3, 4]]
tools.compare(x.numpy(), expect)
def test_randint1():
"""
test randint high=None
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = paddle.tensor.randint(low=1, shape=[3, 3])
expect = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
tools.compare(x.numpy(), expect)
def test_randint2():
"""
test randint device="cpu", name="ss"
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = paddle.tensor.randint(low=1, high=5, shape=[3, 3], seed=33, device="cpu", name="ss")
if platform.system() == "Darwin":
expect = [[3, 4, 1], [2, 4, 1], [1, 2, 3]]
elif platform.system() == "Linux":
expect = [[1, 4, 4], [2, 4, 2], [4, 1, 3]]
else:
expect = [[3, 2, 1], [2, 1, 2], [3, 3, 4]]
tools.compare(x.numpy(), expect)
def test_randint3():
"""
test randint device="cpu", name="ss" out=a
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
a = to_variable(np.ones(shape=[3, 3]).astype(np.int32))
x = paddle.tensor.randint(out=a, low=1, high=5, shape=[3, 3], seed=33, device="cpu", name="ss")
if platform.system() == "Darwin":
expect = [[3, 4, 1], [2, 4, 1], [1, 2, 3]]
elif platform.system() == "Linux":
expect = [[1, 4, 4], [2, 4, 2], [4, 1, 3]]
else:
expect = [[3, 2, 1], [2, 1, 2], [3, 3, 4]]
tools.compare(a.numpy(), expect)
def test_manual_seed():
"""
manual seed
Returns:
None
"""
prog1 = fluid.default_startup_program()
prog2 = fluid.Program()
tools.compare(prog1.random_seed, 0)
tools.compare(prog2.random_seed, 0)
print(prog1.random_seed)
print(prog2.random_seed)
paddle.manual_seed(33)
prog3 = fluid.Program()
    # the default program's random seed is modified
tools.compare(prog1.random_seed, 33)
    # user-defined programs keep their own seed
tools.compare(prog2.random_seed, 0)
tools.compare(prog3.random_seed, 33)
def test_diag_embed():
"""
test diag embed default
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = np.array([[1, 2, 3], [1, 2, 3]])
res = paddle.nn.functional.diag_embed(to_variable(x))
expect = [[[1, 0, 0], [0, 2, 0], [0, 0, 3]],
[[1, 0, 0], [0, 2, 0], [0, 0, 3]]]
tools.compare(res.numpy(), expect)
def test_diag_embed1():
"""
test diag embed offset=1
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = np.array([[1, 2, 3], [1, 2, 3]])
res = paddle.nn.functional.diag_embed(to_variable(x), offset=1)
expect = [[[0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 0]],
[[0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 0]]]
tools.compare(res.numpy(), expect)
def test_diag_embed2():
"""
test diag embed dim1=0, dim2=1
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = np.array([[1, 2, 3], [1, 2, 3]])
res = paddle.nn.functional.diag_embed(to_variable(x), dim1=0, dim2=1)
expect = [[[1, 1], [0, 0], [0, 0]],
[[0, 0], [2, 2], [0, 0]],
[[0, 0], [0, 0], [3, 3]]]
tools.compare(res.numpy(), expect)
def test_diag_embed3():
"""
test diag embed dim1=0, dim2=2
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = np.array([[1, 2, 3], [1, 2, 3]])
res = paddle.nn.functional.diag_embed(to_variable(x), dim1=0, dim2=2)
expect = [[[1, 0, 0], [1, 0, 0]],
[[0, 2, 0], [0, 2, 0]],
[[0, 0, 3], [0, 0, 3]]]
tools.compare(res.numpy(), expect)
def test_nn_relu():
"""
test nn.relu
Returns:
None
"""
with fluid.dygraph.guard():
x_np = np.random.uniform(-1, 1, [10, 12, 128, 128]).astype('float32')
x = fluid.dygraph.to_variable(x_np)
my_relu=paddle.nn.ReLU()
out = my_relu(x)
arr = []
for i in x_np.flatten():
if i < 0:
arr.append(0)
else:
arr.append(i)
expect = np.array(arr).reshape(10, 12, 128, 128)
tools.compare(out.numpy(), expect)
def test_elementwise_sum():
"""
test elementwise_sum
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 1], [2, 2], [3, 3]])
y = np.array([[1, 1], [2, 1], [1, 3]])
paddle_x = to_variable(x)
paddle_y = to_variable(y)
res = paddle.elementwise_sum([paddle_x, paddle_y], name="ss")
expect = [[2, 2], [4, 3], [4, 6]]
tools.compare(res.numpy(), expect)
def test_matmul():
"""
test matmul
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([1, 2, 3]).astype(np.float32)
paddle_x = to_variable(x)
paddle_y = to_variable(y)
res = paddle.matmul(paddle_x, paddle_y, name="ss")
expect = [14]
tools.compare(res.numpy(), expect)
print(res.numpy())
def test_matmul1():
"""
test matmul alpha=2
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([1, 2, 3]).astype(np.float32)
paddle_x = to_variable(x)
paddle_y = to_variable(y)
res = paddle.matmul(paddle_x, paddle_y, alpha=2, name="ss")
expect = [28]
tools.compare(res.numpy(), expect)
print(res.numpy())
def test_matmul2():
"""
test matmul
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [1, 2, 3]]).astype(np.float32)
y = np.array([[1, 2, 3], [1, 2, 3]]).astype(np.float32)
paddle_x = to_variable(x)
paddle_y = to_variable(y)
res = paddle.matmul(paddle_x, paddle_y, transpose_y=True, alpha=1, name="ss")
expect = [[14, 14], [14, 14]]
tools.compare(res.numpy(), expect)
def test_logsoftmax():
"""
test logsoftmax
Returns:
None
"""
data = np.array([[[-2.0, 3.0, -4.0, 5.0],
[3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[6.0, 7.0, 8.0, 9.0]]]).astype('float32')
    my_log_softmax = paddle.nn.LogSoftmax()
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
        res = my_log_softmax(data)
expect = [[[ -7.1278, -2.1278, -9.1278, -0.1278],
[ -2.1271, -9.1271, -0.1271, -11.1271],
[-16.3133, -17.3133, -1.3133, -0.3133]],
[[ -3.0518, -6.0518, -7.0518, -0.0518],
[-12.3133, -1.3133, -0.3133, -15.3133],
[ -3.4402, -2.4402, -1.4402, -0.4402]]]
tools.compare(res.numpy(), expect, delta=1e-3)
def test_logsoftmax1():
"""
test logsoftmax axis=0
Returns:
None
"""
data = np.array([[[-2.0, 3.0, -4.0, 5.0],
[3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[6.0, 7.0, 8.0, 9.0]]]).astype('float32')
    my_log_softmax = paddle.nn.LogSoftmax(0)
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
        res = my_log_softmax(data)
expect = [[[-3.0486e+00, -6.7153e-03, -1.3133e+00, -3.1326e-01],
[-3.3541e-04, -1.0000e+01, -2.1269e+00, -1.2693e-01],
[-1.3000e+01, -1.5000e+01, -6.9315e-01, -6.9315e-01]],
[[-4.8587e-02, -5.0067e+00, -3.1326e-01, -1.3133e+00],
[-8.0003e+00, -4.5399e-05, -1.2693e-01, -2.1269e+00],
[-2.2603e-06, -3.0590e-07, -6.9315e-01, -6.9315e-01]]]
tools.compare(res.numpy(), expect, delta=1e-3)
def test_meshgrid():
"""
test meshgrid
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5]).astype(np.float32)
z = np.array([6, 7, 8]).astype(np.float32)
paddle_x = to_variable(x)
paddle_y = to_variable(y)
paddle_z = to_variable(z)
res = paddle.tensor.meshgrid([paddle_x, paddle_y, paddle_z])
expect1 = [[[1., 1., 1.],
[1., 1., 1.]],
[[2., 2., 2.],
[2., 2., 2.]],
[[3., 3., 3.],
[3., 3., 3.]]]
expect2 = [[[4., 4., 4.],
[5., 5., 5.]],
[[4., 4., 4.],
[5., 5., 5.]],
[[4., 4., 4.],
[5., 5., 5.]]]
expect3 = [[[6., 7., 8.],
[6., 7., 8.]],
[[6., 7., 8.],
[6., 7., 8.]],
[[6., 7., 8.],
[6., 7., 8.]]]
tools.compare(res[0].numpy(), expect1)
tools.compare(res[1].numpy(), expect2)
tools.compare(res[2].numpy(), expect3)
def test_arange():
"""
test arange default
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.arange(1, 5, 0.5)
expect = [1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000, 4.5000]
tools.compare(x.numpy(), expect)
def test_arange1():
"""
test arange dtype=int32
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.arange(1, 5, 1, dtype=np.int32)
expect = [1, 2, 3, 4]
tools.compare(x.numpy(), expect)
def test_arange2():
"""
test arange dtype=int64
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.arange(1, 5, 1, dtype=np.int64)
expect = [1, 2, 3, 4]
tools.compare(x.numpy(), expect)
def test_arange3():
"""
test arange dtype=float32
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.arange(1, 5, 0.5, dtype=np.float32)
expect = [1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000, 4.5000]
tools.compare(x.numpy(), expect)
def test_arange4():
"""
test arange dtype=float64
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.arange(1, 5, 0.5, dtype=np.float64)
expect = [1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000, 4.5000]
tools.compare(x.numpy(), expect)
def test_arange5():
"""
    test arange name param BUG!!!!
    Returns:
    None
"""
with fluid.dygraph.guard():
x = paddle.arange(1, 5, 0.5, dtype=np.float64, name="ss")
expect = [1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000, 4.5000]
tools.compare(x.numpy(), expect)
def test_bmm():
"""
test bmm
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
a = np.random.random(size=[3, 1, 2])
b = np.random.random(size=[3, 2, 1])
x = to_variable(a)
y = to_variable(b)
res = paddle.bmm(x, y)
expect = [[[0.43382605]], [[0.40628374]], [[0.91274966]]]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_interpolate():
"""
test interpolate
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
data = np.random.random((1, 1, 3, 3)).astype("float64")
x = to_variable(data)
align_corners = True
out_shape = [4, 4]
res = paddle.nn.functional.interpolate(x, size=out_shape, mode='BICUBIC', align_mode=0,
align_corners=align_corners)
expect = [[[[0.2485, 0.3909, 0.4601, 0.4109],
[0.2833, 0.6472, 0.6122, 0.2011],
[0.1859, 0.7745, 0.8299, 0.3159],
[0.0197, 0.6897, 0.9711, 0.6805]]]]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_interpolate1():
"""
test interpolate1 align_corners=False
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
data = np.random.random((1, 1, 3, 3)).astype("float64")
x = to_variable(data)
align_corners = False
out_shape = [4, 4]
res = paddle.nn.functional.interpolate(x, size=out_shape, mode='BICUBIC', align_mode=0,
align_corners=align_corners)
expect = [[[[0.2353, 0.3570, 0.4414, 0.4277],
[0.2518, 0.6105, 0.5774, 0.1763],
[0.1195, 0.7352, 0.8122, 0.2951],
[-0.0663, 0.6411, 0.9767, 0.6986]]]]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_NLLLoss():
"""
test NLLLoss
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=(20, 200)).astype(np.float32)
label = np.random.randint(0, 100, size=(20,)).astype(np.int64)
p_input = to_variable(input)
p_label = to_variable(label)
nll_loss = paddle.nn.loss.NLLLoss()
res = nll_loss(p_input, p_label)
expect = [-0.5075191]
tools.compare(res.numpy(), expect)
def test_NLLLoss1():
"""
test NLLLoss add weight
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=(20, 200)).astype(np.float32)
label = np.random.randint(0, 100, size=(20,)).astype(np.int64)
weight = np.random.random(size=[200]).astype(np.float32)
p_input = to_variable(input)
p_label = to_variable(label)
nll_loss = paddle.nn.loss.NLLLoss(to_variable(weight))
res = nll_loss(p_input, p_label)
expect = [-0.47225362]
tools.compare(res.numpy(), expect)
def test_NLLLoss2():
"""
    test NLLLoss reduction=sum
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=(20, 200)).astype(np.float32)
label = np.random.randint(0, 100, size=(20,)).astype(np.int64)
weight = np.random.random(size=[200]).astype(np.float32)
p_input = to_variable(input)
p_label = to_variable(label)
nll_loss = paddle.nn.loss.NLLLoss(to_variable(weight), reduction="sum")
res = nll_loss(p_input, p_label)
expect = [-4.3605204]
tools.compare(res.numpy(), expect)
def test_NLLLoss3():
"""
    test NLLLoss reduction=none
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=(20, 200)).astype(np.float32)
label = np.random.randint(0, 100, size=(20,)).astype(np.int64)
weight = np.random.random(size=[200]).astype(np.float32)
p_input = to_variable(input)
p_label = to_variable(label)
nll_loss = paddle.nn.loss.NLLLoss(to_variable(weight), reduction="none")
res = nll_loss(p_input, p_label)
expect = [-0.1155, -0.0369, -0.5154, -0.7624, -0.0933, -0.0631, -0.0307, -0.1075,
-0.1835, -0.1925, -0.3282, -0.2857, -0.1193, -0.2945, -0.0721, -0.0174,
-0.0599, -0.5841, -0.4217, -0.0769]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_std():
"""
test std
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.random.uniform(1, 100, size=[3, 3])
res = paddle.std(to_variable(x))
expect = [31.62808741]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_std1():
"""
test std keepdim=True
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.random.uniform(1, 100, size=[3, 3])
res = paddle.std(to_variable(x), axis=[0, 1], keepdim=True)
expect = [[31.62808741]]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_std2():
"""
test std keepdim=True axis=1
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.random.uniform(1, 100, size=[3, 3])
res = paddle.std(to_variable(x), axis=[1], keepdim=True)
expect = [[10.57769871], [37.20946482], [47.52437458]]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_std3():
"""
test std keepdim=True axis=1 unbiased=False
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.random.uniform(1, 100, size=[3, 3])
res = paddle.std(to_variable(x), axis=[1], keepdim=True, unbiased=False)
expect = [[ 8.63665483], [30.3814008], [38.80348936]]
tools.compare(res.numpy(), expect, delta=1e-4)
def test_std4():
"""
test std keepdim=True axis=1 unbiased=False name=ss
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.random.uniform(1, 100, size=[3, 3])
aaa = to_variable(np.ones(shape=(3, 1)))
res = paddle.std(to_variable(x), axis=[1], keepdim=True, unbiased=False, name="ss", out=aaa)
expect = [[8.63665483], [30.3814008], [38.80348936]]
tools.compare(aaa.numpy(), expect, delta=1e-4)
def test_clamp():
"""
test clamp
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
x = to_variable(x)
res = paddle.tensor.math.clamp(x, min=3, max=7)
expect = [[3, 3, 3], [4, 5, 6], [7, 7, 7]]
tools.compare(res.numpy(), expect)
def test_clamp1():
"""
test clamp dtype=np.float64
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float64)
x = to_variable(x)
res = paddle.tensor.math.clamp(x, min=3, max=7)
expect = [[3, 3, 3], [4, 5, 6], [7, 7, 7]]
tools.compare(res.numpy(), expect)
def test_BCELoss():
"""
test BCELoss
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[100, 100, 33]).astype(np.float32)
label = np.random.random(size=[100, 100, 33]).astype(np.float32)
p_input = to_variable(input)
p_label = to_variable(label)
loss = paddle.nn.BCELoss()
res = loss(p_input, p_label)
expect = [1.0012524]
tools.compare(res.numpy(), expect)
def test_BCELoss1():
"""
test BCELoss weight param
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[100, 100, 33]).astype(np.float32)
label = np.random.random(size=[100, 100, 33]).astype(np.float32)
weight = np.random.random(size=[100, 100, 33]).astype(np.float32)
p_input = to_variable(input)
p_label = to_variable(label)
p_weight = to_variable(weight)
loss = paddle.nn.BCELoss(weight=p_weight)
res = loss(p_input, p_label)
expect = [0.4997204]
tools.compare(res.numpy(), expect)
def test_BCELoss2():
"""
test BCELoss reduce=sum
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[3, 33, 33]).astype(np.float32)
label = np.random.randint(2, size=[3, 33, 33]).astype(np.float32)
weight = np.random.random(size=[3, 33, 33]).astype(np.float32)
p_input = to_variable(input)
p_label = to_variable(label)
p_weight = to_variable(weight)
loss = paddle.nn.BCELoss(weight=p_weight, reduction="sum")
res = loss(p_input, p_label)
expect = [1641.069]
tools.compare(res.numpy(), expect)
def test_BCELoss3():
"""
test BCELoss reduce=none
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[2, 2, 2]).astype(np.float32)
label = np.random.randint(2, size=[2, 2, 2]).astype(np.float32)
weight = np.random.random(size=[2, 2, 2]).astype(np.float32)
p_input = to_variable(input)
p_label = to_variable(label)
p_weight = to_variable(weight)
loss = paddle.nn.BCELoss(weight=p_weight, reduction="none")
res = loss(p_input, p_label)
expect = [[[0.1108, 0.2806],
[0.1455, 0.2964]],
[[1.7994, 0.8336],
[0.0080, 0.0216]]]
tools.compare(res.numpy(), expect, 1e-4)
def test_tril():
"""
test tril
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
p_x = to_variable(x)
res = paddle.tensor.tril(p_x)
expect = [[1, 0, 0], [4, 5, 0], [7, 8, 9]]
tools.compare(res.numpy(), expect)
def test_tril1():
"""
test tril diagonal=1
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
p_x = to_variable(x)
res = paddle.tensor.tril(p_x, diagonal=1)
expect = [[1, 2, 0], [4, 5, 6], [7, 8, 9]]
tools.compare(res.numpy(), expect)
def test_tril2():
"""
test tril diagonal=-1
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
p_x = to_variable(x)
res = paddle.tensor.tril(p_x, diagonal=-1)
expect = [[0, 0, 0], [4, 0, 0], [7, 8, 0]]
tools.compare(res.numpy(), expect)
def test_triu():
"""
test triu
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
p_x = to_variable(x)
res = paddle.tensor.triu(p_x)
expect = [[1, 2, 3], [0, 5, 6], [0, 0, 9]]
tools.compare(res.numpy(), expect)
def test_triu1():
"""
test triu diagonal=1
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
p_x = to_variable(x)
res = paddle.tensor.triu(p_x, diagonal=1)
expect = [[0, 2, 3], [0, 0, 6], [0, 0, 0]]
tools.compare(res.numpy(), expect)
def test_triu2():
"""
test triu diagonal=-1
Returns:
None
"""
with fluid.dygraph.guard():
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
p_x = to_variable(x)
res = paddle.tensor.triu(p_x, diagonal=-1)
expect = [[1, 2, 3], [4, 5, 6], [0, 8, 9]]
tools.compare(res.numpy(), expect)
def test_addmm():
"""
test addmm
Returns:
None
"""
with fluid.dygraph.guard():
input = np.array([[3, 3], [3, 3]]).astype(np.float32)
y = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
p_i = to_variable(input)
p_x = to_variable(x)
p_y = to_variable(y)
res = paddle.addmm(p_i, p_x, p_y, alpha=1, beta=1)
expect = [[25., 31.],
[52., 67.]]
tools.compare(res.numpy(), expect)
def test_addmm1():
"""
test addmm broadcast
Returns:
None
"""
with fluid.dygraph.guard():
input = np.array([[3, 3]]).astype(np.float32)
y = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
p_i = to_variable(input)
p_x = to_variable(x)
p_y = to_variable(y)
res = paddle.addmm(p_i, p_x, p_y, alpha=1, beta=1)
expect = [[25., 31.],
[52., 67.]]
tools.compare(res.numpy(), expect)
def test_addmm2():
"""
test addmm broadcast beta=-1
Returns:
None
"""
with fluid.dygraph.guard():
input = np.array([[3, 3]]).astype(np.float32)
y = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
p_i = to_variable(input)
p_x = to_variable(x)
p_y = to_variable(y)
res = paddle.addmm(p_i, p_x, p_y, alpha=1, beta=-1)
expect = [[19., 25.],
[46., 61.]]
tools.compare(res.numpy(), expect)
def test_addmm3():
"""
test addmm broadcast alpha=2 beta=0
Returns:
None
"""
with fluid.dygraph.guard():
input = np.array([[3, 3]]).astype(np.float32)
y = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
p_i = to_variable(input)
p_x = to_variable(x)
p_y = to_variable(y)
res = paddle.addmm(p_i, p_x, p_y, alpha=2, beta=0)
expect = [[ 44., 56.],
[ 98., 128.]]
tools.compare(res.numpy(), expect)
def test_index_sample():
"""
test index_sample
Returns:
None
"""
with fluid.dygraph.guard():
input = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
index = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]]).astype(np.int32)
x = to_variable(input)
y = to_variable(index)
res = paddle.index_sample(x, y)
expect = [[1, 1, 1], [5, 5, 5], [9, 9, 9]]
tools.compare(res.numpy(), expect)
def test_index_sample1():
"""
test index_sample different shape
Returns:
None
"""
with fluid.dygraph.guard():
input = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
index = np.array([[0, ], [1, ], [2, ]]).astype(np.int32)
x = to_variable(input)
y = to_variable(index)
res = paddle.index_sample(x, y)
expect = [[1], [5], [9]]
tools.compare(res.numpy(), expect)
def test_cholesky():
"""
test cholesky
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.random.rand(3, 3)
x_t = np.transpose(x, [1, 0])
data = np.matmul(x, x_t)
p = to_variable(data)
res = paddle.cholesky(p, upper=False)
expect = [[0.6581, 0.0000, 0.0000],
[0.8090, 0.4530, 0.0000],
[1.0841, 0.1849, 0.4033]]
tools.compare(res.numpy(), expect, 1e-4)
def test_cholesky1():
"""
test cholesky
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
x = np.random.rand(3, 3)
x_t = np.transpose(x, [1, 0])
data = np.matmul(x, x_t)
p = to_variable(data)
res = paddle.cholesky(p, upper=True)
expect = [[0.6581, 0.8090, 1.0841],
[0.0000, 0.4530, 0.1849],
[0.0000, 0.0000, 0.4033]]
tools.compare(res.numpy(), expect, 1e-4)
def test_CrossEntropyLoss():
"""
test CrossEntropyLoss
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[5, 100]).astype(np.float32)
label = np.array([1, 2, 3, 4, 5]).astype(np.int64)
weight = np.random.random(size=[5]).astype("float32")
p_i = to_variable(input)
p_l = to_variable(label)
celoss = paddle.nn.loss.CrossEntropyLoss()
res = celoss(p_i, p_l)
expect = [4.575522]
tools.compare(res.numpy(), expect)
def test_CrossEntropyLoss1():
"""
test CrossEntropyLoss add weight
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[5, 100]).astype(np.float32)
label = np.array([1, 2, 3, 4, 5]).astype(np.int64)
weight = np.random.random(size=[100]).astype("float32")
p_i = to_variable(input)
p_l = to_variable(label)
p_w = to_variable(weight)
celoss = paddle.nn.loss.CrossEntropyLoss(weight=p_w)
res = celoss(p_i, p_l)
expect = [4.535555]
tools.compare(res.numpy(), expect)
def test_CrossEntropyLoss2():
"""
test CrossEntropyLoss reduction=sum
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[5, 100]).astype(np.float32)
label = np.array([1, 2, 3, 4, 5]).astype(np.int64)
weight = np.random.random(size=[100]).astype("float32")
p_i = to_variable(input)
p_l = to_variable(label)
p_w = to_variable(weight)
celoss = paddle.nn.loss.CrossEntropyLoss(reduction="sum")
res = celoss(p_i, p_l)
expect = [22.87761]
tools.compare(res.numpy(), expect)
def test_CrossEntropyLoss3():
"""
test CrossEntropyLoss reduction=none
Returns:
None
"""
with fluid.dygraph.guard():
np.random.seed(33)
input = np.random.random(size=[5, 100]).astype(np.float32)
label = np.array([1, 2, 3, 4, 5]).astype(np.int64)
weight = np.random.random(size=[100]).astype("float32")
p_i = to_variable(input)
p_l = to_variable(label)
p_w = to_variable(weight)
celoss = paddle.nn.loss.CrossEntropyLoss(reduction="none")
res = celoss(p_i, p_l)
expect = [4.6951137, 4.709591, 4.709876, 4.5558195, 4.207209]
tools.compare(res.numpy(), expect)
def test_log1p():
"""
test log1p
Returns:
None
"""
with fluid.dygraph.guard():
|
np.random.seed(33)
|
numpy.random.seed
|
"""
This module trains the 2048 models with a genetic algorithm and Q-learning.
"""
# import multiprocessing
# from multiprocessing import Manager
import threading
import tensorflow as tf
import numpy as np
import time
import random
from copy import deepcopy
import os
from hashlib import sha512
import matplotlib.pyplot as plt
import matplotlib
from tkinter import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.animation as animation
from game import Game2048
from game import CASE_COLOR_GAME, ALL_BLOCK_POSSIBLE, GRID_SIZE_Y, GRID_SIZE_X
from model import Model2048
from model import new_generation, grid_to_input, model_mutation
matplotlib.use('TkAgg')
GUI: bool = False  # set True if you want to watch the models train
Q_LEARNING_DURING_ENV: bool = False # enable training during env simulation
Q_LEARNING_AFTER: bool = True
GENETIC_ALGORITHM: bool = True
# EPOCHS: int = 10_000
EPOCHS: int = 10_000
HISTORY_SIZE: int = 10_000
# create gui layout
Y_MODEL: int = 4
X_MODEL: int = 6
TOTAL_MODEL: int = int(Y_MODEL * X_MODEL)
# color map for gui
FULL_MAP: list = [CASE_COLOR_GAME[0]] + [CASE_COLOR_GAME[2 ** i] for i in range(1, ALL_BLOCK_POSSIBLE + 1)]
FULL_BOUNDS: list = [-0.5] + [2 ** i - 0.5 for i in range(1, ALL_BLOCK_POSSIBLE + 2)]
COLOR_MAP = matplotlib.colors.ListedColormap(FULL_MAP)
NORM = matplotlib.colors.BoundaryNorm(FULL_BOUNDS, COLOR_MAP.N)
# q learning
LEARNING_RATE: float = 0.1 # alpha
DISCOUNT_FACTOR: float = 0.99 # gamma
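# A worked example of the tabular update used below (illustrative values, not taken from a run):
# with alpha = 0.1, gamma = 0.99, Q(st, at) = 0.5, reward = 1.0 and Q(st+1, at+1) = 0.8,
# the new value is 0.5 + 0.1 * (1.0 + 0.99 * 0.8 - 0.5) = 0.6292.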
# model gradient tape optimizer and loss
MODEL_OPTIMIZER = tf.keras.optimizers.Adam()
MODEL_LOSS = tf.keras.losses.Huber()
# @tf.function
def environment(index) -> None:
"""
    This function creates an environment for each model
:param index: the index of the environment
:return: None
"""
# number of actions
n_action: int = 0
# while the game is not finished
while not list_game[index].check_end(list_game[index].grid):
n_action += 1
        # add the current grid to the history
        board_history_x[index].append(list_game[index].grid)
        # get the action of the model for the current grid
model_action = list_model[index].take_action(list_game[index].grid)
# categorical to value
model_action_index = np.argmax(model_action)
        # add the action chosen by the model to the history
board_history_y[index].append(model_action)
# simulate the action
new_grid, reward = list_game[index].action(list_game[index].grid, model_action_index)
# add reward to history
board_history_r[index].append(reward)
# do simple q learning for only this model
# get model action for future
future_action = list_model[index].take_action(new_grid)
future_action = future_action[np.argmax(future_action)] # = Q(st+1, at+1)
# Q(st, at) = Q(st, at) + α *(rt + Ɣ * Q(st+1, at+1) - Q(st, at))
model_action[model_action_index] = model_action[model_action_index] + LEARNING_RATE * (
reward + DISCOUNT_FACTOR * future_action - model_action[model_action_index])
# train model
"""
list_model[index].fit(
np.array([grid_to_input(list_game[index].grid)]),
np.array([model_action]),
verbose=0,
batch_size=1)"""
# train model
if Q_LEARNING_DURING_ENV:
with tf.GradientTape() as tape:
logits = list_model[index](
np.array([grid_to_input(list_game[index].grid)], dtype=np.float32), training=True
)
# Compute the loss value for this minibatch.
loss_value = MODEL_LOSS(np.array([model_action]), logits)
grads = tape.gradient(loss_value, list_model[index].trainable_weights)
MODEL_OPTIMIZER.apply_gradients(zip(grads, list_model[index].trainable_weights))
        # set new environment
list_game[index].grid = new_grid
list_game[index].score += reward
list_max_block[index] = max(list_game[index].grid.flatten().tolist())
list_score[index] = list_game[index].score
fitness[index] = list_game[index].score * list_max_block[index]
list_n_action[index] = n_action
    # if the maximum block is in a corner
if list_game[index].grid[-1][-1] == list_max_block[index]:
fitness[index] *= 4
elif list_game[index].grid[0][-1] == list_max_block[index]:
fitness[index] *= 2
elif list_game[index].grid[0][0] == list_max_block[index]:
fitness[index] *= 2
elif list_game[index].grid[-1][0] == list_max_block[index]:
fitness[index] *= 2
def gui() -> None:
"""
    This function creates a gui that shows the progress of all games
:return: None
"""
# create gui
fig, axs = plt.subplots(Y_MODEL, X_MODEL)
for y in range(Y_MODEL):
for x in range(X_MODEL):
            # create the display for each game board
temp_mat = axs[y, x].matshow(
np.array([[0 for _ in range(GRID_SIZE_X)] for _ in range(GRID_SIZE_Y)]),
cmap=COLOR_MAP,
norm=NORM)
temp_mat.axes.xaxis.set_visible(False)
temp_mat.axes.yaxis.set_visible(False)
graph_list.append(temp_mat)
# create a tkinter window that contain all matplotlib game graph
window = Tk()
window.config(bg="white")
window.title("2048 AI")
canvas = FigureCanvasTkAgg(fig, window)
canvas.get_tk_widget().pack(side="top", fill='both', expand=True)
ani = animation.FuncAnimation(fig,
update_graph,
interval=1)
window.mainloop()
def update_graph(i) -> None:
"""
    This function updates the graph for each game
    :param i: animation frame index (unused)
:return: None
"""
for z in range(TOTAL_MODEL):
        # draw the grid on the graph
try:
graph_list[z].set_data(list_game[z].grid)
except ValueError as _:
pass
def hash_array(array: np.array) -> str:
"""
    This function hashes a numpy array
:param array: a numpy array
:return: hashed numpy array
"""
return sha512(repr(array).encode()).hexdigest()
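# Caveat on hash_array (assumption about usage): repr() summarises arrays that exceed numpy's
# print threshold, which would collapse distinct large states onto the same hash. It is fine
# here because the 2048 grids stay small (GRID_SIZE_Y x GRID_SIZE_X).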
if __name__ == '__main__':
# init q table
Q: dict = {} # q table
# list for animated graph
graph_list = []
# create models
list_model: list = [Model2048() for _ in range(TOTAL_MODEL)]
# create game for each model
list_game: list = [Game2048() for _ in range(TOTAL_MODEL)]
# start window
if GUI:
thread_window = threading.Thread(target=gui)
thread_window.start()
# for global history of each game
memory_history_x: list = []
memory_history_y: list = []
memory_history_r: list = []
# for each epochs
for epoch in range(EPOCHS):
# create a board history
board_history_x: list = [[] for _ in range(TOTAL_MODEL)]
# create a move history
board_history_y: list = [[] for _ in range(TOTAL_MODEL)]
# create reward history
board_history_r: list = [[] for _ in range(TOTAL_MODEL)]
t0 = time.time()
action_taken: int = 0
# reset games
list_game: list = [Game2048() for _ in range(TOTAL_MODEL)]
# for scores
list_score: [int] = [0 for _ in range(TOTAL_MODEL)]
list_max_block: [int] = [0 for _ in range(TOTAL_MODEL)]
list_n_action: [int] = [0 for _ in range(TOTAL_MODEL)]
fitness: list = [0 for _ in range(TOTAL_MODEL)]
# create score for each env
score: list = [0 for _ in range(TOTAL_MODEL)]
multi_env: list = []
# time for games
t1 = time.time()
# start every thread for each env
for i in range(TOTAL_MODEL):
multi_env.append(
threading.Thread(target=environment, args=(i,))
)
multi_env[-1].start()
for env in multi_env:
env.join()
t1 = time.time() - t1
# genetic algorithm
t2 = time.time()
if GENETIC_ALGORITHM:
# get weight of all model
model_weight = [m.get_weights() for m in list_model]
new_gen = new_generation(model_weight, list_score)
# add mutation
for i in range(TOTAL_MODEL):
new_gen[i] = model_mutation(new_gen[i])
# load models
for i, model in enumerate(list_model):
model.set_weights(new_gen[i])
t2 = time.time() - t2
# global history
for i in range(len(board_history_x)):
memory_history_x.append(board_history_x[i])
memory_history_y.append(board_history_y[i])
memory_history_r.append(board_history_r[i])
# reinforce Q learning with all data
t3 = time.time()
X_TRAIN: list = []
if Q_LEARNING_AFTER:
# create a q table with all element in memory
for i in range(len(memory_history_x)):
for j in range(len(memory_history_x[i]) - 1):
if hash_array(memory_history_x[i][j]) not in Q:
Q[hash_array(memory_history_x[i][j])] = memory_history_y[i][j]
                # otherwise do q learning, since the state is already in memory
else:
q_value_H = Q[hash_array(memory_history_x[i][j])]
output_H = memory_history_y[i][j]
# update q value
                    # Q value of the next step's best action (the loop stops at len - 1, so j + 1 is valid)
                    future_action_H = memory_history_y[i][j + 1][np.argmax(memory_history_y[i][j + 1])]
max_output_H = np.argmax(output_H)
q_value_H[max_output_H] = q_value_H[max_output_H] + LEARNING_RATE * (
memory_history_r[i][j] + DISCOUNT_FACTOR * future_action_H - q_value_H[max_output_H])
# update q table
Q[hash_array(memory_history_x[i][j])] = q_value_H
# train all models with new q value
X_TRAIN: list = []
Y_TRAIN: list = []
for i in range(len(memory_history_x)):
for j in range(len(memory_history_x[i]) - 1):
X_TRAIN.append(grid_to_input(memory_history_x[i][j]))
Y_TRAIN.append(Q[hash_array(memory_history_x[i][j])])
def train(model_train):
with tf.device('/device:cpu:0'):
model_train.fit(
|
np.array(X_TRAIN, dtype=np.float32)
|
numpy.array
|
import logging
import numpy as np
import scipy.spatial
import scipy.sparse.csgraph as graph
import scipy.sparse
import shapely.geometry
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import cppimport.import_hook
import tectosaur.util.geometry
import tectosaur.nearfield.edge_adj_setup as edge_adj_setup
import tectosaur_topo as tt
from . import mesh_fncs
from . import slip_vectors
from . import collect_dem
def tri_side(tri1, tri2, threshold = 1e-12):
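    """Return 0 if tri2's centroid lies on the normal side of tri1, 1 if it lies on the
    opposite side, and 2 if the centroid-to-centroid direction is effectively in-plane
    (its dot product with tri1's normal is within +/- threshold)."""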
tri1_normal = tectosaur.util.geometry.tri_normal(tri1, normalize = True)
tri1_center = np.mean(tri1, axis = 0)
tri2_center = np.mean(tri2, axis = 0)
direction = tri2_center - tri1_center
direction /= np.linalg.norm(direction)
dot_val = direction.dot(tri1_normal)
if dot_val > threshold:
return 0
elif dot_val < -threshold:
return 1
else:
return 2
def plot_side_of_fault(m, side, view_R = 1.0):
fC, R = get_fault_centered_view(m)
C = np.ones(m.pts.shape[0])
plt.figure(figsize = (10, 10))
for i in range(3):
which_tris = m.tris[side == i + 1]
if which_tris.shape[0] == 0:
continue
plt.tripcolor(m.pts[:,0], m.pts[:,1], which_tris, C * i, vmin = 0, vmax = 3, cmap = 'hsv')
vW = view_R * R
plt.xlim([fC[0] - view_R * R, fC[0] + view_R * R])
plt.ylim([fC[1] - view_R * R, fC[1] + view_R * R])
plt.show()
def get_side_of_fault(m):
fault_start_idx = m.get_start('fault')
connectivity = mesh_fncs.tri_connectivity_graph(m.tris)
fault_touching_pair = np.where(np.logical_and(
connectivity.row < fault_start_idx,
connectivity.col >= fault_start_idx
))[0]
side = np.zeros(m.n_tris())
shared_verts = np.zeros(m.n_tris())
fault_surf_tris = m.pts[m.tris[connectivity.col[fault_touching_pair]]]
for i in range(fault_touching_pair.shape[0]):
surf_tri_idx = connectivity.row[fault_touching_pair[i]]
surf_tri = m.tris[surf_tri_idx]
fault_tri = m.tris[connectivity.col[fault_touching_pair[i]]]
which_side = tri_side(m.pts[fault_tri], m.pts[surf_tri])
n_shared_verts = 0
for d in range(3):
if surf_tri[d] in fault_tri:
n_shared_verts += 1
if shared_verts[surf_tri_idx] < 2:
side[surf_tri_idx] = int(which_side) + 1
shared_verts[surf_tri_idx] = n_shared_verts
return side
def plot_fault_trace(m):
fault_tris = m.get_tris('fault')
for e in mesh_fncs.get_surf_fault_edges(m.get_tris('surf'), fault_tris):
i1, d1 = e[0]
i2, d2 = e[1]
pts = m.pts[[fault_tris[i1,d1], fault_tris[i2,d2]]]
plt.plot(pts[:,0], pts[:,1], 'k-', markersize = 10)
def get_fault_centered_view(m):
fault_pts = m.get_tri_pts('fault').reshape((-1,3))
fC = np.mean(fault_pts, axis = 0)
R = np.sqrt(np.max(np.sum((fault_pts - fC) ** 2, axis = 1)))
return fC, R
def plot_surf_disp(m, side, field, name, vmin = None, vmax = None, filename = None, view_R = 1.0, proj = None, latlon_step = 0.5):
fC, R = get_fault_centered_view(m)
if vmin is None:
vmin =
|
np.min(field)
|
numpy.min
|
#!/usr/bin/env python3
""" Use TGlauberMC to calculate expected path lengths with respect to event plane orientations.
"""
from dataclasses import dataclass
import enlighten
import numpy as np
import os
import logging
from pathlib import Path
import requests
import tarfile
from typing import Any, cast, List
# NOTE: This is out of the expected order, but it must be here to prevent ROOT from stealing the command
# line options
from jet_hadron.base.typing_helpers import Hist # noqa: F401
from pachyderm import histogram
from jet_hadron.base import analysis_manager
from jet_hadron.base import analysis_objects
from jet_hadron.base import params
from jet_hadron.plot import base as plot_base
import ROOT
logger = logging.getLogger(__name__)
def _setup_TGlauberMC(version: str, path: Path) -> bool:
""" Setup TGlauberMC by downloading and extracting it.
Args:
version: TGlauberMC version.
path: Path to the directory where TGlauberMC will be stored.
Returns:
True if it was successfully setup.
Raises:
        RuntimeError: If the package was not downloaded successfully.
"""
# Setup
url = f"https://tglaubermc.hepforge.org/downloads/?f=TGlauberMC-{version}.tar.gz"
filename = path / url[url.find("=") + 1:]
filename.parent.mkdir(parents = True, exist_ok = True)
# Request the file
logger.debug(f"Downloading TGlauberMC v. {version}")
r = requests.get(url)
if not r.status_code == requests.codes.ok:
raise RuntimeError(f"Unable to download package at {url}")
# Save it.
with open(filename, "wb") as f:
f.write(r.content)
# Untar
logger.debug("Extracting...")
with tarfile.open(filename, "r:gz") as f_tar:
# Help out mypy...
f_tar.extractall(path = str(filename.parent))
# Remove the tgz file - it's not needed anymore.
#filename.unlink()
return True
def _configure_TGlauberMC(version: str, path: Path) -> bool:
""" Configure and compile TGlauberMC so that it's accessible through ROOT.
Args:
version: TGlauberMC version.
path: Path to the directory where TGlauberMC is stored.
Returns:
True if it was successfully configured.
"""
logger.info("Configuring TGlauberMC")
# Pre-requisite for running the code
ROOT.gSystem.Load("libMathMore")
# Compile the Glauber code.
logger.debug("Compiling TGlauberMC. This may take a moment...")
ROOT.gROOT.LoadMacro(os.path.join(path, f"runglauber_v{version}.C+"))
# The Glauber classes can now be accessed through ROOT.
return True
@dataclass
class CrossSection:
""" Define an input cross section. """
value: float
width: float
def _calculate_array_RMS(arr: np.ndarray) -> float:
""" Calculate the RMS of the given array using the same proceudre as ROOT. """
return cast(float, np.sqrt(1 / len(arr) * np.sum((arr - np.mean(arr)) ** 2)))
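# Note: this is the population standard deviation, so it should agree with np.std(arr, ddof=0);
# the explicit formula is kept to mirror ROOT's definition of the RMS.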
class GlauberPathLengthAnalysis:
def __init__(self, cross_section: CrossSection, impact_parameter_range: params.SelectedRange) -> None:
        # Store base properties.
self.cross_section = cross_section
self.impact_parameter_range = impact_parameter_range
# Store output from each event
self.max_x: np.ndarray = []
self.max_y: np.ndarray = []
self.eccentricity: np.ndarray = []
# Calculated length ratio
self.ratio: np.ndarray
# The actual Glauber object
self.glauber: Any
def setup(self) -> bool:
# Setup the glauber object
# Arguments are (Nuclei, Nuclei, cross section, cross section width)
self.glauber = ROOT.TGlauberMC("Pb", "Pb", self.cross_section.value, self.cross_section.width)
# Specify the impact parameters
self.glauber.SetBmin(self.impact_parameter_range.min)
self.glauber.SetBmax(self.impact_parameter_range.max)
# Recommended value.
self.glauber.SetMinDistance(0.4)
return True
def event_loop(self, n_events: int, progress_manager: enlighten._manager.Manager) -> bool:
""" Run the Glauber event loop.
Args:
n_events: Number of events to run.
progress_manager: Progress manager to keep track of execution progress.
Returns:
True if executed successfully.
"""
# Temporary variables to store the event-by-event results
# We will store these in numpy arrays, but it's not convenient to expand those arrays,
# so we will store them temporarily in easily expandable lists and then store them
# in the analysis object after the event-by-event process is completed.
max_x: List[float] = []
max_y: List[float] = []
eccentricity: List[float] = []
#c = ROOT.TCanvas("c", "c")
with progress_manager.counter(total = n_events,
desc = "Calculating:",
unit = "glauber events") as progress:
for i in range(n_events):
# Run one event and retrieve the nucleons.
self.glauber.Run(1)
nucleons = self.glauber.GetNucleons()
x_values = []
y_values = []
for nucleon in nucleons:
# We only care about nucleons which participate.
if nucleon.IsWounded():
x_values.append(nucleon.GetX())
y_values.append(nucleon.GetY())
max_x.append(np.max(x_values))
max_y.append(np.max(y_values))
eccentricity.append(self.glauber.GetEcc(2))
# Uncomment if we want to save plots of the individual events.
#glauber.Draw()
#c.SaveAs(f"glauber_{i}.pdf")
progress.update()
# Convert all of the stored values to numpy arrays for convenience.
self.max_x =
|
np.array(max_x)
|
numpy.array
|
"""
This module facilitates the rapid construction of the GT blast pulse synthetic,
its integral and derivatives, and its spectrum
References:
- <NAME>. (2019). Explosion Source Models,
Chapter in Infrasound Monitoring for Atmospheric Studies,
Second Edition, Springer, Switzerland, DOI 10.1007/978-3-319-75140_5, p. 273-345.
- <NAME>., <NAME>, <NAME>, <NAME> (2020).
Improved parametric models for explosion pressure signals derived from large datasets,
Seism. Res. Lett.
- <NAME>, <NAME>, <NAME>, and <NAME> (2021).
Empirical Acoustic Source Model for Chemical Explosions in Air.
Bulletin of the Seismological Society of America
"""
import numpy as np
from typing import Optional, Tuple, Union
from libquantum.synthetics import white_noise_fbits, antialias_halfNyquist
from libquantum.scales import EPSILON
def gt_blast_period_center(time_center_s: np.ndarray,
pseudo_period_s: float) -> np.ndarray:
"""
GT blast pulse
:param time_center_s: array with time
:param pseudo_period_s: period in seconds
:return: numpy array with GT blast pulse
"""
# With the +1, tau is the zero crossing time - time_start renamed to time_zero for first zero crossing.
time_pos_s = pseudo_period_s/4.
tau = time_center_s/time_pos_s + 1.
# Initialize GT
p_GT = np.zeros(tau.size) # Granstrom-Triangular (GT), 2019
# Initialize time ranges
sigint1 = np.where((0.0 <= tau) & (tau <= 1.)) # ONLY positive pulse
sigintG17 = np.where((1. < tau) & (tau <= 1 + np.sqrt(6.))) # GT balanced pulse
p_GT[sigint1] = (1. - tau[sigint1])
p_GT[sigintG17] = 1./6. * (1. - tau[sigintG17]) * (1. + np.sqrt(6) - tau[sigintG17]) ** 2.
return p_GT
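# Minimal usage sketch for the pulse above (assumed values: 100 Hz sample rate, 1 s pseudo period):
#   t = np.arange(-25, 75) / 100.0                # centered time axis in seconds
#   pulse = gt_blast_period_center(t, 1.0)        # positive triangular lobe for 0 <= tau <= 1,
#                                                 # balanced negative lobe out to tau = 1 + sqrt(6)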
def gt_hilbert_blast_period_center(time_center_s: np.ndarray,
pseudo_period_s: float) -> np.ndarray:
"""
Hilbert transform of the GT blast pulse
:param time_center_s: array with time
:param pseudo_period_s: period in seconds
:return: numpy array with Hilbert transform of the GT blast pulse
"""
# With the +1, tau is the zero crossing time - time_start renamed to time_zero for first zero crossing.
time_pos_s = pseudo_period_s/4.
tau = time_center_s/time_pos_s + 1.
a = 1 + np.sqrt(6)
# Initialize GT
p_GT_H = np.zeros(tau.size) # Hilbert of Granstrom-Triangular (GT), 2019
# Initialize time ranges
sigint1 = np.where((0.0 <= tau) & (tau <= 1.)) # ONLY positive pulse
sigint2 = np.where((1. < tau) & (tau <= 1 + np.sqrt(6.))) # GT balanced pulse
tau1 = tau[sigint1]
tau2 = tau[sigint2]
p_GT_H[sigint1] = 1. + (1-tau1)*np.log(tau1+EPSILON) - (1-tau1)*np.log(1-tau1+EPSILON)
p_GT_H21 = (a-1)/6. * (a*(2*a+5) - 1 + 6*tau2**2 - 3*tau2*(1+3*a))
p_GT_H22 = (tau2-1)*(a-tau2)**2 * (np.log(a-tau2+EPSILON) - np.log(tau2-1+EPSILON))
p_GT_H[sigint2] = 1./6. * (p_GT_H21 + p_GT_H22)
p_GT_H /= np.pi
return p_GT_H
def gt_blast_center_fast(frequency_peak_hz: float = 6.3,
sample_rate_hz: float = 100.,
noise_std_loss_bits: float = 16) -> Tuple[np.ndarray, np.ndarray]:
"""
Fast computation of GT pulse with noise
:param frequency_peak_hz: peak frequency, nominal 6.3 Hz for 1 tonne TNT
:param sample_rate_hz: sample rate, nominal 100 Hz
:param noise_std_loss_bits: noise loss relative to signal variance
:return: centered time in seconds, GT pulse with white noise
"""
duration_s = 16/frequency_peak_hz # 16 cycles for 6th octave (M = 14)
pseudo_period_s = 1/frequency_peak_hz
duration_points = int(duration_s*sample_rate_hz)
time_center_s = np.arange(duration_points)/sample_rate_hz
time_center_s -= time_center_s[-1]/2.
sig_gt = gt_blast_period_center(time_center_s, pseudo_period_s)
sig_noise = white_noise_fbits(sig_gt, noise_std_loss_bits)
gt_white = sig_gt + sig_noise
# AA filter
gt_white_aa = antialias_halfNyquist(gt_white)
return time_center_s, gt_white_aa
def gt_blast_center_noise(duration_s: float = 16,
frequency_peak_hz: float = 6.3,
sample_rate_hz: float = 100,
noise_std_loss_bits: float = 16) -> Tuple[np.ndarray, np.ndarray]:
"""
Fast computation of GT pulse with noise for a specified duration in seconds
:param duration_s: signal duration in seconds
:param frequency_peak_hz: peak frequency, nominal 6.3 Hz for 1 tonne TNT
:param sample_rate_hz: sample rate, nominal 100 Hz
:param noise_std_loss_bits: noise loss relative to signal variance
:return: centered time in seconds, GT pulse with white noise
"""
pseudo_period_s = 1/frequency_peak_hz
duration_points = int(duration_s*sample_rate_hz)
time_center_s = np.arange(duration_points)/sample_rate_hz
time_center_s -= time_center_s[-1]/2.
sig_gt = gt_blast_period_center(time_center_s, pseudo_period_s)
sig_noise = white_noise_fbits(sig_gt, noise_std_loss_bits)
gt_white = sig_gt + sig_noise
# AA filter
gt_white_aa = antialias_halfNyquist(gt_white)
return time_center_s, gt_white_aa
def gt_blast_center_noise_uneven(sensor_epoch_s: np.array,
noise_std_loss_bits: float = 2,
frequency_center_hz: Optional[float] = None) -> np.ndarray:
"""
Construct the GT explosion pulse of Garces (2019) for even or uneven sensor time
    in Gaussian noise with SNR in bits re signal STD.
This is a very flexible variation.
:param sensor_epoch_s: array with timestamps for signal in epoch seconds
:param noise_std_loss_bits: number of bits below signal standard deviation. Default is 2
:param frequency_center_hz: center frequency in Hz. Optional
:return: numpy array with anti-aliased GT explosion pulse with Gaussian noise
"""
time_duration_s = sensor_epoch_s[-1]-sensor_epoch_s[0]
if frequency_center_hz:
pseudo_period_s = 1/frequency_center_hz
else:
pseudo_period_s = time_duration_s/4.
# Convert to seconds
time_center_s = sensor_epoch_s - sensor_epoch_s[0] - time_duration_s/2.
sig_gt = gt_blast_period_center(time_center_s, pseudo_period_s)
sig_noise = white_noise_fbits(
|
np.copy(sig_gt)
|
numpy.copy
|
"""
Author: <NAME>
Last modified: 10/31/2020
Description:
This module consists of simulations of the spread of multiple
    contagions on a single network under the threshold model.
"""
#--------------------------- Imports ------------------------------#
import numpy as np
import mms.utility as mu
from scipy import sparse
#----------------------- Function Definitions ----------------------#
def isolate_threshold_count(A, B, T, k, r = 0):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are not interrelated.
Parameters
----------
A: scipy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: scipy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
k: int
The number of system iterations
r: float, optional
The recovery probability. In each iteration, each vertex has a probability
r of changing its state to 0 for each contagion.
Returns
-------
B: numpy array
The final configuration
"""
# Make all 1s along the diagonal of A (since we are considering the closed neighborhood)
#np.fill_diagonal(A, 1)
# The recovery probability
recovery = False
if r != 0:
recovery = True
# The main loop
for i in range(k):
# matrix operation
B_last = B
B = A @ B - T #B = np.matmul(A, B_last) - T
# update states
B[B >= 0] = 1
B[B < 0] = 0
# If a recovery probability is set
if recovery:
B[np.random.rand(*B.shape) < r] = 0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
print("Max number of iteratios reached")
return B
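# Illustrative usage sketch (not part of the original module): a single contagion on a
# three-vertex path with self-loops (closed neighborhoods). Vertex 0 starts active and a
# vertex activates once enough of its closed neighborhood is active. Toy values only.
# >>> import numpy as np
# >>> from scipy import sparse
# >>> A = sparse.csr_matrix(np.array([[1, 1, 0], [1, 1, 1], [0, 1, 1]]))
# >>> B = sparse.csr_matrix(np.array([[1], [0], [0]]))
# >>> T = np.array([[1], [1], [2]])
# >>> B_final = isolate_threshold_count(A, B, T, k=5)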
###########################################################################################
def correlate_threshold_weight(A, B, T, W, k, r = 0):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are interrelated as described by the third model.
Parameters
----------
A: numpy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: numpy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
W: numpy array, float [0, 1]
The weight matrix where $W_{ij}$ is the weight of contagion j w.r.t
contagion i
k: int
The number of system iterations
r: float, optional
The recovery probability. In each iteration, each vertex has a probability
r of changing its state to 0 for each contagion.
Returns
-------
B: numpy array
The final configuration
"""
# Make all 1s along the diagonal of A (since we are considering the closed neighborhood)
#A.setdiag(1)
# The recovery probability
recovery = False
if r != 0:
recovery = True
# Take the transpose of the weight matrix
W = np.transpose(W)
# The main loop
for i in range(k):
# matrix operation
B_last = B
#B = np.linalg.multi_dot([A, B_last, W]) - T
B = A @ B_last @ W - T
# update states
B[B >= 0] = 1
B[B < 0] = 0
# If a recovery probability is set
if recovery:
B[np.random.rand(*B.shape) < r] = 0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
#h = hpy()
#print(h.heap())
print("Max number of iteratios reached")
return B
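# Illustrative usage sketch (not part of the original module): two coupled contagions on a
# two-vertex clique. W mixes contagion scores before thresholding, so here activity in
# contagion 0 contributes half credit toward contagion 1. Toy values only.
# >>> import numpy as np
# >>> from scipy import sparse
# >>> A = sparse.csr_matrix(np.ones((2, 2), dtype=int))
# >>> B = np.array([[1, 0], [0, 0]])
# >>> T = np.array([[1, 1], [1, 1]])
# >>> W = np.array([[1.0, 0.0], [0.5, 1.0]])
# >>> B_final = correlate_threshold_weight(A, B, T, W, k=3)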
def correlate_threshold_density(A, B, T, d, k):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are interrelated as described by the second model.
Parameters
----------
A: numpy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: numpy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
d: numpy array, int
The density vector
k: int
The number of system iterations
Returns
-------
B: numpy array
The final configuration
"""
# Compute the reciprocal
d_bar = np.transpose( np.reciprocal(d.astype(float)) ) # Make sure that d is a column vector
# The number of contagions
c = np.shape(T)[1]
# k * 1 ones
one = np.ones((c, 1), dtype = 'float')
# The main loop
for i in range(k):
B_last = B
# Compute M
M = B @ one @ d_bar #M = np.linalg.multi_dot([B, one, d_bar])
M[M >= 1.0] = 1.0
M[M < 1.0] = 0.0
#B = np.matmul(A, M) - T
B = A @ M - T
# update states
B[B >= 0.0] = 1.0
B[B < 0.0] = 0.0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
print("Max number of iteratios reached")
return B
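# Illustrative usage sketch (not part of the original module): under this model, d_j is the
# number of simultaneously active contagions a vertex must carry before it counts toward its
# neighbors' thresholds for contagion j. Toy values for two vertices and two contagions.
# >>> import numpy as np
# >>> from scipy import sparse
# >>> A = sparse.csr_matrix(np.ones((2, 2), dtype=int))
# >>> B = np.array([[1, 1], [0, 0]])
# >>> T = np.array([[1, 1], [1, 1]])
# >>> d = np.array([[2], [2]])
# >>> B_final = correlate_threshold_density(A, B, T, d, k=3)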
def covid_mask(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is the threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector throughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall fraction of infection exceeds the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors who wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Have to flatten q to pass it to the binomial function
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Compute R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
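# Illustrative usage sketch (not part of the original module): four people, with the social
# and disease layers sharing the same complete-graph contacts for brevity. All parameter
# values are arbitrary toy numbers chosen only to show the call signature and return values.
# >>> import numpy as np
# >>> from scipy import sparse
# >>> n = 4
# >>> A = sparse.csr_matrix(np.ones((n, n), dtype=int) - np.eye(n, dtype=int))
# >>> D_inv = sparse.diags(1.0 / np.asarray(A.sum(axis=1)).ravel())
# >>> a_1 = np.zeros((n, 1)); t_2 = np.full((n, 1), 0.5); t_3 = np.full((n, 1), 0.25)
# >>> b_1 = np.zeros((n, 1)); b_2 = np.zeros((n, 1)); b_2[0] = 1.0
# >>> frac_recovered, peak_frac, days, avg_mask = covid_mask(
# ...     A, A, D_inv, a_1, t_2, t_3, b_1, b_2,
# ...     p=0.3, alpha=0.3, beta=0.5, r=0.2, k=50)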
def covid_mask_sym_fear(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k, sym_ratio):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is the threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector throughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall fraction of infection exceeds the threshold
l_3 = -(t_3 - sym_ratio * a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors who wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Have to flatten q to pass it to the binomial function
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Compute R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
def covid_mask_peak_diff(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is the threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac_1 = 0.0
# The second largest fraction of infection reached throughout the time
max_frac_2 = 0.0
# The time where the largest infection occurs
peak_time_1 = 0
# The time where the second largest infection occurs
peak_time_2 = 0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector throughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac_1:
max_frac_2 = max_frac_1
peak_time_2 = peak_time_1
max_frac_1 = a_3
peak_time_1 = i
# determine if the overall fraction of infection exceeds the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors who wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Have to flatten q to pass it to the binomial function
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Compute R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
return peak_time_1 - peak_time_2  # gap between the two largest infection peaks
return peak_time_1 - peak_time_2
def covid_mask_control(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is the threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
import torch
import numpy as np
import logging
import daisy
from time import time as now
from .hemibrain_dataset import HemibrainDataset
from .hemibrain_graph_unmasked import HemibrainGraphUnmasked
from .hemibrain_graph_masked import HemibrainGraphMasked
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class HemibrainDatasetBlockwise(HemibrainDataset):
def prepare(self):
self.define_blocks()
def define_blocks(self):
"""
block size from global config file, roi_offset and roi_shape
are local attributes
"""
logger.debug(f'block size: {self.config.block_size}')
logger.debug(f'max padding: {self.config.block_padding}')
# Blockwise dataset should cover the entire dataset
# Therefore the last block in each dimension will be
# - overlapping with the penultimate
# - shrunk
blocks_per_dim = np.ceil(
(np.array(
self.roi_shape) /
np.array(
self.config.block_size))).astype(np.int_)
logger.debug(f'blocks per dim: {blocks_per_dim}')
self.len = int(np.prod(blocks_per_dim))
logger.info(f'num blocks in dataset: {self.len}')
self.block_offsets = []
self.block_shapes = []
if self.config.block_fit == 'overlap':
assert np.all(np.array(self.config.block_size)
<= np.array(self.roi_shape))
# Create offsets for all blocks in ROI
for i in range(blocks_per_dim[0]):
for j in range(blocks_per_dim[1]):
for k in range(blocks_per_dim[2]):
if self.config.block_fit == 'shrink':
block_offset_new = (
np.array(self.roi_offset, dtype=np.int_) +
np.array([i, j, k], dtype=np.int_) *
np.array(self.config.block_size, dtype=np.int_)
).astype(np.int_)
block_shape_new = (
np.minimum(
block_offset_new +
np.array(self.config.block_size, dtype=np.int_),
np.array(self.roi_offset, dtype=np.int_) + np.array(self.roi_shape, dtype=np.int_)
) - block_offset_new
).astype(np.int_)
elif self.config.block_fit == 'overlap':
block_offset_new = np.minimum(
np.array(self.roi_offset, dtype=np.int_) +
np.array([i, j, k], dtype=np.int_) *
np.array(self.config.block_size, dtype=np.int_),
np.array(self.roi_offset, dtype=np.int_) +
np.array(self.roi_shape, dtype=np.int_) -
np.array(self.config.block_size, dtype=np.int_)
).astype(np.int_)
block_shape_new = np.array(
self.config.block_size, dtype=np.int_)
else:
raise NotImplementedError(
f'block_fit {self.config.block_fit} not implemented')
# TODO remove asserts
# lower corner
assert np.all(block_offset_new >=
np.array(self.roi_offset, dtype=np.int_))
# shape
assert np.all(block_shape_new <= np.array(
self.config.block_size, dtype=np.int_))
# upper corner
assert np.all(
block_offset_new +
block_shape_new <= np.array(
self.roi_offset, dtype=np.int_) +
np.array(
self.roi_shape, dtype=np.int_))
self.block_offsets.append(block_offset_new)
self.block_shapes.append(block_shape_new)
logger.debug('generated blocks')
for o, s in zip(self.block_offsets, self.block_shapes):
logger.debug(daisy.Roi(offset=o, shape=s))
# check whether the entire ROI seems to be covered by the created blocks
lower_corner_idx = np.array(self.block_offsets, dtype=np.int_).sum(axis=1).argmin()
assert np.array_equal(
self.block_offsets[lower_corner_idx], np.array(self.roi_offset, dtype=np.int_))
upper_corner_idx = (np.array(self.block_offsets, dtype=np.int_) +
np.array(self.block_shapes, dtype=np.int_)).sum(axis=1).argmax()
assert np.array_equal(
self.block_offsets[upper_corner_idx] +
self.block_shapes[upper_corner_idx],
np.array(self.roi_offset, dtype=np.int_) +
np.array(self.roi_shape, dtype=np.int_))
from __future__ import print_function
from __future__ import division
from builtins import str
from flarestack.utils.prepare_catalogue import ps_catalogue_name
from flarestack.data.icecube.ps_tracks.ps_v002_p01 import IC86_1_dict, IC86_234_dict
from flarestack.core.results import ResultsHandler
from flarestack.cluster import run_desy_cluster as rd
from flarestack.shared import plot_output_dir, scale_shortener, make_analysis_pickle
import matplotlib.pyplot as plt
import numpy as np
seasons = [IC86_1_dict, IC86_234_dict]
all_res = dict()
basename = "analyses/angular_error_floor/compare_seasons/"
for gamma in [2.0, 3.0, 3.5]:
gamma_name = basename + str(gamma) + "/"
injection_energy = {
"Name": "Power Law",
"Gamma": gamma,
}
injection_time = {"Name": "Steady"}
inj_dict = {
"Injection Energy PDF": injection_energy,
"Injection Time PDF": injection_time,
"Poisson Smear?": False,
"fixed_n": 100,
}
# sin_decs = np.linspace(1.00, -1.00, 41)
#
# print sin_decs
sin_decs = np.linspace(0.9, -0.9, 37)
# print sin_decs
# raw_input("prompt")
# sin_decs = [-0.5, 0.0, 0.5]
res_dict = dict()
for pull_corrector in ["no_pull", "median_1d"]:
# for pull_corrector in ["median_1d_e", ]:
root_name = gamma_name + pull_corrector + "/"
if "_e" in pull_corrector:
root_key = "Dynamic Pull Corrector " + pull_corrector[-4] + "D "
elif pull_corrector == "no_pull":
root_key = "Base Case"
else:
root_key = "Static Pull Corrector " + pull_corrector[-2] + "D "
for floor in ["no_floor"]:
seed_name = root_name + floor + "/"
if floor == "no_floor":
key = root_key + " (No floor)"
else:
key = root_key + " (" + floor + ")"
config_mh = []
for season in seasons:
name = seed_name + season["Data Sample"] + "/" + season["Name"] + "/"
print(name)
llh_dict = {
"name": "spatial",
"LLH Energy PDF": injection_energy,
"LLH Time PDF": injection_time,
"pull_name": pull_corrector,
"floor_name": floor,
}
# scale = flux_to_k(reference_sensitivity(sin_dec, gamma)) * 10
mh_dict = {
"name": name,
"mh_name": "fixed_weights",
"datasets": [IC86_1_dict],
"catalogue": ps_catalogue_name(-0.2),
"llh_dict": llh_dict,
"inj kwargs": inj_dict,
"n_trials": 50,
"n_steps": 2,
"scale": 1.0,
}
pkl_file = make_analysis_pickle(mh_dict)
# rd.submit_to_cluster(pkl_file, n_jobs=50)
#
# mh = MinimisationHandler.create(mh_dict_power_law)
# mh.iterate_run(n_steps=2, n_trials=10)
config_mh.append(mh_dict)
res_dict[key] = config_mh
all_res[gamma] = res_dict
rd.wait_for_cluster()
for (gamma, res_dict) in all_res.items():
gamma_name = basename + str(gamma) + "/"
sens_dict = dict()
med_bias_dict = dict()
mean_bias_dict = dict()
disc_dict = dict()
for (config, mh_list) in res_dict.items():
sens = []
med_biases = []
mean_biases = []
disc = []
for mh_dict in mh_list:
rh = ResultsHandler(mh_dict)
max_scale = scale_shortener(
max([float(x) for x in list(rh.results.keys())])
)
sens.append(rh.sensitivity)
disc.append(rh.disc_potential)
fit = rh.results[max_scale]["Parameters"]["n_s"]
inj = rh.inj[max_scale]["n_s"]
med_bias = np.median(fit) / inj
med_biases.append(med_bias)
mean_biases.append(np.mean(fit) / inj)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, List, Dict, Tuple
from collections import defaultdict, deque
import numpy as np
from scipy import stats
import cma
from . import base
from . import mutations
from .base import registry
# families of optimizers
# pylint: disable=unused-wildcard-import,wildcard-import
from .differentialevolution import *
from .oneshot import *
from .recastlib import *
# # # # # optimizers # # # # #
@registry.register
class OnePlusOne(base.Optimizer):
"""Simple but sometimes powerful optimization algorithm.
We use the one-fifth adaptation rule, going back to Schumer and Steiglitz (1968).
It was independently rediscovered by Devroye (1972) and Rechenberg (1973).
We use asynchronous updates, so that the 1+1 can actually be parallel and even
performs quite well in such a context - this is naturally close to 1+lambda.
"""
def __init__(self, dimension: int, budget: Optional[int] = None, num_workers: int = 1) -> None:
super().__init__(dimension, budget=budget, num_workers=num_workers)
self.sigma: float = 1
def _internal_ask(self) -> base.ArrayLike:
if not self._num_suggestions:
return np.zeros(self.dimension)
import dgl
import numpy as np
import torch
from torch import FloatTensor
from torch.utils.data import Dataset
from experiments.toy_optimisation.opt_potential import update_potential_values
from utils.utils_data import update_relative_positions
DTYPE = np.float32
DTYPE_TORCH = torch.float32
class OptDataset(Dataset):
def __init__(self, FLAGS, split):
"""Create a dataset of graphs. Each graph represents a set of points in 3D space, where each pair of points
interacts according to a randomly parameterised potential. The parameter(s) of this potential are stored in the
graph as edge information.
Node data has shape (num_points, num_channels, data_dimensionality)
Edge data has shape (num_edges, num_channels, data_dimensionality)
The node indices on the graphs are ascending-ordered by destination node followed by source node. Other code
may depend on this ordering."""
self.n_points = FLAGS.n_points
self.len = FLAGS.epoch_length
self.split = split
assert self.split in ["log", "train"]
@property
def _num_directed_edges(self):
return self.n_points * (self.n_points - 1)
# noinspection PyArgumentList
@staticmethod
def _generate_potential_parameters():
return FloatTensor(1).uniform_(0.0, 1.0)
def _generate_graph_edges_with_parameters(self):
"""Generate source node indices, destination node indices, and potential parameters for a fully connected graph.
The returned indices are ascending-ordered by destination node followed by source node."""
src = []
dst = []
list_of_parameters = []
parameters_dict = {}
for i in range(self.n_points):
for j in range(self.n_points):
key = frozenset({i, j})
if i != j:
if key not in parameters_dict.keys():
parameters_dict[key] = self._generate_potential_parameters()
# Add indices and parameters for the j -> i edge.
dst.append(i)
src.append(j)
list_of_parameters.append(parameters_dict[key])
parameters_shape = (self._num_directed_edges, 1, 1)
parameters_tensor = torch.tensor(list_of_parameters).reshape(parameters_shape)
return np.array(src), np.array(dst), parameters_tensor
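# Illustrative note (not part of the original module): for n_points = 3 the loop above yields
# directed edges ordered by destination, then source, i.e.
# (dst, src) = (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1),
# giving n_points * (n_points - 1) = 6 edges, in agreement with _num_directed_edges.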
# -*- coding: utf-8 -*-
#GSASIIpwdGUI - powder data display routines
########### SVN repository information ###################
# $Date: 2019-09-13 14:54:35 -0500 (Fri, 13 Sep 2019) $
# $Author: vondreele $
# $Revision: 4146 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIpwdGUI.py $
# $Id: GSASIIpwdGUI.py 4146 2019-09-13 19:54:35Z vondreele $
########### SVN repository information ###################
'''
*GSASIIpwdGUI: Powder Pattern GUI routines*
-------------------------------------------
Used to define GUI controls for the routines that interact
with the powder histogram (PWDR) data tree items.
'''
from __future__ import division, print_function
import platform
import sys
import os.path
# Don't depend on graphics for scriptable
try:
import wx
import wx.grid as wg
except ImportError:
pass
import numpy as np
import numpy.linalg as nl
import numpy.ma as ma
import math
import copy
import random as ran
if '2' in platform.python_version_tuple()[0]:
import cPickle
else:
import pickle as cPickle
import scipy.interpolate as si
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4146 $")
import GSASIImath as G2mth
import GSASIIpwd as G2pwd
import GSASIIfiles as G2fil
import GSASIIobj as G2obj
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIindex as G2indx
import GSASIIplot as G2plt
import GSASIIdataGUI as G2gd
import GSASIIphsGUI as G2phsG
import GSASIIctrlGUI as G2G
import GSASIIElemGUI as G2elemGUI
import GSASIIElem as G2elem
import GSASIIsasd as G2sasd
import G2shapes
VERY_LIGHT_GREY = wx.Colour(235,235,235)
WACV = wx.ALIGN_CENTER_VERTICAL
if '2' in platform.python_version_tuple()[0]:
GkDelta = unichr(0x0394)
Pwr10 = unichr(0x0b9)+unichr(0x2070)
Pwr20 = unichr(0x0b2)+unichr(0x2070)
Pwrm1 = unichr(0x207b)+unichr(0x0b9)
Pwrm2 = unichr(0x207b)+unichr(0x0b2)
Pwrm6 = unichr(0x207b)+unichr(0x2076)
Pwrm4 = unichr(0x207b)+unichr(0x2074)
Angstr = unichr(0x00c5)
else:
GkDelta = chr(0x0394)
Pwr10 = chr(0x0b9)+chr(0x2070)
Pwr20 = chr(0x0b2)+chr(0x2070)
Pwrm1 = chr(0x207b)+chr(0x0b9)
Pwrm2 = chr(0x207b)+chr(0x0b2)
Pwrm6 = chr(0x207b)+chr(0x2076)
Pwrm4 = chr(0x207b)+chr(0x2074)
Angstr = chr(0x00c5)
# trig functions in degrees
sind = lambda x: math.sin(x*math.pi/180.)
tand = lambda x: math.tan(x*math.pi/180.)
cosd = lambda x: math.cos(x*math.pi/180.)
asind = lambda x: 180.*math.asin(x)/math.pi
################################################################################
###### class definitions
################################################################################
class SubCellsDialog(wx.Dialog):
def __init__(self,parent,title,controls,SGData,items,phaseDict):
wx.Dialog.__init__(self,parent,-1,title,
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = None
self.controls = controls
self.SGData = SGData #for parent phase
self.items = items
self.phaseDict = phaseDict
self.Draw()
def Draw(self):
def RefreshGrid(event):
r,c = event.GetRow(),event.GetCol()
br = self.items[r]
phase = self.phaseDict[br]
rLab = magDisplay.GetRowLabelValue(r)
pname = '(%s) %s'%(rLab,phase['Name'])
if c == 0:
mSGData = phase['SGData']
text,table = G2spc.SGPrint(mSGData,AddInv=True)
if 'magAtms' in phase:
msg = 'Magnetic space group information'
text[0] = ' Magnetic Space Group: '+mSGData['MagSpGrp']
text[3] = ' The magnetic lattice point group is '+mSGData['MagPtGp']
OprNames,SpnFlp = G2spc.GenMagOps(mSGData)
G2G.SGMagSpinBox(self.panel,msg,text,table,mSGData['SGCen'],OprNames,
mSGData['SpnFlp'],False).Show()
else:
msg = 'Space Group Information'
G2G.SGMessageBox(self.panel,msg,text,table).Show()
elif c == 1:
maxequiv = phase['maxequiv']
mSGData = phase['SGData']
Uvec = phase['Uvec']
Trans = phase['Trans']
ifMag = False
if 'magAtms' in phase:
ifMag = True
allmom = phase.get('allmom',False)
magAtms = phase.get('magAtms','')
mAtoms = TestMagAtoms(phase,magAtms,self.SGData,Uvec,Trans,allmom,maxequiv)
else:
mAtoms = TestAtoms(phase,self.controls[15],self.SGData,Uvec,Trans,maxequiv)
Atms = []
AtCods = []
atMxyz = []
for ia,atom in enumerate(mAtoms):
atom[0] += '_%d'%ia
SytSym,Mul,Nop,dupDir = G2spc.SytSym(atom[2:5],mSGData)
Atms.append(atom[:2]+['',]+atom[2:5])
AtCods.append('1')
if 'magAtms' in phase:
MagSytSym = G2spc.MagSytSym(SytSym,dupDir,mSGData)
CSI = G2spc.GetCSpqinel(mSGData['SpnFlp'],dupDir)
atMxyz.append([MagSytSym,CSI[0]])
else:
CSI = G2spc.GetCSxinel(SytSym)
atMxyz.append([SytSym,CSI[0]])
G2phsG.UseMagAtomDialog(self.panel,pname,Atms,AtCods,atMxyz,ifMag=ifMag,ifOK=True).Show()
elif c in [2,3]:
if c == 2:
title = 'Conjugacy list for '+pname
items = phase['altList']
elif c == 3:
title = 'Super groups list for '+pname
items = phase['supList']
if not items[0]:
wx.MessageBox(pname+' is a maximal subgroup',caption='Super group is parent',style=wx.ICON_INFORMATION)
return
SubCellsDialog(self.panel,title,self.controls,self.SGData,items,self.phaseDict).Show()
if self.panel: self.panel.Destroy()
self.panel = wx.Panel(self)
rowLabels = [str(i+1) for i in range(len(self.items))]
colLabels = ['Space Gp','Uniq','nConj','nSup','Trans','Vec','a','b','c','alpha','beta','gamma','Volume']
Types = [wg.GRID_VALUE_STRING,]+3*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_STRING,]+ \
3*[wg.GRID_VALUE_FLOAT+':10,5',]+3*[wg.GRID_VALUE_FLOAT+':10,3',]+[wg.GRID_VALUE_FLOAT+':10,2']
table = []
for ip in self.items:
phase = self.phaseDict[ip]
natms = phase.get('nAtoms',1)
try:
nConj = len(phase['altList'])
nSup = len(phase['supList'])
except KeyError:
nConj = 0
nSup = 0
cell = list(phase['Cell'])
trans = G2spc.Trans2Text(phase['Trans'])
vec = G2spc.Latt2text([phase['Uvec'],])
row = [phase['Name'],natms,nConj,nSup,trans,vec]+cell
table.append(row)
CellsTable = G2G.Table(table,rowLabels=rowLabels,colLabels=colLabels,types=Types)
mainSizer = wx.BoxSizer(wx.VERTICAL)
magDisplay = G2G.GSGrid(self.panel)
magDisplay.SetTable(CellsTable, True)
magDisplay.Bind(wg.EVT_GRID_CELL_LEFT_CLICK,RefreshGrid)
magDisplay.AutoSizeColumns(False)
mainSizer.Add(magDisplay,0,WACV)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.Destroy()
# self.EndModal(wx.ID_OK)
class RDFDialog(wx.Dialog):
def __init__(self,parent):
wx.Dialog.__init__(self,parent,-1,'Background radial distribution function',
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = None
self.result = {'UseObsCalc':'obs-calc','maxR':20.0,'Smooth':'linear'}
self.Draw()
def Draw(self):
def OnUseOC(event):
self.result['UseObsCalc'] = useOC.GetValue()
def OnSmCombo(event):
self.result['Smooth'] = smCombo.GetValue()
if self.panel: self.panel.Destroy()
self.panel = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(wx.StaticText(self.panel,label='Background RDF controls:'),0,WACV)
plotType = wx.BoxSizer(wx.HORIZONTAL)
plotType.Add(wx.StaticText(self.panel,label=' Select plot type:'),0,WACV)
Choices = ['obs-back','calc-back','obs-calc']
useOC = wx.ComboBox(self.panel,value=Choices[2],choices=Choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
useOC.SetValue(self.result['UseObsCalc'])
useOC.Bind(wx.EVT_COMBOBOX,OnUseOC)
plotType.Add(useOC,0,WACV)
mainSizer.Add(plotType,0,WACV)
dataSizer = wx.BoxSizer(wx.HORIZONTAL)
dataSizer.Add(wx.StaticText(self.panel,label=' Smoothing type: '),0,WACV)
smChoice = ['linear','nearest',]
smCombo = wx.ComboBox(self.panel,value=self.result['Smooth'],choices=smChoice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
smCombo.Bind(wx.EVT_COMBOBOX, OnSmCombo)
dataSizer.Add(smCombo,0,WACV)
dataSizer.Add(wx.StaticText(self.panel,label=' Maximum radial dist.: '),0,WACV)
maxR = G2G.ValidatedTxtCtrl(self.panel,self.result,'maxR',nDig=(10,1),min=10.,max=50.,
typeHint=float)
dataSizer.Add(maxR,0,WACV)
mainSizer.Add(dataSizer,0,WACV)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(self.panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
btnSizer.Add(cancelBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def GetSelection(self):
return self.result
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
################################################################################
##### Setup routines
################################################################################
def GetFileBackground(G2frame,xye,Pattern):
bxye = np.zeros(len(xye[1]))
if 'BackFile' in Pattern[0]:
backfile,mult = Pattern[0]['BackFile'][:2]
if backfile:
bId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,backfile)
if bId:
bxye = mult*G2frame.GPXtree.GetItemPyData(bId)[1][1]
else:
print('Error: background PWDR {} not found'.format(backfile))
Pattern[0]['BackFile'][0] = ''
return bxye
def IsHistogramInAnyPhase(G2frame,histoName):
'''Tests a Histogram to see if it is linked to any phases.
Returns the name of the first phase where the histogram is used.
'''
phases = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
if phases:
item, cookie = G2frame.GPXtree.GetFirstChild(phases)
while item:
data = G2frame.GPXtree.GetItemPyData(item)
histoList = data['Histograms'].keys()
if histoName in histoList:
return G2frame.GPXtree.GetItemText(item)
item, cookie = G2frame.GPXtree.GetNextChild(phases, cookie)
return False
else:
return False
def SetupSampleLabels(histName,dataType,histType):
'''Setup a list of labels and number formatting for use in
labeling sample parameters.
:param str histName: Name of histogram, ("PWDR ...")
:param str dataType:
'''
parms = []
parms.append(['Scale','Histogram scale factor: ',[10,7]])
if 'C' in histType:
parms.append(['Gonio. radius','Goniometer radius (mm): ',[10,3]])
if 'PWDR' in histName:
if dataType == 'Debye-Scherrer':
if 'T' in histType:
parms += [['Absorption',u'Sample absorption (\xb5\xb7r/l): ',[10,4]],]
else:
parms += [['DisplaceX',u'Sample X displ. perp. to beam (\xb5m): ',[10,3]],
['DisplaceY',u'Sample Y displ. || to beam (\xb5m): ',[10,3]],
['Absorption',u'Sample absorption (\xb5\xb7r): ',[10,4]],]
elif dataType == 'Bragg-Brentano':
parms += [['Shift',u'Sample displacement(\xb5m): ',[10,4]],
['Transparency',u'Sample transparency(1/\xb5eff, cm): ',[10,3]],
['SurfRoughA','Surface roughness A: ',[10,4]],
['SurfRoughB','Surface roughness B: ',[10,4]]]
elif 'SASD' in histName:
parms.append(['Thick','Sample thickness (mm)',[10,3]])
parms.append(['Trans','Transmission (meas)',[10,3]])
parms.append(['SlitLen',u'Slit length (Q,\xc5'+Pwrm1+')',[10,3]])
parms.append(['Omega','Goniometer omega:',[10,3]])
parms.append(['Chi','Goniometer chi:',[10,3]])
parms.append(['Phi','Goniometer phi:',[10,3]])
parms.append(['Azimuth','Detector azimuth:',[10,3]])
parms.append(['Time','Clock time (s):',[12,3]])
parms.append(['Temperature','Sample temperature (K): ',[10,3]])
parms.append(['Pressure','Sample pressure (MPa): ',[10,3]])
return parms
def SetDefaultSASDModel():
'Fills in default items for the SASD Models dictionary'
return {'Back':[0.0,False],
'Size':{'MinDiam':50,'MaxDiam':10000,'Nbins':100,'logBins':True,'Method':'MaxEnt',
'Distribution':[],'Shape':['Spheroid',1.0],
'MaxEnt':{'Niter':100,'Precision':0.01,'Sky':-3},
'IPG':{'Niter':100,'Approach':0.8,'Power':-1},'Reg':{},},
'Pair':{'Method':'Moore','MaxRadius':100.,'NBins':100,'Errors':'User',
'Percent error':2.5,'Background':[0,False],'Distribution':[],
'Moore':10,'Dist G':100.,'Result':[],},
'Particle':{'Matrix':{'Name':'vacuum','VolFrac':[0.0,False]},'Levels':[],},
'Shapes':{'outName':'run','NumAA':100,'Niter':1,'AAscale':1.0,'Symm':1,'bias-z':0.0,
'inflateV':1.0,'AAglue':0.0,'pdbOut':False,'boxStep':4.0},
'Current':'Size dist.','BackFile':'',
}
def SetDefaultREFDModel():
'''Fills in default items for the REFD Models dictionary which are
defined as follows for each layer:
* Name: name of substance
* Thick: thickness of layer in Angstroms (not present for top & bottom layers)
* Rough: upper surface roughness for layer (not present for top layer)
* Penetration: mixing of layer substance into layer above-is this needed?
* DenMul: multiplier for layer scattering density (default = 1.0)
Top layer defaults to vacuum (or air/any gas); can be substituted for some other substance.
Bottom layer default: infinitely thick Silicon; can be substituted for some other substance.
'''
return {'Layers':[{'Name':'vacuum','DenMul':[1.0,False],}, #top layer
{'Name':'vacuum','Rough':[0.,False],'Penetration':[0.,False],'DenMul':[1.0,False],}], #bottom layer
'Scale':[1.0,False],'FltBack':[0.0,False],'Zero':'Top','dQ type':'None','Layer Seq':[], #globals
'Minimizer':'LMLS','Resolution':[0.,'Const dq/q'],'Recomb':0.5,'Toler':0.5, #minimizer controls
'DualFitFiles':['',],'DualFltBacks':[[0.0,False],],'DualScales':[[1.0,False],]} #optional stuff for multidat fits?
def SetDefaultSubstances():
'Fills in default items for the SASD Substances dictionary'
return {'Substances':{'vacuum':{'Elements':{},'Volume':1.0,'Density':0.0,'Scatt density':0.0,'XImag density':0.0},
'unit scatter':{'Elements':None,'Volume':None,'Density':None,'Scatt density':1.0,'XImag density':1.0}}}
def GetFileList(G2frame,fileType):
fileList = []
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while Id:
name = G2frame.GPXtree.GetItemText(Id)
if fileType in name.split()[0]:
fileList.append(name)
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
return fileList
def GetHistsLikeSelected(G2frame):
'''Get the histograms that match the current selected one:
The histogram prefix and data type (PXC etc.), the number of
wavelengths and the instrument geometry (Debye-Scherrer etc.)
must all match. The current histogram is not included in the list.
:param wx.Frame G2frame: pointer to main GSAS-II data tree
'''
histList = []
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))
hType = inst['Type'][0]
if 'Lam1' in inst:
hLam = 2
elif 'Lam' in inst:
hLam = 1
else:
hLam = 0
sample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Sample Parameters'))
# hGeom = sample.get('Type')
hstName = G2frame.GPXtree.GetItemText(G2frame.PatternId)
hPrefix = hstName.split()[0]+' '
# cycle through tree looking for items that match the above
item, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while item:
name = G2frame.GPXtree.GetItemText(item)
if name.startswith(hPrefix) and name != hstName:
cGeom,cType,cLam, = '?','?',-1
subitem, subcookie = G2frame.GPXtree.GetFirstChild(item)
while subitem:
subname = G2frame.GPXtree.GetItemText(subitem)
if subname == 'Sample Parameters':
sample = G2frame.GPXtree.GetItemPyData(subitem)
# cGeom = sample.get('Type')
elif subname == 'Instrument Parameters':
inst,inst2 = G2frame.GPXtree.GetItemPyData(subitem)
cType = inst['Type'][0]
if 'Lam1' in inst:
cLam = 2
elif 'Lam' in inst:
cLam = 1
else:
cLam = 0
subitem, subcookie = G2frame.GPXtree.GetNextChild(item, subcookie)
if cLam == hLam and cType == hType: # and cGeom == hGeom:
if name not in histList: histList.append(name)
item, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
return histList
def SetCopyNames(histName,dataType,addNames=[]):
'''Determine the items in the sample parameters that should be copied,
depending on the histogram type and the instrument type.
'''
copyNames = ['Scale',]
histType = 'HKLF'
if 'PWDR' in histName:
histType = 'PWDR'
if 'Debye' in dataType:
copyNames += ['DisplaceX','DisplaceY','Absorption']
else: #Bragg-Brentano
copyNames += ['Shift','Transparency','SurfRoughA','SurfRoughB']
elif 'SASD' in histName:
histType = 'SASD'
copyNames += ['Materials','Thick',]
if len(addNames):
copyNames += addNames
return histType,copyNames
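# Illustrative example (not part of GSAS-II): what SetCopyNames returns for a Bragg-Brentano
# powder histogram; the values follow directly from the branches above.
# >>> SetCopyNames('PWDR test.raw', 'Bragg-Brentano')
# ('PWDR', ['Scale', 'Shift', 'Transparency', 'SurfRoughA', 'SurfRoughB'])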
def CopyPlotCtrls(G2frame):
'''Global copy: Copy plot controls from current histogram to others.
'''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No other histograms match '+hst,G2frame)
return
sourceData = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
if 'Offset' not in sourceData[0]: #patch for old data
sourceData[0].update({'Offset':[0.0,0.0],'delOffset':0.02,'refOffset':-1.0,
'refDelt':0.01,})
G2frame.GPXtree.SetItemPyData(G2frame.PatternId,sourceData)
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy plot controls from\n'+str(hst[5:])+' to...',
'Copy plot controls', histList)
results = []
try:
if dlg.ShowModal() == wx.ID_OK:
results = dlg.GetSelections()
finally:
dlg.Destroy()
copyList = []
for i in results:
copyList.append(histList[i])
keys = ['Offset','delOffset','refOffset','refDelt']
source = dict(zip(keys,[sourceData[0][item] for item in keys]))
for hist in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
data = G2frame.GPXtree.GetItemPyData(Id)
data[0].update(source)
G2frame.GPXtree.SetItemPyData(Id,data)
print ('Copy of plot controls successful')
def CopySelectedHistItems(G2frame):
'''Global copy: Copy items from current histogram to others.
'''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No other histograms match '+hst,G2frame)
return
choices = ['Limits','Background','Instrument Parameters','Sample Parameters']
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy which histogram sections from\n'+str(hst[5:]),
'Select copy sections', choices, filterBox=False)
dlg.SetSelections(range(len(choices)))
choiceList = []
if dlg.ShowModal() == wx.ID_OK:
choiceList = [choices[i] for i in dlg.GetSelections()]
if not choiceList: return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy parameters from\n'+str(hst[5:])+' to...',
'Copy parameters', histList)
results = []
try:
if dlg.ShowModal() == wx.ID_OK:
results = dlg.GetSelections()
finally:
dlg.Destroy()
copyList = []
for i in results:
copyList.append(histList[i])
if 'Limits' in choiceList: # Limits
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Limits'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Limits'),
copy.deepcopy(data))
if 'Background' in choiceList: # Background
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Background'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Background'),
copy.deepcopy(data))
if 'Instrument Parameters' in choiceList: # Instrument Parameters
# for now all items in Inst. parms are copied
data,data1 = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId,'Instrument Parameters'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters')
)[0].update(copy.deepcopy(data))
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters')
)[1].update(copy.deepcopy(data1))
if 'Sample Parameters' in choiceList: # Sample Parameters
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId,'Sample Parameters'))
# selects items to be copied
histType,copyNames = SetCopyNames(hst,data['Type'],
addNames = ['Omega','Chi','Phi','Gonio. radius','InstrName'])
copyDict = {parm:data[parm] for parm in copyNames}
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters')
).update(copy.deepcopy(copyDict))
def TestMagAtoms(phase,magAtms,SGData,Uvec,Trans,allmom,maxequiv=100,maximal=False):
found = False
anymom = False
phase['Keep'] = False
if not magAtms:
phase['Keep'] = True
return []
invTrans = nl.inv(Trans)
atCodes = []
Phase = {'General':{'AtomPtrs':[2,1],'SGData':copy.deepcopy(phase['SGData'])},'Atoms':[]}
for matm in magAtms:
XYZ = G2spc.GenAtom(matm[3:6],SGData,False,Move=True)
xyzs = [xyz[0] for xyz in XYZ]
atCodes += len(xyzs)*['1',]
xyzs,atCodes = G2lat.ExpandCell(xyzs,atCodes,0,Trans)
for ix,x in enumerate(xyzs):
xyz = G2lat.TransformXYZ(x-Uvec,invTrans.T,np.zeros(3))%1.
Phase['Atoms'].append(matm[:2]+list(xyz))
SytSym,Mul,Nop,dupDir = G2spc.SytSym(xyz,phase['SGData'])
CSI = G2spc.GetCSpqinel(phase['SGData']['SpnFlp'],dupDir)
if any(CSI[0]):
anymom = True
if allmom:
if not any(CSI[0]):
phase['Keep'] = False
found = True
uAtms = G2lat.GetUnique(Phase,atCodes)[0]
natm = len(uAtms)
if anymom and natm <= maxequiv and not found:
phase['Keep'] = True
if maximal and phase['supList'][0]:
phase['Keep'] = False
return uAtms
def TestAtoms(phase,magAtms,SGData,Uvec,Trans,maxequiv=100,maximal=False):
phase['Keep'] = True
invTrans = nl.inv(Trans)
atCodes = []
Phase = {'General':{'AtomPtrs':[2,1],'SGData':copy.deepcopy(phase['SGData'])},'Atoms':[]}
for matm in magAtms:
XYZ = G2spc.GenAtom(matm[3:6],SGData,False,Move=True)
xyzs = [xyz[0] for xyz in XYZ]
atCodes += len(xyzs)*['1',]
xyzs,atCodes = G2lat.ExpandCell(xyzs,atCodes,0,Trans)
for ix,x in enumerate(xyzs):
xyz = G2lat.TransformXYZ(x-Uvec,invTrans.T,np.zeros(3))%1.
Phase['Atoms'].append(matm[:2]+list(xyz))
uAtms = G2lat.GetUnique(Phase,atCodes)[0]
natm = len(uAtms)
if natm > maxequiv: #too many allowed atoms found
phase['Keep'] = False
if maximal and phase['supList'][0]:
phase['Keep'] = False
return uAtms
################################################################################
##### Powder Peaks
################################################################################
def UpdatePeakGrid(G2frame, data):
'''respond to selection of PWDR powder peaks data tree item.
'''
def OnAutoSearch(event):
PatternId = G2frame.PatternId
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
profile = Pattern[1]
bxye = GetFileBackground(G2frame,profile,Pattern)
x0 = profile[0]
iBeg = np.searchsorted(x0,limits[0])
iFin = np.searchsorted(x0,limits[1])
x = x0[iBeg:iFin]
y0 = (profile[1]+bxye)[iBeg:iFin]
ysig = 1.0*np.std(y0)
offset = [-1,1]
ymask = ma.array(y0,mask=(y0<ysig))
for off in offset:
ymask = ma.array(ymask,mask=(ymask-np.roll(y0,off)<=0.))
indx = ymask.nonzero()
mags = ymask[indx]
poss = x[indx]
refs = list(zip(poss,mags))
if 'C' in inst['Type'][0]:
refs = G2mth.sortArray(refs,0,reverse=True) #small 2-Thetas first
else: #'T'OF
refs = G2mth.sortArray(refs,0,reverse=False) #big TOFs first
for i,ref1 in enumerate(refs): #reject picks closer than 1 FWHM
for ref2 in refs[i+1:]:
if abs(ref2[0]-ref1[0]) < 2.*G2pwd.getFWHM(ref1[0],inst):
del(refs[i])
if 'C' in inst['Type'][0]:
refs = G2mth.sortArray(refs,1,reverse=True)
else: #'T'OF
refs = G2mth.sortArray(refs,1,reverse=False)
for pos,mag in refs:
data['peaks'].append(G2mth.setPeakparms(inst,inst2,pos,mag))
UpdatePeakGrid(G2frame,data)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnCopyPeaks(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy peak list from\n'+str(hst[5:])+' to...',
'Copy peaks', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Peak List'),copy.deepcopy(data))
def OnLoadPeaks(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PWDR peaks list file', pth, '',
'PWDR peak list files (*.pkslst)|*.pkslst',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
peaks = []
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
while S:
if '#' in S:
S = File.readline()
continue
try:
peaks.append(eval(S))
except:
break
S = File.readline()
File.close()
finally:
dlg.Destroy()
data = {'peaks':peaks,'sigDict':{}}
UpdatePeakGrid(G2frame,data)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnSavePeaks(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PWDR peaks list file', pth, '',
'PWDR peak list files (*.pkslst)|*.pkslst',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .pkslst
filename = os.path.splitext(filename)[0]+'.pkslst'
File = open(filename,'w')
File.write("#GSAS-II PWDR peaks list file; do not add/delete items!\n")
for item in data:
if item == 'peaks':
for pk in data[item]:
File.write(str(pk)+'\n')
File.close()
print ('PWDR peaks list saved to: '+filename)
finally:
dlg.Destroy()
def OnUnDo(event):
DoUnDo()
G2frame.dataWindow.UnDo.Enable(False)
def DoUnDo():
print ('Undo last refinement')
file = open(G2frame.undofile,'rb')
PatternId = G2frame.PatternId
for item in ['Background','Instrument Parameters','Peak List']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item),cPickle.load(file))
if G2frame.dataWindow.GetName() == item:
if item == 'Background':
UpdateBackground(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
elif item == 'Instrument Parameters':
UpdateInstrumentGrid(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
elif item == 'Peak List':
UpdatePeakGrid(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
print (item+' recovered')
file.close()
def SaveState():
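        '''Pickle the current Background, Instrument Parameters & Peak List to
        GSASII.save so that OnUnDo can restore them after a refinement.
        '''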
G2frame.undofile = os.path.join(G2frame.dirname,'GSASII.save')
file = open(G2frame.undofile,'wb')
PatternId = G2frame.PatternId
for item in ['Background','Instrument Parameters','Peak List']:
cPickle.dump(G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId,item)),file,1)
file.close()
G2frame.dataWindow.UnDo.Enable(True)
def OnLSQPeakFit(event):
if reflGrid.IsCellEditControlEnabled(): # complete any grid edits in progress
reflGrid.HideCellEditControl()
reflGrid.DisableCellEditControl()
if not G2frame.GSASprojectfile: #force a save of the gpx file so SaveState can write in the same directory
G2frame.OnFileSaveas(event)
wx.CallAfter(OnPeakFit,'LSQ')
def OnOneCycle(event):
if reflGrid.IsCellEditControlEnabled(): # complete any grid edits in progress
reflGrid.HideCellEditControl()
reflGrid.DisableCellEditControl()
wx.CallAfter(OnPeakFit,'LSQ',oneCycle=True)
def OnSeqPeakFit(event):
histList = G2gd.GetGPXtreeDataNames(G2frame,['PWDR',])
od = {'label_1':'Copy to next','value_1':False,'label_2':'Reverse order','value_2':False}
dlg = G2G.G2MultiChoiceDialog(G2frame, 'Sequential peak fits',
'Select dataset to include',histList,extraOpts=od)
names = []
if dlg.ShowModal() == wx.ID_OK:
for sel in dlg.GetSelections():
names.append(histList[sel])
dlg.Destroy()
if not names:
return
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Sequential peak fit results')
if Id:
SeqResult = G2frame.GPXtree.GetItemPyData(Id)
else:
SeqResult = {}
Id = G2frame.GPXtree.AppendItem(parent=G2frame.root,text='Sequential peak fit results')
SeqResult = {'SeqPseudoVars':{},'SeqParFitEqList':[]}
SeqResult['histNames'] = names
dlg = wx.ProgressDialog('Sequential peak fit','Data set name = '+names[0],len(names),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
controls = {'deriv type':'analytic','min dM/M':0.001,}
print ('Peak Fitting with '+controls['deriv type']+' derivatives:')
oneCycle = False
FitPgm = 'LSQ'
prevVaryList = []
peaks = None
varyList = None
if od['value_2']:
names.reverse()
try:
for i,name in enumerate(names):
print (' Sequential fit for '+name)
GoOn = dlg.Update(i,newmsg='Data set name = '+name)[0]
if not GoOn:
dlg.Destroy()
break
PatternId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name)
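                # if 'Copy to next' was selected, seed this histogram with the
                # previous histogram's fitted peaks & varied parameters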
if i and od['value_1']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),copy.deepcopy(peaks))
prevVaryList = varyList[:]
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
data = Pattern[1]
fixback = GetFileBackground(G2frame,data,Pattern)
peaks['sigDict'],result,sig,Rvals,varyList,parmDict,fullvaryList,badVary = G2pwd.DoPeakFit(FitPgm,peaks['peaks'],
background,limits,inst,inst2,data,fixback,prevVaryList,oneCycle,controls)
if len(result[0]) != len(fullvaryList):
dlg.Destroy()
print (' ***** Sequential peak fit stopped at '+name+' *****')
break
else:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),copy.deepcopy(peaks))
SeqResult[name] = {'variables':result[0],'varyList':varyList,'sig':sig,'Rvals':Rvals,
'covMatrix':np.eye(len(result[0])),'title':name,'parmDict':parmDict,
'fullVary':fullvaryList,'badVary':badVary}
print (' ***** Sequential peak fit successful *****')
finally:
dlg.Destroy()
SeqResult['histNames'] = histList
G2frame.GPXtree.SetItemPyData(Id,SeqResult)
G2frame.G2plotNB.Delete('Sequential refinement') #clear away probably invalid plot
G2frame.GPXtree.SelectItem(Id)
def OnClearPeaks(event):
dlg = wx.MessageDialog(G2frame,'Delete all peaks?','Clear peak list',wx.OK|wx.CANCEL)
        peaks = None    # stays None unless the user confirms the clear
        try:
if dlg.ShowModal() == wx.ID_OK:
peaks = {'peaks':[],'sigDict':{}}
finally:
dlg.Destroy()
        if peaks is None:   # dialog cancelled; keep the current peak list
            return
        UpdatePeakGrid(G2frame,peaks)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnPeakFit(FitPgm,oneCycle=False):
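        '''Do a peak fit on the current histogram via G2pwd.DoPeakFit; the
        pre-fit state is saved first (SaveState) so the fit can be undone.
        '''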
SaveState()
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
if not controls:
controls = {'deriv type':'analytic','min dM/M':0.001,} #fill in defaults if needed
print ('Peak Fitting with '+controls['deriv type']+' derivatives:')
PatternId = G2frame.PatternId
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
if not peaks:
G2frame.ErrorDialog('No peaks!','Nothing to fit!')
return
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
data = Pattern[1]
bxye = GetFileBackground(G2frame,data,Pattern)
dlg = wx.ProgressDialog('Residual','Peak fit Rwp = ',101.0,
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
screenSize = wx.ClientDisplayRect()
Size = dlg.GetSize()
if 50 < Size[0] < 500: # sanity check on size, since this fails w/Win & wx3.0
dlg.SetSize((int(Size[0]*1.2),Size[1])) # increase size a bit along x
dlg.SetPosition(wx.Point(screenSize[2]-Size[0]-305,screenSize[1]+5))
try:
peaks['sigDict'] = G2pwd.DoPeakFit(FitPgm,peaks['peaks'],background,limits,inst,inst2,data,bxye,[],oneCycle,controls,dlg)[0]
finally:
# dlg.Destroy()
print ('finished')
newpeaks = copy.copy(peaks)
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),newpeaks)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdatePeakGrid,G2frame,newpeaks)
def OnResetSigGam(event):
PatternId = G2frame.PatternId
Inst,Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
if not peaks['peaks']:
G2frame.ErrorDialog('No peaks!','Nothing to do!')
return
newpeaks = {'peaks':[],'sigDict':{}}
for peak in peaks['peaks']:
newpeaks['peaks'].append(G2mth.setPeakparms(Inst,Inst2,peak[0],peak[2]))
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),newpeaks)
UpdatePeakGrid(G2frame,newpeaks)
# def RefreshPeakGrid(event):
#
# event.StopPropagation()
# data['peaks'] = G2frame.PeakTable.GetData()
# T = []
# for peak in data['peaks']:T.append(peak[0])
# D = dict(zip(T,data['peaks']))
# T.sort()
# X = []
# for key in T: X.append(D[key])
# data['peaks'] = X
def setBackgroundColors():
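        'color peak parameter cells red when their value is negative, white otherwise'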
for r in range(reflGrid.GetNumberRows()):
for c in range(reflGrid.GetNumberCols()):
if reflGrid.GetColLabelValue(c) in ['position','intensity','alpha','beta','sigma','gamma']:
if float(reflGrid.GetCellValue(r,c)) < 0.:
reflGrid.SetCellBackgroundColour(r,c,wx.RED)
else:
reflGrid.SetCellBackgroundColour(r,c,wx.WHITE)
def KeyEditPeakGrid(event):
'''Respond to pressing a key to act on selection of a row, column or cell
in the Peak List table
'''
rowList = reflGrid.GetSelectedRows()
colList = reflGrid.GetSelectedCols()
selectList = reflGrid.GetSelectedCells()
data = G2frame.GPXtree.GetItemPyData(G2frame.PickId)
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif rowList and (event.GetKeyCode() == wx.WXK_DELETE or event.GetKeyCode() == 8):
# pressing the delete key or backspace deletes selected peak(s)
reflGrid.ClearSelection()
reflGrid.ClearGrid()
rowList.sort()
rowList.reverse()
nDel = 0
for row in rowList:
G2frame.PeakTable.DeleteRow(row)
nDel += 1
if nDel:
msg = wg.GridTableMessage(G2frame.PeakTable,
wg.GRIDTABLE_NOTIFY_ROWS_DELETED,0,nDel)
reflGrid.ProcessTableMessage(msg)
data['peaks'] = G2frame.PeakTable.GetData()[:-nDel]
G2frame.GPXtree.SetItemPyData(G2frame.PickId,data)
setBackgroundColors()
elif colList and (event.GetKeyCode() == 89 or event.GetKeyCode() == 78):
reflGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if G2frame.PeakTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(G2frame.PeakTable.GetNumberRows()): data['peaks'][row][col]=True
elif key == 78: #'N'
for row in range(G2frame.PeakTable.GetNumberRows()): data['peaks'][row][col]=False
elif selectList and (event.GetKeyCode() == 89 or event.GetKeyCode() == 78):
reflGrid.ClearSelection()
key = event.GetKeyCode()
for row,col in selectList:
if G2frame.PeakTable.GetTypeName(row,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
data['peaks'][row][col]=True
elif key == 78: #'N'
data['peaks'][row][col]=False
else:
event.Skip()
return
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdatePeakGrid,G2frame,data)
def SelectVars(rows):
'''Set or clear peak refinement variables for peaks listed in rows
'''
refOpts = {reflGrid.GetColLabelValue(i):i+1 for i in range(reflGrid.GetNumberCols()) if reflGrid.GetColLabelValue(i) != "refine"}
dlg = G2G.G2MultiChoiceDialog(G2frame,'Select columns to refine',
'Refinement Selection', sorted(refOpts.keys()),
filterBox=False,toggle=False)
sels = []
try:
if dlg.ShowModal() == wx.ID_OK:
sels = [sorted(refOpts.keys())[i] for i in dlg.GetSelections()]
else:
return
finally:
dlg.Destroy()
for r in rows:
for lbl,c in refOpts.items():
data['peaks'][r][c] = lbl in sels
UpdatePeakGrid(G2frame,data)
def OnRefineSelected(event):
'''set refinement flags for the selected peaks
'''
rows = list(set([row for row,col in reflGrid.GetSelectedCells()] +
reflGrid.GetSelectedRows()))
if not rows:
wx.MessageBox('No selected rows. You must select rows or cells before using this command',
caption='No selected peaks')
return
SelectVars(rows)
def OnRefineAll(event):
'''set refinement flags for all peaks
'''
SelectVars(range(reflGrid.GetNumberRows()))
# def onCellListSClick(event):
# '''Called when a peak is selected so that it can be highlighted in the plot
# '''
# event.Skip()
# c = event.GetRow(),event.GetCol()
    #     if c < 0: # replot except when a column is selected
# wx.CallAfter(G2plt.PlotPatterns,G2frame,plotType='PWDR')
#
def onCellListDClick(event):
'''Called after a double-click on a cell label'''
r,c = event.GetRow(),event.GetCol()
if r < 0 and c < 0:
for row in range(reflGrid.GetNumberRows()):
reflGrid.SelectRow(row,True)
for col in range(reflGrid.GetNumberCols()):
reflGrid.SelectCol(col,True)
elif r > 0: #row label: select it and replot!
reflGrid.ClearSelection()
reflGrid.SelectRow(r,True)
wx.CallAfter(G2frame.reflGrid.ForceRefresh)
wx.CallAfter(G2plt.PlotPatterns,G2frame,plotType='PWDR')
elif c > 0: #column label: just select it (& redisplay)
reflGrid.ClearSelection()
reflGrid.SelectCol(c,True)
if reflGrid.GetColLabelValue(c) != 'refine': return
choice = ['Y - vary all','N - vary none',]
dlg = wx.SingleChoiceDialog(G2frame,'Select refinement option for '+reflGrid.GetColLabelValue(c-1),
'Refinement controls',choice)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
if sel == 0:
for row in range(reflGrid.GetNumberRows()): data['peaks'][row][c]=True
else:
for row in range(reflGrid.GetNumberRows()): data['peaks'][row][c]=False
wx.CallAfter(UpdatePeakGrid,G2frame,data)
#======================================================================
# beginning of UpdatePeakGrid init
#======================================================================
G2frame.GetStatusBar().SetStatusText('Global refine: select refine column & press Y or N',1)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.PeakMenu)
G2frame.Bind(wx.EVT_MENU, OnAutoSearch, id=G2G.wxID_AUTOSEARCH)
G2frame.Bind(wx.EVT_MENU, OnCopyPeaks, id=G2G.wxID_PEAKSCOPY)
G2frame.Bind(wx.EVT_MENU, OnSavePeaks, id=G2G.wxID_PEAKSAVE)
G2frame.Bind(wx.EVT_MENU, OnLoadPeaks, id=G2G.wxID_PEAKLOAD)
G2frame.Bind(wx.EVT_MENU, OnUnDo, id=G2G.wxID_UNDO)
G2frame.Bind(wx.EVT_MENU, OnRefineSelected, id=G2frame.dataWindow.peaksSel.GetId())
G2frame.Bind(wx.EVT_MENU, OnRefineAll, id=G2frame.dataWindow.peaksAll.GetId())
G2frame.Bind(wx.EVT_MENU, OnLSQPeakFit, id=G2G.wxID_LSQPEAKFIT)
G2frame.Bind(wx.EVT_MENU, OnOneCycle, id=G2G.wxID_LSQONECYCLE)
G2frame.Bind(wx.EVT_MENU, OnSeqPeakFit, id=G2G.wxID_SEQPEAKFIT)
G2frame.Bind(wx.EVT_MENU, OnClearPeaks, id=G2G.wxID_CLEARPEAKS)
G2frame.Bind(wx.EVT_MENU, OnResetSigGam, id=G2G.wxID_RESETSIGGAM)
if data['peaks']:
G2frame.dataWindow.AutoSearch.Enable(False)
G2frame.dataWindow.PeakCopy.Enable(True)
G2frame.dataWindow.PeakFit.Enable(True)
G2frame.dataWindow.PFOneCycle.Enable(True)
G2frame.dataWindow.SeqPeakFit.Enable(True)
else:
G2frame.dataWindow.PeakFit.Enable(False)
G2frame.dataWindow.PeakCopy.Enable(False)
G2frame.dataWindow.PFOneCycle.Enable(False)
G2frame.dataWindow.AutoSearch.Enable(True)
G2frame.dataWindow.SeqPeakFit.Enable(False)
G2frame.PickTable = []
rowLabels = []
PatternId = G2frame.PatternId
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))[0]
for i in range(len(data['peaks'])): rowLabels.append(str(i+1))
if 'C' in Inst['Type'][0]:
colLabels = ['position','refine','intensity','refine','sigma','refine','gamma','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,1',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
else:
colLabels = ['position','refine','intensity','refine','alpha','refine',
'beta','refine','sigma','refine','gamma','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,1',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
T = []
for peak in data['peaks']:
T.append(peak[0])
D = dict(zip(T,data['peaks']))
T.sort()
if 'T' in Inst['Type'][0]: #want big TOF's first
T.reverse()
X = []
for key in T: X.append(D[key])
data['peaks'] = X
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
G2frame.GPXtree.SetItemPyData(G2frame.PickId,data)
G2frame.PeakTable = G2G.Table(data['peaks'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Peak List')
G2frame.dataWindow.currentGrids = []
reflGrid = G2G.GSGrid(parent=G2frame.dataWindow)
reflGrid.SetTable(G2frame.PeakTable, True)
setBackgroundColors()
# reflGrid.Bind(wg.EVT_GRID_CELL_CHANGE, RefreshPeakGrid)
reflGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
# reflGrid.Bind(wg.EVT_GRID_LABEL_LEFT_CLICK, onCellListSClick)
# G2frame.dataWindow.Bind(wg.EVT_GRID_CELL_LEFT_CLICK, onCellListSClick)
reflGrid.Bind(wg.EVT_GRID_LABEL_LEFT_DCLICK, onCellListDClick)
# G2frame.dataWindow.Bind(wg.EVT_GRID_CELL_LEFT_DCLICK, onCellListDClick)
reflGrid.AutoSizeColumns(False)
reflGrid.SetScrollRate(10,10)
G2frame.reflGrid = reflGrid
mainSizer.Add(reflGrid,1,wx.ALL|wx.EXPAND,1)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Background
################################################################################
def UpdateBackground(G2frame,data):
'''respond to selection of PWDR background data tree item.
'''
def OnBackFlagCopy(event):
flag = data[0][1]
backDict = data[-1]
if backDict['nDebye']:
DBflags = []
for term in backDict['debyeTerms']:
DBflags.append(term[1::2])
if backDict['nPeaks']:
PKflags = []
for term in backDict['peaksList']:
PKflags.append(term[1::2])
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy bkg ref. flags from\n'+str(hst[5:])+' to...',
'Copy bkg flags', histList)
copyList = []
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
backData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Background'))
backData[0][1] = copy.copy(flag)
bkDict = backData[-1]
if bkDict['nDebye'] == backDict['nDebye']:
for i,term in enumerate(bkDict['debyeTerms']):
term[1::2] = copy.copy(DBflags[i])
if bkDict['nPeaks'] == backDict['nPeaks']:
for i,term in enumerate(bkDict['peaksList']):
term[1::2] = copy.copy(PKflags[i])
def OnBackCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy bkg params from\n'+str(hst[5:])+' to...',
'Copy parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Background'),copy.deepcopy(data))
CalcBack(Id)
def OnBackSave(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II background parameters file', pth, '',
'background parameter files (*.pwdrbck)|*.pwdrbck',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .pwdrbck
filename = os.path.splitext(filename)[0]+'.pwdrbck'
File = open(filename,'w')
File.write("#GSAS-II background parameter file; do not add/delete items!\n")
File.write(str(data[0])+'\n')
for item in data[1]:
if item in ['nPeaks','background PWDR','nDebye'] or not len(data[1][item]):
File.write(item+':'+str(data[1][item])+'\n')
else:
File.write(item+':\n')
for term in data[1][item]:
File.write(str(term)+'\n')
File.close()
print ('Background parameters saved to: '+filename)
finally:
dlg.Destroy()
def OnBackLoad(event):
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II background parameters file', pth, '',
'background parameter files (*.pwdrbck)|*.pwdrbck',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
newback = [[],{}]
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
if S[0] == '#': #skip the heading
S = File.readline() #should contain the std. bck fxn
newback[0] = eval(S[:-1])
S = File.readline()
while S and ':' in S:
[item,vals] = S[:-1].split(':')
if item in ['nPeaks','nDebye']:
newback[1][item] = int(vals)
elif 'PWDR' in item:
newback[1][item] = eval(vals)
elif item in ['FixedPoints','debyeTerms','peaksList']:
newback[1][item] = []
S = File.readline()
while ':' not in S:
newback[1][item].append(eval(S[:-1]))
S = File.readline()
else:
continue
S = File.readline()
File.close()
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Background'),newback)
finally:
dlg.Destroy()
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallLater(100,UpdateBackground,G2frame,newback)
def OnBkgFit(event):
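        '''Fit the background function to the fixed background points entered by
        the user; any instrument parameter refinement flags are cleared first.
        '''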
def SetInstParms(Inst):
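            '''Clear the instrument parameter refinement flags and return the data
            type, a dict of the current values (with sensible minima enforced for
            X, Y & SH/L) and an empty vary list for the background-only fit.
            '''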
dataType = Inst['Type'][0]
insVary = []
insNames = []
insVals = []
for parm in Inst:
insNames.append(parm)
insVals.append(Inst[parm][1])
if parm in ['U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha',
'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q',] and Inst[parm][2]:
Inst[parm][2] = False
# insVary.append(parm)
instDict = dict(zip(insNames,insVals))
instDict['X'] = max(instDict['X'],0.01)
instDict['Y'] = max(instDict['Y'],0.01)
if 'SH/L' in instDict:
instDict['SH/L'] = max(instDict['SH/L'],0.002)
return dataType,instDict,insVary
PatternId = G2frame.PatternId
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
# sort the points for convenience and then separate them; extend the range if needed
if 'FixedPoints' not in background[1]:
msg = ("You have not defined any fixed background points. "+
"Use the Fixed Points/Add menu item to define points that will be fit."+
'\n\nSee the "Fitting the Starting Background using Fixed Points" tutorial for more details.')
print (msg)
G2frame.ErrorDialog('No points',msg)
return
background[1]['FixedPoints'] = sorted(background[1]['FixedPoints'],key=lambda pair:pair[0])
X = [x for x,y in background[1]['FixedPoints']]
Y = [y for x,y in background[1]['FixedPoints']]
if X[0] > limits[0]:
X = [limits[0]] + X
Y = [Y[0]] + Y
if X[-1] < limits[1]:
X += [limits[1]]
Y += [Y[-1]]
# interpolate the fixed points onto the grid of data points within limits
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)[1]
xBeg = np.searchsorted(pwddata[0],limits[0])
xFin = np.searchsorted(pwddata[0],limits[1])
xdata = pwddata[0][xBeg:xFin]
ydata = si.interp1d(X,Y)(ma.getdata(xdata))
W = [1]*len(xdata)
Z = [0]*len(xdata)
# load instrument and background params
print (' NB: Any instrument parameter refinement flags will be cleared')
dataType,insDict,insVary = SetInstParms(inst)
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(background)
# how many background parameters are refined?
if len(bakVary)*1.5 > len(X):
msg = ("You are attempting to vary "+str(len(bakVary))+
" background terms with only "+str(len(X))+" background points"+
"\nAdd more points or reduce the number of terms")
print (msg)
G2frame.ErrorDialog('Too few points',msg)
return
wx.BeginBusyCursor()
try:
G2pwd.DoPeakFit('LSQ',[],background,limits,inst,inst2,
np.array((xdata,ydata,W,Z,Z,Z)),Z,prevVaryList=bakVary,controls=controls)
finally:
wx.EndBusyCursor()
# compute the background values and plot them
parmDict = {}
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(background)
parmDict.update(bakDict)
parmDict.update(insDict)
# Note that this generates a MaskedArrayFutureWarning, but these items are not always masked
pwddata[3][xBeg:xFin] *= 0.
pwddata[5][xBeg:xFin] *= 0.
pwddata[4][xBeg:xFin] = G2pwd.getBackground('',parmDict,bakType,dataType,xdata)[0]
G2plt.PlotPatterns(G2frame,plotType='PWDR')
# show the updated background values
wx.CallLater(100,UpdateBackground,G2frame,data)
def OnBkgClear(event):
if 'FixedPoints' not in data[1]:
return
else:
data[1]['FixedPoints'] = []
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnPeaksMove(event):
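        'copy the background peaks into the Peak List (refinement flags cleared)'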
if not data[1]['nPeaks']:
G2frame.ErrorDialog('Error','No peaks to move')
return
Peaks = {'peaks':[],'sigDict':{}}
for peak in data[1]['peaksList']:
Peaks['peaks'].append([peak[0],0,peak[2],0,peak[4],0,peak[6],0])
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'),Peaks)
def OnMakeRDF(event):
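        '''Compute a radial distribution function from the pattern using the
        options chosen in RDFDialog and plot the resulting I(Q) & D(R) curves.
        '''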
dlg = RDFDialog(G2frame)
try:
if dlg.ShowModal() == wx.ID_OK:
RDFcontrols = dlg.GetSelection()
else:
return
finally:
dlg.Destroy()
PatternId = G2frame.PatternId
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)[1]
auxPlot = G2pwd.MakeRDF(RDFcontrols,background,inst,pwddata)
if '2' in platform.python_version_tuple()[0]:
superMinusOne = unichr(0xaf)+unichr(0xb9)
else:
superMinusOne = chr(0xaf)+chr(0xb9)
for plot in auxPlot:
XY = np.array(plot[:2])
if 'D(R)' in plot[2]:
xlabel = r'$R, \AA$'
ylabel = r'$D(R), arb. units$'
else:
xlabel = r'$Q,\AA$'+superMinusOne
ylabel = r'$I(Q)$'
G2plt.PlotXY(G2frame,[XY,],Title=plot[2],labelX=xlabel,labelY=ylabel,lines=True)
def BackSizer():
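        'widgets for the empirical background function: type, refine flag & coefficients'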
def OnNewType(event):
data[0][0] = bakType.GetValue()
def OnBakRef(event):
data[0][1] = bakRef.GetValue()
def OnBakTerms(event):
data[0][2] = int(bakTerms.GetValue())
M = len(data[0])
N = data[0][2]+3
item = data[0]
if N > M: #add terms
for i in range(M,N):
item.append(0.0)
elif N < M: #delete terms
for i in range(N,M):
del(item[-1])
G2frame.GPXtree.SetItemPyData(BackId,data)
wx.CallLater(100,UpdateBackground,G2frame,data)
def AfterChange(invalid,value,tc):
if invalid: return
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
backSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Background function: '),0,WACV)
bakType = wx.ComboBox(G2frame.dataWindow,value=data[0][0],
choices=Choices,style=wx.CB_READONLY|wx.CB_DROPDOWN)
bakType.Bind(wx.EVT_COMBOBOX, OnNewType)
topSizer.Add(bakType)
topSizer.Add((5,0),0)
bakRef = wx.CheckBox(G2frame.dataWindow,label=' Refine?')
bakRef.SetValue(bool(data[0][1]))
bakRef.Bind(wx.EVT_CHECKBOX, OnBakRef)
topSizer.Add(bakRef,0,WACV)
backSizer.Add(topSizer)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of coeff.: '),0,WACV)
bakTerms = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[0][2]),choices=[str(i+1) for i in range(36)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
bakTerms.Bind(wx.EVT_COMBOBOX,OnBakTerms)
topSizer.Add(bakTerms,0,WACV)
topSizer.Add((5,0),0)
backSizer.Add(topSizer)
backSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Background coefficients:'),0,WACV)
bakSizer = wx.FlexGridSizer(0,5,5,5)
for i,value in enumerate(data[0][3:]):
bakVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[0],i+3,nDig=(10,4),OnLeave=AfterChange)
bakSizer.Add(bakVal,0,WACV)
backSizer.Add(bakSizer)
return backSizer
def DebyeSizer():
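        'widgets for the Debye diffuse scattering terms'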
def OnDebTerms(event):
data[1]['nDebye'] = int(debTerms.GetValue())
M = len(data[1]['debyeTerms'])
N = data[1]['nDebye']
if N > M: #add terms
for i in range(M,N):
data[1]['debyeTerms'].append([1.0,False,1.0,False,0.010,False])
elif N < M: #delete terms
for i in range(N,M):
del(data[1]['debyeTerms'][-1])
if N == 0:
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdateBackground,G2frame,data)
def KeyEditPeakGrid(event):
colList = debyeGrid.GetSelectedCols()
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
debyeGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if debyeTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(debyeGrid.GetNumberRows()): data[1]['debyeTerms'][row][col]=True
elif key == 78: #'N'
for row in range(debyeGrid.GetNumberRows()): data[1]['debyeTerms'][row][col]=False
def OnCellChange(event):
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
debSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Debye scattering: '),0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of terms: '),0,WACV)
debTerms = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[1]['nDebye']),choices=[str(i) for i in range(21)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
debTerms.Bind(wx.EVT_COMBOBOX,OnDebTerms)
topSizer.Add(debTerms,0,WACV)
topSizer.Add((5,0),0)
debSizer.Add(topSizer)
if data[1]['nDebye']:
debSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Debye diffuse terms:'),0,WACV)
rowLabels = []
for i in range(len(data[1]['debyeTerms'])): rowLabels.append(str(i))
colLabels = ['A','refine','R','refine','U','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
debyeTable = G2G.Table(data[1]['debyeTerms'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
debyeGrid = G2G.GSGrid(parent=G2frame.dataWindow)
debyeGrid.SetTable(debyeTable, True)
debyeGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
debyeGrid.Bind(wg.EVT_GRID_CELL_CHANGED,OnCellChange)
debyeGrid.AutoSizeColumns(False)
debSizer.Add(debyeGrid)
return debSizer
def PeaksSizer():
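        'widgets for the peaks included in the background'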
def OnPeaks(event):
data[1]['nPeaks'] = int(peaks.GetValue())
M = len(data[1]['peaksList'])
N = data[1]['nPeaks']
if N > M: #add terms
for i in range(M,N):
data[1]['peaksList'].append([1.0,False,1.0,False,0.10,False,0.10,False])
elif N < M: #delete terms
for i in range(N,M):
del(data[1]['peaksList'][-1])
if N == 0:
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdateBackground,G2frame,data)
def KeyEditPeakGrid(event):
colList = peaksGrid.GetSelectedCols()
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
peaksGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if peaksTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(peaksGrid.GetNumberRows()): data[1]['peaksList'][row][col]=True
elif key == 78: #'N'
for row in range(peaksGrid.GetNumberRows()): data[1]['peaksList'][row][col]=False
def OnCellChange(event):
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
peaksSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Peaks in background: '),0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of peaks: '),0,WACV)
peaks = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[1]['nPeaks']),choices=[str(i) for i in range(30)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
peaks.Bind(wx.EVT_COMBOBOX,OnPeaks)
topSizer.Add(peaks,0,WACV)
topSizer.Add((5,0),0)
peaksSizer.Add(topSizer)
G2frame.dataWindow.currentGrids = []
if data[1]['nPeaks']:
peaksSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Peak list:'),0,WACV)
rowLabels = []
for i in range(len(data[1]['peaksList'])): rowLabels.append(str(i))
colLabels = ['pos','refine','int','refine','sig','refine','gam','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
peaksTable = G2G.Table(data[1]['peaksList'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
peaksGrid = G2G.GSGrid(parent=G2frame.dataWindow)
peaksGrid.SetTable(peaksTable, True)
peaksGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
peaksGrid.Bind(wg.EVT_GRID_CELL_CHANGED,OnCellChange)
peaksGrid.AutoSizeColumns(False)
peaksSizer.Add(peaksGrid)
return peaksSizer
def BackFileSizer():
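        'widgets for selecting a fixed background histogram & its multiplier'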
def OnBackPWDR(event):
data[1]['background PWDR'][0] = back.GetValue()
if data[1]['background PWDR'][0]:
curHist = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data[1]['background PWDR'][0])
if not Id:
G2G.G2MessageBox(G2frame,'Histogram not found -- how did this happen?','Missing histogram')
back.SetValue('')
data[1]['background PWDR'][0] = back.GetValue()
return
bkgHist = G2frame.GPXtree.GetItemPyData(Id)
if len(bkgHist[1][0]) != len(curHist[1][0]):
                    G2G.G2MessageBox(G2frame,'Histograms have different lengths','Mismatched histograms')
back.SetValue('')
data[1]['background PWDR'][0] = back.GetValue()
return
CalcBack()
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def AfterChange(invalid,value,tc):
if invalid: return
CalcBack()
G2plt.PlotPatterns(G2frame,plotType='PWDR')
fileSizer = wx.BoxSizer(wx.VERTICAL)
fileSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Fixed background file:'),0,WACV)
if 'background PWDR' not in data[1]:
data[1]['background PWDR'] = ['',-1.,False]
backSizer = wx.BoxSizer(wx.HORIZONTAL)
Choices = ['',]+G2gd.GetGPXtreeDataNames(G2frame,['PWDR',])
Source = G2frame.GPXtree.GetItemText(G2frame.PatternId)
Choices.pop(Choices.index(Source))
back = wx.ComboBox(parent=G2frame.dataWindow,value=data[1]['background PWDR'][0],choices=Choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
back.Bind(wx.EVT_COMBOBOX,OnBackPWDR)
backSizer.Add(back)
backSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' multiplier'),0,WACV)
backMult = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[1]['background PWDR'],1,nDig=(10,3),OnLeave=AfterChange)
backSizer.Add(backMult,0,WACV)
fileSizer.Add(backSizer)
return fileSizer
def CalcBack(PatternId=G2frame.PatternId):
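        '''Recompute the background curve over the fit limits from the current
        background & instrument parameters, including the fixed background
        histogram contribution when one is selected, and store it in the pattern.
        '''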
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
backData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
dataType = inst['Type'][0]
insDict = {inskey:inst[inskey][1] for inskey in inst}
parmDict = {}
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(data)
parmDict.update(bakDict)
parmDict.update(insDict)
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)
xBeg = np.searchsorted(pwddata[1][0],limits[0])
xFin = np.searchsorted(pwddata[1][0],limits[1])
fixBack = backData[1]['background PWDR']
try: #typically bad grid value or no fixed bkg file
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,fixBack[0])
fixData = G2frame.GPXtree.GetItemPyData(Id)
fixedBkg = {'_fixedVary':False,'_fixedMult':fixBack[1],'_fixedValues':fixData[1][1][xBeg:xFin]}
pwddata[1][4][xBeg:xFin] = G2pwd.getBackground('',parmDict,bakType,dataType,pwddata[1][0][xBeg:xFin],fixedBkg)[0]
except:
pass
# UpdateBackground execution starts here
if len(data) < 2: #add Debye diffuse & peaks scattering here
data.append({'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[]})
if 'nPeaks' not in data[1]:
data[1].update({'nPeaks':0,'peaksList':[]})
G2frame.dataWindow.currentGrids = []
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.BackMenu)
G2frame.Bind(wx.EVT_MENU,OnBackCopy,id=G2G.wxID_BACKCOPY)
G2frame.Bind(wx.EVT_MENU,OnBackFlagCopy,id=G2G.wxID_BACKFLAGCOPY)
G2frame.Bind(wx.EVT_MENU,OnBackSave,id=G2G.wxID_BACKSAVE)
G2frame.Bind(wx.EVT_MENU,OnBackLoad,id=G2G.wxID_BACKLOAD)
G2frame.Bind(wx.EVT_MENU,OnPeaksMove,id=G2G.wxID_BACKPEAKSMOVE)
G2frame.Bind(wx.EVT_MENU,OnMakeRDF,id=G2G.wxID_MAKEBACKRDF)
G2frame.Bind(wx.EVT_MENU,OnBkgFit,id=G2frame.dataWindow.wxID_BackPts['Fit'])
G2frame.Bind(wx.EVT_MENU,OnBkgClear,id=G2frame.dataWindow.wxID_BackPts['Clear'])
BackId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Background')
Choices = ['chebyschev','cosine','Q^2 power series','Q^-2 power series','lin interpolate','inv interpolate','log interpolate']
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(BackSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(DebyeSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(PeaksSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(BackFileSizer())
G2frame.dataWindow.SetDataSize()
################################################################################
##### Limits
################################################################################
def UpdateLimitsGrid(G2frame, data,plottype):
'''respond to selection of PWDR Limits data tree item.
'''
def AfterChange(invalid,value,tc):
if invalid: return
plottype = G2frame.GPXtree.GetItemText(G2frame.PatternId)[:4]
wx.CallAfter(G2plt.PlotPatterns,G2frame,newPlot=False,plotType=plottype) #unfortunately this resets the plot width
def LimitSizer():
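        'widgets to display the original limits and edit the new ones'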
limits = wx.FlexGridSizer(0,3,0,5)
labels = ['Tmin','Tmax']
for i in [0,1]:
limits.Add(wx.StaticText(G2frame.dataWindow,
label=' Original {} {:.4f}'.format(labels[i],data[0][i])),0,WACV)
limits.Add(wx.StaticText(G2frame.dataWindow,label=' New: '),0,WACV)
limits.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[1],i, \
min=data[0][0],max=data[0][1],nDig=(10,4),typeHint=float,OnLeave=AfterChange))
return limits
def ExclSizer():
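        'widgets to edit or delete the excluded regions'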
def OnDelExcl(event):
Obj = event.GetEventObject()
item = Indx[Obj.GetId()]
del(data[item+2])
G2plt.PlotPatterns(G2frame,newPlot=False,plotType=plottype)
wx.CallAfter(UpdateLimitsGrid,G2frame,data,plottype)
Indx = {}
excl = wx.FlexGridSizer(0,3,0,5)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' From: '),0,WACV)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' To: '),0,WACV)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' Delete?: '),0,WACV)
for Id,item in enumerate(data[2:]):
for i in [0,1]:
excl.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,item,i, \
min=data[0][0],max=data[0][1],nDig=(10,4),typeHint=float,OnLeave=AfterChange))
delExcl = wx.CheckBox(G2frame.dataWindow,label='')
Indx[delExcl.GetId()] = Id
delExcl.Bind(wx.EVT_CHECKBOX,OnDelExcl)
excl.Add(delExcl,0,WACV)
return excl
def OnAddExcl(event):
G2frame.ifGetExclude = True
print ('Add excluded region')
def OnLimitCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy limits from\n'+str(hst[5:])+' to...',
'Copy limits', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Limits'),copy.deepcopy(data))
finally:
dlg.Destroy()
def Draw():
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Data used in refinement'),0,WACV)
mainSizer.Add((5,5))
mainSizer.Add(LimitSizer())
if len(data)>2:
mainSizer.Add((0,5),0)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Excluded regions:'),0,WACV)
mainSizer.Add(ExclSizer())
G2frame.dataWindow.SetDataSize()
G2frame.ifGetExclude = False
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.LimitMenu)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Limits')
G2frame.Bind(wx.EVT_MENU,OnLimitCopy,id=G2G.wxID_LIMITCOPY)
G2frame.Bind(wx.EVT_MENU,OnAddExcl,id=G2G.wxID_ADDEXCLREGION)
Draw()
################################################################################
##### Instrument parameters
################################################################################
def UpdateInstrumentGrid(G2frame,data):
'''respond to selection of PWDR/SASD/REFD Instrument Parameters
data tree item.
'''
if 'Bank' not in data: #get it from name; absent for default parms selection
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
if 'Bank' in hst:
bank = int(hst.split('Bank')[1].split('_')[0])
data['Bank'] = [bank,bank,0]
else:
data['Bank'] = [1,1,0]
def keycheck(keys):
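        'return the subset of instrument parameter keys that are edited here'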
good = []
for key in keys:
if key in ['Type','Bank','U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha',
'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','Polariz.',
'Lam','Azimuth','2-theta','fltPath','difC','difA','difB','Zero','Lam1','Lam2']:
good.append(key)
return good
def updateData(inst,ref):
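        '''Push the edited values & refinement flags back into the Instrument
        Parameters entry in the data tree.
        '''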
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[0]
for item in data:
try:
data[item] = [data[item][0],inst[item],ref[item]]
except KeyError:
try:
data[item] = [data[item][0],inst[item]]
except KeyError:
pass #skip 'Polariz.' for N-data
def RefreshInstrumentGrid(event,doAnyway=False):
if doAnyway or event.GetRow() == 1:
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'))
newpeaks = []
for peak in peaks['peaks']:
newpeaks.append(G2mth.setPeakparms(data,Inst2,peak[0],peak[2]))
peaks['peaks'] = newpeaks
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'),peaks)
def OnCalibrate(event):
Pattern = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
xye = ma.array(ma.getdata(Pattern[1]))
cw = np.diff(xye[0])
IndexPeaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Index Peak List'))
if not len(IndexPeaks[0]):
G2frame.ErrorDialog('Can not calibrate','Index Peak List empty')
return
if not np.any(IndexPeaks[1]):
G2frame.ErrorDialog('Can not calibrate','Peak positions not refined')
return False
Ok = False
for peak in IndexPeaks[0]:
if peak[2] and peak[3]:
Ok = True
if not Ok:
G2frame.ErrorDialog('Can not calibrate','Index Peak List not indexed')
return
if G2pwd.DoCalibInst(IndexPeaks,data):
UpdateInstrumentGrid(G2frame,data)
XY = []
Sigs = []
for ip,peak in enumerate(IndexPeaks[0]):
if peak[2] and peak[3]:
binwid = cw[np.searchsorted(xye[0],peak[0])]
XY.append([peak[-1],peak[0],binwid])
Sigs.append(IndexPeaks[1][ip])
if len(XY):
XY = np.array(XY)
G2plt.PlotCalib(G2frame,data,XY,Sigs,newPlot=True)
else:
G2frame.ErrorDialog('Can not calibrate','Nothing selected for refinement')
    def OnLoad(event):
        '''Loads instrument parameters from a GSAS-II .instprm file
        in response to the Instrument Parameters-Operations/Load Profile menu.
        If the .instprm file has multiple banks, each with a header "#Bank n: ...",
        this loads the bank matching the current histogram and rejects nonmatches.
        Note that similar code is found in ReadPowderInstprm (GSASII.py).
        '''
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[0]
bank = data['Bank'][0]
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = []
newVals = []
Found = False
while S:
if S[0] == '#':
if Found:
break
if 'Bank' in S:
if bank == int(S.split(':')[0].split()[1]):
S = File.readline()
continue
else:
S = File.readline()
while S and '#Bank' not in S:
S = File.readline()
continue
else: #a non #Bank file
S = File.readline()
continue
Found = True
[item,val] = S[:-1].split(':')
newItems.append(item)
try:
newVals.append(float(val))
except ValueError:
newVals.append(val)
S = File.readline()
File.close()
if Found:
Inst,Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Instrument Parameters'))
if 'Bank' not in Inst: #patch for old .instprm files - may cause faults for TOF data
Inst['Bank'] = [1,1,0]
data = G2fil.makeInstDict(newItems,newVals,len(newVals)*[False,])
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Instrument Parameters'),[data,Inst2])
RefreshInstrumentGrid(event,doAnyway=True) #to get peaks updated
else:
G2frame.ErrorDialog('No match','Bank %d not in %s'%(bank,filename),G2frame)
UpdateInstrumentGrid(G2frame,data)
G2plt.PlotPeakWidths(G2frame)
finally:
dlg.Destroy()
    def OnSave(event):
        '''Respond to the Instrument Parameters Operations/Save Profile menu
        item: writes the current parameters to a .instprm file.
        It does not write "Bank n:" on the # line & thus can be used at any time
        without a clash of bank numbers.
        '''
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .instprm
filename = os.path.splitext(filename)[0]+'.instprm'
File = open(filename,'w')
File.write("#GSAS-II instrument parameter file; do not add/delete items!\n")
for item in data:
File.write(item+':'+str(data[item][1])+'\n')
File.close()
print ('Instrument parameters saved to: '+filename)
finally:
dlg.Destroy()
    def OnSaveAll(event):
        '''Respond to the Instrument Parameters Operations/Save all Profile menu
        item: writes instrument parameters from the selected PWDR entries
        (possibly several banks) into a single .instprm file.
        Each block starts with "#Bank n: GSAS-II instrument..." where n is the bank number.
        '''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
histList.insert(0,hst)
saveList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Save instrument parameters from',
'Save instrument parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
saveList.append(histList[i])
finally:
dlg.Destroy()
pth = G2G.GetExportPath(G2frame)
        dlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .instprm
filename = os.path.splitext(filename)[0]+'.instprm'
File = open(filename,'w')
for hist in saveList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in inst: #patch
bank = 1
if 'Bank' in hist:
bank = int(hist.split('Bank')[1])
inst['Bank'] = [bank,bank,0]
bank = inst['Bank'][0]
File.write("#Bank %d: GSAS-II instrument parameter file; do not add/delete items!\n"%(bank))
for item in inst:
File.write(item+':'+str(inst[item][1])+'\n')
File.close()
finally:
dlg.Destroy()
def OnReset(event):
insVal.update(insDef)
updateData(insVal,insRef)
RefreshInstrumentGrid(event,doAnyway=True) #to get peaks updated
UpdateInstrumentGrid(G2frame,data)
G2plt.PlotPeakWidths(G2frame)
def OnInstFlagCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
keys = list(data.keys())
try:
keys.remove('Source')
except ValueError:
pass
flags = dict(zip(keys,[data[key][2] for key in keys]))
instType = data['Type'][0]
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy inst ref. flags from\n'+hst[5:],
'Copy refinement flags', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
instData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in instData:
instData['Bank'] = [1,1,0]
if len(data) == len(instData) and instType == instData['Type'][0]: #don't mix data types or lam & lam1/lam2 parms!
for item in instData:
if item not in ['Source',]:
instData[item][2] = copy.copy(flags[item])
else:
print (item+' not copied - instrument parameters not commensurate')
def OnInstCopy(event):
#need fix for dictionary
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
copyData = copy.deepcopy(data)
del copyData['Azimuth'] #not to be copied!
instType = data['Type'][0]
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy inst params from\n'+hst,
'Copy parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
instData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in instData:
instData['Bank'] = [1,1,0]
if len(data) == len(instData) and instType == instData['Type'][0]: #don't mix data types or lam & lam1/lam2 parms!
instData.update(copyData)
else:
print (item+' not copied - instrument parameters not commensurate')
def AfterChange(invalid,value,tc):
if invalid: return
updateData(insVal,insRef)
def NewProfile(invalid,value,tc):
if invalid: return
updateData(insVal,insRef)
G2plt.PlotPeakWidths(G2frame)
def OnItemRef(event):
Obj = event.GetEventObject()
item = RefObj[Obj.GetId()]
insRef[item] = Obj.GetValue()
updateData(insVal,insRef)
def OnCopy1Val(event):
'''Select one instrument parameter value to edit and copy to many histograms
optionally allow values to be edited in a table
'''
updateData(insVal,insRef)
G2G.SelectEdit1Var(G2frame,data,labelLst,elemKeysLst,dspLst,refFlgElem)
insVal.update({key:data[key][1] for key in instkeys})
insRef.update({key:data[key][2] for key in instkeys})
wx.CallAfter(MakeParameterWindow)
def lblWdef(lbl,dec,val):
'Label parameter showing the default value'
fmt = "%15."+str(dec)+"f"
return " " + lbl + " (" + (fmt % val).strip() + "): "
def RefineBox(item):
'Define a refine checkbox with binding'
#wid = wx.CheckBox(G2frame.dataWindow,label=' Refine? ')
wid = wx.CheckBox(G2frame.dataWindow,label='')
wid.SetValue(bool(insRef[item]))
RefObj[wid.GetId()] = item
wid.Bind(wx.EVT_CHECKBOX, OnItemRef)
return wid
def OnLamPick(event):
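        '''Called when a new X-ray source is picked from the pulldown: reset
        Lam1/Lam2 (powder) or Lam (single crystal) to the tabulated wavelengths
        and redraw the parameter window.
        '''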
data['Source'][1] = lamType = event.GetEventObject().GetValue()
if 'P' in insVal['Type']:
insVal['Lam1'] = waves[lamType][0]
insVal['Lam2'] = waves[lamType][1]
elif 'S' in insVal['Type'] and 'synch' not in lamType:
insVal['Lam'] = meanwaves[lamType]
updateData(insVal,insRef)
i,j= wx.__version__.split('.')[0:2]
if int(i)+int(j)/10. > 2.8:
pass # repaint crashes wxpython 2.9
wx.CallLater(100, MakeParameterWindow)
#wx.CallAfter(MakeParameterWindow)
else:
wx.CallAfter(MakeParameterWindow)
def MakeParameterWindow():
'Displays the Instrument parameters in the dataWindow frame'
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
instSizer = wx.FlexGridSizer(0,3,5,5)
subSizer = wx.BoxSizer(wx.HORIZONTAL)
        if insVal['Bank'] is None: #patch
insVal['Bank'] = 1
text = ' Histogram Type: %s Bank: %d'%(insVal['Type'],insVal['Bank'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,text),0,WACV)
mainSizer.Add(subSizer)
labelLst[:],elemKeysLst[:],dspLst[:],refFlgElem[:] = [],[],[],[]
if 'P' in insVal['Type']: #powder data
[instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt),0,WACV) for txt in [' Name (default)',' Value','Refine?']]
if 'C' in insVal['Type']: #constant wavelength
labelLst.append('Azimuth angle')
elemKeysLst.append(['Azimuth',1])
dspLst.append([10,2])
refFlgElem.append(None)
if 'Lam1' in insVal:
subSizer = wx.BoxSizer(wx.HORIZONTAL)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: '),0,WACV)
txt = '%7.2f'%(insVal['Azimuth'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Ka1/Ka2: '),0,WACV)
txt = u' %8.6f/%8.6f\xc5'%(insVal['Lam1'],insVal['Lam2'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
waveSizer = wx.BoxSizer(wx.HORIZONTAL)
waveSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Source type: '),0,WACV)
# PATCH?: for now at least, Source is not saved anywhere before here
if 'Source' not in data: data['Source'] = ['CuKa','?']
choice = ['TiKa','CrKa','FeKa','CoKa','CuKa','MoKa','AgKa']
lamPick = wx.ComboBox(G2frame.dataWindow,value=data['Source'][1],choices=choice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
lamPick.Bind(wx.EVT_COMBOBOX, OnLamPick)
waveSizer.Add(lamPick,0)
subSizer.Add(waveSizer,0)
mainSizer.Add(subSizer)
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef('I(L2)/I(L1)',4,insDef['I(L2)/I(L1)'])),0,WACV)
key = 'I(L2)/I(L1)'
labelLst.append(key)
elemKeysLst.append([key,1])
dspLst.append([10,4])
refFlgElem.append([key,2])
ratVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(ratVal,0)
instSizer.Add(RefineBox(key),0,WACV)
else: # single wavelength
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: '),0,WACV)
txt = '%7.2f'%(insVal['Azimuth'])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
instSizer.Add((5,5),0)
key = 'Lam'
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef[key])),0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,6),typeHint=float,OnLeave=AfterChange)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append([key,1])
dspLst.append([10,6])
instSizer.Add(waveVal,0,WACV)
refFlgElem.append([key,2])
instSizer.Add(RefineBox(key),0,WACV)
for item in ['Zero','Polariz.']:
if item in insDef:
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append([10,4])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,4,insDef[item])),0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
refFlgElem.append([item,2])
instSizer.Add(RefineBox(item),0,WACV)
for item in ['U','V','W','X','Y','Z','SH/L']:
nDig = (10,3)
if item == 'SH/L':
nDig = (10,5)
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append(nDig)
refFlgElem.append([item,2])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,nDig[1],insDef[item])),0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=nDig,typeHint=float,OnLeave=NewProfile)
instSizer.Add(itemVal,0,WACV)
instSizer.Add(RefineBox(item),0,WACV)
elif 'T' in insVal['Type']: #time of flight (neutrons)
subSizer = wx.BoxSizer(wx.HORIZONTAL)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Flight path: '),0,WACV)
txt = '%8.3f'%(insVal['fltPath'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('flight path')
elemKeysLst.append(['fltPath',1])
dspLst.append([10,2])
refFlgElem.append(None)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' 2-theta: '),0,WACV)
txt = '%7.2f'%(insVal['2-theta'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('2-theta')
elemKeysLst.append(['2-theta',1])
dspLst.append([10,2])
refFlgElem.append(None)
if 'Pdabc' in Inst2:
Items = ['sig-0','sig-1','sig-2','sig-q','X','Y','Z']
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' difC: '),0,WACV)
txt = '%8.2f'%(insVal['difC'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('difC')
elemKeysLst.append(['difC',1])
dspLst.append([10,2])
refFlgElem.append(None)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' alpha, beta: fixed by table'),0,WACV)
else:
Items = ['difC','difA','difB','Zero','alpha','beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','X','Y','Z']
mainSizer.Add((5,5),0)
mainSizer.Add(subSizer)
mainSizer.Add((5,5),0)
for item in Items:
if item == '':
instSizer.Add((5,5),0)
instSizer.Add((5,5),0)
instSizer.Add((5,5),0)
continue
nDig = (10,3)
if 'beta' in item:
nDig = (12,6)
instSizer.Add(
wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,nDig[1],insDef[item])),
0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=nDig,typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append(nDig)
refFlgElem.append([item,2])
instSizer.Add(RefineBox(item),0,WACV)
elif 'PKS' in insVal['Type']: #peak positions only
key = 'Lam'
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef[key])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,6),typeHint=float,OnLeave=AfterChange)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append([key,1])
dspLst.append([10,6])
instSizer.Add(waveVal,0,WACV)
refFlgElem.append([key,2])
for item in ['Zero',]:
if item in insDef:
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append([10,4])
instSizer.Add(
wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,4,insDef[item])),
0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
refFlgElem.append([item,2])
elif 'S' in insVal['Type']: #single crystal data
if 'C' in insVal['Type']: #constant wavelength
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef['Lam'])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,'Lam',nDig=(10,6),typeHint=float,OnLeave=AfterChange)
instSizer.Add(waveVal,0,WACV)
labelLst.append(u'Lam (\xc5)')
waveSizer = wx.BoxSizer(wx.HORIZONTAL)
waveSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Source type: '),0,WACV)
# PATCH?: for now at least, Source is not saved anywhere before here
if 'Source' not in data: data['Source'] = ['CuKa','?']
choice = ['synchrotron','TiKa','CrKa','FeKa','CoKa','CuKa','MoKa','AgKa']
lamPick = wx.ComboBox(G2frame.dataWindow,value=data['Source'][1],choices=choice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
lamPick.Bind(wx.EVT_COMBOBOX, OnLamPick)
waveSizer.Add(lamPick,0,WACV)
instSizer.Add(waveSizer,0,WACV)
elemKeysLst.append(['Lam',1])
dspLst.append([10,6])
refFlgElem.append(None)
else: #time of flight (neutrons)
pass #for now
elif insVal['Type'][0] in ['L','R',]:
if 'C' in insVal['Type']:
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef['Lam'])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,'Lam',nDig=(10,6),typeHint=float,OnLeave=AfterChange)
instSizer.Add(waveVal,0,WACV)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append(['Lam',1])
dspLst.append([10,6])
refFlgElem.append(None)
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: %7.2f'%(insVal['Azimuth'])),0,WACV)
labelLst.append('Azimuth angle')
elemKeysLst.append(['Azimuth',1])
dspLst.append([10,2])
refFlgElem.append(None)
else: #time of flight (neutrons)
pass #for now
mainSizer.Add(instSizer,0)
G2frame.dataWindow.SetDataSize()
# end of MakeParameterWindow
# beginning of UpdateInstrumentGrid code
#patch: make sure all parameter items are lists
patched = 0
for key in data:
if type(data[key]) is tuple:
data[key] = list(data[key])
patched += 1
if patched: print (patched,' instrument parameters changed from tuples')
if 'Z' not in data:
data['Z'] = [0.0,0.0,False]
#end of patch
labelLst,elemKeysLst,dspLst,refFlgElem = [],[],[],[]
instkeys = keycheck(data.keys())
if 'P' in data['Type'][0]: #powder data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = dict(zip(instkeys,[data[key][2] for key in instkeys]))
if 'NC' in data['Type'][0]:
del(insDef['Polariz.'])
del(insVal['Polariz.'])
del(insRef['Polariz.'])
elif 'S' in data['Type'][0]: #single crystal data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
elif 'L' in data['Type'][0]: #low angle data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
elif 'R' in data['Type'][0]: #low angle data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
RefObj = {}
#These from Intl. Tables C, Table 4.2.2.1, p. 177-179
waves = {'CuKa':[1.54051,1.54433],'TiKa':[2.74841,2.75207],'CrKa':[2.28962,2.29351],
'FeKa':[1.93597,1.93991],'CoKa':[1.78892,1.79278],'MoKa':[0.70926,0.713543],
'AgKa':[0.559363,0.563775]}
# meanwaves computed as (2*Ka1+Ka2)/3
meanwaves = {'CuKa':1.54178,'TiKa':2.74963,'CrKa':2.29092,'FeKa':1.93728,
'CoKa':1.79021,'MoKa':0.71069,'AgKa':0.56083}
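# e.g. for CuKa: (2*1.54051 + 1.54433)/3 = 1.54178 A, matching the entry above
# (Ka1 is weighted twice because it is roughly twice as intense as Ka2)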
Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[1]
G2gd.SetDataMenuBar(G2frame)
#patch
if 'P' in insVal['Type']: #powder data
if 'C' in insVal['Type']: #constant wavelength
if 'Azimuth' not in insVal:
insVal['Azimuth'] = 0.0
insDef['Azimuth'] = 0.0
insRef['Azimuth'] = False
# if 'T' in insVal['Type']:
# if 'difB' not in insVal:
# insVal['difB'] = 0.0
# insDef['difB'] = 0.0
# insRef['difB'] = False
#end of patch
if 'P' in insVal['Type']: #powder data menu commands
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.InstMenu)
G2frame.GetStatusBar().SetStatusText('NB: Azimuth is used for polarization only',1)
G2frame.Bind(wx.EVT_MENU,OnCalibrate,id=G2G.wxID_INSTCALIB)
G2frame.Bind(wx.EVT_MENU,OnLoad,id=G2G.wxID_INSTLOAD)
G2frame.Bind(wx.EVT_MENU,OnSave,id=G2G.wxID_INSTSAVE)
G2frame.Bind(wx.EVT_MENU,OnSaveAll,id=G2G.wxID_INSTSAVEALL)
G2frame.Bind(wx.EVT_MENU,OnReset,id=G2G.wxID_INSTPRMRESET)
G2frame.Bind(wx.EVT_MENU,OnInstCopy,id=G2G.wxID_INSTCOPY)
G2frame.Bind(wx.EVT_MENU,OnInstFlagCopy,id=G2G.wxID_INSTFLAGCOPY)
G2frame.Bind(wx.EVT_MENU,OnCopy1Val,id=G2G.wxID_INST1VAL)
elif 'L' in insVal['Type'] or 'R' in insVal['Type']: #SASD data menu commands
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.SASDInstMenu)
G2frame.Bind(wx.EVT_MENU,OnInstCopy,id=G2G.wxID_SASDINSTCOPY)
MakeParameterWindow()
################################################################################
##### Sample parameters
################################################################################
def UpdateSampleGrid(G2frame,data):
'''respond to selection of PWDR/SASD Sample Parameters
data tree item.
'''
def OnSampleSave(event):
'''Respond to the Sample Parameters Operations/Save menu
item: writes current parameters to a .samprm file
'''
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II sample parameters file', pth, '',
'sample parameter files (*.samprm)|*.samprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .samprm
filename = os.path.splitext(filename)[0]+'.samprm'
File = open(filename,'w')
File.write("#GSAS-II sample parameter file\n")
File.write("'Type':'"+str(data['Type'])+"'\n")
File.write("'Gonio. radius':"+str(data['Gonio. radius'])+"\n")
if data.get('InstrName'):
File.write("'InstrName':'"+str(data['InstrName'])+"'\n")
File.close()
finally:
dlg.Destroy()
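# The file written above has this simple key:value layout (values are illustrative):
#   #GSAS-II sample parameter file
#   'Type':'Debye-Scherrer'
#   'Gonio. radius':200.0
#   'InstrName':'my diffractometer'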
def OnSampleLoad(event):
'''Loads sample parameters from a G2 .samprm file
in response to the Sample Parameters-Operations/Load menu
Note that similar code is found in ReadPowderInstprm (GSASII.py)
'''
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II sample parameters file', pth, '',
'sample parameter files (*.samprm)|*.samprm',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = {}
while S:
if S[0] == '#':
S = File.readline()
continue
[item,val] = S[:-1].split(':')
newItems[item.strip("'")] = eval(val)
S = File.readline()
File.close()
data.update(newItems)
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Sample Parameters'),data)
UpdateSampleGrid(G2frame,data)
finally:
dlg.Destroy()
def OnAllSampleLoad(event):
filename = ''
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose multihistogram metadata text file', pth, '',
'metadata file (*.*)|*.*',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = []
itemNames = []
Comments = []
while S:
if S[0] == '#':
Comments.append(S)
S = File.readline()
continue
S = S.replace(',',' ').replace('\t',' ')
Stuff = S[:-1].split()
itemNames.append(Stuff[0])
newItems.append(Stuff[1:])
S = File.readline()
File.close()
finally:
dlg.Destroy()
if not filename:
G2frame.ErrorDialog('Nothing to do','No file selected')
return
dataDict = dict(zip(itemNames,newItems))
ifany = False
Controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls'))
Names = [' ','Phi','Chi','Omega','Time','Temperature','Pressure']
freeNames = {}
for name in ['FreePrm1','FreePrm2','FreePrm3']:
freeNames[Controls[name]] = name
Names.append(Controls[name])
#import imp
#imp.reload(G2G)
dlg = G2G.G2ColumnIDDialog( G2frame,' Choose multihistogram metadata columns:',
'Select columns',Comments,Names,np.array(newItems).T)
try:
if dlg.ShowModal() == wx.ID_OK:
colNames,newData = dlg.GetSelection()
dataDict = dict(zip(itemNames,newData.T))
for item in colNames:
if item != ' ':
ifany = True
finally:
dlg.Destroy()
if not ifany:
G2frame.ErrorDialog('Nothing to do','No columns identified')
return
histList = [G2frame.GPXtree.GetItemText(G2frame.PatternId),]
histList += GetHistsLikeSelected(G2frame)
colIds = {}
for i,name in enumerate(colNames):
if name != ' ':
colIds[name] = i
for hist in histList:
name = hist.split()[1] #this is file name
newItems = {}
for item in colIds:
key = freeNames.get(item,item)
newItems[key] = float(dataDict[name][colIds[item]])
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(newItems)
UpdateSampleGrid(G2frame,data)
def OnSetScale(event):
if histName[:4] in ['REFD','PWDR']:
Scale = data['Scale'][0]
dlg = wx.MessageDialog(G2frame,'Rescale data by %.2f?'%(Scale),'Rescale data',wx.OK|wx.CANCEL)
try:
if dlg.ShowModal() == wx.ID_OK:
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,histName)
y,w = G2frame.GPXtree.GetItemPyData(pId)[1][1:3]
y *= Scale
w /= Scale**2
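# weights are 1/sigma**2, so scaling y by Scale scales sigma by Scale and hence w by 1/Scale**2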
data['Scale'][0] = 1.0
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=histName[:4],newPlot=True)
UpdateSampleGrid(G2frame,data)
return
#SASD rescaling
histList = []
item, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while item:
name = G2frame.GPXtree.GetItemText(item)
if 'SASD' in name and name != histName:
histList.append(name)
item, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
if not len(histList): #nothing to copy to!
return
dlg = wx.SingleChoiceDialog(G2frame,'Select reference histogram for scaling',
'Reference histogram',histList)
try:
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
refHist = histList[sel]
finally:
dlg.Destroy()
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))
Profile = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[1]
Data = [Profile,Limits,data]
refId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,refHist)
refSample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,refId, 'Sample Parameters'))
refLimits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,refId, 'Limits'))
refProfile = G2frame.GPXtree.GetItemPyData(refId)[1]
refData = [refProfile,refLimits,refSample]
G2sasd.SetScale(Data,refData)
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=True)
UpdateSampleGrid(G2frame,data)
def OnRescaleAll(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
x0,y0,w0 = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[1][:3]
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
od = {'label_1':'Scaling range min','value_1':0.0,'label_2':'Scaling range max','value_2':10.}
dlg = G2G.G2MultiChoiceDialog(G2frame,
'Do scaling from\n'+str(hst[5:])+' to...','Rescale histograms', histList,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
Xmin = od['value_1']
Xmax = od['value_2']
iBeg = np.searchsorted(x0,Xmin)
iFin = np.searchsorted(x0,Xmax)
if iBeg > iFin:
wx.MessageBox('Wrong order for Xmin, Xmax','Error',style=wx.ICON_EXCLAMATION)
else:
sum0 = np.sum(y0[iBeg:iFin])
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
xi,yi,wi = G2frame.GPXtree.GetItemPyData(Id)[1][:3]
sumi = np.sum(yi[iBeg:iFin])
if sumi:
Scale = sum0/sumi
yi *= Scale
wi /= Scale**2
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=histName[:4],newPlot=True)
def OnSampleCopy(event):
histType,copyNames = SetCopyNames(histName,data['Type'],
addNames = ['Omega','Chi','Phi','Gonio. radius','InstrName'])
copyDict = {}
for parm in copyNames:
copyDict[parm] = data[parm]
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample params from\n'+str(hst[5:])+' to...',
'Copy sample parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(copy.deepcopy(copyDict))
finally:
dlg.Destroy()
def OnSampleCopySelected(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
Controls = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
# Assemble a list of item labels
TextTable = {key:label for key,label,dig in
SetupSampleLabels(hst,data.get('Type'),Inst['Type'][0])}
# get flexible labels
TextTable.update({key:Controls[key] for key in Controls if key.startswith('FreePrm')})
# add a few extra
TextTable.update({'Type':'Diffractometer type','InstrName':'Instrument Name',})
# Assemble a list of dict entries that would be labeled in the Sample
# params data window (drop ranId and items not used).
keyList = [i for i in data.keys() if i in TextTable]
keyText = [TextTable[i] for i in keyList]
# sort both lists together, ordered by keyText
keyText, keyList = zip(*sorted(list(zip(keyText,keyList)))) # sort lists
selectedKeys = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Select which sample parameters\nto copy',
'Select sample parameters', keyText)
try:
if dlg.ShowModal() == wx.ID_OK:
selectedKeys = [keyList[i] for i in dlg.GetSelections()]
finally:
dlg.Destroy()
if not selectedKeys: return # nothing to copy
copyDict = {}
for parm in selectedKeys:
copyDict[parm] = data[parm]
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample params from\n'+str(hst[5:])+' to...',
'Copy sample parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(copy.deepcopy(copyDict))
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=hst[:4],newPlot=False)
def OnSampleFlagCopy(event):
histType,copyNames = SetCopyNames(histName,data['Type'])
flagDict = {}
for parm in copyNames:
flagDict[parm] = data[parm][1]
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample ref. flags from\n'+str(hst[5:])+' to...',
'Copy sample flags', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
for name in copyNames:
sampleData[name][1] = copy.copy(flagDict[name])
finally:
dlg.Destroy()
def OnHistoChange():
'''Called when the histogram type is changed to refresh the window
'''
#wx.CallAfter(UpdateSampleGrid,G2frame,data)
wx.CallLater(100,UpdateSampleGrid,G2frame,data)
def SetNameVal():
inst = instNameVal.GetValue()
data['InstrName'] = inst.strip()
def OnNameVal(event):
event.Skip()
wx.CallAfter(SetNameVal)
def AfterChange(invalid,value,tc):
if invalid:
return
if tc.key == 0 and 'SASD' in histName: #a kluge for Scale!
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=True)
elif tc.key == 'Thick':
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnMaterial(event):
Obj = event.GetEventObject()
Id = Info[Obj.GetId()]
data['Materials'][Id]['Name'] = Obj.GetValue()
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnVolFrac(invalid,value,tc):
Id = Info[tc.GetId()]
data['Materials'][not Id][key] = 1.-value
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnCopy1Val(event):
'Select one value to copy to many histograms and optionally allow values to be edited in a table'
G2G.SelectEdit1Var(G2frame,data,labelLst,elemKeysLst,dspLst,refFlgElem)
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def SearchAllComments(value,tc,*args,**kwargs):
'''Called when the label for a FreePrm is changed: the comments for all PWDR
histograms are searched for a "label=value" pair that matches the label (case
is ignored) and the values are then set to this value, if it can be converted
to a float.
'''
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while Id:
name = G2frame.GPXtree.GetItemText(Id)
if 'PWDR' in name:
Comments = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Comments'))
Sample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'Sample Parameters'))
for i,item in enumerate(Comments):
itemSp = item.split('=')
if value.lower() == itemSp[0].lower():
try:
Sample[tc.key] = float(itemSp[1])
except:
print('"{}" has an invalid value in Comments from {}'
.format(item.strip(),name))
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
wx.CallLater(100,UpdateSampleGrid,G2frame,data)
######## DEBUG #######################################################
#import GSASIIpwdGUI
#reload(GSASIIpwdGUI)
#reload(G2gd)
######################################################################
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
histName = G2frame.GPXtree.GetItemText(G2frame.PatternId)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.SampleMenu)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Sample Parameters')
G2frame.Bind(wx.EVT_MENU, OnSetScale, id=G2G.wxID_SETSCALE)
G2frame.Bind(wx.EVT_MENU, OnSampleCopy, id=G2G.wxID_SAMPLECOPY)
G2frame.Bind(wx.EVT_MENU, OnSampleCopySelected, id=G2G.wxID_SAMPLECOPYSOME)
G2frame.Bind(wx.EVT_MENU, OnSampleFlagCopy, id=G2G.wxID_SAMPLEFLAGCOPY)
G2frame.Bind(wx.EVT_MENU, OnSampleSave, id=G2G.wxID_SAMPLESAVE)
G2frame.Bind(wx.EVT_MENU, OnSampleLoad, id=G2G.wxID_SAMPLELOAD)
G2frame.Bind(wx.EVT_MENU, OnCopy1Val, id=G2G.wxID_SAMPLE1VAL)
G2frame.Bind(wx.EVT_MENU, OnAllSampleLoad, id=G2G.wxID_ALLSAMPLELOAD)
G2frame.Bind(wx.EVT_MENU, OnRescaleAll, id=G2G.wxID_RESCALEALL)
if histName[:4] in ['SASD','REFD','PWDR']:
G2frame.dataWindow.SetScale.Enable(True)
Controls = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
#patch
if 'ranId' not in data:
data['ranId'] = ran.randint(0,sys.maxsize)
if not 'Gonio. radius' in data:
data['Gonio. radius'] = 200.0
if not 'Omega' in data:
data.update({'Omega':0.0,'Chi':0.0,'Phi':0.0})
if 'Azimuth' not in data:
data['Azimuth'] = 0.0
if type(data['Temperature']) is int:
data['Temperature'] = float(data['Temperature'])
if 'Time' not in data:
data['Time'] = 0.0
if 'FreePrm1' not in Controls:
Controls['FreePrm1'] = 'Sample humidity (%)'
if 'FreePrm2' not in Controls:
Controls['FreePrm2'] = 'Sample voltage (V)'
if 'FreePrm3' not in Controls:
Controls['FreePrm3'] = 'Applied load (MN)'
if 'FreePrm1' not in data:
data['FreePrm1'] = 0.
if 'FreePrm2' not in data:
data['FreePrm2'] = 0.
if 'FreePrm3' not in data:
data['FreePrm3'] = 0.
if 'SurfRoughA' not in data and 'PWDR' in histName:
data['SurfRoughA'] = [0.,False]
data['SurfRoughB'] = [0.,False]
if 'Trans' not in data and 'SASD' in histName:
data['Trans'] = 1.0
if 'SlitLen' not in data and 'SASD' in histName:
data['SlitLen'] = 0.0
if 'Shift' not in data:
data['Shift'] = [0.0,False]
if 'Transparency' not in data:
data['Transparency'] = [0.0,False]
data['InstrName'] = data.get('InstrName','')
#patch end
labelLst,elemKeysLst,dspLst,refFlgElem = [],[],[],[]
parms = SetupSampleLabels(histName,data.get('Type'),Inst['Type'][0])
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add((-1,-1),1,WACV|wx.EXPAND)
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Sample and Experimental Parameters'))
# add help button to bring up help web page
helpkey = G2frame.dataWindow.helpKey
topSizer.Add((30,-1))
topSizer.Add(G2G.HelpButton(G2frame.dataWindow,helpIndex=helpkey))
topSizer.Add((-1,-1),1,WACV|wx.EXPAND)
mainSizer.Add(topSizer,0,WACV|wx.EXPAND)
nameSizer = wx.BoxSizer(wx.HORIZONTAL)
nameSizer.Add(wx.StaticText(G2frame.dataWindow,wx.ID_ANY,' Instrument Name '),0,WACV)
nameSizer.Add((-1,-1),1,WACV)
instNameVal = wx.TextCtrl(G2frame.dataWindow,wx.ID_ANY,data['InstrName'],
size=(200,-1),style=wx.TE_PROCESS_ENTER)
nameSizer.Add(instNameVal)
instNameVal.Bind(wx.EVT_CHAR,OnNameVal)
mainSizer.Add(nameSizer,0,WACV)
mainSizer.Add((5,5),0)
labelLst.append('Instrument Name')
elemKeysLst.append(['InstrName'])
dspLst.append(None)
refFlgElem.append(None)
if 'PWDR' in histName:
nameSizer = wx.BoxSizer(wx.HORIZONTAL)
nameSizer.Add(wx.StaticText(G2frame.dataWindow,wx.ID_ANY,' Diffractometer type: '),
0,WACV)
if 'T' in Inst['Type'][0]:
choices = ['Debye-Scherrer',]
else:
choices = ['Debye-Scherrer','Bragg-Brentano',]
histoType = G2G.G2ChoiceButton(G2frame.dataWindow,choices,
strLoc=data,strKey='Type',
onChoice=OnHistoChange)
nameSizer.Add(histoType)
mainSizer.Add(nameSizer,0,WACV)
mainSizer.Add((5,5),0)
parmSizer = wx.FlexGridSizer(0,2,5,0)
for key,lbl,nDig in parms:
labelLst.append(lbl.strip().strip(':').strip())
dspLst.append(nDig)
if 'list' in str(type(data[key])):
parmRef = G2G.G2CheckBox(G2frame.dataWindow,' '+lbl,data[key],1)
parmSizer.Add(parmRef,0,WACV|wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[key],0,
nDig=nDig,typeHint=float,OnLeave=AfterChange)
elemKeysLst.append([key,0])
refFlgElem.append([key,1])
else:
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' '+lbl),
0,WACV|wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,key,
typeHint=float,OnLeave=AfterChange)
elemKeysLst.append([key])
refFlgElem.append(None)
parmSizer.Add(parmVal,0,WACV)
Info = {}
for key in ('FreePrm1','FreePrm2','FreePrm3'):
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,Controls,key,typeHint=str,
notBlank=False,OnLeave=SearchAllComments)
parmSizer.Add(parmVal,1,wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,key,typeHint=float)
parmSizer.Add(parmVal,0,WACV)
labelLst.append(Controls[key])
dspLst.append(None)
elemKeysLst.append([key])
refFlgElem.append(None)
mainSizer.Add(parmSizer,0)
mainSizer.Add((0,5),0)
if histName[:4] in ['SASD',]:
rho = [0.,0.]
anomrho = [0.,0.]
mu = 0.
subSizer = wx.FlexGridSizer(0,4,5,5)
Substances = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Substances'))
for Id,item in enumerate(data['Materials']):
subSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Material: '),0,WACV)
matsel = wx.ComboBox(G2frame.dataWindow,value=item['Name'],choices=list(Substances['Substances'].keys()),
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Info[matsel.GetId()] = Id
matsel.Bind(wx.EVT_COMBOBOX,OnMaterial)
subSizer.Add(matsel,0,WACV)
subSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Volume fraction: '),0,WACV)
volfrac = G2G.ValidatedTxtCtrl(G2frame.dataWindow,item,'VolFrac',
min=0.,max=1.,nDig=(10,3),typeHint=float,OnLeave=OnVolFrac)
subSizer.Add(volfrac,0,WACV)
try:
material = Substances['Substances'][item['Name']]
except KeyError:
print('ERROR - missing substance: '+item['Name'])
material = Substances['Substances']['vacuum']
mu += item['VolFrac']*material.get('XAbsorption',0.)
rho[Id] = material['Scatt density']
anomrho[Id] = material.get('XAnom density',0.)
data['Contrast'] = [(rho[1]-rho[0])**2,(anomrho[1]-anomrho[0])**2]
mainSizer.Add(subSizer,0)
conSizer = wx.BoxSizer(wx.HORIZONTAL)
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Contrast: %10.2f '%(data['Contrast'][0])),0,WACV)
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Anom. Contrast: %10.2f '%(data['Contrast'][1])),0,WACV)
mut = mu*data['Thick']
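# Beer-Lambert attenuation: transmission through thickness t is exp(-mu*t), computed below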
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Transmission (calc): %10.3f '%(np.exp(-mut))),0,WACV)
mainSizer.Add(conSizer,0)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Indexing Peaks
################################################################################
def UpdateIndexPeaksGrid(G2frame, data):
'''respond to selection of PWDR Index Peak List data
tree item.
'''
bravaisSymb = ['Fm3m','Im3m','Pm3m','R3-H','P6/mmm','I4/mmm',
'P4/mmm','Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm','C2/m','P2/m','C1','P1']
IndexId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Index Peak List')
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
limitId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits')
Limits = G2frame.GPXtree.GetItemPyData(limitId)
def RefreshIndexPeaksGrid(event):
r,c = event.GetRow(),event.GetCol()
peaks = G2frame.IndexPeaksTable.GetData()
if c == 2:
peaks[r][c] = not peaks[r][c]
G2frame.IndexPeaksTable.SetData(peaks)
G2frame.indxPeaks.ForceRefresh()
if 'PKS' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2plt.PlotPowderLines(G2frame)
else:
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnReload(event):
peaks = []
sigs = []
Peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'))
for ip,peak in enumerate(Peaks['peaks']):
dsp = G2lat.Pos2dsp(Inst,peak[0])
peaks.append([peak[0],peak[2],True,False,0,0,0,dsp,0.0]) #SS?
try:
sig = Peaks['sigDict']['pos'+str(ip)]
except KeyError:
sig = 0.
sigs.append(sig)
data = [peaks,sigs]
G2frame.GPXtree.SetItemPyData(IndexId,data)
UpdateIndexPeaksGrid(G2frame,data)
def OnSave(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose Index peaks csv file', pth, '',
'indexing peaks file (*.csv)|*.csv',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
filename = os.path.splitext(filename)[0]+'.csv'
File = open(filename,'w')
names = 'h,k,l,position,intensity,d-Obs,d-calc\n'
File.write(names)
fmt = '%d,%d,%d,%.4f,%.1f,%.5f,%.5f\n'
for refl in data[0]:
if refl[3]:
File.write(fmt%(refl[4],refl[5],refl[6],refl[0],refl[1],refl[7],refl[8]))
File.close()
finally:
dlg.Destroy()
def KeyEditPickGrid(event):
colList = G2frame.indxPeaks.GetSelectedCols()
data = G2frame.GPXtree.GetItemPyData(IndexId)
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
G2frame.indxPeaks.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if G2frame.IndexPeaksTable.GetColLabelValue(col) in ['use',]:
if key == 89: #'Y'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col]=True
elif key == 78: #'N'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col]=False
elif key == 83: # 'S'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col] = not data[0][row][col]
if 'PWD' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.IndPeaksMenu)
G2frame.Bind(wx.EVT_MENU, OnReload, id=G2G.wxID_INDXRELOAD)
G2frame.Bind(wx.EVT_MENU, OnSave, id=G2G.wxID_INDEXSAVE)
G2frame.dataWindow.IndexPeaks.Enable(False)
G2frame.IndexPeaksTable = []
if len(data[0]):
G2frame.dataWindow.IndexPeaks.Enable(True)
Unit = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List'))
if Unit:
if len(Unit) == 4: #patch
Unit.append({})
if len(Unit) == 5: #patch
Unit.append({})
controls,bravais,cellist,dmin,ssopt,magcells = Unit
if 'T' in Inst['Type'][0]: #TOF - use other limit!
dmin = G2lat.Pos2dsp(Inst,Limits[1][0])
else:
dmin = G2lat.Pos2dsp(Inst,Limits[1][1])
G2frame.HKL = []
if ssopt.get('Use',False):
cell = controls[6:12]
A = G2lat.cell2A(cell)
ibrav = bravaisSymb.index(controls[5])
spc = controls[13]
SGData = G2spc.SpcGroup(spc)[1]
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'])[1]
Vec = ssopt['ModVec']
maxH = ssopt['maxH']
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,Vec,maxH,A)
G2frame.HKL = np.array(G2frame.HKL)
from typing import List
Vector = List[float]
Matrix2D = List[Vector]
import numpy as np
import matplotlib.pyplot as plt
from .stats import mean_squared_error
# Bibliography https://github.com/arseniyturin/SGD-From-Scratch/blob/master/Gradient%20Descent.ipynb
class LinearRegression:
def fit(self, X: Matrix2D, y: Vector) -> None:
"""
Fits the data and calculates the coefficients of the linear regression
Parameters
----------
X : Matrix2D
Data
y : Vector
Target
"""
self.X = np.asarray(X)
self.y = np.asarray(y)
X, y = self.X, self.y
if X.ndim == 1 or all(d == 1 for d in X.shape[1:]):
# Least Square Error (minimizes mean square error)
self.coeffs = ((np.mean(X) * np.mean(y) - np.mean(X*y)) / ((np.mean(X)**2) - np.mean(X**2)))
self.b = np.mean(y) - self.coeffs * np.mean(X)
self.uni_dim = True
else:
self.coeffs = np.linalg.inv(X.T @ X) @ X.T @ y
self.b = np.mean(y) - np.mean(X, axis=0) @ self.coeffs
self.uni_dim = False
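# Minimal usage sketch (illustrative data; exercises the 1-D branch above):
#   reg = LinearRegression()
#   reg.fit([1., 2., 3., 4.], [2.1, 3.9, 6.2, 8.1])
#   y_hat = reg.predict()   # predictions for the fitted X
#   err = reg.mse()         # mean squared error of the fit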
def coef_(self) -> Vector:
"""
Returns the coefficients
Returns
-------
Vector
The vector of coefficients
"""
return self.coeffs
def intercept_(self) -> float:
"""Returns the intercept value
Returns:
float: Intercept value
"""
return self.b
def predict(self, X: Matrix2D = None) -> Vector:
"""Returns the predicted data once fitted
Args:
X (Matrix2D, optional): Data to predict. Defaults to None (takes the fitted data)
Returns:
Vector: The predicted data
"""
if X is None: X = self.X
if self.uni_dim: self.y_pred = X * self.coeffs + self.b
else: self.y_pred = X @ self.coeffs + self.b
return self.y_pred
def mse(self, y_real: Vector = None, y_pred: Vector = None) -> float:
"""Returns the mean squared error
Args:
y_real (Vector, optional): Real data. Defaults to None (takes the fitted data)
y_pred (Vector, optional): Predicted data. Defaults to None (takes the predicted data)
Returns:
float: Mean squared error
"""
if y_real is None: y_real = self.y
if y_pred is None: y_pred = self.y_pred
return mean_squared_error(y_real, y_pred)
def plot(self, show: bool = True, delimeters: bool = False) -> None:
"""Plots the linear regression data against the real data
Args:
show (bool, optional): Whether to show the plot. Defaults to True.
delimeters (bool, optional): Whether to show the delimiters of the plotted surface. Defaults to False.
"""
if self.uni_dim:
plt.title('Simple Linear Regression')
plt.ylim(min(self.y), max(self.y))
plt.plot(self.X, self.y_pred, c='red')
plt.scatter(self.X, self.y, c='#325aa8', s=15)
if show: plt.show()
elif self.X.shape[1] == 2:
plt.title('Multiple Linear Regression')
ax = plt.axes(projection = '3d')
min_x = np.min(self.X, axis = 0)
max_x = np.max(self.X, axis = 0)
x_axis = np.array([min_x[0],max_x[0]])
"""
Module for guiding Slit/Order tracing
.. _numpy.ndarray: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
import os
import inspect
import copy
import IPython
import numpy as np
from scipy import ndimage
from astropy.io import fits
from pypeit import msgs
from pypeit.core import parse, trace_slits, extract, pixels
#from pypeit.core import io
from pypeit import utils
from pypeit import masterframe
from pypeit import ginga
from pypeit.spectrographs import util
from pypeit.par import pypeitpar
class TraceSlits(masterframe.MasterFrame):
"""Class to guide slit/order tracing
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The `Spectrograph` instance that sets the instrument used to
take the observations. Used to set :attr:`spectrograph`.
par (:class:`pypeit.par.pypeitpar.TraceSlitsPar`):
The parameters used to guide slit tracing
det (:obj:`int`, optional):
The 1-indexed detector number to process.
master_key (:obj:`str`, optional):
The string identifier for the instrument configuration. See
:class:`pypeit.masterframe.MasterFrame`.
master_dir (:obj:`str`, optional):
Path to master frames.
reuse_masters (:obj:`bool`, optional):
Load master files from disk, if possible.
msbpm (`numpy.ndarray`_, optional):
Bad pixel mask. If not provided, a dummy array with no
masking is generated.
qa_path (:obj:`str`, optional):
Directory for QA output.
Attributes:
TODO: Come back to these...
frametype (str): Hard-coded to 'trace'
slit_left (ndarray [nrow, nslit]): Left edges, in physical space
slit_righ (ndarray [nrow, nslit]): Right edges, in physical space
slitpix (ndarray): Image specifying which pixels are in which slit
slitcen : ndarray [nrow, nslit]
Pixel values down the center of the slit
extrapord : ndarray
??
edgearr : ndarray
Edge image
-200000, 200000 indexing -- ??
-100000, 100000 indexing -- Edges defined but additional work in progress
-1, 1 indexing -- Edges finalized
tc_dict : dict, optional
Dict guiding multi-slit work
[left,right][xval][edge]
steps : list
List of the processing steps performed
siglev : ndarray
Sobolev filtered image of mstrace
Used to find images and used for tracing
binarr : ndarray
Uniform filter version of mstrace
Generated by make_binarr()
user_set : bool
Did the user set the slit? If so, most of the automated algorithms are skipped
lmin : int
Lowest left edge, edgearr value
lmax : int
Highest left edge, edgearr value
rmin : int
Lowest right edge, edgearr value
rmax : int
Highest right edge, edgearr value
lcnt : int
Number of left edges
rcnt : int
Number of right edges
"""
# Frametype is a class attribute
# frametype = 'trace' # For ProcessImages base
master_type = 'Trace' # For MasterFrame base
def __init__(self, spectrograph, par, binning=None, det=1, master_key=None, master_dir=None,
reuse_masters=False, qa_path=None, msbpm=None):
# MasterFrame
masterframe.MasterFrame.__init__(self, self.master_type, master_dir=master_dir,
master_key=master_key, reuse_masters=reuse_masters)
# Required parameters
self.spectrograph = spectrograph
self.par = par
self.binning = binning
self.mstrace = None
self.msbpm = msbpm
# Optional parameters
self.qa_path = qa_path
self.det = det
self.ednum = 100000
# Main outputs
self.slit_left = None # narray
self.slit_righ = None # narray
self.tc_dict = None # dict
self.edgearr = None # ndarray
self.siglev = None # ndarray
self.steps = []
self.extrapord = None
self.slitcen = None
self.slitpix = None
self.slit_left_tweak = None # Tweaked slit boundaries from flat fielding routine.
self.slit_righ_tweak = None
# Key Internals
# if mstrace is not None:
# self.binarr = self._make_binarr()
self.binarr = None
self.user_set = None
self.lmin = None
self.lmax = None
self.rmin = None
self.rmax = None
self.lcnt = None
self.rcnt = None
self.lcoeff = None
# Fitting
self.lnmbrarr = None
self.ldiffarr = None
self.lwghtarr = None
self.rcoeff = None
self.rnmbrarr = None
self.rdiffarr = None
self.rwghtarr = None
@property
def nslit(self):
"""
Returns:
int: Number of slits currently identified
"""
return 0 if self.slit_left is None else self.slit_left.shape[1]
def _edgearr_from_binarr(self):
"""
Generate the first edgearr from the Sobolev produced siglev image
Wrapper to trace_slits.edgearr_from_binarr
:attr:`siglev` : ndarray (internal)
:attr:`edgearr` : ndarray (internal)
"""
self.siglev, self.edgearr \
= trace_slits.edgearr_from_binarr(self.binarr, self.msbpm,
medrep=self.par['medrep'],
sobel_mode=self.par['sobel_mode'],
sigdetect=self.par['sigdetect'])
#number_slits=self.par['number'])
# Step
self.steps.append(inspect.stack()[0][3])
def _add_left_right(self):
"""
Add left/right edges to edgearr
Wrapper to trace_slits.edgearr_add_left_right()
If 0 is returned for both counts, this detector will be skipped
Returns:
bool: If True, at least one slit was added
self.edgearr : ndarray (internal)
self.lcnt : int (internal)
self.rcnt : int (internal)
"""
self.edgearr, self.lcnt, self.rcnt = trace_slits.edgearr_add_left_right(
self.edgearr, self.binarr, self.msbpm, self.lcnt, self.rcnt, self.ednum)
# Check on return
if (self.lcnt == 0) and (self.rcnt == 0):
any_slits = False
else:
any_slits = True
# Step
self.steps.append(inspect.stack()[0][3])
return any_slits
def add_user_slits(self, user_slits):
"""
Add user-defined slit(s)
Wrapper to trace_slits.add_user_edges()
self.slit_left and self.slit_righ modified in place
Args:
user_slits (list):
"""
# Reset (if needed) -- For running after PypeIt took a first pass
#self.reset_edgearr_ednum()
# Add user input slits
self.slit_left, self.slit_righ \
= trace_slits.add_user_edges(self.slit_left, self.slit_righ, user_slits)
# Step
self.steps.append(inspect.stack()[0][3])
def _chk_for_longslit(self, fwhm=3.):
"""
Are we done? I.e., do we have a simple longslit: one left edge and one right edge?
Args:
fwhm (float, optional):
Returns:
bool: True = longslit only
"""
# TODO: Why is this here?
orig = False
if orig:
if (self.lmax+1-self.lmin == 1) and (self.rmax+1-self.rmin == 1):
plxbin = self.pixlocn[:, :, 0].copy()
minvf, maxvf = plxbin[0, 0], plxbin[-1, 0]
# Just a single order has been identified (i.e. probably longslit)
msgs.info("Only one slit was identified. Should be a longslit.")
xint = self.pixlocn[:, 0, 0]
# Finish
self.slit_left = np.zeros((self.mstrace.shape[0], 1))
self.slit_righ = np.zeros((self.mstrace.shape[0], 1))
self.slit_left[:, 0] = utils.func_val(self.lcoeff[:, 0], xint,
self.par['function'], minx=minvf, maxx=maxvf)
self.slit_righ[:, 0] = utils.func_val(self.rcoeff[:, 0], xint,
self.par['function'], minx=minvf, maxx=maxvf)
return True
return False
if len(self.tc_dict['left']['xval']) == 1 and len(self.tc_dict['right']['xval']) == 1:
# fweight the trace crude
for key,sign in zip(['left','right'], [1., -1.]):
trace_crutch = self.tc_dict[key]['xset']
trace_fweight = extract.iter_tracefit(np.fmax(sign*self.siglev, 0.0), trace_crutch,
self.par['trace_npoly'], fwhm=3.0*fwhm,
niter=9)[0]
trace_gweight = extract.iter_tracefit(np.fmax(sign*self.siglev, 0.0),
trace_fweight, self.par['trace_npoly'],
fwhm=fwhm, gweight=True, niter=6)[0]
self.tc_dict[key]['traces'] = trace_gweight
return True
return False
def _fill_tslits_dict(self):
"""
Build a simple dictionary holding the key trace bits and pieces that PypeIt wants
Returns:
dict: Trace slits dict
"""
self.tslits_dict = {}
# Have the slit boundaries been tweaked? If so use the tweaked
# boundaries. TODO: Have the dict keys have the same name as
# the attribute
self.tslits_dict['slit_left_orig'] = self.slit_left
self.tslits_dict['slit_righ_orig'] = self.slit_righ
if self.slit_left_tweak is not None:
self.tslits_dict['slit_left_tweak'] = self.slit_left_tweak
self.tslits_dict['slit_righ_tweak'] = self.slit_righ_tweak
self.tslits_dict['slit_left'] = self.slit_left_tweak
self.tslits_dict['slit_righ'] = self.slit_righ_tweak
else:
self.tslits_dict['slit_left'] = self.slit_left
self.tslits_dict['slit_righ'] = self.slit_righ
# Fill in the rest of the keys that were generated by
# make_pixel_arrays from the slit boundaries. This was done with
# tweaked boundaries if they exist. TODO: Some of these
# quantities may be deprecated.
#for key in ['slitcen', 'pixwid', 'lordpix','rordpix', 'extrapord']:
# self.tslits_dict[key] = getattr(self, key)
# add in the image size and some stuff to create the slitmask
self.tslits_dict['maskslits'] = self.maskslits
self.tslits_dict['slitcen'] = self.slitcen
self.tslits_dict['nspec'] = self.mstrace.shape[0]
self.tslits_dict['nspat'] = self.mstrace.shape[1]
self.tslits_dict['nslits'] = self.slit_left.shape[1]
self.tslits_dict['pad'] = self.par['pad']
binspectral, binspatial = parse.parse_binning(self.binning)
self.tslits_dict['binspectral'] = binspectral
self.tslits_dict['binspatial'] = binspatial
self.tslits_dict['spectrograph'] = self.spectrograph.spectrograph
self.tslits_dict['spec_min'], self.tslits_dict['spec_max'] = \
self.spectrograph.slit_minmax(self.tslits_dict['nslits'], binspectral = binspectral)
return self.tslits_dict
def _final_left_right(self):
"""
Last check on left/right edges
Wrapper to trace_slits.edgearr_final_left_right()
These are modified:
self.edgearr : ndarray (internal)
self.lcnt : int (internal)
self.rcnt : int (internal)
Returns:
"""
# Final left/right edgearr fussing (as needed)
self.edgearr, self.lcnt, self.rcnt = trace_slits.edgearr_final_left_right(
self.edgearr, self.ednum, self.siglev)
# Steps
self.steps.append(inspect.stack()[0][3])
def _make_pixel_arrays(self):
"""
Generate pixel arrays
Primarily for later stages of PypeIt
Modified internally:
self.slitcen
self.slitpix
Returns:
"""
if self.slit_left_tweak is not None:
msgs.info("Using tweaked slit boundaries determined from IllumFlat")
slit_left = self.slit_left_tweak
slit_righ = self.slit_righ_tweak
else:
slit_left = self.slit_left
slit_righ = self.slit_righ
# Convert physical traces into a pixel trace
msgs.info("Converting physical trace locations to nearest pixel")
# Notice that the slitcen is always defined relative to the
# untweaked boundaries. This guarantees that the reference
# location along the slit that we use for wavelength calibration
# and for tilts is always at a fixed unchanging location.
self.slitcen = 0.5*(self.slit_left+self.slit_righ)
#self.pixwid = (slit_righ-slit_left).mean(0).astype(np.int)
def _make_binarr(self):
"""
Lightly process mstrace
Returns:
ndarray: The processed image
"""
# Only filter in the spectral dimension, not spatial!
self.binarr = ndimage.uniform_filter(self.mstrace, size=(3, 1), mode='mirror')
# Step
self.steps.append(inspect.stack()[0][3])
return self.binarr
def _match_edges(self):
"""
# Assign a number to each edge 'grouping'
Wrapper to trace_slits.match_edges()
Modified internally:
self.edgearr : ndarray (internal)
self.lcnt : int (internal)
self.rcnt : int (internal)
Returns:
"""
self.lcnt, self.rcnt = trace_slits.match_edges(self.edgearr, self.ednum)
# Sanity check (unlikely we will ever hit this)
if self.lcnt >= self.ednum or self.rcnt >= self.ednum:
msgs.error("Found more edges than allowed by ednum. Set ednum to a larger number.")
# Step
self.steps.append(inspect.stack()[0][3])
def _maxgap_prep(self):
"""
First step in the maxgap algorithm
Likely to be Deprecated
Returns
-------
self.edgearr : ndarray (internal)
self.edgearrcp : ndarray (internal)
"""
self.edgearrcp = self.edgearr.copy()
self.edgearr[np.where(self.edgearr < 0)] += 1 + np.max(self.edgearr) - np.min(self.edgearr)
# modified from: https://github.com/sgherbst/hslink-emu/blob/master/msemu/rf.py
import logging
import sys
import numpy as np
from math import log2, ceil
from scipy.interpolate import interp1d
from scipy.fftpack import ifft
from scipy.integrate import cumtrapz
from skrf import Network
def s2sdd(s):
""" Converts a 4-port single-ended S-parameter matrix
to a 2-port differential mode representation.
Reference: https://www.aesa-cortaillod.com/fileadmin/documents/knowledge/AN_150421_E_Single_ended_S_Parameters.pdf
"""
sdd = np.zeros((2, 2), dtype=np.complex128)
sdd[0, 0] = 0.5*(s[0, 0] - s[0, 2] - s[2, 0] + s[2, 2])
sdd[0, 1] = 0.5*(s[0, 1] - s[0, 3] - s[2, 1] + s[2, 3])
sdd[1, 0] = 0.5*(s[1, 0] - s[1, 2] - s[3, 0] + s[3, 2])
sdd[1, 1] = 0.5*(s[1, 1] - s[1, 3] - s[3, 1] + s[3, 3])
return sdd
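# Quick sketch of use (dummy values): given a 4x4 single-ended S-matrix at one
# frequency point, s2sdd returns the 2x2 differential-mode S-matrix.
#   s_se = 0.1 * np.eye(4, dtype=np.complex128)
#   sdd = s2sdd(s_se)   # 2x2 complex ndarray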
def s2tf(s, zo, zs, zl):
""" Converts a two-port S-parameter matrix to a transfer function,
given characteristic impedance, input impedance, and output
impedance.
Reference: https://www.mathworks.com/help/rf/ug/s2tf.html
"""
gamma_l = (zl-zo)/(zl+zo)
gamma_s = (zs-zo)/(zs+zo)
gamma_in = s[0,0]+(s[0,1]*s[1,0]*gamma_l/(1-s[1,1]*gamma_l))
tf = ((zs + np.conj(zs))/np.conj(zs))*(s[1,0]*(1+gamma_l)*(1-gamma_s))/(2*(1-s[1,1]*gamma_l)*(1-gamma_in*gamma_s))
return tf
def is_mostly_real(v, ratio=1e-6):
return np.all(np.abs(np.imag(v)/np.real(v)) < ratio)
def get_impulse(f, tf, dt, T):
""" Calculates the impulse response, given a single-sided transfer function.
f should be non-negative and increasing. See https://www.overleaf.com/read/mxxtgdvkmkvt
"""
# calculate number of time points in impulse response
n_req = round(T/dt)
logging.debug('Number of time points requested: {}'.format(n_req))
# calculate number of IFFT points
n = 1<<int(ceil(log2(n_req)))
logging.debug('Number of IFFT points: {}'.format(n))
# calculate frequency spacing
df = 1/(n*dt)
# copy f and tf vectors so they can be modified
f = f.copy()
tf = tf.copy()
# make sure that the DC component is real if present
if f[0] == 0:
logging.debug('Removing imaginary part of tf[0]')
assert is_mostly_real(tf[0])
tf[0] = tf[0].real
# calculate magnitude and phase
ma = np.abs(tf)
ph = np.unwrap(np.angle(tf))
# add DC component if necessary
if f[0] != 0:
logging.debug('Adding point f[0]=0, tf[0]=abs(tf[1])')
f = np.concatenate(([0], f))
ma = np.concatenate(([ma[0]], ma))
ph = np.concatenate(([0], ph))
from __future__ import print_function
import mysql.connector
import sys
import sqlalchemy as sqla
import numpy as np
import pandas as pd
import tensorflow as tf
from pprint import pprint
from time import strftime
from data import connect
'''
Features: OHLCVAX-LR + MA-LR(8*2) + SH & SZ indices-LR((6+8*2)*2) (7+16+44=67)
Label format: Scalar
'''
TIME_SHIFT = 9
nclsQry = (
"SELECT "
" COUNT(*) "
"FROM "
" (SELECT DISTINCT "
" score "
" FROM "
" kpts) t"
)
ftQuery = (
"SELECT "
" d.lr, "
" d.lr_h, "
" d.lr_o, "
" d.lr_l, "
" d.lr_vol, "
" d.lr_amt, "
" d.lr_xr, "
" d.lr_ma5, "
" d.lr_ma10, "
" d.lr_ma20, "
" d.lr_ma30, "
" d.lr_ma60, "
" d.lr_ma120, "
" d.lr_ma200, "
" d.lr_ma250, "
" d.lr_vol5, "
" d.lr_vol10, "
" d.lr_vol20, "
" d.lr_vol30, "
" d.lr_vol60, "
" d.lr_vol120, "
" d.lr_vol200, "
" d.lr_vol250, "
" COALESCE(sh.lr,0) sh_lr, "
" COALESCE(sh.lr_h,0) sh_lr_h, "
" COALESCE(sh.lr_o,0) sh_lr_o, "
" COALESCE(sh.lr_l,0) sh_lr_l, "
" COALESCE(sh.lr_vol,0) sh_lr_vol, "
" COALESCE(sh.lr_amt,0) sh_lr_amt, "
" COALESCE(sh.lr_ma5,0) sh_lr_ma5, "
" COALESCE(sh.lr_ma10,0) sh_lr_ma10, "
" COALESCE(sh.lr_ma20,0) sh_lr_ma20, "
" COALESCE(sh.lr_ma30,0) sh_lr_ma30, "
" COALESCE(sh.lr_ma60,0) sh_lr_ma60, "
" COALESCE(sh.lr_ma120,0) sh_lr_ma120, "
" COALESCE(sh.lr_ma200,0) sh_lr_ma200, "
" COALESCE(sh.lr_ma250,0) sh_lr_ma250, "
" COALESCE(sh.lr_vol5,0) sh_lr_vol5, "
" COALESCE(sh.lr_vol10,0) sh_lr_vol10, "
" COALESCE(sh.lr_vol20,0) sh_lr_vol20, "
" COALESCE(sh.lr_vol30,0) sh_lr_vol30, "
" COALESCE(sh.lr_vol60,0) sh_lr_vol60, "
" COALESCE(sh.lr_vol120,0) sh_lr_vol120, "
" COALESCE(sh.lr_vol200,0) sh_lr_vol200, "
" COALESCE(sh.lr_vol250,0) sh_lr_vol250, "
" COALESCE(sz.lr,0) sz_lr, "
" COALESCE(sz.lr_h,0) sz_h, "
" COALESCE(sz.lr_o,0) sz_o, "
" COALESCE(sz.lr_l,0) sz_l, "
" COALESCE(sz.lr_vol,0) sz_vol, "
" COALESCE(sz.lr_amt,0) sz_lr_amt, "
" COALESCE(sz.lr_ma5,0) sz_lr_ma5, "
" COALESCE(sz.lr_ma10,0) sz_lr_ma10, "
" COALESCE(sz.lr_ma20,0) sz_lr_ma20, "
" COALESCE(sz.lr_ma30,0) sz_lr_ma30, "
" COALESCE(sz.lr_ma60,0) sz_lr_ma60, "
" COALESCE(sz.lr_ma120,0) sz_lr_ma120, "
" COALESCE(sz.lr_ma200,0) sz_lr_ma200, "
" COALESCE(sz.lr_ma250,0) sz_lr_ma250, "
" COALESCE(sz.lr_vol5,0) sz_lr_vol5, "
" COALESCE(sz.lr_vol10,0) sz_lr_vol10, "
" COALESCE(sz.lr_vol20,0) sz_lr_vol20, "
" COALESCE(sz.lr_vol30,0) sz_lr_vol30, "
" COALESCE(sz.lr_vol60,0) sz_lr_vol60, "
" COALESCE(sz.lr_vol120,0) sz_lr_vol120, "
" COALESCE(sz.lr_vol200,0) sz_lr_vol200, "
" COALESCE(sz.lr_vol250,0) sz_lr_vol250 "
"FROM "
" kline_d d "
" LEFT OUTER JOIN "
" (SELECT "
" lr, lr_h, lr_o, lr_l, lr_vol, lr_amt, "
" lr_ma5, lr_ma10, lr_ma20, lr_ma30, lr_ma60, lr_ma120, lr_ma200, lr_ma250, "
" lr_vol5, lr_vol10, lr_vol20, lr_vol30, lr_vol60, lr_vol120, lr_vol200, lr_vol250, "
" date "
" FROM "
" kline_d "
" WHERE "
" code = 'sh000001') sh USING (date) "
" LEFT OUTER JOIN "
" (SELECT "
" lr, lr_h, lr_o, lr_l, lr_vol, lr_amt, "
" lr_ma5, lr_ma10, lr_ma20, lr_ma30, lr_ma60, lr_ma120, lr_ma200, lr_ma250, "
" lr_vol5, lr_vol10, lr_vol20, lr_vol30, lr_vol60, lr_vol120, lr_vol200, lr_vol250, "
" date "
" FROM "
" kline_d "
" WHERE "
" code = 'sz399001') sz USING (date) "
"WHERE "
" d.code = %s "
" AND d.klid BETWEEN %s AND %s "
"ORDER BY klid "
"LIMIT %s "
)
def loadTestSet(max_step):
cnx = connect()
try:
nc_cursor = cnx.cursor(buffered=True)
nc_cursor.execute(nclsQry)
row = nc_cursor.fetchone()
nclass = int(row[0])
print('{} num class: {}'.format(strftime("%H:%M:%S"), nclass))
nc_cursor.close()
cursor = cnx.cursor(buffered=True)
pick = (
"SELECT "
" distinct flag "
"FROM "
" kpts "
"WHERE "
" flag LIKE 'TEST\\_%' "
"ORDER BY RAND() "
"LIMIT 1"
)
cursor.execute(pick)
row = cursor.fetchone()
print('{} selected test set: {}'.format(strftime("%H:%M:%S"), row[0]))
query = (
"SELECT "
" uuid, code, klid, score "
"FROM "
" kpts "
"WHERE "
" flag = '{}' "
)
cursor.execute(query.format(row[0]))
kpts = cursor.fetchall()
cursor.close()
data = [] # [batch, max_step, feature*time_shift]
labels = [] # [batch] scalar labels
seqlen = [] # [batch] scalar sequence length
uuids = []
for (uuid, code, klid, score) in kpts:
uuids.append(uuid)
labels.append(score)
s = max(0, klid-max_step+1-TIME_SHIFT)
batch, total = getBatch(cnx, code, s, klid, max_step)
data.append(batch)
seqlen.append(total)
return uuids, np.array(data), np.array(labels), np.array(seqlen), nclass
except:
print(sys.exc_info()[0])
raise
finally:
cnx.close()
def getBatch(cnx, code, s, e, max_step):
'''
[max_step, feature*time_shift], length
'''
fcursor = cnx.cursor(buffered=True)
try:
fcursor.execute(ftQuery, (code, s, e, max_step+TIME_SHIFT))
featSize = len(fcursor.column_names)
total = fcursor.rowcount
rows = fcursor.fetchall()
batch = []
for t in range(TIME_SHIFT+1):
steps = np.zeros((max_step, featSize), dtype='f')
offset = max_step + TIME_SHIFT - total
s = max(0, t - offset)
e = total - TIME_SHIFT + t
for i, row in enumerate(rows[s:e]):
steps[i+offset] = [col for col in row]
batch.append(steps)
return np.concatenate(batch, 1), total - TIME_SHIFT
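# The loop above builds TIME_SHIFT+1 time-shifted copies of the [max_step, featSize]
# window and concatenates them along the feature axis, so the returned batch is
# [max_step, featSize*(TIME_SHIFT+1)]; with TIME_SHIFT = 9 and the 67 feature columns
# selected by ftQuery, that is 670 columns per step.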
except:
print(sys.exc_info()[0])
raise
finally:
fcursor.close()
def loadTrainingData(batch_no, max_step):
cnx = connect()
try:
nc_cursor = cnx.cursor(buffered=True)
nc_cursor.execute(nclsQry)
row = nc_cursor.fetchone()
nclass = int(row[0])
nc_cursor.close()
cursor = cnx.cursor(buffered=True)
query = (
'SELECT '
' uuid, code, klid, score '
'FROM'
' kpts '
'WHERE '
" flag = 'TRN_{}'"
)
# print(query)
cursor.execute(query.format(batch_no))
kpts = cursor.fetchall()
cursor.close()
data = [] # [batch, max_step, feature*time_shift]
labels = [] # [batch] scalar labels
seqlen = [] # [batch] scalar sequence lengths
uuids = []
for (uuid, code, klid, score) in kpts:
uuids.append(uuid)
labels.append(score)
s = max(0, klid-max_step+1-TIME_SHIFT)
batch, total = getBatch(cnx, code, s, klid, max_step)
data.append(batch)
seqlen.append(total)
# pprint(data)
# print("\n")
# pprint(len(labels))
return uuids, np.array(data), np.array(labels), np.array(seqlen), nclass
'''
(c) University of Liverpool 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=no-member
# pylint: disable=wrong-import-order
import sys
from keras.utils import to_categorical
from sklearn import metrics, model_selection
from sklearn.metrics import confusion_matrix
import liv_learn
from liv_learn.keras import Classifier
import numpy as np
import pandas as pd
def get_data(filename):
'''Gets data.'''
df = pd.read_csv(filename, sep='\t')
x_data = np.array(liv_learn.get_aa_props(df['seq'].tolist()))
return (x_data, df['bin'])
def classify(x_data, y_data):
'''Runs the classify method.'''
y_data = to_categorical(y_data, num_classes=len(set(y_data)))
x_train, x_test, y_train, y_test = \
model_selection.train_test_split(x_data, y_data, test_size=0.05)
classifier = Classifier(x_train, y_train)
classifier.train(learn_rate=0.001, epochs=200)
y_pred = classifier.predict(x_test)
y_pred = np.array([[round(val) for val in pred] for pred in y_pred])
print(confusion_matrix([np.argmax(y) for y in y_test],
[np.argmax(y) for y in y_pred]))
print(metrics.accuracy_score(y_test, y_pred))
def regression(x_data, y_data):
'''Runs the regression method.'''
x_train, x_test, y_train, y_test = \
model_selection.train_test_split(x_data, y_data, test_size=0.05)
classifier = Classifier(x_train, y_train)
classifier.train(learn_rate=0.001, epochs=200)
y_pred = classifier.predict(x_test)
y_pred = np.array([[round(val) for val in pred] for pred in y_pred])
print(confusion_matrix([np.argmax(y) for y in y_test],
[np.argmax(y) for y in y_pred]))
"""
{This script compares the cumulative luminosity function used for AM in Victor's
mock-making script and the data I use to compare with mocks now.}
"""
from cosmo_utils.utils import file_readers as freader
from cosmo_utils.utils import work_paths as cwpaths
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd
import numpy as np
import math
def read_data(path_to_file, survey):
"""
Reads survey catalog from file
Parameters
----------
path_to_file: `string`
Path to survey catalog file
survey: `string`
Name of survey
Returns
---------
catl: `pandas.DataFrame`
Survey catalog with grpcz, abs rmag and stellar mass limits
volume: `float`
Volume of survey
cvar: `float`
Cosmic variance of survey
z_median: `float`
Median redshift of survey
"""
if survey == 'eco':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s',
'fc', 'grpmb', 'grpms']
# 13878 galaxies
eco_buff = pd.read_csv(path_to_file,delimiter=",", header=0, \
usecols=columns)
# 6456 galaxies
catl = eco_buff.loc[(eco_buff.cz.values >= 3000) & \
(eco_buff.cz.values <= 7000) & (eco_buff.absrmag.values <= -17.33)
& (eco_buff.absrmag.values >= -23.5)]
volume = 192351.36 # Survey volume with buffer [Mpc/h]^3
# volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
cvar = 0.125
z_median = np.median(catl.grpcz.values) / (3 * 10**5)
return catl,volume,cvar,z_median
def cumu_num_dens(data, weights, volume, bool_mag):
if weights is None:
weights = np.ones(len(data))
else:
weights = np.array(weights)
#Unnormalized histogram and bin edges
data += 0.775 #change mags from h=0.7 to h=1
bins = np.arange(math.floor(data.min()), math.ceil(data.max()), 0.2)
freq,edg = np.histogram(data,bins=bins,weights=weights)
bin_centers = 0.5*(edg[1:]+edg[:-1])
bin_width = edg[1] - edg[0]
if not bool_mag:
N_cumu = np.cumsum(freq[::-1])[::-1]
else:
N_cumu = np.cumsum(freq)
n_cumu = N_cumu/volume
err_poiss = np.sqrt(N_cumu)/volume
return bin_centers,edg,n_cumu,err_poiss,bin_width
def Schechter_func(M,phi_star,M_star,alpha):
const = 0.4*np.log(10)*phi_star
first_exp_term = 10**(0.4*(alpha+1)*(M_star-M))
second_exp_term = np.exp(-10**(0.4*(M_star-M)))
return const*first_exp_term*second_exp_term
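# A typical use of the curve_fit import above would be fitting this form to a binned
# luminosity function (names and starting guesses below are illustrative only):
#   p0 = [1e-2, -20.5, -1.1]   # phi_star, M_star, alpha
#   popt, pcov = curve_fit(Schechter_func, mag_bin_centers, phi_binned, p0=p0, sigma=phi_err)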
__author__ = '{<NAME>}'
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_data = dict_of_paths['data_dir']
path_to_raw = dict_of_paths['raw_dir']
survey = 'eco'
# Path to files
catl_current = path_to_raw + 'eco/eco_all.csv'
catl_katie_mocks = path_to_raw + 'gal_Lr_Mb_Re.txt'
catl_vc_mocks = path_to_raw + 'eco_wresa_050815.dat'
weights_vc_mocks = path_to_raw + 'eco_wresa_050815_weightmuiso.dat'
# * NOTE: volume_current is without the buffer
catl_current, volume_current, cvar, z_median = read_data(catl_current, survey)
mr_current = catl_current.absrmag.values
vc_data = pd.DataFrame(freader.IDL_read_file(catl_vc_mocks))
vc_weights = pd.DataFrame(freader.IDL_read_file(weights_vc_mocks))
volume_vc = 192294.221932 #(Mpc/h)^3
mr_vc = np.array(vc_data['goodnewabsr'])
vc_weights = np.array(vc_weights['corrvecinterp'])
import numpy as np
import os
import shutil
import tensorflow as tf
from input_utils import input_fn, predict_input_fn
from models import TrASenD
HHAR_DATA_FOLDER_PATH = "/path/to/hhar/data/dir"
MODEL_DIR_PATH = "/path/to/model/dir"
if not os.path.exists(MODEL_DIR_PATH):
os.mkdir(MODEL_DIR_PATH)
# Model Parameters
num_sensors = 2
batch_size = 64
hyper_params = {"learning_rate": 1e-3,
"beta1": 0.5,
"beta2": 0.9,
"l2_lambda_term": 5e-4}
num_output_classes = 6
trasend_model = TrASenD(num_sensors, num_output_classes, hyper_params)
f1_scores = []
for user_data_folder in os.listdir(HHAR_DATA_FOLDER_PATH):
if user_data_folder.startswith("."):
continue
full_user_data_folder = os.path.join(HHAR_DATA_FOLDER_PATH, user_data_folder)
current_user = user_data_folder.split("_")[1]
print("----- Training and evaluating for User:", current_user)
training_data_folder = os.path.join(full_user_data_folder, "train")
eval_data_folder = os.path.join(full_user_data_folder, "eval")
current_user_model_dir = os.path.join(MODEL_DIR_PATH, "user_{}_model_dir".format(current_user))
if os.path.exists(current_user_model_dir):
shutil.rmtree(current_user_model_dir)
os.mkdir(current_user_model_dir)
# Create TensorFlow estimator
trasend_estimator = tf.estimator.Estimator(
model_fn = trasend_model.get_model_function(),
model_dir = current_user_model_dir,
params = None)
# Train & Evaluate
best_f1_score = 0
for epoch in range(30):
# Train
training_input_function = lambda: input_fn(training_data_folder,
batch_size,
num_sensors,
num_output_classes,
True)
trasend_estimator.train(training_input_function)
# Eval
eval_input_function = lambda: input_fn(eval_data_folder,
batch_size,
num_sensors,
num_output_classes,
False)
eval_result = trasend_estimator.evaluate(eval_input_function)
cm = eval_result["conf_matrix"]
# Calculate F1-score from confusion matrix
TP = np.diag(cm)
FP = np.sum(cm, axis=0) - TP
FN = np.sum(cm, axis=1) - TP
num_classes = cm.shape[0]
TN = []
for i in range(num_classes):
temp = np.delete(cm, i, 0) # delete ith row
temp = np.delete(temp, i, 1) # delete ith column
TN.append(sum(sum(temp)))
precision = TP/((TP+FP)+0.01)
recall = TP/((TP+FN)+0.01)
precision = np.clip(precision, 0, 1)
recall = np.clip(recall, 0, 1)
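# --- Hedged sketch (assumption): the snippet is cut off here. The `best_f1_score` and
# `f1_scores` variables initialised above suggest that the remainder computed a
# macro-averaged F1 per epoch, kept the best value, and stored one score per user,
# roughly as follows.
f1_per_class = 2 * precision * recall / (precision + recall + 0.01)
best_f1_score = max(best_f1_score, np.mean(f1_per_class))
# ...and, once the epoch loop has finished for this user:
f1_scores.append(best_f1_score)
print("Best macro F1-score for user {}: {:.4f}".format(current_user, best_f1_score))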
import numpy as np
from scipy.spatial.distance import correlation as _correlation
np.seterr(divide='ignore', invalid='ignore')
def minkowski(x1, x2, power):
"""Minkowski Distance Metric
Parameters
----------
x1: numpy.ndarray
Vector one
x2: numpy.ndarray
Vector two
power: int
L_{power} norm order
Returns
-------
distance: float
Minkowski distance between `x1` and `x2`
"""
return np.linalg.norm(x1 - x2, power)
def cosine(x1, x2):
"""Cosine Distance Metric
Parameters
----------
x1: numpy.ndarray
Vector one
x2: numpy.ndarray
Vector two
Returns
-------
distance: float
Cosine distance between `x1` and `x2`
"""
return np.dot(x1.T, x2) / (np.linalg.norm(x1, 2) * np.linalg.norm(x2, 2))
def chisquare(x1, x2):
"""Chi-Square Distance Metric
Parameters
----------
x1: numpy.ndarray
Vector one
x2: numpy.ndarray
Vector two
Returns
-------
distance: float
Chi-Square distance between `x1` and `x2`
"""
return np.sum((x1 - x2)**2 / (x1 + x2))
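# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A quick sanity check of the three metrics on small example vectors; the expected values
# in the comments are approximate.
if __name__ == '__main__':
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([2.0, 2.0, 4.0])
    print(minkowski(a, b, 2))  # Euclidean (L2) distance: sqrt(2) ~ 1.414
    print(cosine(a, b))        # cosine of the angle between a and b: ~0.982
    print(chisquare(a, b))     # chi-square distance: 1/3 + 0 + 1/7 ~ 0.476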
"""
Generate figures for the DeepCytometer paper for v7 of the pipeline.
Partly deprecated by klf14_b6ntac_exp_0110_paper_figures_v8.py:
* Some figures have been updated to use v8 of the pipeline in the paper.
Code cannibalised from:
* klf14_b6ntac_exp_0097_full_slide_pipeline_v7.py
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'klf14_b6ntac_exp_0099_paper_figures'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
# json_annotation_files_dict here needs to have the same files as in
# klf14_b6ntac_exp_0098_full_slide_size_analysis_v7.py
# SQWAT: list of annotation files
json_annotation_files_dict = {}
json_annotation_files_dict['sqwat'] = [
'KLF14-B6NTAC 36.1d PAT 99-16 C1 - 2016-02-11 11.48.31.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46.json',
'KLF14-B6NTAC-MAT-17.1a 44-16 C1 - 2016-02-01 11.14.17.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 C1 - 2016-02-01 16.27.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 C1 - 2016-02-03 09.10.17.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 C1 - 2016-03-15 17.15.41.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 C1 - 2016-02-02 14.32.03.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 C1 - 2016-02-04 10.24.22.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 C1 - 2016-02-04 16.15.05.json',
'KLF14-B6NTAC 37.1a PAT 106-16 C1 - 2016-02-12 16.21.00.json',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06.json',
# 'KLF14-B6NTAC-PAT-37.2d 411-16 C1 - 2016-03-15 12.42.26.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 C1 - 2016-02-04 09.17.52.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 C1 - 2016-02-18 10.28.27.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 C1 - 2016-02-01 15.25.53.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 C1 - 2016-02-18 09.19.26.json',
'KLF14-B6NTAC 36.1g PAT 102-16 C1 - 2016-02-11 17.20.14.json',
'KLF14-B6NTAC-37.1g PAT 112-16 C1 - 2016-02-16 13.33.09.json',
'KLF14-B6NTAC-38.1e PAT 94-16 C1 - 2016-02-10 12.13.10.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 C1 - 2016-02-03 15.46.15.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 C1 - 2016-02-02 09.59.16.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 C1 - 2016-02-18 17.03.38.json',
'KLF14-B6NTAC-MAT-18.1f 55-16 C1 - 2016-02-02 16.14.30.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 C1 - 2016-03-15 14.37.55.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 C1 - 2016-02-17 14.51.18.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32.json',
'KLF14-B6NTAC 36.1e PAT 100-16 C1 - 2016-02-11 14.06.56.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 C1 - 2016-02-02 12.26.58.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52.json',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 C1 - 2016-03-17 14.33.38.json',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 C1 - 2016-02-03 14.19.35.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 C1 - 2016-02-25 15.13.00.json',
'KLF14-B6NTAC-PAT-37.2a 406-16 C1 - 2016-03-14 12.01.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 C1 - 2016-02-12 14.33.33.json',
'KLF14-B6NTAC-37.1b PAT 107-16 C1 - 2016-02-15 11.43.31.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 C1 - 2016-02-18 11.48.16.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 C1 - 2016-02-04 12.34.32.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 C1 - 2016-02-18 13.12.09.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 C1 - 2016-03-15 15.54.12.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 C1 - 2016-02-02 17.23.31.json',
'KLF14-B6NTAC-37.1h PAT 113-16 C1 - 2016-02-16 15.14.09.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52.json',
'KLF14-B6NTAC-37.1e PAT 110-16 C1 - 2016-02-15 17.33.11.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54.json',
'KLF14-B6NTAC 36.1h PAT 103-16 C1 - 2016-02-12 10.15.22.json',
# 'KLF14-B6NTAC-PAT-39.1h 453-16 C1 - 2016-03-17 11.38.04.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 C1 - 2016-02-17 12.49.00.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 C1 - 2016-02-01 17.51.46.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 C1 - 2016-02-17 11.46.42.json',
'KLF14-B6NTAC-38.1f PAT 95-16 C1 - 2016-02-10 14.41.44.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 C1 - 2016-03-15 10.18.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 C1 - 2016-02-18 15.41.38.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 C1 - 2016-02-01 12.23.50.json',
'KLF14-B6NTAC 36.1f PAT 101-16 C1 - 2016-02-11 15.23.06.json',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33.json',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 C1 - 2016-02-03 11.56.52.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 C1 - 2016-03-14 10.58.34.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 C1 - 2016-03-14 16.23.30.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 C1 - 2016-02-25 14.00.14.json',
# 'KLF14-B6NTAC-PAT-37.2c 407-16 C1 - 2016-03-14 14.13.54.json',
# 'KLF14-B6NTAC-PAT-37.2b 410-16 C1 - 2016-03-15 11.24.20.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 C1 - 2016-03-17 10.22.54.json',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41.json',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 C1 - 2016-03-16 17.01.17.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52.json',
'KLF14-B6NTAC-37.1f PAT 111-16 C2 - 2016-02-16 11.26 (1).json',
'KLF14-B6NTAC-PAT 37.2b 410-16 C4 - 2020-02-14 10.27.23.json',
'KLF14-B6NTAC-PAT 37.2c 407-16 C4 - 2020-02-14 10.15.57.json',
# 'KLF14-B6NTAC-PAT 37.2d 411-16 C4 - 2020-02-14 10.34.10.json'
]
# GWAT: list of annotation files
json_annotation_files_dict['gwat'] = [
'KLF14-B6NTAC-36.1a PAT 96-16 B1 - 2016-02-10 15.32.31.json',
'KLF14-B6NTAC-36.1b PAT 97-16 B1 - 2016-02-10 17.15.16.json',
'KLF14-B6NTAC-36.1c PAT 98-16 B1 - 2016-02-10 18.32.40.json',
'KLF14-B6NTAC 36.1d PAT 99-16 B1 - 2016-02-11 11.29.55.json',
'KLF14-B6NTAC 36.1e PAT 100-16 B1 - 2016-02-11 12.51.11.json',
'KLF14-B6NTAC 36.1f PAT 101-16 B1 - 2016-02-11 14.57.03.json',
'KLF14-B6NTAC 36.1g PAT 102-16 B1 - 2016-02-11 16.12.01.json',
'KLF14-B6NTAC 36.1h PAT 103-16 B1 - 2016-02-12 09.51.08.json',
# 'KLF14-B6NTAC 36.1i PAT 104-16 B1 - 2016-02-12 11.37.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 B1 - 2016-02-12 14.08.19.json',
'KLF14-B6NTAC 37.1a PAT 106-16 B1 - 2016-02-12 15.33.02.json',
'KLF14-B6NTAC-37.1b PAT 107-16 B1 - 2016-02-15 11.25.20.json',
'KLF14-B6NTAC-37.1c PAT 108-16 B1 - 2016-02-15 12.33.10.json',
'KLF14-B6NTAC-37.1d PAT 109-16 B1 - 2016-02-15 15.03.44.json',
'KLF14-B6NTAC-37.1e PAT 110-16 B1 - 2016-02-15 16.16.06.json',
'KLF14-B6NTAC-37.1g PAT 112-16 B1 - 2016-02-16 12.02.07.json',
'KLF14-B6NTAC-37.1h PAT 113-16 B1 - 2016-02-16 14.53.02.json',
'KLF14-B6NTAC-38.1e PAT 94-16 B1 - 2016-02-10 11.35.53.json',
'KLF14-B6NTAC-38.1f PAT 95-16 B1 - 2016-02-10 14.16.55.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 B1 - 2016-02-17 11.21.54.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 B1 - 2016-02-17 12.33.18.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 B1 - 2016-02-17 14.01.06.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 B1 - 2016-02-17 15.43.57.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 B1 - 2016-02-17 17.14.16.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 B1 - 2016-02-18 10.05.52.json',
# 'KLF14-B6NTAC-MAT-17.1a 44-16 B1 - 2016-02-01 09.19.20.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 B1 - 2016-02-01 12.05.15.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 B1 - 2016-02-01 13.01.30.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 B1 - 2016-02-01 15.11.42.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 B1 - 2016-02-01 16.01.09.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 B1 - 2016-02-01 17.12.31.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 B1 - 2016-02-04 08.57.34.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 B1 - 2016-02-04 10.06.00.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 B1 - 2016-02-04 11.14.28.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 B1 - 2016-02-04 12.20.20.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 B1 - 2016-02-04 14.01.40.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 B1 - 2016-02-04 15.52.52.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 B1 - 2016-02-02 08.49.06.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 B1 - 2016-02-02 09.46.31.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 B1 - 2016-02-02 11.24.31.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 B1 - 2016-02-02 14.11.37.json',
# 'KLF14-B6NTAC-MAT-18.1e 54-16 B1 - 2016-02-02 15.06.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 B1 - 2016-02-03 08.54.27.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 B1 - 2016-02-03 09.58.06.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 B1 - 2016-02-03 11.41.32.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 B1 - 2016-02-03 12.56.49.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 B1 - 2016-02-03 14.02.25.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 B1 - 2016-02-03 15.00.17.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 B1 - 2016-02-03 16.40.37.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 B1 - 2016-02-25 16.53.42.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 B1 - 2016-02-18 12.51.46.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 B1 - 2016-02-26 10.48.56.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 B1 - 2016-02-02 16.57.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 B1 - 2016-02-18 14.21.50.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 B1 - 2016-02-18 16.40.48.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 B1 - 2016-02-25 13.15.27.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 B1 - 2016-02-18 11.23.22.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 B1 - 2016-02-25 14.51.57.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 B1 - 2016-03-15 09.24.54.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 B1 - 2016-03-15 14.11.47.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 B1 - 2016-03-16 14.22.04.json',
# 'KLF14-B6NTAC-PAT-37.2a 406-16 B1 - 2016-03-14 11.46.47.json',
'KLF14-B6NTAC-PAT-37.2b 410-16 B1 - 2016-03-15 11.12.01.json',
'KLF14-B6NTAC-PAT-37.2c 407-16 B1 - 2016-03-14 12.54.55.json',
'KLF14-B6NTAC-PAT-37.2d 411-16 B1 - 2016-03-15 12.01.13.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 B1 - 2016-03-14 16.06.43.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 B1 - 2016-03-14 09.49.45.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 B1 - 2016-03-16 11.04.45.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 B1 - 2016-03-16 16.42.16.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 B1 - 2016-03-15 15.31.26.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 B1 - 2016-03-15 16.49.22.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 B1 - 2016-03-16 15.25.38.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 B1 - 2016-03-17 09.10.42.json',
'KLF14-B6NTAC-PAT-38.1a 90-16 B1 - 2016-02-04 17.27.42.json',
'KLF14-B6NTAC-PAT-39.1h 453-16 B1 - 2016-03-17 11.15.50.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 B1 - 2016-03-17 12.16.06.json'
]
########################################################################################################################
## Explore training/test data of different folds
########################################################################################################################
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras.backend as K
import cytometer
import cytometer.data
import tensorflow as tf
# limit number of GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
klf14_training_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_non_overlap_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_non_overlap')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# original dataset used in pipelines up to v6 + extra "other" tissue images
kfold_filename = os.path.join(saved_models_dir, saved_extra_kfolds_filename)
with open(kfold_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# number of images
n_im = len(file_svg_list)
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# HACK: if file_svg_list_extra is used above, this block will not work, but it is not needed
# for the earlier loop that calculates the rows of Table MICE with the breakdown of
# cells/other/background objects by mouse
#
# loop the folds to get the ndpi files that correspond to testing of each fold,
ndpi_files_test_list = {}
for i_fold in range(len(idx_test_all)):
# list of .svg files for testing
file_svg_test = np.array(file_svg_list)[idx_test_all[i_fold]]
# list of .ndpi files that the .svg windows came from
file_ndpi_test = [os.path.basename(x).replace('.svg', '') for x in file_svg_test]
file_ndpi_test = np.unique([x.split('_row')[0] for x in file_ndpi_test])
# add to the dictionary {file: fold}
for file in file_ndpi_test:
ndpi_files_test_list[file] = i_fold
if DEBUG:
# list of NDPI files
for key in ndpi_files_test_list.keys():
print(key)
# init dataframe to aggregate training numbers of each mouse
table = pd.DataFrame(columns=['Cells', 'Other', 'Background', 'Windows', 'Windows with cells'])
# loop files with hand traced contours
for i, file_svg in enumerate(file_svg_list):
print('file ' + str(i) + '/' + str(len(file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_P-1, Y_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
minimum_npoints=3)
brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
minimum_npoints=3)
background_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Background', add_offset_from_filename=False,
minimum_npoints=3)
contours = cell_contours + other_contours + brown_contours + background_contours
# make a list with the type of cell each contour is classified as
contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
np.ones(shape=(len(brown_contours),), dtype=np.uint8), # 1: brown cells (treated as "other" tissue)
np.zeros(shape=(len(background_contours),), dtype=np.uint8)] # 0: background
contour_type = np.concatenate(contour_type)
print('Cells: ' + str(len(cell_contours)) + '. Other: ' + str(len(other_contours))
+ '. Brown: ' + str(len(brown_contours)) + '. Background: ' + str(len(background_contours)))
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(file_svg),
values=[i,], values_tag='i',
tags_to_keep=['id', 'ko_parent', 'sex'])
# mouse ID as a string
id = df_common['id'].values[0]
sex = df_common['sex'].values[0]
ko = df_common['ko_parent'].values[0]
# row to add to the table
df = pd.DataFrame(
[(sex, ko,
len(cell_contours), len(other_contours) + len(brown_contours), len(background_contours), 1, int(len(cell_contours)>0))],
columns=['Sex', 'Genotype', 'Cells', 'Other', 'Background', 'Windows', 'Windows with cells'], index=[id])
if id in table.index:
num_cols = ['Cells', 'Other', 'Background', 'Windows', 'Windows with cells']
table.loc[id, num_cols] = (table.loc[id, num_cols] + df.loc[id, num_cols])
else:
table = table.append(df, sort=False, ignore_index=False, verify_integrity=True)
# alphabetical order by mouse IDs
table = table.sort_index()
# total number of sampled windows
print('Total number of windows = ' + str(np.sum(table['Windows'])))
print('Total number of windows with cells = ' + str(np.sum(table['Windows with cells'])))
# total number of "Other" and background areas
print('Total number of Other areas = ' + str(np.sum(table['Other'])))
print('Total number of Background areas = ' + str(np.sum(table['Background'])))
# aggregate by sex and genotype
idx_f = table['Sex'] == 'f'
idx_m = table['Sex'] == 'm'
idx_pat = table['Genotype'] == 'PAT'
idx_mat = table['Genotype'] == 'MAT'
print('f PAT: ' + str(np.sum(table.loc[idx_f * idx_pat, 'Cells'])))
print('f MAT: ' + str(np.sum(table.loc[idx_f * idx_mat, 'Cells'])))
print('m PAT: ' + str(np.sum(table.loc[idx_m * idx_pat, 'Cells'])))
print('m MAT: ' + str(np.sum(table.loc[idx_m * idx_mat, 'Cells'])))
# find folds that test images belong to
for i_file, ndpi_file_kernel in enumerate(ndpi_files_test_list):
# fold where the current .ndpi image was not used for training
i_fold = ndpi_files_test_list[ndpi_file_kernel]
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': ' + ndpi_file_kernel
+ '. Fold = ' + str(i_fold))
# mean and std of mouse weight
weight_f_mat = [22.07, 26.39, 30.65, 24.28, 27.72]
weight_f_pat = [31.42, 29.25, 27.18, 23.69, 21.20]
weight_m_mat = [46.19, 40.87, 40.02, 41.98, 34.52, 36.08]
weight_m_pat = [36.55, 40.77, 36.98, 36.11]
print('f MAT: mean = ' + str(np.mean(weight_f_mat)) + ', std = ' + str(np.std(weight_f_mat)))
print('f PAT: mean = ' + str(np.mean(weight_f_pat)) + ', std = ' + str(np.std(weight_f_pat)))
print('m MAT: mean = ' + str(np.mean(weight_m_mat)) + ', std = ' + str(np.std(weight_m_mat)))
print('m PAT: mean = ' + str(np.mean(weight_m_pat)) + ', std = ' + str(np.std(weight_m_pat)))
########################################################################################################################
## Statistics of hand traced white adipocytes
########################################################################################################################
import shapely
import shapely.geometry
import cytometer.utils
import scipy
import scipy.stats
rectangle_sides_ratios = []
areas = []
perimeters = []
sphericities = []
# loop files with hand traced contours
for i, file_svg in enumerate(file_svg_list):
print('file ' + str(i) + '/' + str(len(file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_P-1, Y_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
# compute contour properties
for j, cell_contour in enumerate(cell_contours):
poly_cell = shapely.geometry.Polygon(cell_contour)
x, y = poly_cell.minimum_rotated_rectangle.exterior.coords.xy
edge_length = (shapely.geometry.Point(x[0], y[0]).distance(shapely.geometry.Point(x[1], y[1])),
shapely.geometry.Point(x[1], y[1]).distance(shapely.geometry.Point(x[2], y[2])))
rectangle_sides_ratio = np.max(edge_length) / np.min(edge_length)
area = poly_cell.area
perimeter = poly_cell.length
inv_compactness = poly_cell.length ** 2 / (4 * np.pi * area)
sphericity = cytometer.utils.sphericity(poly_cell)
# if j == 58:
# raise ValueError('foo: ' + str(j))
# if (inv_compactness) < 2.2 and (inv_compactness) > 2.15:
# raise ValueError('foo: ' + str(j))
rectangle_sides_ratios.append(rectangle_sides_ratio)
areas.append(area)
perimeters.append(perimeter)
sphericities.append(sphericity)
# compactness measure
inv_compactnesses = list(np.array(perimeters)**2 / (4 * np.pi * np.array(areas)))
print('Max rectangle sides ratio: ' + str(np.max(rectangle_sides_ratios)))
print('Min area: ' + str(np.min(areas)))
print('Max area: ' + str(np.max(areas)))
print('Min perimeter: ' + str(np.min(perimeters)))
print('Max perimeter: ' + str(np.max(perimeters)))
print('Min sphericity: ' + str(np.min(sphericities)))
print('Max sphericity: ' + str(np.max(sphericities)))
print('Min inv_compactness: ' + str(np.min(inv_compactnesses)))
print('Max inv_compactness: ' + str(np.max(inv_compactnesses)))
if DEBUG:
plt.clf()
plt.hist(rectangle_sides_ratios, bins=100)
plt.clf()
plt.boxplot(rectangle_sides_ratios)
plt.ylabel('Rectangle sides ratio')
q = scipy.stats.mstats.hdquantiles(rectangle_sides_ratios, prob=[0.98], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{98\%}$ = ' + '%.2f' %q])
plt.clf()
plt.boxplot(areas)
plt.ylabel('Pixel$^2$')
q = scipy.stats.mstats.hdquantiles(areas, prob=[0.98], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{98\%}$ = ' + '%.0f' %q])
plt.clf()
plt.boxplot(inv_compactnesses)
plt.ylabel('Compactness$^{-1}$ ratio')
q = scipy.stats.mstats.hdquantiles(inv_compactnesses, prob=[0.98], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{98\%}$ = ' + '%.2f' %q])
plt.clf()
plt.boxplot(sphericities)
plt.ylabel('Sphericity')
q = scipy.stats.mstats.hdquantiles(sphericities, prob=[0.02], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{2\%}$ = ' + '%.2f' %q])
########################################################################################################################
## Plots of get_next_roi_to_process(): Adaptive Block Algorithm
#
# Note: the quantitative comparison versus uniform tiling is provided in klf14_b6ntac_exp_0105_adaptive_blocks_analysis.py
########################################################################################################################
import pickle
import cytometer.utils
# Filter out INFO & WARNING messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# # limit number of GPUs
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
os.environ['KERAS_BACKEND'] = 'tensorflow'
import time
import openslide
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from scipy.signal import fftconvolve
from cytometer.utils import rough_foreground_mask
import PIL
import PIL.Image
from keras import backend as K
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
set_session(tf.Session(config=config))
DEBUG = False
SAVE_FIGS = False
root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
data_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
training_dir = os.path.join(home, root_data_dir, 'klf14_b6ntac_training')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(root_data_dir, 'saved_models')
results_dir = os.path.join(root_data_dir, 'klf14_b6ntac_results')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
# k-folds file
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
# full resolution image window and network expected receptive field parameters
fullres_box_size = np.array([2751, 2751])
receptive_field = np.array([131, 131])
# rough_foreground_mask() parameters
downsample_factor = 8.0
dilation_size = 25
component_size_threshold = 1e6
hole_size_treshold = 8000
# contour parameters
contour_downsample_factor = 0.1
bspline_k = 1
# block_split() parameters in downsampled image
block_len = np.ceil((fullres_box_size - receptive_field) / downsample_factor)
block_overlap = np.ceil((receptive_field - 1) / 2 / downsample_factor).astype(np.int)
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
min_class_prop = 0.5
correction_window_len = 401
correction_smoothing = 11
batch_size = 16
# segmentation correction parameters
# load list of images, and indices for training vs. testing indices
saved_kfolds_filename = os.path.join(saved_models_dir, saved_extra_kfolds_filename)
with open(saved_kfolds_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# loop the folds to get the ndpi files that correspond to testing of each fold
ndpi_files_test_list = {}
for i_fold in range(len(idx_test_all)):
# list of .svg files for testing
file_svg_test = np.array(file_svg_list)[idx_test_all[i_fold]]
# list of .ndpi files that the .svg windows came from
file_ndpi_test = [os.path.basename(x).replace('.svg', '') for x in file_svg_test]
file_ndpi_test = np.unique([x.split('_row')[0] for x in file_ndpi_test])
# add to the dictionary {file: fold}
for file in file_ndpi_test:
ndpi_files_test_list[file] = i_fold
# File 4/19: KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04. Fold = 2
i_file = 4
# File 10/19: KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38. Fold = 5
i_file = 10
ndpi_file_kernel = list(ndpi_files_test_list.keys())[i_file]
# for i_file, ndpi_file_kernel in enumerate(ndpi_files_test_list):
# fold where the current .ndpi image was not used for training
i_fold = ndpi_files_test_list[ndpi_file_kernel]
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': ' + ndpi_file_kernel
+ '. Fold = ' + str(i_fold))
# make full path to ndpi file
ndpi_file = os.path.join(data_dir, ndpi_file_kernel + '.ndpi')
contour_model_file = os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model_file = os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model_file = os.path.join(saved_models_dir,
classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
correction_model_file = os.path.join(saved_models_dir,
correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
# name of file to save annotations
annotations_file = os.path.basename(ndpi_file)
annotations_file = os.path.splitext(annotations_file)[0]
annotations_file = os.path.join(annotations_dir, annotations_file + '_exp_0097.json')
# name of file to save areas and contours
results_file = os.path.basename(ndpi_file)
results_file = os.path.splitext(results_file)[0]
results_file = os.path.join(results_dir, results_file + '_exp_0097.npz')
# rough segmentation of the tissue in the image
lores_istissue0, im_downsampled = rough_foreground_mask(ndpi_file, downsample_factor=downsample_factor,
dilation_size=dilation_size,
component_size_threshold=component_size_threshold,
hole_size_treshold=hole_size_treshold,
return_im=True)
if DEBUG:
plt.clf()
plt.imshow(im_downsampled)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_histology_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
plt.clf()
plt.imshow(im_downsampled)
plt.contour(lores_istissue0, colors='k')
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
# segmentation copy, to keep track of what's left to do
lores_istissue = lores_istissue0.copy()
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert(im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
# # init empty list to store area values and contour coordinates
# areas_all = []
# contours_all = []
# keep extracting histology windows until we have finished
step = -1
time_0 = time_curr = time.time()
while np.count_nonzero(lores_istissue) > 0:
# next step (it starts from 0)
step += 1
time_prev = time_curr
time_curr = time.time()
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': step ' +
str(step) + ': ' +
str(np.count_nonzero(lores_istissue)) + '/' + str(np.count_nonzero(lores_istissue0)) + ': ' +
"{0:.1f}".format(100.0 - np.count_nonzero(lores_istissue) / np.count_nonzero(lores_istissue0) * 100) +
'% completed: ' +
'step time ' + "{0:.2f}".format(time_curr - time_prev) + ' s' +
', total time ' + "{0:.2f}".format(time_curr - time_0) + ' s')
## Code extracted from:
## get_next_roi_to_process()
# variables for get_next_roi_to_process()
seg = lores_istissue.copy()
downsample_factor = downsample_factor
max_window_size = fullres_box_size
border = np.round((receptive_field - 1) / 2)
# convert to np.array so that we can use algebraic operators
max_window_size = np.array(max_window_size)
border = np.array(border)
# convert segmentation mask to [0, 1]
seg = (seg != 0).astype('int')
# approximate measures in the downsampled image (we don't round them)
lores_max_window_size = max_window_size / downsample_factor
lores_border = border / downsample_factor
# kernels that, once flipped, correspond to the top line and left line of the window. They need to be
# pre-flipped because the convolution operation internally flips them (the two flips cancel each other out)
kernel_top = np.zeros(shape=np.round(lores_max_window_size - 2 * lores_border).astype('int'))
kernel_top[int((kernel_top.shape[0] - 1) / 2), :] = 1
kernel_left = np.zeros(shape=np.round(lores_max_window_size - 2 * lores_border).astype('int'))
kernel_left[:, int((kernel_top.shape[1] - 1) / 2)] = 1
if DEBUG:
plt.clf()
plt.imshow(kernel_top)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_kernel_top_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
plt.imshow(kernel_left)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_kernel_left_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
seg_top = np.round(fftconvolve(seg, kernel_top, mode='same'))
seg_left = np.round(fftconvolve(seg, kernel_left, mode='same'))
# window detections
detection_idx = np.nonzero(seg_left * seg_top)
# set top-left corner of the box = top-left corner of first box detected
lores_first_row = detection_idx[0][0]
lores_first_col = detection_idx[1][0]
# first, we look within a window with the maximum size
lores_last_row = detection_idx[0][0] + lores_max_window_size[0] - 2 * lores_border[0]
lores_last_col = detection_idx[1][0] + lores_max_window_size[1] - 2 * lores_border[1]
# second, if the segmentation is smaller than the window, we reduce the window size
window = seg[lores_first_row:int(np.round(lores_last_row)), lores_first_col:int(np.round(lores_last_col))]
idx = np.any(window, axis=1) # reduce rows size
last_segmented_pixel_len = np.max(np.where(idx))
lores_last_row = detection_idx[0][0] + np.min((lores_max_window_size[0] - 2 * lores_border[0],
last_segmented_pixel_len))
idx = np.any(window, axis=0) # reduce cols size
last_segmented_pixel_len = np.max(np.where(idx))
lores_last_col = detection_idx[1][0] + np.min((lores_max_window_size[1] - 2 * lores_border[1],
last_segmented_pixel_len))
# save coordinates for plot (this is only for a figure in the paper and doesn't need to be done in the real
# implementation)
lores_first_col_bak = lores_first_col
lores_first_row_bak = lores_first_row
lores_last_col_bak = lores_last_col
lores_last_row_bak = lores_last_row
# add a border around the window
lores_first_row = np.max([0, lores_first_row - lores_border[0]])
lores_first_col = np.max([0, lores_first_col - lores_border[1]])
lores_last_row = np.min([seg.shape[0], lores_last_row + lores_border[0]])
lores_last_col = np.min([seg.shape[1], lores_last_col + lores_border[1]])
# convert low resolution indices to high resolution
first_row = np.int(np.round(lores_first_row * downsample_factor))
last_row = np.int(np.round(lores_last_row * downsample_factor))
first_col = np.int(np.round(lores_first_col * downsample_factor))
last_col = np.int(np.round(lores_last_col * downsample_factor))
# round down indices in downsampled segmentation
lores_first_row = int(lores_first_row)
lores_last_row = int(lores_last_row)
lores_first_col = int(lores_first_col)
lores_last_col = int(lores_last_col)
# load window from full resolution slide
tile = im.read_region(location=(first_col, first_row), level=0,
size=(last_col - first_col, last_row - first_row))
tile = np.array(tile)
tile = tile[:, :, 0:3]
# interpolate coarse tissue segmentation to full resolution
istissue_tile = lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col]
istissue_tile = cytometer.utils.resize(istissue_tile, size=(last_col - first_col, last_row - first_row),
resample=PIL.Image.NEAREST)
if DEBUG:
plt.clf()
plt.imshow(tile)
plt.imshow(istissue_tile, alpha=0.5)
plt.contour(istissue_tile, colors='k')
plt.title('Yellow: Tissue. Purple: Background')
plt.axis('off')
# clear keras session to prevent each segmentation iteration from getting slower. Note that this forces us to
# reload the models every time
K.clear_session()
# segment histology, split into individual objects, and apply segmentation correction
labels, labels_class, todo_edge, \
window_im, window_labels, window_labels_corrected, window_labels_class, index_list, scaling_factor_list \
= cytometer.utils.segmentation_pipeline6(tile,
dmap_model=dmap_model_file,
contour_model=contour_model_file,
correction_model=correction_model_file,
classifier_model=classifier_model_file,
min_cell_area=min_cell_area,
mask=istissue_tile,
min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis,
min_class_prop=min_class_prop,
correction_window_len=correction_window_len,
correction_smoothing=correction_smoothing,
return_bbox=True, return_bbox_coordinates='xy',
batch_size=batch_size)
# downsample "to do" mask so that the rough tissue segmentation can be updated
lores_todo_edge = PIL.Image.fromarray(todo_edge.astype(np.uint8))
lores_todo_edge = lores_todo_edge.resize((lores_last_col - lores_first_col,
lores_last_row - lores_first_row),
resample=PIL.Image.NEAREST)
lores_todo_edge = np.array(lores_todo_edge)
# update coarse tissue mask (this is only necessary here to plot figures for the paper. In the actual code,
# the coarse mask gets directly updated, without this intermediate step)
seg_updated = seg.copy()
seg_updated[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = lores_todo_edge
if DEBUG:
plt.clf()
fig = plt.imshow(seg, cmap='Greys')
plt.contour(seg_left * seg_top > 0, colors='r')
rect = Rectangle((lores_first_col, lores_first_row),
lores_last_col - lores_first_col, lores_last_row - lores_first_row,
alpha=0.5, facecolor='g', edgecolor='g', zorder=2)
fig.axes.add_patch(rect)
rect2 = Rectangle((lores_first_col_bak, lores_first_row_bak),
lores_last_col_bak - lores_first_col_bak, lores_last_row_bak - lores_first_row_bak,
alpha=1.0, facecolor=None, fill=False, edgecolor='g', lw=1, zorder=3)
fig.axes.add_patch(rect2)
plt.scatter(detection_idx[1][0], detection_idx[0][0], color='k', s=5, zorder=3)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_fftconvolve_i_file_' + str(i_file) +
'_step_' + str(step) + '.png'),
bbox_inches='tight')
if DEBUG:
plt.clf()
fig = plt.imshow(seg, cmap='Greys')
plt.contour(seg_left * seg_top > 0, colors='r')
plt.contour(seg_updated, colors='w', zorder=4)
rect = Rectangle((lores_first_col, lores_first_row),
lores_last_col - lores_first_col, lores_last_row - lores_first_row,
alpha=0.5, facecolor='g', edgecolor='g', zorder=2)
fig.axes.add_patch(rect)
rect2 = Rectangle((lores_first_col_bak, lores_first_row_bak),
lores_last_col_bak - lores_first_col_bak, lores_last_row_bak - lores_first_row_bak,
alpha=1.0, facecolor=None, fill=False, edgecolor='g', lw=3, zorder=3)
fig.axes.add_patch(rect2)
plt.scatter(detection_idx[1][0], detection_idx[0][0], color='k', s=5, zorder=3)
plt.axis('off')
plt.tight_layout()
plt.xlim(int(lores_first_col - 50), int(lores_last_col + 50))
plt.ylim(int(lores_last_row + 50), int(lores_first_row - 50))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_fftconvolve_detail_i_file_' + str(i_file) +
'_step_' + str(step) + '.png'),
bbox_inches='tight')
# update coarse tissue mask for next iteration
lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = lores_todo_edge
########################################################################################################################
## Show examples of what each deep CNN does (code cannibalised from the "inspect" scripts of the networks)
########################################################################################################################
import pickle
import warnings
# other imports
import numpy as np
import cv2
import matplotlib.pyplot as plt
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
import keras.backend as K
import cytometer.data
import cytometer.utils
import cytometer.model_checkpoint_parallel
import tensorflow as tf
import skimage
import skimage.segmentation
from PIL import Image, ImageDraw
import math
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
klf14_training_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_non_overlap_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_non_overlap')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
'''Load folds'''
# load list of images, and indices for training vs. testing indices
contour_model_kfold_filename = os.path.join(saved_models_dir, saved_kfolds_filename)
with open(contour_model_kfold_filename, 'rb') as f:
aux = pickle.load(f)
svg_file_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
if DEBUG:
for i, file in enumerate(svg_file_list):
print(str(i) + ': ' + file)
# correct home directory
svg_file_list = [x.replace('/home/rcasero', home) for x in svg_file_list]
# KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_017204_col_019444.tif (fold 5 for testing. No .svg)
# KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_009644_col_061660.tif (fold 5 for testing. No .svg)
# KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg (fold 7 for testing. With .svg)
# find which fold the testing image belongs to
np.where(['36.1c' in x for x in svg_file_list])
idx_test_all[7]
# TIFF files that correspond to the SVG files (without augmentation)
im_orig_file_list = []
for i, file in enumerate(svg_file_list):
im_orig_file_list.append(file.replace('.svg', '.tif'))
im_orig_file_list[i] = os.path.join(os.path.dirname(im_orig_file_list[i]) + '_augmented',
'im_seed_nan_' + os.path.basename(im_orig_file_list[i]))
# check that files exist
if not os.path.isfile(file):
# warnings.warn('i = ' + str(i) + ': File does not exist: ' + os.path.basename(file))
warnings.warn('i = ' + str(i) + ': File does not exist: ' + file)
if not os.path.isfile(im_orig_file_list[i]):
# warnings.warn('i = ' + str(i) + ': File does not exist: ' + os.path.basename(im_orig_file_list[i]))
warnings.warn('i = ' + str(i) + ': File does not exist: ' + im_orig_file_list[i])
'''Inspect model results'''
# for i_fold, idx_test in enumerate(idx_test_all):
i_fold = 7; idx_test = idx_test_all[i_fold]
print('Fold ' + str(i_fold) + '/' + str(len(idx_test_all)-1))
'''Load data'''
# split the data list into training and testing lists
im_test_file_list, im_train_file_list = cytometer.data.split_list(im_orig_file_list, idx_test)
# load the test data (im, dmap, mask)
test_dataset, test_file_list, test_shuffle_idx = \
cytometer.data.load_datasets(im_test_file_list, prefix_from='im', prefix_to=['im', 'dmap', 'mask', 'contour'],
nblocks=1, shuffle_seed=None)
# fill in the little gaps in the mask
kernel = np.ones((3, 3), np.uint8)
for i in range(test_dataset['mask'].shape[0]):
test_dataset['mask'][i, :, :, 0] = cv2.dilate(test_dataset['mask'][i, :, :, 0].astype(np.uint8),
kernel=kernel, iterations=1)
# load dmap model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model = keras.models.load_model(saved_model_filename)
if dmap_model.input_shape[1:3] != test_dataset['im'].shape[1:3]:
dmap_model = cytometer.utils.change_input_size(dmap_model, batch_shape=test_dataset['im'].shape)
# estimate dmaps
pred_dmap = dmap_model.predict(test_dataset['im'], batch_size=4)
if DEBUG:
for i in range(test_dataset['im'].shape[0]):
plt.clf()
plt.subplot(221)
plt.imshow(test_dataset['im'][i, :, :, :])
plt.axis('off')
plt.subplot(222)
plt.imshow(test_dataset['dmap'][i, :, :, 0])
plt.axis('off')
plt.subplot(223)
plt.imshow(test_dataset['mask'][i, :, :, 0])
plt.axis('off')
plt.subplot(224)
plt.imshow(pred_dmap[i, :, :, 0])
plt.axis('off')
# KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg
i = 2
if DEBUG:
plt.clf()
plt.imshow(test_dataset['im'][i, :, :, :])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(test_dataset['dmap'][i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'dmap_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_dmap[i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_dmap_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# load dmap to contour model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
contour_model = keras.models.load_model(saved_model_filename)
if contour_model.input_shape[1:3] != pred_dmap.shape[1:3]:
contour_model = cytometer.utils.change_input_size(contour_model, batch_shape=pred_dmap.shape)
# estimate contours
pred_contour = contour_model.predict(pred_dmap, batch_size=4)
if DEBUG:
plt.clf()
plt.imshow(test_dataset['contour'][i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'contour_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_contour[i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_contour_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# load classifier model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model = keras.models.load_model(saved_model_filename)
if classifier_model.input_shape[1:3] != test_dataset['im'].shape[1:3]:
classifier_model = cytometer.utils.change_input_size(classifier_model, batch_shape=test_dataset['im'].shape)
# estimate pixel-classification
pred_class = classifier_model.predict(test_dataset['im'], batch_size=4)
if DEBUG:
plt.clf()
plt.imshow(pred_class[i, :, :, 0])
plt.contour(pred_class[i, :, :, 0] > 0.5, colors='r', linewidths=3)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_class_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_class[i, :, :, 0] > 0.5)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_class_thresh_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
## plot of classifier ground truth
# print('file ' + str(i) + '/' + str(len(file_svg_list) - 1))
# init output
im_array_all = []
out_class_all = []
out_mask_all = []
contour_type_all = []
file_tif = os.path.join(klf14_training_dir, os.path.basename(im_test_file_list[i]))
file_tif = file_tif.replace('im_seed_nan_', '')
# change file extension from .tif to .svg
file_svg = file_tif.replace('.tif', '.svg')
# open histology training image
im = Image.open(file_tif)
# make array copy
im_array = np.array(im)
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_P-1, Y_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
minimum_npoints=3)
brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
minimum_npoints=3)
background_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Background',
add_offset_from_filename=False,
minimum_npoints=3)
contours = cell_contours + other_contours + brown_contours + background_contours
# make a list with the type of cell each contour is classified as
contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
np.ones(shape=(len(brown_contours),), dtype=np.uint8), # 1: brown cells (treated as "other" tissue)
np.zeros(shape=(len(background_contours),), dtype=np.uint8)] # 0: background
contour_type = np.concatenate(contour_type)
contour_type_all.append(contour_type)
print('Cells: ' + str(len(cell_contours)))
print('Other: ' + str(len(other_contours)))
print('Brown: ' + str(len(brown_contours)))
print('Background: ' + str(len(background_contours)))
# initialise arrays for training
out_class = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
out_mask = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
# loop ground truth cell contours
for j, contour in enumerate(contours):
plt.plot([p[0] for p in contour], [p[1] for p in contour])
plt.text(contour[0][0], contour[0][1], str(j))
if DEBUG:
plt.clf()
plt.subplot(121)
plt.imshow(im_array)
plt.plot([p[0] for p in contour], [p[1] for p in contour])
xy_c = (np.mean([p[0] for p in contour]), np.mean([p[1] for p in contour]))
plt.scatter(xy_c[0], xy_c[1])
# rasterise current ground truth segmentation
cell_seg_gtruth = Image.new("1", im_array.shape[0:2][::-1], "black")  # mode "1": 1-bit pixels, black and white
draw = ImageDraw.Draw(cell_seg_gtruth)
draw.polygon(contour, outline="white", fill="white")
cell_seg_gtruth = np.array(cell_seg_gtruth, dtype=np.bool)
# save the ground truth segmentation of this cell, which we will use later in the figures
if j == 106:
cell_seg_gtruth_106 = cell_seg_gtruth.copy()
if DEBUG:
plt.subplot(122)
plt.cla()
plt.imshow(im_array)
plt.contour(cell_seg_gtruth.astype(np.uint8))
# add current object to training output and mask
out_mask[cell_seg_gtruth] = 1
out_class[cell_seg_gtruth] = contour_type[j]
if DEBUG:
plt.clf()
aux = (1- out_class).astype(np.float32)
aux = np.ma.masked_where(out_mask < 0.5, aux)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'class_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
## Segmentation correction CNN
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
min_class_prop = 0.5
correction_window_len = 401
correction_smoothing = 11
batch_size = 2
# segment histology
labels, labels_class, _ \
= cytometer.utils.segment_dmap_contour_v6(im_array,
contour_model=contour_model, dmap_model=dmap_model,
classifier_model=classifier_model,
border_dilation=0)
labels = labels[0, :, :]
labels_class = labels_class[0, :, :, 0]
if DEBUG:
plt.clf()
plt.imshow(labels)
if DEBUG:
plt.clf()
plt.imshow(labels)
plt.clf()
aux = skimage.segmentation.find_boundaries(labels, mode='thick')
kernel = np.ones((3, 3), np.uint8)
aux = cv2.dilate(aux.astype(np.uint8), kernel=kernel, iterations=1)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'watershed_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# remove labels that touch the edges, that are too small or too large, don't overlap enough with the tissue mask,
# are fully surrounded by another label or are not white adipose tissue
labels, todo_edge = cytometer.utils.clean_segmentation(
labels, min_cell_area=min_cell_area, max_cell_area=max_cell_area,
remove_edge_labels=True, mask=None, min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis,
labels_class=labels_class, min_class_prop=min_class_prop)
if DEBUG:
plt.clf()
plt.imshow(im_array)
plt.contour(labels, levels=np.unique(labels), colors='k')
plt.contourf(labels == 0)
plt.clf()
aux = skimage.segmentation.find_boundaries(labels, mode='thick')
kernel = np.ones((3, 3), np.uint8)
aux = cv2.dilate(aux.astype(np.uint8), kernel=kernel, iterations=1)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'cleaned_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# split image into individual labels
im_array = np.expand_dims(im_array, axis=0)
labels = np.expand_dims(labels, axis=0)
labels_class = np.expand_dims(labels_class, axis=0)
cell_seg_gtruth_106 = np.expand_dims(cell_seg_gtruth_106, axis=0)
window_mask = None
(window_labels, window_im, window_labels_class, window_cell_seg_gtruth_106), index_list, scaling_factor_list \
= cytometer.utils.one_image_per_label_v2((labels, im_array, labels_class, cell_seg_gtruth_106.astype(np.uint8)),
resize_to=(correction_window_len, correction_window_len),
resample=(Image.NEAREST, Image.LINEAR, Image.NEAREST, Image.NEAREST),
only_central_label=True, return_bbox=True)
# load correction model
saved_model_filename = os.path.join(saved_models_dir, correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
correction_model = keras.models.load_model(saved_model_filename)
if correction_model.input_shape[1:3] != window_im.shape[1:3]:
correction_model = cytometer.utils.change_input_size(correction_model, batch_shape=window_im.shape)
# multiply image by mask
window_im_masked = cytometer.utils.quality_model_mask(
np.expand_dims(window_labels, axis=-1), im=window_im, quality_model_type='-1_1')
# process (histology * mask) to estimate which pixels are underestimated and which overestimated in the segmentation
window_im_masked = correction_model.predict(window_im_masked, batch_size=batch_size)
# compute the correction to be applied to the segmentation
correction = (window_im[:, :, :, 0].copy() * 0).astype(np.float32)
correction[window_im_masked[:, :, :, 0] >= 0.5] = 1 # the segmentation went too far
correction[window_im_masked[:, :, :, 0] <= -0.5] = -1 # the segmentation fell short
if DEBUG:
j = 0
plt.clf()
plt.imshow(correction[j, :, :])
# plt.contour(window_labels[j, ...], colors='r', linewidths=1)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_correction_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(correction[j, :, :])
plt.contour(window_labels[j, ...], colors='r', linewidths=1)
plt.contour(window_cell_seg_gtruth_106[j, ...], colors='w', linewidths=1)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'pred_correction_gtruth_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# correct segmentation (full operation)
window_im = window_im.astype(np.float32)
window_im /= 255.0
window_labels_corrected = cytometer.utils.correct_segmentation(
im=window_im, seg=window_labels,
correction_model=correction_model, model_type='-1_1',
smoothing=correction_smoothing,
batch_size=batch_size)
if DEBUG:
# plot input to and output from Correction CNN examples
for j in [13, 15, 18]:
plt.clf()
plt.imshow(window_im[j, :, :, :])
plt.contour(window_labels[j, ...], colors='g', linewidths=4)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'correction_input_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir, 'correction_input_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
plt.clf()
plt.imshow(window_im[j, :, :, :])
plt.contour(window_labels_corrected[j, ...], colors='r', linewidths=4)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'correction_output_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir, 'correction_output_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
# convert overlap labels in cropped images to contours (points), and add cropping window offset so that the
# contours are in the tile-window coordinates
offset_xy = np.array(index_list)[:, [2, 3]] # index_list: [i, lab, x0, y0, xend, yend]
contours = cytometer.utils.labels2contours(window_labels, offset_xy=offset_xy,
scaling_factor_xy=scaling_factor_list)
contours_corrected = cytometer.utils.labels2contours(window_labels_corrected, offset_xy=offset_xy,
scaling_factor_xy=scaling_factor_list)
# crop contours that overflow the edges
for j in range(len(contours_corrected)):
contours_corrected[j] = np.clip(contours_corrected[j], a_min=0, a_max=1000)
if DEBUG:
# plot corrected overlapping contours all together
plt.clf()
plt.imshow(labels[0, :, :] * 0)
for j in range(len(contours_corrected)):
plt.fill(contours_corrected[j][:, 1], contours_corrected[j][:, 0],
edgecolor=(0.993248, 0.906157, 0.143936, 1.0), fill=False, lw=1.5)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir,
'corrected_contours_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir,
'corrected_contours_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
if DEBUG:
j = 0
plt.clf()
plt.imshow(window_im[j, ...])
plt.contour(window_labels[j, ...], colors='r', linewidths=3)
plt.text(185, 210, '+1', fontsize=30)
plt.text(116, 320, '-1', fontsize=30)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'im_for_correction_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(window_im[j, ...])
plt.contour(window_labels_corrected[j, ...], colors='g', linewidths=3)
plt.text(185, 210, '+1', fontsize=30)
plt.text(116, 320, '-1', fontsize=30)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'corrected_seg_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
aux = np.array(contours[j])
plt.plot(aux[:, 0], aux[:, 1])
########################################################################################################################
## Check whether manual correction of pipeline results makes a difference
# For this experiment, we corrected by hand on AIDA the automatic segmentations produced by the pipeline,
# and compared the segmentation error.
########################################################################################################################
import matplotlib.pyplot as plt
import cytometer.data
from shapely.geometry import Polygon
import openslide
import numpy as np
import scipy.stats
import pandas as pd
from mlxtend.evaluate import permutation_test
from statsmodels.stats.multitest import multipletests
import math
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
depot = 'sqwat'
# depot = 'gwat'
permutation_sample_size = 9 # the factorial of this number is the number of repetitions in the permutation tests
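# (e.g. math.factorial(9) = 362,880 rounds per permutation test)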
# list of annotation files for this depot
json_annotation_files = json_annotation_files_dict[depot]
# modify filenames to select the automatic segmentations produced by the pipeline
json_pipeline_annotation_files = [x.replace('.json', '_exp_0097_corrected_monolayer_left.json') for x in json_annotation_files]
json_pipeline_annotation_files = [os.path.join(annotations_dir, x) for x in json_pipeline_annotation_files]
# modify filenames to select the manually refined segmentations
json_refined_annotation_files = [x.replace('.json', '_exp_0097_refined_left.json') for x in json_annotation_files]
json_refined_annotation_files = [os.path.join(annotations_dir, x) for x in json_refined_annotation_files]
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
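# quantiles now holds the nine interior deciles: 0.1, 0.2, ..., 0.9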
pipeline_area_q_all = []
refined_area_q_all = []
pipeline_area_mean_all = []
refined_area_mean_all = []
id_all = []
for i_file, (json_pipeline_annotation_file, json_refined_annotation_file) in enumerate(zip(json_pipeline_annotation_files, json_refined_annotation_files)):
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_pipeline_annotation_file),
values=[i_file, ], values_tag='i_file',
tags_to_keep=['id', 'ko_parent', 'genotype', 'sex',
'BW', 'gWAT', 'SC'])
# mouse ID as a string
id = df_common['id'].values[0]
# we have only refined some of the segmentations for testing
if not id in ['16.2a', '16.2b', '16.2c', '16.2d', '16.2e', '16.2f', '17.1a', '17.1b', '17.1c', '17.1d', '17.1e',
'17.1f', '17.2a', '17.2b', '17.2c', '17.2d', '17.2f', '17.2g', '18.1a', '18.1b', '18.1c', '18.1d']:
continue
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ')
if os.path.isfile(json_pipeline_annotation_file):
print('\t' + os.path.basename(json_pipeline_annotation_file))
else:
print('\t' + os.path.basename(json_pipeline_annotation_file) + ' ... missing')
if os.path.isfile(json_refined_annotation_file):
print('\t' + os.path.basename(json_refined_annotation_file))
else:
print('\t' + os.path.basename(json_refined_annotation_file) + ' ... missing')
# ndpi file that corresponds to this .json file
ndpi_file = json_pipeline_annotation_file.replace('_exp_0097_corrected_monolayer_left.json', '.ndpi')
ndpi_file = ndpi_file.replace(annotations_dir, ndpi_dir)
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution']) # m
yres = 1e-2 / float(im.properties['tiff.YResolution']) # m
# ko = df_common['ko_parent'].values[0]
# genotype = df_common['genotype'].values[0]
# sex = df_common['sex'].values[0]
# bw = df_common['BW'].values[0]
# gwat = df_common['gWAT'].values[0]
# sc = df_common['SC'].values[0]
# read contours from AIDA annotations
pipeline_contours = cytometer.data.aida_get_contours(json_pipeline_annotation_file, layer_name='White adipocyte.*')
refined_contours = cytometer.data.aida_get_contours(json_refined_annotation_file, layer_name='White adipocyte.*')
# compute area of each contour
pipeline_areas = [Polygon(c).area * xres * yres for c in pipeline_contours]  # (m^2)
refined_areas = [Polygon(c).area * xres * yres for c in refined_contours]  # (m^2)
# compute HD quantiles
pipeline_area_q = scipy.stats.mstats.hdquantiles(pipeline_areas, prob=quantiles, axis=0)
refined_area_q = scipy.stats.mstats.hdquantiles(refined_areas, prob=quantiles, axis=0)
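# (hdquantiles is the Harrell-Davis estimator: each quantile is a beta-weighted average of all order
#  statistics, which gives smoother estimates than np.quantile for a modest number of cells per image)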
# compute average cell size
pipeline_area_mean = np.mean(pipeline_areas)
refined_area_mean = np.mean(refined_areas)
pipeline_area_q_all.append(pipeline_area_q)
refined_area_q_all.append(refined_area_q)
pipeline_area_mean_all.append(pipeline_area_mean)
refined_area_mean_all.append(refined_area_mean)
id_all.append(id)
print('Removed cells: %.2f' % (1 - len(refined_areas) / len(pipeline_areas)))
print((np.array(pipeline_area_q) - np.array(refined_area_q)) * 1e12)
if DEBUG:
plt.clf()
plt.plot(quantiles, pipeline_area_q * 1e12, label='Pipeline', linewidth=3)
plt.plot(quantiles, refined_area_q * 1e12, label='Refined', linewidth=3)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel('Quantiles', fontsize=14)
plt.ylabel('Area ($\mu m^2$)', fontsize=14)
plt.legend(fontsize=12)
# convert the list of vectors into a matrix
pipeline_area_q_all = np.vstack(pipeline_area_q_all)
refined_area_q_all = np.vstack(refined_area_q_all)
refined_area_mean_all = np.array(refined_area_mean_all)
pipeline_area_mean_all = np.array(pipeline_area_mean_all)
if DEBUG:
plt.clf()
pipeline_area_q_mean = np.mean(pipeline_area_q_all, axis=0)
for i in range(pipeline_area_q_all.shape[0]):
plt.plot(quantiles, 100 * (refined_area_q_all[i, :] - pipeline_area_q_all[i, :]) / pipeline_area_q_mean, label=id_all[i], linewidth=3)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel('Quantiles', fontsize=14)
plt.ylabel('Area change with refinement (%)', fontsize=14)
plt.legend(fontsize=12)
plt.tight_layout()
if DEBUG:
plt.clf()
plt.boxplot(100 * (refined_area_mean_all - pipeline_area_mean_all) / pipeline_area_mean_all, labels=['Mean size'])
plt.tick_params(axis='both', which='major', labelsize=14)
plt.ylabel('Area change with refinement (%)', fontsize=14)
plt.tight_layout()
########################################################################################################################
## Plots of segmented full slides with quantile colourmaps
########################################################################################################################
# This is done in klf14_b6ntac_exp_0098_full_slide_size_analysis_v7
########################################################################################################################
## Analysis of time and blocks that took to compute full slide segmentation from the server logs
########################################################################################################################
# The results were noted down in klf14_b6ntac_exp_0097_full_slide_pipeline_v7_logs.csv. This file is in the GoogleDrive
# directory with the rest of the paper.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import openslide
import statsmodels.api as sm
import scipy.stats
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
times_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
# load filenames, number of blocks and time it took to segment them
times_file = 'klf14_b6ntac_exp_0097_full_slide_pipeline_v7_logs.csv'
times_file = os.path.join(times_dir, times_file)
times_df = pd.read_csv(times_file)
# read rough masks of the files in the dataframe, to measure the tissue area in each
for i, file in enumerate(times_df['File']):
# filename of the coarse tissue mask
coarse_mask_file = os.path.join(annotations_dir, file + '_rough_mask.npz')
# load coarse tissue mask
with np.load(coarse_mask_file) as data:
mask = data['lores_istissue0']
if DEBUG:
plt.clf()
plt.imshow(mask)
# open full resolution histology slide to get pixel size
ndpi_file = os.path.join(ndpi_dir, file + '.ndpi')
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
# scaling factor to get pixel size in the coarse mask
k = np.array(im.dimensions) / mask.shape[::-1]
# add tissue area to the dataframe. Values refer to the full-resolution slide, even though
# they are computed from the coarse mask
times_df.loc[i, 'tissue_area_pix'] = np.count_nonzero(mask) * k[0] * k[1]
times_df.loc[i, 'tissue_area_mm2'] = times_df.loc[i, 'tissue_area_pix'] * xres * yres * 1e6
if DEBUG:
# plot tissue area vs. time to compute
plt.clf()
plt.scatter(times_df['tissue_area_mm2'], times_df['Blocks Time (s)'])
# fit linear model
model = sm.formula.ols('Q("Blocks Time (s)") ~ tissue_area_mm2', data=times_df).fit()
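# (Q("...") is patsy's quoting operator; it lets the formula reference a column whose name contains
#  spaces and parentheses and is therefore not a valid Python identifier)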
print(model.summary())
# Pearson coefficient
rho, rho_p = scipy.stats.pearsonr(times_df['tissue_area_mm2'], times_df['Blocks Time (s)'])
print('Pearson coeff = ' + str(rho))
print('p-val = ' + str(rho_p))
# tissue area
print('Tissue area')
print('min = ' + str(np.min(times_df['tissue_area_mm2'])) + ' mm2 = ' + str(np.min(times_df['tissue_area_pix']) * 1e-6) + ' Mpix')
print('max = ' + str(np.max(times_df['tissue_area_mm2'])) + ' mm2 = ' + str(np.max(times_df['tissue_area_pix']) * 1e-6) + ' Mpix')
# corresponding time to compute
print('Time to compute')
time_pred = model.predict(times_df['tissue_area_mm2'])
print('min = ' + str(np.min(time_pred) / 3600) + ' h')
print('max = ' + str(np.max(time_pred) / 3600) + ' h')
tissue_area_mm2_q1 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.25, axis=0)
tissue_area_mm2_q2 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.5, axis=0)
tissue_area_mm2_q3 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.75, axis=0)
aux_df = times_df.loc[0:2, :].copy()
aux_df.loc[0, 'tissue_area_mm2'] = tissue_area_mm2_q1
aux_df.loc[1, 'tissue_area_mm2'] = tissue_area_mm2_q2
aux_df.loc[2, 'tissue_area_mm2'] = tissue_area_mm2_q3
tissue_time_pred_q = model.predict(aux_df)
print('q1 = ' + str(tissue_area_mm2_q1) + ' mm2 -> '
+ str(tissue_time_pred_q[0] / 3600) + ' h')
print('q2 = ' + str(tissue_area_mm2_q2) + ' mm2 -> '
+ str(tissue_time_pred_q[1] / 3600) + ' h')
print('q3 = ' + str(tissue_area_mm2_q3) + ' mm2 -> '
+ str(tissue_time_pred_q[2] / 3600) + ' h')
########################################################################################################################
## Segmentation validation
########################################################################################################################
# This is done in klf14_b6ntac_exp_0096_pipeline_v7_validation.py
########################################################################################################################
## Time that it takes to do Auto vs. Corrected segmentation
########################################################################################################################
import numpy as np
import time
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import cytometer.data
import cytometer.utils
from PIL import Image, ImageDraw, ImageEnhance
DEBUG = False
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
times_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
# training window length
training_window_len = 401
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
# min_class_prop = 0.5
# correction_window_len = 401
# correction_smoothing = 11
batch_size = 2
'''Load folds'''
# load list of images, and indices for training vs. testing indices
saved_kfolds_filename = os.path.join(saved_models_dir, saved_kfolds_filename)
with open(saved_kfolds_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# number of images
n_im = len(file_svg_list)
# number of folds
n_folds = len(idx_test_all)
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# # correct home directory in file paths
# file_svg_list = cytometer.data.change_home_directory(list(file_svg_list), '/home/rcasero', home, check_isfile=True)
# file_svg_list = cytometer.data.change_home_directory(list(file_svg_list), '/users/rittscher/rcasero', home, check_isfile=True)
## compute and save results (you can skip this section if it has been done before, and go straight to where you load
## the results)
# load data computed in 0096 validation script
data_filename = os.path.join(saved_models_dir, 'klf14_b6ntac_exp_0096_pipeline_v7_validation' + '_data.npz')
with np.load(data_filename) as data:
im_array_all = data['im_array_all']
rough_mask_all = data['rough_mask_all']
out_class_all = 1 - data['out_class_all'] # encode as 0: other, 1: WAT
out_mask_all = data['out_mask_all']
# init dataframes
df_manual_all = pd.DataFrame()
df_auto_all = pd.DataFrame()
# init time vectors
time_auto = []
time_corrected = []
for i_fold in range(len(idx_test_all)):
# start timer
t0 = time.time()
''' Get the images/masks/classification that were not used for training of this particular fold '''
print('# Fold ' + str(i_fold) + '/' + str(len(idx_test_all) - 1))
# test and training image indices. These indices refer to file_list
idx_test = idx_test_all[i_fold]
# list of test files (used later for the dataframe)
file_list_test = np.array(file_svg_list)[idx_test]
print('## len(idx_test) = ' + str(len(idx_test)))
# split data into training and testing
im_array_test = im_array_all[idx_test, :, :, :]
rough_mask_test = rough_mask_all[idx_test, :, :]
out_class_test = out_class_all[idx_test, :, :, :]
out_mask_test = out_mask_all[idx_test, :, :]
''' Segmentation into non-overlapping objects '''
# names of contour, dmap and tissue classifier models
contour_model_filename = \
os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model_filename = \
os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model_filename = \
os.path.join(saved_models_dir, classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
# segment histology
pred_seg_test, pred_class_test, _ \
= cytometer.utils.segment_dmap_contour_v6(im_array_test,
dmap_model=dmap_model_filename,
contour_model=contour_model_filename,
classifier_model=classifier_model_filename,
border_dilation=0, batch_size=batch_size)
if DEBUG:
i = 0
plt.clf()
plt.subplot(221)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.axis('off')
plt.subplot(222)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contourf(pred_class_test[i, :, :, 0].astype(np.float32), alpha=0.5)
plt.axis('off')
plt.subplot(223)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contour(pred_seg_test[i, :, :], levels=np.unique(pred_seg_test[i, :, :]), colors='k')
plt.axis('off')
plt.subplot(224)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contourf(pred_class_test[i, :, :, 0].astype(np.float32), alpha=0.5)
plt.contour(pred_seg_test[i, :, :], levels=np.unique(pred_seg_test[i, :, :]), colors='k')
plt.axis('off')
plt.tight_layout()
# clean segmentation: remove labels that are too small or that don't overlap enough with
# the rough foreground mask
pred_seg_test, _ \
= cytometer.utils.clean_segmentation(pred_seg_test, min_cell_area=min_cell_area, max_cell_area=max_cell_area,
remove_edge_labels=False,
mask=rough_mask_test, min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis, labels_class=None)
# record processing time
time_auto.append((time.time() - t0) / im_array_test.shape[0])
if DEBUG:
plt.clf()
aux = np.stack((rough_mask_test[i, :, :],) * 3, axis=2)
plt.imshow(im_array_test[i, :, :, :] * aux)
plt.contour(pred_seg_test[i, ...], levels=np.unique(pred_seg_test[i, ...]), colors='k')
plt.axis('off')
''' Split image into individual labels and correct segmentation to take overlaps into account '''
(window_seg_test, window_im_test, window_class_test, window_rough_mask_test), index_list, scaling_factor_list \
= cytometer.utils.one_image_per_label_v2((pred_seg_test, im_array_test,
pred_class_test[:, :, :, 0].astype(np.uint8),
rough_mask_test.astype(np.uint8)),
resize_to=(training_window_len, training_window_len),
resample=(Image.NEAREST, Image.LINEAR, Image.NEAREST, Image.NEAREST),
only_central_label=True)
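# (judging by how the outputs are used, one_image_per_label_v2 crops each label into its own window resized to
#  training_window_len x training_window_len; index_list records the crop coordinates and scaling_factor_list the
#  resize factors, so results can later be mapped back to the original image coordinates)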
# correct segmentations
correction_model_filename = os.path.join(saved_models_dir,
correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
window_seg_corrected_test = cytometer.utils.correct_segmentation(im=window_im_test, seg=window_seg_test,
correction_model=correction_model_filename,
model_type='-1_1', batch_size=batch_size,
smoothing=11)
# record processing time
time_corrected.append((time.time() - t0) / im_array_test.shape[0] - time_auto[-1])  # per-image time of the correction step only
# save for later use
times_file = os.path.join(times_dir, 'klf14_b6ntac_exp_0099_time_comparison_auto_corrected.npz')
np.savez(times_file, time_auto=time_auto, time_corrected=time_corrected)
# compute what proportion of time the algorithm spends on the Auto segmentation vs. the Corrected segmentation
time_auto_ratio = np.array(time_auto) / (np.array(time_auto) + np.array(time_corrected))
print('Time Auto ratio:')
print('mean = ' + str(100 * np.mean(time_auto_ratio)) + ' %')
print('std = ' + str(100 * np.std(time_auto_ratio)) + ' %')
print('Ratio of total time to Auto')
print('mean = ' + 'x' + str(np.mean(1 / time_auto_ratio)))
print('std = ' + 'x' + str(np.std(1 / time_auto_ratio)))
time_corrected_ratio = np.array(time_corrected) / (np.array(time_auto) + np.array(time_corrected))
print('Time Corrected ratio:')
print('mean = ' + str(100 * np.mean(time_corrected_ratio)) + ' %')
print('std = ' + str(100 * np.std(time_corrected_ratio)) + ' %')
########################################################################################################################
## Cell populations from automatically segmented images in two depots: SQWAT and GWAT.
## This section needs to be run once per depot. The results are saved, so later sections can combine the data from
## both depots.
### USED IN PAPER
########################################################################################################################
import matplotlib.pyplot as plt
import cytometer.data
from shapely.geometry import Polygon
import openslide
import numpy as np
import scipy.stats
import pandas as pd
from mlxtend.evaluate import permutation_test
from statsmodels.stats.multitest import multipletests
import math
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v8/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
permutation_sample_size = 9 # the factorial of this number is the number of repetitions in the permutation tests
for depot in ['sqwat', 'gwat']:
# list of annotation files for this depot
json_annotation_files = json_annotation_files_dict[depot]
# modify filenames to select the particular segmentation we want (e.g. the automatic ones, or the manually refined ones)
json_annotation_files = [x.replace('.json', '_exp_0097_corrected.json') for x in json_annotation_files]
json_annotation_files = [os.path.join(annotations_dir, x) for x in json_annotation_files]
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
# compute areas of the rough masks
filename_rough_mask_area = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_area_' + depot + '.npz')
id_all = []
rough_mask_area_all = []
for i_file, json_file in enumerate(json_annotation_files):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ' + os.path.basename(json_file))
if not os.path.isfile(json_file):
print('Missing file')
# continue
# open full resolution histology slide
ndpi_file = json_file.replace('_exp_0097_corrected.json', '.ndpi')
ndpi_file = os.path.join(ndpi_dir, os.path.basename(ndpi_file))
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
# load mask
rough_mask_file = json_file.replace('_exp_0097_corrected.json', '_rough_mask.npz')
rough_mask_file = os.path.join(annotations_dir, rough_mask_file)
if not os.path.isfile(rough_mask_file):
print('No mask: ' + rough_mask_file)
aux = np.load(rough_mask_file)
lores_istissue0 = aux['lores_istissue0']
if DEBUG:
foo = aux['im_downsampled']
foo = PIL.Image.fromarray(foo)
foo = foo.resize(tuple((np.round(np.array(foo.size[0:2]) / 4)).astype(np.int)))
plt.imshow(foo)
plt.title(os.path.basename(ndpi_file))
# compute scaling factor between downsampled mask and original image
size_orig = np.array(im.dimensions) # width, height
size_downsampled = np.array(lores_istissue0.shape)[::-1] # width, height
downsample_factor = size_orig / size_downsampled # width, height
# create dataframe for this image
rough_mask_area = np.count_nonzero(lores_istissue0) * (xres * downsample_factor[0]) * (yres * downsample_factor[1]) # m^2
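# (i.e. number of foreground pixels in the low-resolution mask, times the full-resolution area covered by each of
#  those pixels, (xres * downsample_x) * (yres * downsample_y))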
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_file),
values=[rough_mask_area,], values_tag='SC_rough_mask_area',
tags_to_keep=['id', 'ko_parent', 'sex'])
# mouse ID as a string
id = df_common['id'].values[0]
# correct area because most slides contain two slices, but some don't
if depot == 'sqwat' and not (id in ['16.2d', '17.1e', '17.2g', '16.2e', '18.1f', '37.4a', '37.2e']):
# two slices in the slide, so slice area is approx. one half
rough_mask_area /= 2
elif depot == 'gwat' and not (id in ['36.1d', '16.2a', '16.2b', '16.2c', '16.2d', '16.2e', '17.1b', '17.1d',
'17.1e', '17.1f', '17.2c', '17.2d', '17.2f', '17.2g', '18.1b', '18.1c',
'18.1d', '18.2a', '18.2c', '18.2d', '18.2f', '18.2g', '18.3c', '19.1a',
'19.2e', '19.2f', '19.2g', '36.3d', '37.2e', '37.2f', '37.2g', '37.2h',
'37.3a', '37.4a', '37.4b', '39.2d']):
# two slices in the slide, so slice area is approx. one half
rough_mask_area /= 2
# add to output
id_all.append(id)
rough_mask_area_all.append(rough_mask_area)
# save results
np.savez_compressed(filename_rough_mask_area, id_all=id_all, rough_mask_area_all=rough_mask_area_all)
# load or compute area quantiles
filename_quantiles = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_area_quantiles_' + depot + '.npz')
if os.path.isfile(filename_quantiles):
with np.load(filename_quantiles) as aux:
area_mean_all = aux['area_mean_all']
area_q_all = aux['area_q_all']
id_all = aux['id_all']
ko_all = aux['ko_all']
genotype_all = aux['genotype_all']
sex_all = aux['sex_all']
else:
area_mean_all = []
area_q_all = []
id_all = []
ko_all = []
genotype_all = []
sex_all = []
bw_all = []
gwat_all = []
sc_all = []
for i_file, json_file in enumerate(json_annotation_files):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ' + os.path.basename(json_file))
if not os.path.isfile(json_file):
print('Missing file')
continue
# ndpi file that corresponds to this .json file
ndpi_file = json_file.replace('_exp_0097_corrected.json', '.ndpi')
ndpi_file = ndpi_file.replace(annotations_dir, ndpi_dir)
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution']) # m
yres = 1e-2 / float(im.properties['tiff.YResolution']) # m
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_file),
values=[i_file,], values_tag='i_file',
tags_to_keep=['id', 'ko_parent', 'genotype', 'sex',
'BW', 'gWAT', 'SC'])
# mouse ID as a string
id = df_common['id'].values[0]
ko = df_common['ko_parent'].values[0]
genotype = df_common['genotype'].values[0]
sex = df_common['sex'].values[0]
bw = df_common['BW'].values[0]
gwat = df_common['gWAT'].values[0]
sc = df_common['SC'].values[0]
# read contours from AIDA annotations
contours = cytometer.data.aida_get_contours(os.path.join(annotations_dir, json_file), layer_name='White adipocyte.*')
# compute area of each contour
areas = [Polygon(c).area * xres * yres for c in contours]  # (m^2)
# compute average area of all contours
area_mean = np.mean(areas)
# compute HD quantiles
area_q = scipy.stats.mstats.hdquantiles(areas, prob=quantiles, axis=0)
# append to totals
area_mean_all.append(area_mean)
area_q_all.append(area_q)
id_all.append(id)
ko_all.append(ko)
genotype_all.append(genotype)
sex_all.append(sex)
bw_all.append(bw)
gwat_all.append(gwat)
sc_all.append(sc)
# reorder from largest to smallest final area value
area_mean_all = np.array(area_mean_all)
area_q_all = np.array(area_q_all)
id_all = np.array(id_all)
ko_all = np.array(ko_all)
genotype_all = np.array(genotype_all)
sex_all = np.array(sex_all)
bw_all = np.array(bw_all)
gwat_all = np.array(gwat_all)
sc_all = np.array(sc_all)
idx = np.argsort(area_q_all[:, -1])
idx = idx[::-1] # sort from larger to smaller
area_mean_all = area_mean_all[idx]
area_q_all = area_q_all[idx, :]
id_all = id_all[idx]
ko_all = ko_all[idx]
genotype_all = genotype_all[idx]
sex_all = sex_all[idx]
bw_all = bw_all[idx]
gwat_all = gwat_all[idx]
sc_all = sc_all[idx]
np.savez_compressed(filename_quantiles, area_mean_all=area_mean_all, area_q_all=area_q_all, id_all=id_all,
ko_all=ko_all, genotype_all=genotype_all, sex_all=sex_all,
bw_all=bw_all, gwat_all=gwat_all, sc_all=sc_all)
if DEBUG:
plt.clf()
for i in range(len(area_q_all)):
# plot
if ko_all[i] == 'PAT':
color = 'g'
elif ko_all[i] == 'MAT':
color = 'r'
else:
raise ValueError('Unknown ko value: ' + ko_all[i])
if sex_all[i] == 'f':
plt.subplot(121)
plt.plot(quantiles, area_q_all[i] * 1e12 * 1e-3, color=color)
elif sex_all[i] == 'm':
plt.subplot(122)
plt.plot(quantiles, area_q_all[i] * 1e12 * 1e-3, color=color)
else:
raise ValueError('Unknown sex value: ' + sex_all[i])
legend_f = [i + ' ' + j.replace('KLF14-KO:', '') for i, j
in zip(id_all[sex_all == 'f'], genotype_all[sex_all == 'f'])]
legend_m = [i + ' ' + j.replace('KLF14-KO:', '') for i, j
in zip(id_all[sex_all == 'm'], genotype_all[sex_all == 'm'])]
plt.subplot(121)
plt.title('Female', fontsize=14)
plt.tick_params(labelsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.ylabel('Area ($10^{3}\ \mu m^2$)', fontsize=14)
plt.legend(legend_f, fontsize=12)
plt.subplot(122)
plt.title('Male', fontsize=14)
plt.tick_params(labelsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.legend(legend_m, fontsize=12)
# DEBUG:
# area_q_all = np.vstack((area_q_all, area_q_all))
# id_all = np.hstack((id_all, id_all))
# ko_all = np.hstack((ko_all, ko_all))
# genotype_all = np.hstack((genotype_all, genotype_all))
# sex_all = np.hstack((sex_all, sex_all))
# compute variability of area values for each quantile
area_q_f_pat = area_q_all[(sex_all == 'f') * (ko_all == 'PAT'), :]
area_q_m_pat = area_q_all[(sex_all == 'm') * (ko_all == 'PAT'), :]
area_q_f_mat = area_q_all[(sex_all == 'f') * (ko_all == 'MAT'), :]
area_q_m_mat = area_q_all[(sex_all == 'm') * (ko_all == 'MAT'), :]
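# (multiplying the boolean arrays acts as an element-wise logical AND, selecting rows that match both conditions)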
area_interval_f_pat = scipy.stats.mstats.hdquantiles(area_q_f_pat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat = scipy.stats.mstats.hdquantiles(area_q_m_pat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat = scipy.stats.mstats.hdquantiles(area_q_f_mat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat = scipy.stats.mstats.hdquantiles(area_q_m_mat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_q_f_pat_wt = area_q_all[(sex_all == 'f') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_m_pat_wt = area_q_all[(sex_all == 'm') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_f_mat_wt = area_q_all[(sex_all == 'f') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_m_mat_wt = area_q_all[(sex_all == 'm') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_interval_f_pat_wt = scipy.stats.mstats.hdquantiles(area_q_f_pat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat_wt = scipy.stats.mstats.hdquantiles(area_q_m_pat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat_wt = scipy.stats.mstats.hdquantiles(area_q_f_mat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat_wt = scipy.stats.mstats.hdquantiles(area_q_m_mat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_q_f_pat_het = area_q_all[(sex_all == 'f') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_m_pat_het = area_q_all[(sex_all == 'm') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_f_mat_het = area_q_all[(sex_all == 'f') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_m_mat_het = area_q_all[(sex_all == 'm') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_interval_f_pat_het = scipy.stats.mstats.hdquantiles(area_q_f_pat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat_het = scipy.stats.mstats.hdquantiles(area_q_m_pat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat_het = scipy.stats.mstats.hdquantiles(area_q_f_mat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat_het = scipy.stats.mstats.hdquantiles(area_q_m_mat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
n_f_pat_wt = area_q_f_pat_wt.shape[0]
n_m_pat_wt = area_q_m_pat_wt.shape[0]
n_f_mat_wt = area_q_f_mat_wt.shape[0]
n_m_mat_wt = area_q_m_mat_wt.shape[0]
n_f_pat_het = area_q_f_pat_het.shape[0]
n_m_pat_het = area_q_m_pat_het.shape[0]
n_f_mat_het = area_q_f_mat_het.shape[0]
n_m_mat_het = area_q_m_mat_het.shape[0]
if DEBUG:
# plots of female median ECDF^-1 with quartile shaded area
plt.clf()
plt.subplot(121)
plt.plot(quantiles, area_interval_f_pat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_f_pat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_f_pat_wt[1, :] * 1e12 * 1e-3, area_interval_f_pat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_f_pat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_f_pat_het) + ' Het')
plt.fill_between(quantiles, area_interval_f_pat_het[1, :] * 1e12 * 1e-3, area_interval_f_pat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Female PAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.ylabel('White adipocyte area ($\cdot 10^3 \mu$m$^2$)', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 15)
plt.tight_layout()
plt.subplot(122)
plt.plot(quantiles, area_interval_f_mat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_f_mat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_f_mat_wt[1, :] * 1e12 * 1e-3, area_interval_f_mat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_f_mat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_f_mat_het) + ' Het')
plt.fill_between(quantiles, area_interval_f_mat_het[1, :] * 1e12 * 1e-3, area_interval_f_mat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Female MAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 15)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_female_pat_vs_mat_bands.svg'))
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_female_pat_vs_mat_bands.png'))
if DEBUG:
# plots of male median ECDF^-1 with quartile shaded area
plt.clf()
plt.subplot(121)
plt.plot(quantiles, area_interval_m_pat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_m_pat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_m_pat_wt[1, :] * 1e12 * 1e-3, area_interval_m_pat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_m_pat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_m_pat_het) + ' Het')
plt.fill_between(quantiles, area_interval_m_pat_het[1, :] * 1e12 * 1e-3, area_interval_m_pat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Male PAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.ylabel('White adipocyte area ($\cdot 10^3 \mu$m$^2$)', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 16)
plt.tight_layout()
plt.subplot(122)
plt.plot(quantiles, area_interval_m_mat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_m_mat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_m_mat_wt[1, :] * 1e12 * 1e-3, area_interval_m_mat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_m_mat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_m_mat_het) + ' Het')
plt.fill_between(quantiles, area_interval_m_mat_het[1, :] * 1e12 * 1e-3, area_interval_m_mat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Male MAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 16)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_male_pat_vs_mat_bands.svg'))
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_male_pat_vs_mat_bands.png'))
filename_pvals = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_pvals_' + depot + '.npz')
if os.path.isfile(filename_pvals):
with np.load(filename_pvals) as aux:
pval_perc_f_pat2mat = aux['pval_perc_f_pat2mat']
pval_perc_m_pat2mat = aux['pval_perc_m_pat2mat']
pval_perc_f_pat_wt2het = aux['pval_perc_f_pat_wt2het']
pval_perc_f_mat_wt2het = aux['pval_perc_f_mat_wt2het']
pval_perc_m_pat_wt2het = aux['pval_perc_m_pat_wt2het']
pval_perc_m_mat_wt2het = aux['pval_perc_m_mat_wt2het']
permutation_sample_size = aux['permutation_sample_size']
else:
# test whether the median values are different enough between two groups
func = lambda x, y: np.abs(scipy.stats.mstats.hdquantiles(x, prob=0.5, axis=0).data[0]
- scipy.stats.mstats.hdquantiles(y, prob=0.5, axis=0).data[0])
# func = lambda x, y: np.abs(np.mean(x) - np.mean(y))
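# (func is the permutation-test statistic: the absolute difference between the Harrell-Davis medians of the two
#  groups. permutation_test randomly reassigns the samples to the two groups num_rounds times and estimates the
#  p-value as the proportion of reassignments that give a statistic at least as large as the observed one)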
## PAT vs. MAT
# test whether the median values are different enough between PAT vs. MAT
pval_perc_f_pat2mat = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_f_pat2mat[i] = permutation_test(x=area_q_f_pat[:, i], y=area_q_f_mat[:, i],
func=func, seed=None,
method='approximate', num_rounds=math.factorial(permutation_sample_size))
pval_perc_m_pat2mat = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_m_pat2mat[i] = permutation_test(x=area_q_m_pat[:, i], y=area_q_m_mat[:, i],
func=func, seed=None,
method='approximate', num_rounds=math.factorial(permutation_sample_size))
## WT vs. Het
# PAT Females
pval_perc_f_pat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_f_pat_wt2het[i] = permutation_test(x=area_q_f_pat_wt[:, i], y=area_q_f_pat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
# MAT Females
pval_perc_f_mat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_f_mat_wt2het[i] = permutation_test(x=area_q_f_mat_wt[:, i], y=area_q_f_mat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
# PAT Males
pval_perc_m_pat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_m_pat_wt2het[i] = permutation_test(x=area_q_m_pat_wt[:, i], y=area_q_m_pat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
# MAT Males
pval_perc_m_mat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_m_mat_wt2het[i] = permutation_test(x=area_q_m_mat_wt[:, i], y=area_q_m_mat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
np.savez_compressed(filename_pvals, permutation_sample_size=permutation_sample_size,
pval_perc_f_pat2mat=pval_perc_f_pat2mat, pval_perc_m_pat2mat=pval_perc_m_pat2mat,
pval_perc_f_pat_wt2het=pval_perc_f_pat_wt2het, pval_perc_f_mat_wt2het=pval_perc_f_mat_wt2het,
pval_perc_m_pat_wt2het=pval_perc_m_pat_wt2het, pval_perc_m_mat_wt2het=pval_perc_m_mat_wt2het)
# data has been loaded or computed
np.set_printoptions(precision=2)
print('PAT vs. MAT before multitest correction')
print('Female:')
print(pval_perc_f_pat2mat)
print('Male:')
print(pval_perc_m_pat2mat)
np.set_printoptions(precision=8)
# multitest correction using Benjamini-Hochberg
_, pval_perc_f_pat2mat, _, _ = multipletests(pval_perc_f_pat2mat, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_m_pat2mat, _, _ = multipletests(pval_perc_m_pat2mat, method='fdr_bh', alpha=0.05, returnsorted=False)
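# (multipletests returns (reject, pvals_corrected, alphacSidak, alphacBonf); only the Benjamini-Hochberg
#  adjusted p-values are kept here, overwriting the raw ones)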
np.set_printoptions(precision=2)
print('PAT vs. MAT with multitest correction')
print('Female:')
print(pval_perc_f_pat2mat)
print('Male:')
print(pval_perc_m_pat2mat)
np.set_printoptions(precision=8)
np.set_printoptions(precision=3)
print('WT vs. Het before multitest correction')
print('Female:')
print(pval_perc_f_pat_wt2het)
print(pval_perc_f_mat_wt2het)
print('Male:')
print(pval_perc_m_pat_wt2het)
print(pval_perc_m_mat_wt2het)
np.set_printoptions(precision=8)
# multitest correction using Benjamini-Hochberg
_, pval_perc_f_pat_wt2het, _, _ = multipletests(pval_perc_f_pat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_f_mat_wt2het, _, _ = multipletests(pval_perc_f_mat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_m_pat_wt2het, _, _ = multipletests(pval_perc_m_pat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_m_mat_wt2het, _, _ = multipletests(pval_perc_m_mat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
np.set_printoptions(precision=3)
print('WT vs. Het with multitest correction')
print('Female:')
print(pval_perc_f_pat_wt2het)
print(pval_perc_f_mat_wt2het)
print('Male:')
print(pval_perc_m_pat_wt2het)
print(pval_perc_m_mat_wt2het)
np.set_printoptions(precision=8)
# # plot the median difference and the population quantiles at which the difference is significant
# if DEBUG:
# plt.clf()
# idx = pval_perc_f_pat2mat < 0.05
# delta_a_f_pat2mat = (area_interval_f_mat[1, :] - area_interval_f_pat[1, :]) / area_interval_f_pat[1, :]
# if np.any(idx):
# plt.stem(quantiles[idx], 100 * delta_a_f_pat2mat[idx],
# markerfmt='.', linefmt='C6-', basefmt='C6',
# label='p-val$_{\mathrm{PAT}}$ < 0.05')
#
# idx = pval_perc_m_pat2mat < 0.05
# delta_a_m_pat2mat = (area_interval_m_mat[1, :] - area_interval_m_pat[1, :]) / area_interval_m_pat[1, :]
# if np.any(idx):
# plt.stem(quantiles[idx], 100 * delta_a_m_pat2mat[idx],
# markerfmt='.', linefmt='C7-', basefmt='C7', bottom=250,
# label='p-val$_{\mathrm{MAT}}$ < 0.05')
#
# plt.plot(quantiles, 100 * delta_a_f_pat2mat, 'C6', linewidth=3, label='Female PAT to MAT')
# plt.plot(quantiles, 100 * delta_a_m_pat2mat, 'C7', linewidth=3, label='Male PAT to MAT')
#
# plt.xlabel('Cell population quantile', fontsize=14)
# plt.ylabel('Area change (%)', fontsize=14)
# plt.tick_params(axis='both', which='major', labelsize=14)
# plt.legend(loc='lower right', prop={'size': 12})
# plt.tight_layout()
#
# plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_change_pat_2_mat.svg'))
# plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_change_pat_2_mat.png'))
########################################################################################################################
## Linear models of body weight (BW), fat depots weight (SC and gWAT), and categorical variables (sex, ko, genotype)
########################################################################################################################
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
import statsmodels.api as sm
import re
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
# load metainfo file
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
# add litter and mother ID columns to the data
for i, id in enumerate(metainfo['id']):
litter = re.sub('[a-zA-Z]', '', id)
metainfo.loc[i, 'litter'] = litter
metainfo.loc[i, 'mother_id'] = litter.split('.')[0]
# add rough mask area column to the data
depot = 'sqwat'
filename_rough_mask_area = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_area_' + depot + '.npz')
aux = np.load(filename_rough_mask_area)
id_all = aux['id_all']
rough_mask_area_all = aux['rough_mask_area_all']
for id, rough_mask_area in zip(id_all, rough_mask_area_all):
metainfo.loc[metainfo['id'] == id, 'SC_rough_mask_area'] = rough_mask_area
# add rough mask area column to the data
depot = 'gwat'
filename_rough_mask_area = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_area_' + depot + '.npz')
aux = np.load(filename_rough_mask_area)
id_all = aux['id_all']
rough_mask_area_all = aux['rough_mask_area_all']
for id, rough_mask_area in zip(id_all, rough_mask_area_all):
metainfo.loc[metainfo['id'] == id, 'gWAT_rough_mask_area'] = rough_mask_area
########################################################################################################################
### Plot SC vs. gWAT to look for outliers
########################################################################################################################
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
if DEBUG:
idx = idx_not_nan
plt.clf()
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='All mice')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
if DEBUG:
plt.clf()
plt.subplot(221)
idx = np.where((metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='f PAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
plt.subplot(222)
idx = np.where((metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='f MAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
plt.subplot(223)
idx = np.where((metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='m PAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
plt.subplot(224)
idx = np.where((metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='m MAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
# 64 and 65 are outliers.
########################################################################################################################
### Plot SC and gWAT vs. corresponding rough mask areas
########################################################################################################################
if DEBUG:
plt.clf()
plt.subplot(121)
plt.scatter(np.power(metainfo['SC_rough_mask_area'] * 1e6, 3/2), metainfo['SC'])
# for i in range(metainfo.shape[0]):
# plt.annotate(metainfo['id'][i], (np.power(metainfo['SC_rough_mask_area'][i] * 1e6, 3/2), metainfo['SC'][i]))
plt.xlabel('Subcutaneous vol from slice area mm$^3$', fontsize=14)
plt.ylabel('m$_{SC}$ (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.subplot(122)
plt.scatter(np.power(metainfo['gWAT_rough_mask_area'] * 1e6, 3/2), metainfo['gWAT'])
# for i in range(metainfo.shape[0]):
# plt.annotate(metainfo['id'][i], (metainfo['gWAT_rough_mask_area'][i] * 1e6, metainfo['gWAT'][i]))
plt.xlabel('Gonadal vol from slice area mm$^3$', fontsize=14)
plt.ylabel('m$_{G}$ (g)', fontsize=14)
plt.tick_params(labelsize=14)
########################################################################################################################
# Explore the mouse dataset in terms of body weight when alive vs. after culling
########################################################################################################################
# plot weight when alive vs. weight after death
idx = np.where(~np.isnan(metainfo['BW_alive']) * ~np.isnan(metainfo['BW']))[0]
plt.clf()
plt.scatter(metainfo['BW_alive'], metainfo['BW'])
plt.plot([20, 50], [20, 50])
for i in idx:
plt.annotate(i, (metainfo['BW_alive'][i], metainfo['BW'][i]))
# plot weight evolution over time
plt.clf()
for i in idx:
plt.plot([metainfo['BW_alive_date'][i], metainfo['cull_age'][i]], [metainfo['BW_alive'][i], metainfo['BW'][i]])
# plot SC vs. BW (females)
if DEBUG:
plt.clf()
plt.subplot(121)
for litter in np.unique(metainfo['litter']):
# index of animals from this litter
idx = np.where((metainfo['litter'] == litter) * (metainfo['sex'] == 'f'))[0]
if (len(idx) > 1):
# sort in order of increasing SC
idx_sort = np.argsort(metainfo['SC'][idx])
if np.array(metainfo['ko_parent'][idx])[0] == 'PAT':
color = 'k'
elif np.array(metainfo['ko_parent'][idx])[0] == 'MAT':
color = 'r'
plt.scatter(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
plt.plot(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
# if litter in ['19.1', '19.2']:
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['BW'][i]))
plt.title('Females')
plt.xlabel('$m_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# plot SC vs. BW (males)
plt.subplot(122)
for litter in np.unique(metainfo['litter']):
# index of animals from this litter
idx = np.where((metainfo['litter'] == litter) * (metainfo['sex'] == 'm'))[0]
if (len(idx) > 1):
# sort in order of increasing SC
idx_sort = np.argsort(metainfo['SC'][idx])
if np.array(metainfo['ko_parent'][idx])[0] == 'PAT':
color = 'k'
elif np.array(metainfo['ko_parent'][idx])[0] == 'MAT':
color = 'r'
plt.scatter(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
plt.plot(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
# if litter in ['19.1', '19.2']:
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['BW'][i]))
plt.title('Males')
plt.xlabel('$m_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# plot gWAT vs. BW (female)
plt.clf()
for litter in np.unique(metainfo['litter']):
# index of animals from this litter
idx = np.where((metainfo['litter'] == litter) * (metainfo['sex'] == 'f'))[0]
if len(idx) > 1:
# sort in order of increasing gWAT
idx_sort = np.argsort(metainfo['gWAT'][idx])
if np.array(metainfo['ko_parent'][idx])[0] == 'PAT':
color = 'k'
elif np.array(metainfo['ko_parent'][idx])[0] == 'MAT':
color = 'r'
plt.scatter(np.array(metainfo.loc[idx, 'gWAT'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
plt.plot(np.array(metainfo.loc[idx, 'gWAT'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
# if (litter == '19.2'):
for i in idx:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
plt.xlabel('$m_{G}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
########################################################################################################################
### Model BW ~ (C(sex) + C(ko_parent) + C(genotype)) * (SC * gWAT)
### WARNING: This model is an example of having too many variables
########################################################################################################################
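# (the model below estimates 16 coefficients from 76 observations, so most interaction terms are poorly
#  constrained; note the wide confidence intervals in the summary)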
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
model = sm.formula.ols('BW ~ (C(sex) + C(ko_parent) + C(genotype)) * (SC * gWAT)', data=metainfo, subset=idx_not_nan).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.816
# Model: OLS Adj. R-squared: 0.769
# Method: Least Squares F-statistic: 17.69
# Date: Mon, 24 Feb 2020 Prob (F-statistic): 1.19e-16
# Time: 15:32:07 Log-Likelihood: -197.81
# No. Observations: 76 AIC: 427.6
# Df Residuals: 60 BIC: 464.9
# Df Model: 15
# Covariance Type: nonrobust
# =======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# -------------------------------------------------------------------------------------------------------
# Intercept 20.8129 3.915 5.317 0.000 12.982 28.644
# C(sex)[T.m] 8.2952 5.920 1.401 0.166 -3.547 20.138
# C(ko_parent)[T.MAT] -3.1038 4.417 -0.703 0.485 -11.938 5.731
# C(genotype)[T.KLF14-KO:Het] 0.4508 4.154 0.109 0.914 -7.858 8.759
# SC -1.7885 8.152 -0.219 0.827 -18.096 14.519
# C(sex)[T.m]:SC 14.4534 9.416 1.535 0.130 -4.382 33.289
# C(ko_parent)[T.MAT]:SC 13.9682 12.136 1.151 0.254 -10.307 38.244
# C(genotype)[T.KLF14-KO:Het]:SC -4.4397 8.417 -0.527 0.600 -21.277 12.397
# gWAT 4.6153 4.514 1.022 0.311 -4.414 13.644
# C(sex)[T.m]:gWAT -1.1889 5.786 -0.205 0.838 -12.763 10.385
# C(ko_parent)[T.MAT]:gWAT 11.1354 4.622 2.409 0.019 1.890 20.380
# C(genotype)[T.KLF14-KO:Het]:gWAT -0.6653 4.608 -0.144 0.886 -9.883 8.553
# SC:gWAT 5.7281 7.709 0.743 0.460 -9.692 21.148
# C(sex)[T.m]:SC:gWAT -8.8374 8.256 -1.070 0.289 -25.352 7.678
# C(ko_parent)[T.MAT]:SC:gWAT -20.5613 9.163 -2.244 0.029 -38.889 -2.234
# C(genotype)[T.KLF14-KO:Het]:SC:gWAT 1.2538 7.420 0.169 0.866 -13.589 16.096
# ==============================================================================
# Omnibus: 0.893 Durbin-Watson: 1.584
# Prob(Omnibus): 0.640 Jarque-Bera (JB): 0.419
# Skew: 0.138 Prob(JB): 0.811
# Kurtosis: 3.236 Cond. No. 103.
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
# partial regression and influence plots
if DEBUG:
sm.graphics.plot_partregress_grid(model)
sm.graphics.influence_plot(model, criterion="cooks")
# list of point with high influence (large residuals and leverage)
idx_influence = [35, 36, 37, 64, 65]
idx_no_influence = list(set(range(metainfo.shape[0])) - set(idx_influence))
print(metainfo.loc[idx_influence, ['id', 'ko_parent', 'sex', 'genotype', 'BW', 'SC', 'gWAT']])
model = sm.formula.ols('BW ~ (C(sex) + C(ko_parent) + C(genotype)) * (SC * gWAT)', data=metainfo, subset=idx_no_influence).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.817
# Model: OLS Adj. R-squared: 0.768
# Method: Least Squares F-statistic: 16.41
# Date: Wed, 26 Feb 2020 Prob (F-statistic): 3.87e-15
# Time: 14:08:33 Log-Likelihood: -183.06
# No. Observations: 71 AIC: 398.1
# Df Residuals: 55 BIC: 434.3
# Df Model: 15
# Covariance Type: nonrobust
# =======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# -------------------------------------------------------------------------------------------------------
# Intercept 22.4929 5.698 3.948 0.000 11.074 33.912
# C(sex)[T.m] 7.2408 6.769 1.070 0.289 -6.324 20.806
# C(ko_parent)[T.MAT] -2.6859 4.448 -0.604 0.548 -11.600 6.229
# C(genotype)[T.KLF14-KO:Het] -0.0971 4.440 -0.022 0.983 -8.996 8.802
# SC -9.2532 23.095 -0.401 0.690 -55.537 37.031
# C(sex)[T.m]:SC 21.7051 21.391 1.015 0.315 -21.164 64.574
# C(ko_parent)[T.MAT]:SC 10.9030 13.041 0.836 0.407 -15.231 37.037
# C(genotype)[T.KLF14-KO:Het]:SC -2.7711 11.164 -0.248 0.805 -25.145 19.603
# gWAT 2.5214 5.410 0.466 0.643 -8.321 13.364
# C(sex)[T.m]:gWAT 0.0320 6.181 0.005 0.996 -12.356 12.420
# C(ko_parent)[T.MAT]:gWAT 11.2072 4.596 2.439 0.018 1.997 20.417
# C(genotype)[T.KLF14-KO:Het]:gWAT -0.3474 4.714 -0.074 0.942 -9.795 9.100
# SC:gWAT 13.9732 19.484 0.717 0.476 -25.074 53.020
# C(sex)[T.m]:SC:gWAT -16.5886 17.869 -0.928 0.357 -52.398 19.221
# C(ko_parent)[T.MAT]:SC:gWAT -18.1201 9.764 -1.856 0.069 -37.687 1.447
# C(genotype)[T.KLF14-KO:Het]:SC:gWAT -0.2622 9.087 -0.029 0.977 -18.472 17.948
# ==============================================================================
# Omnibus: 1.715 Durbin-Watson: 1.455
# Prob(Omnibus): 0.424 Jarque-Bera (JB): 1.060
# Skew: 0.245 Prob(JB): 0.589
# Kurtosis: 3.342 Cond. No. 229.
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
########################################################################################################################
### Model BW ~ C(sex) + C(sex):gWAT + C(ko_parent) + gWAT
### Superseded by the next model. This one is too simple: it forces the PAT and MAT regression lines to be parallel (no ko_parent:gWAT interaction)
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
# fit linear model to data
model = sm.formula.ols('BW ~ C(sex) + C(sex):gWAT + C(ko_parent) + gWAT', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.740
# Model: OLS Adj. R-squared: 0.726
# Method: Least Squares F-statistic: 50.60
# Date: Tue, 25 Feb 2020 Prob (F-statistic): 4.45e-20
# Time: 15:24:38 Log-Likelihood: -210.82
# No. Observations: 76 AIC: 431.6
# Df Residuals: 71 BIC: 443.3
# Df Model: 4
# Covariance Type: nonrobust
# =======================================================================================
# coef std err t P>|t| [0.025 0.975]
# ---------------------------------------------------------------------------------------
# Intercept 19.4126 1.598 12.144 0.000 16.225 22.600
# C(sex)[T.m] 17.6887 2.981 5.934 0.000 11.745 23.633
# C(ko_parent)[T.MAT] 2.7743 0.927 2.991 0.004 0.925 4.624
# gWAT 8.0439 1.721 4.674 0.000 4.612 11.476
# C(sex)[T.m]:gWAT -7.2480 2.845 -2.548 0.013 -12.921 -1.575
# ==============================================================================
# Omnibus: 4.048 Durbin-Watson: 1.411
# Prob(Omnibus): 0.132 Jarque-Bera (JB): 3.306
# Skew: 0.378 Prob(JB): 0.191
# Kurtosis: 3.687 Cond. No. 16.2
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
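# A formal way to check that the fuller interaction model of the next section improves on this
# parallel-slope fit would be a nested-model F-test (sketch only; model_full is a hypothetical name,
# assuming both fits are available):
# model_full = sm.formula.ols('BW ~ C(sex) * C(ko_parent) * gWAT', data=metainfo, subset=idx_subset).fit()
# print(sm.stats.anova_lm(model, model_full))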
# helper function to plot the different lines created by the model
def model_line(model, sex, ko_parent, gWAT):
sex = float(sex == 'm')
ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] +\
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['gWAT'] * gWAT + \
model.params['C(sex)[T.m]:gWAT'] * sex * gWAT
# plot BW as a function of gWAT
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_G$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG_simpler.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG_simpler.svg'), bbox_inches='tight')
########################################################################################################################
### Model BW ~ C(sex) * C(ko_parent) * gWAT
### VALID model
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
# Mixed-effects linear model
aux = metainfo.loc[idx_subset, :]
formula = 'BW ~ C(sex) * C(ko_parent) * gWAT'
vc = {'litter': '0 + C(litter)'} # litter is a random effect nested inside mother_id
model = sm.formula.mixedlm(formula, vc_formula=vc, re_formula='1', groups='mother_id', data=aux).fit()
print(model.summary())
# fit linear model to data
formula = 'BW ~ C(sex) * C(ko_parent) * gWAT'
model = sm.formula.ols(formula, data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.773
# Model: OLS Adj. R-squared: 0.749
# Method: Least Squares F-statistic: 33.01
# Date: Thu, 27 Feb 2020 Prob (F-statistic): 1.65e-19
# Time: 10:14:01 Log-Likelihood: -205.77
# No. Observations: 76 AIC: 427.5
# Df Residuals: 68 BIC: 446.2
# Df Model: 7
# Covariance Type: nonrobust
# ========================================================================================================
# coef std err t P>|t| [0.025 0.975]
# --------------------------------------------------------------------------------------------------------
# Intercept 19.8526 1.975 10.053 0.000 15.912 23.793
# C(sex)[T.m] 12.2959 3.705 3.318 0.001 4.902 19.690
# C(ko_parent)[T.MAT] 1.8066 2.984 0.605 0.547 -4.148 7.762
# C(sex)[T.m]:C(ko_parent)[T.MAT] 12.8513 5.808 2.213 0.030 1.261 24.441
# gWAT 6.7074 2.246 2.986 0.004 2.225 11.189
# C(sex)[T.m]:gWAT -0.5933 3.647 -0.163 0.871 -7.870 6.684
# C(ko_parent)[T.MAT]:gWAT 2.5961 3.308 0.785 0.435 -4.005 9.197
# C(sex)[T.m]:C(ko_parent)[T.MAT]:gWAT -14.5795 5.526 -2.638 0.010 -25.607 -3.552
# ==============================================================================
# Omnibus: 4.620 Durbin-Watson: 1.636
# Prob(Omnibus): 0.099 Jarque-Bera (JB): 4.493
# Skew: 0.308 Prob(JB): 0.106
# Kurtosis: 4.019 Cond. No. 40.4
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
# helper function to plot the different lines created by the model
def model_line(model, sex, ko_parent, gWAT):
sex = float(sex == 'm')
ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] +\
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['gWAT'] * gWAT + \
model.params['C(sex)[T.m]:gWAT'] * sex * gWAT + \
model.params['C(ko_parent)[T.MAT]:gWAT'] * ko_parent * gWAT + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:gWAT'] * sex * ko_parent * gWAT
# plot BW as a function of gWAT
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_G$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG.svg'), bbox_inches='tight')
########################################################################################################################
### Model BW ~ C(sex) * C(ko_parent) * gWAT
### Add nested random effects: mother_id -> litter -> mouse
# https://www.statsmodels.org/dev/generated/statsmodels.formula.api.mixedlm.html?highlight=mixedlm#statsmodels.formula.api.mixedlm
### EXPERIMENTAL model
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
# Mixed-effects linear model
aux = metainfo.loc[idx_subset, :]
formula = 'BW ~ C(sex) * C(ko_parent) * gWAT'
vc = {'litter': '0 + C(litter)'} # litter is a random effect nested inside mother_id
model = sm.formula.mixedlm(formula, vc_formula=vc, re_formula='1', groups='mother_id', data=aux).fit()
print(model.summary())
# /home/rcasero/.conda/envs/cytometer_tensorflow/lib/python3.6/site-packages/statsmodels/base/model.py:1286: RuntimeWarning: invalid value encountered in sqrt
# bse_ = np.sqrt(np.diag(self.cov_params()))
# Mixed Linear Model Regression Results
# =================================================================================
# Model: MixedLM Dependent Variable: BW
# No. Observations: 76 Method: REML
# No. Groups: 8 Scale: 9.5401
# Min. group size: 1 Likelihood: -189.5050
# Max. group size: 20 Converged: Yes
# Mean group size: 9.5
# ---------------------------------------------------------------------------------
# Coef. Std.Err. z P>|z| [0.025 0.975]
# ---------------------------------------------------------------------------------
# Intercept 22.692 2.227 10.191 0.000 18.328 27.056
# C(sex)[T.m] 9.604 3.253 2.953 0.003 3.229 15.980
# C(ko_parent)[T.MAT] -2.804 3.152 -0.889 0.374 -8.982 3.375
# C(sex)[T.m]:C(ko_parent)[T.MAT] 7.670 6.151 1.247 0.212 -4.386 19.727
# gWAT 4.437 2.195 2.022 0.043 0.136 8.739
# C(sex)[T.m]:gWAT 1.656 3.157 0.524 0.600 -4.533 7.844
# C(ko_parent)[T.MAT]:gWAT 6.823 3.262 2.092 0.036 0.430 13.216
# C(sex)[T.m]:C(ko_parent)[T.MAT]:gWAT -10.814 5.674 -1.906 0.057 -21.935 0.308
# mother_id Var 0.000
# litter Var 7.659
# =================================================================================
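# The two variance components reported above can also be read directly off the fitted results
# object (sketch, assuming the statsmodels MixedLMResults attributes):
# print(model.cov_re)   # variance of the mother_id random intercept
# print(model.vcomp)    # variance of the nested litter component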
########################################################################################################################
### BW ~ C(sex) * C(ko_parent) * SC
### VALID model, but bad fit because of 5 outliers (we remove them in the next model)
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# SC outliers
idx_outliers = []
# data that we are going to use
idx_subset = list(set(idx_not_nan) - set(idx_outliers))
# fit linear model to data
model = sm.formula.ols('BW ~ C(sex) * C(ko_parent) * SC', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.706
# Model: OLS Adj. R-squared: 0.676
# Method: Least Squares F-statistic: 23.32
# Date: Thu, 27 Feb 2020 Prob (F-statistic): 8.42e-16
# Time: 14:56:57 Log-Likelihood: -215.55
# No. Observations: 76 AIC: 447.1
# Df Residuals: 68 BIC: 465.7
# Df Model: 7
# Covariance Type: nonrobust
# ======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# ------------------------------------------------------------------------------------------------------
# Intercept 23.4759 1.721 13.637 0.000 20.041 26.911
# C(sex)[T.m] 10.4744 3.158 3.317 0.001 4.173 16.776
# C(ko_parent)[T.MAT] 4.6399 2.384 1.946 0.056 -0.117 9.397
# C(sex)[T.m]:C(ko_parent)[T.MAT] 4.6208 4.142 1.116 0.269 -3.645 12.887
# SC 2.9539 2.518 1.173 0.245 -2.071 7.979
# C(sex)[T.m]:SC 3.0082 4.049 0.743 0.460 -5.072 11.088
# C(ko_parent)[T.MAT]:SC 1.0103 4.400 0.230 0.819 -7.770 9.790
# C(sex)[T.m]:C(ko_parent)[T.MAT]:SC -12.2497 6.355 -1.928 0.058 -24.931 0.432
# ==============================================================================
# Omnibus: 3.016 Durbin-Watson: 1.576
# Prob(Omnibus): 0.221 Jarque-Bera (JB): 2.498
# Skew: 0.440 Prob(JB): 0.287
# Kurtosis: 3.118 Cond. No. 27.7
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
def model_line(model, sex, ko_parent, SC):
sex = float(sex == 'm')
ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['SC'] * SC + \
model.params['C(sex)[T.m]:SC'] * sex * SC + \
model.params['C(ko_parent)[T.MAT]:SC'] * ko_parent * SC + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:SC'] * sex * ko_parent * SC
# plot BW as a function of SC
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC.svg'), bbox_inches='tight')
########################################################################################################################
### BW ~ C(sex) * C(ko_parent) * SC
### VALID model, 5 outliers removed
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# SC outliers
idx_outliers = [35, 36, 37, 64, 65]
# data that we are going to use
idx_subset = list(set(idx_not_nan) - set(idx_outliers))
# fit linear model to data
model = sm.formula.ols('BW ~ C(sex) * C(ko_parent) * SC', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.754
# Model: OLS Adj. R-squared: 0.727
# Method: Least Squares F-statistic: 27.62
# Date: Thu, 27 Feb 2020 Prob (F-statistic): 6.14e-17
# Time: 15:14:50 Log-Likelihood: -193.59
# No. Observations: 71 AIC: 403.2
# Df Residuals: 63 BIC: 421.3
# Df Model: 7
# Covariance Type: nonrobust
# ======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# ------------------------------------------------------------------------------------------------------
# Intercept 21.6675 1.715 12.634 0.000 18.240 25.095
# C(sex)[T.m] 12.2828 2.936 4.184 0.000 6.416 18.150
# C(ko_parent)[T.MAT] 0.2006 3.291 0.061 0.952 -6.375 6.777
# C(sex)[T.m]:C(ko_parent)[T.MAT] 9.0601 4.486 2.020 0.048 0.095 18.025
# SC 8.4782 2.989 2.837 0.006 2.506 14.450
# C(sex)[T.m]:SC -2.5161 4.132 -0.609 0.545 -10.774 5.742
# C(ko_parent)[T.MAT]:SC 20.4707 10.549 1.941 0.057 -0.609 41.550
# C(sex)[T.m]:C(ko_parent)[T.MAT]:SC -31.7102 11.327 -2.799 0.007 -54.346 -9.074
# ==============================================================================
# Omnibus: 5.918 Durbin-Watson: 1.687
# Prob(Omnibus): 0.052 Jarque-Bera (JB): 5.075
# Skew: 0.575 Prob(JB): 0.0790
# Kurtosis: 3.626 Cond. No. 53.2
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
# partial regression and influence plots
if DEBUG:
sm.graphics.plot_partregress_grid(model)
sm.graphics.influence_plot(model, criterion="cooks")
def model_line(model, sex, ko_parent, SC):
sex = float(sex == 'm')
ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['SC'] * SC + \
model.params['C(sex)[T.m]:SC'] * sex * SC + \
model.params['C(ko_parent)[T.MAT]:SC'] * ko_parent * SC + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:SC'] * sex * ko_parent * SC
# plot BW as a function of SC
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]),
|
np.max(metainfo['SC'][idx])
|
numpy.max
|
import unittest
import numpy as np
from component.implementation.FeedForwardNet import FeedForwardNet
class TestFeedForwardNet(unittest.TestCase):
dataset_dir = '/home/mendozah/workspace/datasets'
X_train =
|
np.load(dataset_dir + 'train.npy')
|
numpy.load
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
File: img2txt_reader.py
Author: liwei(<EMAIL>)
Date: 2021-06-21 19:27
Desc: image file reader and normalization
"""
import cv2
import numpy as np
import random
import paddle
from PIL import Image, ImageFile, UnidentifiedImageError
from multiprocessing.pool import ThreadPool
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ImageReader(object):
def __init__(self,
target_shape_h=224,
target_shape_w=224,
data_format="channels_last",
dtype='float32',
mean_value=None,
std_value=None,
with_random_crop=False,
with_random_flip=False):
if std_value is None:
std_value = [0.5, 0.5, 0.5]
if mean_value is None:
mean_value = [0.5, 0.5, 0.5]
if target_shape_h and target_shape_w:
self.target_size = (target_shape_h, target_shape_w)
else:
self.target_size = None
self.data_format = data_format
self.dtype = dtype
assert data_format in {'channels_first', 'channels_last'}
self.with_random_crop = with_random_crop
self.with_random_flip = with_random_flip
self.mean = np.array(mean_value).astype(self.dtype)
self.std = np.array(std_value).astype(self.dtype)
def _load_img(self, path):
'''
:param path: img path
:return: PIL image, (w, h)
'''
try:
img = Image.open(path) # (w, h)
except UnidentifiedImageError:
print("UnidentifiedImageError: ", path)
return None
except:
print("Image.open Fail: ", path)
img = cv2.imread(path)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
if img.mode != 'RGB':
img = img.convert('RGB')
if self.with_random_crop:
img = self.random_crop(img, self.target_size)
if self.with_random_flip:
img = self.random_flip(img)
if self.target_size:
if img.size != self.target_size:
img = img.resize(self.target_size)
return img
def _img_to_array(self, img):
'''
:param img: PIL image, (w, h)
:return: numpy array, (c, h, w) or (h, w, c)
'''
x =
|
np.asarray(img, dtype=self.dtype)
|
numpy.asarray
|
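# A minimal, hedged sketch of the kind of conversion _img_to_array performs, assuming a PIL RGB
# image and per-channel mean/std on the [0, 1] scale (illustrative only, not the library's exact body):
import numpy as np

def img_to_array_sketch(img, mean, std, data_format='channels_last', dtype='float32'):
    x = np.asarray(img, dtype=dtype) / 255.0                      # (h, w, c), scaled to [0, 1]
    x = (x - np.asarray(mean, dtype=dtype)) / np.asarray(std, dtype=dtype)
    if data_format == 'channels_first':
        x = np.transpose(x, (2, 0, 1))                            # (c, h, w)
    return x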
import numpy as np
def faithfulness_metric(model, x, coefs, base):
""" This metric evaluates the correlation between the importance assigned by the interpretability algorithm
to attributes and the effect of each of the attributes on the performance of the predictive model.
The higher the importance, the higher should be the effect, and vice versa, The metric evaluates this by
incrementally removing each of the attributes deemed important by the interpretability metric, and
evaluating the effect on the performance, and then calculating the correlation between the weights (importance)
of the attributes and corresponding model performance. [#]_
References:
.. [#] `<NAME> and <NAME>. Towards robust interpretability with self-explaining
neural networks. In <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, editors,
Advances in Neural Information Processing Systems 31, pages 7775-7784. 2018.
<https://papers.nips.cc/paper/8003-towards-robust-interpretability-with-self-explaining-neural-networks.pdf>`_
Args:
model: Trained classifier, such as a ScikitClassifier that implements
a predict() and a predict_proba() methods.
x (numpy.ndarray): row of data.
coefs (numpy.ndarray): coefficients (weights) corresponding to attribute importance.
base (numpy.ndarray): base (default) values of attributes
Returns:
float: correlation between attribute importance weights and corresponding effect on classifier.
"""
#find predicted class
pred_class = model.predict(x.reshape(1,-1))[0]
#find indices of coefficients in decreasing order of value
ar = np.argsort(-coefs) #argsort returns indexes of values sorted in increasing order; so do it for negated array
pred_probs = np.zeros(x.shape[0])
for ind in
|
np.nditer(ar)
|
numpy.nditer
|
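# A hedged reconstruction of how the loop above typically continues: each attribute, taken in
# decreasing order of importance, is reset to its base value, the probability of the originally
# predicted class is re-evaluated, and the negative correlation between importances and those
# probabilities is returned. Names mirror the snippet above; this is a sketch, not the exact source.
import numpy as np

def faithfulness_metric_sketch(model, x, coefs, base):
    pred_class = model.predict(x.reshape(1, -1))[0]
    ar = np.argsort(-coefs)                       # most important attribute first
    pred_probs = np.zeros(x.shape[0])
    for ind in np.nditer(ar):
        x_copy = x.copy()
        x_copy[ind] = base[ind]                   # "remove" the attribute by resetting it
        x_copy_pr = model.predict_proba(x_copy.reshape(1, -1))
        pred_probs[ind] = x_copy_pr[0][pred_class]
    return -np.corrcoef(coefs, pred_probs)[0, 1]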
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
try:
from numba import jit
except ImportError:
from pandapower.pf.no_numba import jit
from scipy.stats import chi2
from pandapower.estimation.algorithm.matrix_base import BaseAlgebra
from pandapower.estimation.ppc_conversion import ExtendedPPCI
def get_estimator(base_class, estimator_name):
assert base_class in (BaseEstimatorIRWLS, BaseEstimatorOpt)
available_estimators = {estm_cls.__name__.split("Estimator")[0].lower(): estm_cls
for estm_cls in base_class.__subclasses__()}
if estimator_name.lower() not in available_estimators:
raise Exception("Estimator not available! Try another one!")
else:
return available_estimators[estimator_name.lower()]
class BaseEstimatorIRWLS(BaseAlgebra):
def __init__(self, eppci: ExtendedPPCI, **hyperparameters):
# Initialize BaseAlgebra object for calculation of relevant matrices
super(BaseEstimatorIRWLS, self).__init__(eppci)
def create_phi(self, E): # pragma: no cover
# Must be implemented for subclasses!
pass
class BaseEstimatorOpt(BaseAlgebra):
def __init__(self, eppci, **hyperparameters):
super(BaseEstimatorOpt, self).__init__(eppci)
# Hyperparameters for estimator should be added here
def cost_function(self, E): # pragma: no cover
# Minimize sum(cost(r))
# r = cost(z - h(x))
# Must be implemented according to the estimator for the optimization
pass
def create_cost_jacobian(self, E): # pragma: no cover
pass
class WLSEstimator(BaseEstimatorOpt, BaseEstimatorIRWLS):
def __init__(self, eppci, **hyperparameters):
super(WLSEstimator, self).__init__(eppci, **hyperparameters)
def cost_function(self, E):
rx = self.create_rx(E)
cost = np.sum((1/self.sigma**2) * (rx**2))
return cost
def create_cost_jacobian(self, E):
# dr/dE = drho / dr * d(z-hx) / dE
# dr/dE = (drho/dr) * - (d(hx)/dE)
# 2 * rx * (1/sigma**2)* -(dhx/dE)
rx = self.create_rx(E)
hx_jac = self.create_hx_jacobian(E)
drho_dr = 2 * (rx * (1/self.sigma**2))
jac = - np.sum(drho_dr.reshape((-1, 1)) * hx_jac, axis=0)
return jac
def create_phi(self, E):
# Standard WLS does not update this matrix
return np.diagflat(1/self.sigma**2)
class SHGMEstimatorIRWLS(BaseEstimatorIRWLS):
def __init__(self, eppci: ExtendedPPCI, **hyperparameters):
super(SHGMEstimatorIRWLS, self).__init__(eppci, **hyperparameters)
assert 'a' in hyperparameters
self.a = hyperparameters.get('a')
def create_phi(self, E):
r = self.create_rx(E)
chi2_res, w = self.weight(E)
rsi = r / (w * self.sigma)
phi = 1/(self.sigma**2)
condition_mask = np.abs(rsi)>self.a
phi[condition_mask] = (1/(self.sigma**2) * np.abs(self.a / rsi))[condition_mask]
return np.diagflat(phi)
def weight(self, E):
H = self.create_hx_jacobian(E)
v = np.sum(H != 0, axis=1)
chi2_res = chi2.ppf(0.975, v)
ps = self._ps(H)
return chi2_res, np.min(np.c_[(chi2_res/ps)**2, np.ones(ps.shape)], axis=1)
def _ps(self, H):
omega = np.dot(H, H.T)
x = np.zeros(omega.shape[0]-1)
y = np.zeros(omega.shape[0])
sm =
|
np.zeros(omega.shape[0])
|
numpy.zeros
|
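# For context: in an IRWLS scheme the phi matrix returned by create_phi acts as the weight matrix
# of the weighted normal equations. A hedged sketch of one update step with generic arrays
# (standard WLS state-estimation formula, not code from this module):
import numpy as np

def irwls_step(E, r, H, phi):
    # E: state vector, r: residuals z - h(E), H: measurement Jacobian, phi: weight matrix
    gain = H.T @ phi @ H
    rhs = H.T @ phi @ r
    return E + np.linalg.solve(gain, rhs)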
import numpy
from sympy import Rational as frac
from sympy import sqrt
from ..helpers import article, rd, untangle
from ._helpers import NSimplexScheme
citation = article(
authors=["<NAME>"],
title="Some approximate integration formulas of degree 3 for an n-dimensional simplex",
journal="Numerische Mathematik",
month="nov",
year="1966",
volume="9",
number="1",
pages="38–45",
url="https://doi.org/10.1007/BF02165227",
)
def stroud_1966_1(n):
degree = 3
sqrt4n13 = sqrt(4 * n + 13)
r = (2 * n + 5 - sqrt4n13) / 2 / (n + 1) / (n + 3)
s = 1 - n * r
B = (1 - sqrt4n13) / 2 / (n + 1) / (n + 2) / (n + 3)
C = (2 * n ** 2 + 10 * n + 11 + sqrt4n13) / 2 / (n + 1) / (n + 2) / (n + 3)
data = [(B, rd(n + 1, [(1, 1)])), (C, rd(n + 1, [(r, n), (s, 1)]))]
points, weights = untangle(data)
return NSimplexScheme("Stroud 1966-I", n, weights, points, degree, citation)
def stroud_1966_2(n):
degree = 3
# r is the smallest real-valued root of a degree-3 polynomial
rts = numpy.roots(
[2 * (n - 2) * (n + 1) * (n + 3), -(5 * n ** 2 + 5 * n - 18), 4 * n, -1]
)
r = numpy.min([r.real for r in rts if abs(r.imag) < 1.0e-15])
s = 1 - n * r
t = 0.5
B = (n - 2) / (1 - 2 * n * r ** 2 - 2 * (1 - n * r) ** 2) / (n + 1) / (n + 2)
C = 2 * (1 / (n + 1) - B) / n
data = [(B, rd(n + 1, [(r, n), (s, 1)])), (C, rd(n + 1, [(t, 2)]))]
points, weights = untangle(data)
return NSimplexScheme("Stroud 1966-II", n, weights, points, degree, citation)
def stroud_1966_3(n):
degree = 3
assert n > 2
r = frac(1, 2)
s = frac(1, n)
prod = (n + 1) * (n + 2) * (n + 3)
B = frac(6 - n, prod)
C = frac(8 * (n - 3), (n - 2) * prod)
D = frac(n ** 3, (n - 2) * prod)
data = [
(B, rd(n + 1, [(1, 1)])),
(C, rd(n + 1, [(r, 2)])),
(D, rd(n + 1, [(s, n)])),
]
points, weights = untangle(data)
return NSimplexScheme("Stroud 1966-III", n, weights, points, degree, citation)
def stroud_1966_4(n):
degree = 3
assert n >= 3
r = frac(1, n + 1)
s = frac(1, 3)
A = frac((n + 1) ** 2 * (n - 3), (n - 2) * (n + 2) * (n + 3))
B = frac((9 - n), 2 * (n + 1) * (n + 2) * (n + 3))
C = frac(27, (n - 2) * (n + 1) * (n + 2) * (n + 3))
data = [
(A, numpy.full((1, n + 1), r)),
(B, rd(n + 1, [(1, 1)])),
(C, rd(n + 1, [(s, 3)])),
]
points, weights = untangle(data)
return NSimplexScheme("Stroud 1966-IV", n, weights, points, degree, citation)
def stroud_1966_5(n):
degree = 3
r = frac(1, n)
s = frac(1, 3)
prod = (n + 1) * (n + 2) * (n + 3)
A = frac(-(n ** 2) + 11 * n - 12, 2 * (n - 1) * prod)
B = frac(n ** 3, (n - 1) * prod)
C = frac(27, (n - 1) * prod)
data = [
(A, rd(n + 1, [(1, 1)])),
(B, rd(n + 1, [(r, n)])),
(C, rd(n + 1, [(s, 3)])),
]
points, weights = untangle(data)
return NSimplexScheme("Stroud 1966-I", n, weights, points, degree, citation)
def stroud_1966_6(n):
degree = 3
assert n >= 3
assert n != 5
r = frac(1, n + 1)
s = frac(1, 3)
t = frac(1, n - 2)
prod = (n + 1) * (n + 2) * (n + 3)
A = frac((3 - n) * (n - 12) * (n + 1) ** 2, 3 * (n - 2) * (n + 2) * (n + 3))
B = frac(54 * (3 * n - 11), (n - 5) * (n - 2) * (n - 1) * prod)
C = frac(2 * (n - 2) ** 2 * (n - 9), (n - 5) * (n - 1) * prod)
data = [
(A,
|
numpy.full((1, n + 1), r)
|
numpy.full
|
import logging
import sys
import os
import numpy as np
import pandas as pd
import errno
def create_logger(module_name, level=logging.INFO):
logger = logging.getLogger(module_name)
logger.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
formatter = logging.Formatter('[{}] [%(levelname)s] %(message)s'.format(module_name))
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def save_ess(ess, path):
df = pd.DataFrame(
|
np.reshape(ess, [1, -1])
|
numpy.reshape
|
import numpy as np
import pytest
import snc.environments.job_generators.discrete_review_job_generator \
as drjg
import snc.environments.controlled_random_walk as crw
import snc.environments.state_initialiser as si
import snc.agents.general_heuristics.custom_parameters_priority_agent \
as custom_parameters_priority_agent
def get_default_env_params(state, buffer_processing_matrix=None):
""" Default environment parameters used in the tests. """
num_buffers = state.shape[0]
arrival_rate = np.ones_like(state)
num_resources = num_buffers
constituency_matrix = np.zeros((num_resources, num_resources))
time_interval = 1
if buffer_processing_matrix is None:
# default upper triangular buffer_processing_matrix
buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) * 0.5
return {
"cost_per_buffer": np.zeros_like(state),
"capacity": np.zeros_like(state),
"constituency_matrix": constituency_matrix,
"job_generator": drjg.DeterministicDiscreteReviewJobGenerator(
arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval
),
"state_initialiser": si.DeterministicCRWStateInitialiser(state),
"job_conservation_flag": True,
"list_boundary_constraint_matrices": None,
}
# tests for the initial asserts
def test_assert_no_parameter_is_true():
state = 5 * np.ones((1, 1))
env_params = get_default_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
pytest.raises(AssertionError, custom_parameters_priority_agent.CustomParametersPriorityAgent,
env, state_option=False, cost_option=False, rate_option=False, name="CPPAgent")
def test_assert_activity_performed_by_multiple_resources():
state = 5 * np.ones((2, 1))
env_params = get_default_env_params(state)
env_params["constituency_matrix"] = np.ones((2, 2))
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env = crw.ControlledRandomWalk(**env_params)
pytest.raises(AssertionError, custom_parameters_priority_agent.CustomParametersPriorityAgent,
env, state_option=False, cost_option=False, rate_option=True, name="CPPAgent")
def test_assert_activity_performed_by_no_resource():
state = 5 * np.ones((2, 1))
env_params = get_default_env_params(state)
env_params["constituency_matrix"] = np.zeros((2, 2))
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env = crw.ControlledRandomWalk(**env_params)
pytest.raises(AssertionError,
custom_parameters_priority_agent.CustomParametersPriorityAgent,
env, state_option=False, cost_option=False, rate_option=True, name="CPPAgent")
def test_assert_no_push_model():
state = 5 * np.ones((2, 1))
env_params = get_default_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 0]])]
env = crw.ControlledRandomWalk(**env_params)
pytest.raises(AssertionError, custom_parameters_priority_agent.CustomParametersPriorityAgent,
env, state_option=False, cost_option=False, rate_option=True, name="CPPAgent")
# tests for compute_priority_values with all the possible parameters combinations
def test_compute_priority_values_with_state():
state = np.array([[5], [10]])
env_params = get_default_env_params(state=state)
# constituency_matrix and list_boundary_constraint_matrix are dummy matrices not used in the
# priority_value computation. They are needed to pass the initial asserts (tested above).
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=True,
cost_option=False,
rate_option=False,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-5, -5], [0, -10]])
assert (priority_value == true_priority_values).all()
def test_compute_priority_values_with_cost():
state = 5 * np.ones((2, 1))
env_params = get_default_env_params(state=state)
# constituency_matrix and list_boundary_constraint_matrix are dummy matrices not used in the
# priority_value computation. They are needed to pass the initial asserts (tested above).
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env_params["cost_per_buffer"] = np.array([[2], [3]])
env = crw.ControlledRandomWalk(**env_params)
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=True,
rate_option=False,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-2, -2], [0, -3]])
assert (priority_value == true_priority_values).all()
def test_compute_priority_values_with_rate():
state = 5 * np.ones((2, 1))
env_params = get_default_env_params(state=state)
# constituency_matrix and list_boundary_constraint_matrix are dummy matrices not used in the
# priority_value computation. They are needed to pass the initial asserts (tested above).
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=False,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
assert (priority_value == np.array([[-1/0.5, -1/0.5], [0, -1/0.5]])).all()
def test_compute_priority_values_all_combination():
state = np.array([[5], [10]])
env_params = get_default_env_params(state=state)
# constituency_matrix and list_boundary_constraint_matrix are dummy matrices not used in the
# priority_value computation. They are needed to pass the initial asserts (tested above).
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env_params["cost_per_buffer"] = np.array([[2], [3]])
env = crw.ControlledRandomWalk(**env_params)
# state and cost
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=True,
cost_option=True,
rate_option=False,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-10, -10], [0, -30]])
assert (priority_value == true_priority_values).all()
# state and rate
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=True,
cost_option=False,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-5/0.5, -5/0.5], [0, -10/0.5]])
assert (priority_value == true_priority_values).all()
# cost and rate
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=True,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-2/0.5, -2/0.5], [0, -3/0.5]])
assert (priority_value == true_priority_values).all()
# state, cost, and rate
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=True,
cost_option=True,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-10/0.5, -10/0.5], [0, -30/0.5]])
assert (priority_value == true_priority_values).all()
def test_compute_priority_values_remove_positive_rate():
state = 5 * np.ones((2, 1))
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array([[-0.5, 0.5], [0, -0.5]]))
# constituency_matrix and list_boundary_constraint_matrix are dummy matrices not used in the
# priority_value computation. They are needed to pass the initial asserts (tested above).
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=False,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
assert (priority_value == np.array([[-1/0.5, 0], [0, -1/0.5]])).all()
# tests for map_state_to_action
def test_no_possible_action():
state = np.array([[0], [0]])
env_params = get_default_env_params(state=state)
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env_params["cost_per_buffer"] = np.array([[2], [3]])
env = crw.ControlledRandomWalk(**env_params)
# state and cost
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=True,
cost_option=True,
rate_option=False,
name="CPPAgent")
action = agent.map_state_to_actions(state)
true_action = np.zeros((2, 1))
assert (action == true_action).all()
def test_all_activities_are_possible():
state = np.array([[5], [10]])
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array([[-5, 0], [0, -5]]))
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env_params["cost_per_buffer"] = np.array([[2], [3]])
env = crw.ControlledRandomWalk(**env_params)
# state and cost
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=True,
cost_option=True,
rate_option=False,
name="CPPAgent")
action = agent.map_state_to_actions(state)
true_action = np.ones((2, 1))
assert (action == true_action).all()
def test_multiple_activities_one_resource():
# resource 0 can do activity 0 and activity 2; activity 2 has the most negative priority value,
# so its action is selected. Resource 1 can do activity 1, but the related buffer is empty.
state = np.array([[10], [0]])
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array(
[[-2, 0, -1], [0, -1, 0]]))
env_params["constituency_matrix"] = np.array([[1, 0, 1],
[0, 1, 0]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env_params["cost_per_buffer"] = np.array([[2], [3]])
env = crw.ControlledRandomWalk(**env_params)
# state and cost
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=False,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-1/2, 0, -1], [0, -1, 0]])
assert (priority_value == true_priority_values).all()
action = agent.map_state_to_actions(state)
true_action = np.array([[0], [0], [1]])
assert (action == true_action).all()
def test_action_on_multiple_buffers_one_is_empty():
# an activity works on two buffers; the buffer for which the priority value is the lowest is not
# empty, but the second one is. Thus, the activity should not be selected to perform the action.
state = np.array([[10], [0], [0]])
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array([[-2, 0],
[0, -1],
[-0.5, 0]]))
env_params["constituency_matrix"] = np.array([[1, 0],
[0, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0, 1]]),
np.array([[0, 1, 0]])]
env = crw.ControlledRandomWalk(**env_params)
# state and cost
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=False,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[-1/2, 0], [0, -1], [-1/0.5, 0]])
assert (priority_value == true_priority_values).all()
action = agent.map_state_to_actions(state)
true_action = np.array([0, 0])[:, None]
assert (action == true_action).all()
def test_second_lowest_is_chosen():
# a resource (0) has two possible activities (0, 2). The one with the lower priority value (0)
# works on an empty buffer and so cannot be selected. Thus, the second activity (2) has to be
# chosen to perform the action.
state = np.array([[10], [0], [0]])
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array([[0, 0, -2],
[0, -1, 0],
[-1, 0, 0]]))
env_params["constituency_matrix"] = np.array([[1, 0, 1],
[0, 1, 0]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0, 1]]),
np.array([[0, 1, 0]])]
env = crw.ControlledRandomWalk(**env_params)
# state and cost
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=False,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[0, 0, -1/2], [0, -1, 0], [-1, 0, 0]])
assert (priority_value == true_priority_values).all()
action = agent.map_state_to_actions(state)
true_action = np.array([0, 0, 1])[:, None]
assert (action == true_action).all()
def test_when_if_is_triggered():
# the buffer_processing_matrix has no negative value so the if min_value >= 0 is triggered
state = np.array([[10], [7]])
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array([[2, 0, 4],
[0, 3, 0]]))
env_params["constituency_matrix"] = np.array([[1, 0, 1],
[0, 1, 0]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 0]]),
np.array([[0, 1]])]
env = crw.ControlledRandomWalk(**env_params)
    # rate-based priority agent (state and cost options disabled)
agent = custom_parameters_priority_agent.CustomParametersPriorityAgent(env,
state_option=False,
cost_option=False,
rate_option=True,
name="CPPAgent")
priority_value = agent.compute_priority_values(state)
true_priority_values = np.array([[0, 0, 0], [0, 0, 0]])
assert (priority_value == true_priority_values).all()
action = agent.map_state_to_actions(state)
true_action = np.array([0, 0, 0])[:, None]
assert (action == true_action).all()
# tests for the classes without flags
def test_class_state():
state = np.array([[4], [5]])
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array([[-2, 0],
[0, -3]]))
env_params["constituency_matrix"] = np.array([[1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1]])]
env_params["cost_per_buffer"] = np.array([[10], [2]])
env = crw.ControlledRandomWalk(**env_params)
agent = custom_parameters_priority_agent.PriorityState(env)
priority_value = agent.compute_priority_values(state)
assert (priority_value == np.array([[-4, 0], [0, -5]])).all()
action = agent.map_state_to_actions(state)
true_action = np.array([0, 1])[:, None]
assert (action == true_action).all()
def test_class_cost():
state = np.array([[4], [5]])
env_params = get_default_env_params(state=state,
buffer_processing_matrix=np.array([[-3, 0],
[0, -2]]))
env_params["constituency_matrix"] =
|
np.array([[1, 1]])
|
numpy.array
|
import pandas as pd
import statsmodels as sm
import numpy as np
from tsfresh.feature_selection.significance_tests import target_real_feature_real_test
from collections import OrderedDict
from sklearn.preprocessing import MinMaxScaler
import tsfresh
from tsfresh.feature_extraction import EfficientFCParameters, MinimalFCParameters
from tsfresh.utilities.dataframe_functions import make_forecasting_frame
from tsfresh import extract_features, select_features
check_date_number = {"second" : 60,
"minute" : 60,
"hour" : 24,
"week" : 53,
"weekday" : 7,
"month" : 12,
"day" : 31}
def get_nanosecond(time_index):
"""The nanoseconds of the datetime"""
return np.array(time_index.nanosecond).reshape(-1,1)
def get_second(time_index):
"""The second of the datetime"""
return np.array(time_index.second).reshape(-1,1)
def get_minute(time_index):
"""The minute of the datetime"""
return np.array(time_index.minute).reshape(-1,1)
def get_hour(time_index):
"""The hour of the datetime"""
return np.array(time_index.hour).reshape(-1,1)
def get_week(time_index):
"""The week of the datetime"""
return np.array(time_index.week).reshape(-1,1)
def get_weekday(time_index):
"""The weekday of the datetime"""
    return np.array(time_index.weekday).reshape(-1,1)
def get_month(time_index):
"""The month of the datetime"""
return np.array(time_index.month).reshape(-1,1)
def get_year(time_index):
"""The year of the datetime"""
    return np.array(time_index.year).reshape(-1,1)

import tempfile
import sys
import os
import subprocess
import shutil
import numpy as np
from PIL import Image
import scipy.sparse
from .hornsmethod import robust_horn
def read_sift_key(fname):
"""
Read a SIFT .key file produced by <NAME>'s SIFT feature detector. This function ignores
the SIFT descriptors and returns only the feature locations.
Given:
fname: a filename to read
Returns:
keys: n_keys-by-2 numpy array of SIFT key locations.
Notes:
Sift keys are in the following coordinate system: (x,y) in [0:width-1, 0:height-1].
"""
# get a working directory
tmpdir = tempfile.mkdtemp()
# start by unzipping the key if necessary
if fname.endswith('.gz'):
fname_orig = fname
fname = os.path.join(tmpdir, os.path.basename(fname_orig).replace('.key.gz', '.key'))
cmd = "gunzip -c " + fname_orig + " > " + fname
subprocess.call(cmd, shell=True)
# it's fastest to read this into python if we've already stripped out the descriptors
fname_stripped = os.path.join(tmpdir, os.path.basename(fname).replace('.key','.stripped.key'))
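    # In this .key format each feature record spans 8 lines (one location line followed by
    # seven lines of descriptor values), so 'NR % 8 == 2' keeps only the location lines.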
cmd = r"awk 'NR % 8 == 2' " + fname + r' > ' + fname_stripped
subprocess.call(cmd, shell=True)
# load the data, clean up, and return
    data = np.loadtxt(fname_stripped)

import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from random import random
train_file_path = "Input/train.csv"
test_file_path = "Input/test.csv"
def encode_data(data):
data["Sex_Enc"] = data["Sex"].replace({"male":0,"female":1})
data["Embarked_Enc"] = data["Embarked"].replace({"S":0,"C":1, "Q":2})
def output_submission(test_data, y_test):
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_test})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
def basic_training(output=True):
train_data = pd.read_csv(train_file_path)
test_data = pd.read_csv(test_file_path)
encode_data(train_data)
encode_data(test_data)
features = ["Sex_Enc", "Pclass", "Embarked_Enc"]
Xy = train_data[features + ["Survived"]].dropna()
X = Xy[features]
y = Xy.Survived
clf = RandomForestClassifier()
if output:
clf.fit(X, y)
X_test = test_data[features]
output_submission(test_data, clf.predict(X_test))
else:
print("Test Score:\n", cross_validate(clf, X, y)["test_score"])
def get_CDT(data, quantile_age, quantile_fare):
features = ["Sex_Enc", "Embarked_Enc", "Age_Enc", "Fare_Enc"]
data["Sex_Enc"] = data["Sex"].replace({"male":0,"female":1})
data.Sex_Enc.fillna(data.Sex_Enc.median(), inplace=True)
data["Embarked_Enc"] = data["Embarked"].replace({"S":0,"C":1, "Q":2})
data.Embarked_Enc.fillna(data.Embarked_Enc.median(), inplace=True)
data["Age_Enc"] = data.Age.fillna(data.Age.median())
data["Fare_Enc"] = data.Fare.fillna(data.Fare.median())
data.Age_Enc.mask((quantile_age[.0] <= data.Age_Enc) & (data.Age_Enc <= quantile_age[0.25]), other=0, inplace=True)
data.Age_Enc.mask((quantile_age[0.25] <= data.Age_Enc) & (data.Age_Enc <= quantile_age[0.5]), other=1, inplace=True)
data.Age_Enc.mask((quantile_age[0.5] <= data.Age_Enc) & (data.Age_Enc <= quantile_age[0.75]), other=2, inplace=True)
data.Age_Enc.mask((quantile_age[0.75] <= data.Age_Enc) & (data.Age_Enc <= quantile_age[1.0]), other=3, inplace=True)
data.Fare_Enc.mask((quantile_fare[.0] <= data.Fare_Enc) & (data.Fare_Enc <= quantile_fare[0.25]), other=0, inplace=True)
data.Fare_Enc.mask((quantile_fare[0.25] <= data.Fare_Enc) & (data.Fare_Enc <= quantile_fare[0.5]), other=1, inplace=True)
data.Fare_Enc.mask((quantile_fare[0.5] <= data.Fare_Enc) & (data.Fare_Enc <= quantile_fare[0.75]), other=2, inplace=True)
data.Fare_Enc.mask((quantile_fare[0.75] <= data.Fare_Enc) & (data.Fare_Enc <= quantile_fare[1.0]), other=3, inplace=True)
train_data_Enc = data[features].astype(np.int)
columns = [ "Sex_0", "Sex_1",
"Embarked_0", "Embarked_1", "Embarked_2",
"Age_0", "Age_1", "Age_2", "Age_3",
"Fare_0", "Fare_1", "Fare_2", "Fare_3",]
CDT = pd.DataFrame([
data.Sex_Enc.astype(np.int) == 0,
data.Sex_Enc.astype(np.int) == 1,
data.Embarked_Enc.astype(np.int) == 0,
data.Embarked_Enc.astype(np.int) == 1,
data.Embarked_Enc.astype(np.int) == 2,
data.Age_Enc.astype(np.int) == 0,
data.Age_Enc.astype(np.int) == 1,
data.Age_Enc.astype(np.int) == 2,
data.Age_Enc.astype(np.int) == 3,
data.Fare_Enc.astype(np.int) == 0,
data.Fare_Enc.astype(np.int) == 1,
data.Fare_Enc.astype(np.int) == 2,
data.Fare_Enc.astype(np.int) == 3,
],
index = columns,
dtype=np.int).T.to_numpy()
return CDT
def mca_based_training(output=True):
train_data = pd.read_csv(train_file_path)
test_data = pd.read_csv(test_file_path)
quantile_age = train_data.Age.quantile([.0, 0.25, 0.5, 0.75, 1.])
quantile_fare = train_data.Fare.quantile([.0, 0.25, 0.5, 0.75, 1.])
CDT_train = get_CDT(train_data, quantile_age, quantile_fare)
F = CDT_train / CDT_train.sum()
Fi = F.sum(axis=1).reshape((F.shape[0], 1))
Fj = F.sum(axis=0).reshape((1, F.shape[1]))
# CREATE X, D and M
X = F / (Fi @ Fj) - 1
D = np.eye(F.shape[0]) * Fi
M = np.eye(F.shape[1]) * Fj
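    # In correspondence-analysis terms, X holds the standardized residuals
    # f_ij / (f_i. * f_.j) - 1, while D and M are the diagonal matrices of row and
    # column masses; the eigen-decomposition of X.T @ D @ X @ M below yields the
    # principal axes.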
# CALCULATE EIGENVALUES & EIGENVECTORS
L, U = np.linalg.eig(X.T @ D @ X @ M)
U = U[:, np.argsort(L)[::-1]]
    # Sort the eigenvalues in descending order to match the reordered eigenvectors.
    L = np.sort(L)[::-1]

"""
Perform polarisation analysis of three orthogonal component time series data.
Assumes data are in a right-handed field-aligned coordinate system
with Z pointing in the direction of the ambient magnetic field.
The program outputs five spectral results derived from the
fourier transform of the covariance matrix (spectral matrix).
Similar to wavpol.pro in SPEDAS.
Notes:
-----
The program outputs five spectral results derived from the
fourier transform of the covariance matrix (spectral matrix)
    These are as follows:
1. Wave power: On a linear scale
(units of nT^2/Hz if input Bx, By, Bz are in nT)
2. Degree of Polarisation:
This is similar to a measure of coherency between the input
signals, however unlike coherency it is invariant under
coordinate transformation and can detect pure state waves
        which may exist in one channel only. 100% indicates a pure
state wave. Less than 70% indicates noise. For more
information see <NAME> and <NAME> 'Some comments
on the description of the polarization states
of waves' Geophys. J. R. Astr. Soc. (1980) v61 115-130
3. Wavenormal Angle:
The angle between the direction of minimum variance
calculated from the complex off diagonal elements of the
spectral matrix and the Z direction of the input ac field data.
for magnetic field data in field aligned coordinates this is the
wavenormal angle assuming a plane wave. See:
<NAME>. (1972), Use of the three-dimensional covariance
matrix in analyzing the polarization properties of plane waves,
J. Geophys. Res., 77(28), 5551-5559, doi:10.1029/JA077i028p05551.
4. Ellipticity:
The ratio (minor axis)/(major axis) of the ellipse transcribed
by the field variations of the components transverse to the
Z direction (Samson and Olson, 1980). The sign indicates
the direction of rotation of the field vector in the plane (cf.
Means, (1972)).
Negative signs refer to left-handed rotation about the Z
direction. In the field aligned coordinate system these signs
refer to plasma waves of left and right handed polarization.
5. Helicity:
Similar to Ellipticity except defined in terms of the
        direction of minimum variance instead of Z. Strictly, the Helicity
is defined in terms of the wavenormal direction or k.
However since from single point observations the
sense of k cannot be determined, helicity here is
simply the ratio of the minor to major axis transverse to the
minimum variance direction without sign.
Restrictions:
If one component is an order of magnitude or more greater than
the other two then the polarisation results saturate and erroneously
indicate high degrees of polarisation at all times and
frequencies. Time series should be eyeballed before running the program.
For time series containing very rapid changes or spikes
the usual problems with Fourier analysis arise.
Care should be taken in evaluating degree of polarisation results.
For meaningful results there should be significant wave power at the
frequency where the polarisation approaches
    100%. Remember, comparing two straight lines yields 100% polarisation.
"""
import warnings
import numpy as np
from pytplot import get_data, store_data, options
from pyspedas import tnames
def atan2c(zx, zy):
"""Define arctan2 with complex numbers."""
if not np.isfinite(zx) or not np.isfinite(zy):
res = np.nan
if np.isreal(zx) and np.isreal(zy):
res = np.arctan2(zx, zy)
else:
res = -1j * np.log((zx + 1j*zy)/np.sqrt(zx**2 + zy**2))
return res
def wpol_ematspec(i1, i2, i3, i4, aa, nosmbins, matspec):
"""Calculate ematspec array."""
id0 = (i2 - int((nosmbins-1)/2))
id1 = (i2 + int((nosmbins-1)/2)) + 1
res = np.sum(aa[0:nosmbins] * matspec[i1, id0:id1, i3, i4])
return res
def wpol_matsqrd(i1, i2, i3, ematspec):
"""Calculate matsqrd array."""
res = (ematspec[i1, :, i2, 0] * ematspec[i1, :, 0, i3] +
ematspec[i1, :, i2, 1] * ematspec[i1, :, 1, i3] +
ematspec[i1, :, i2, 2] * ematspec[i1, :, 2, i3])
return res
def wpol_helicity(nosteps, nopfft, KK, ematspec, waveangle):
"""Calculate helicity, ellipticity."""
# Avoid warnings.
warnings.simplefilter("ignore", np.ComplexWarning)
# Define arrays.
helicity = np.empty((nosteps, int(nopfft/2), 3))
ellip = np.empty((nosteps, int(nopfft/2), 3))
lam = np.array([0] * 2)
alphax = np.empty((nosteps, int(nopfft/2)))
alphasin2x = np.empty((nosteps, int(nopfft/2)))
alphacos2x = np.empty((nosteps, int(nopfft/2)))
alphasin3x = np.empty((nosteps, int(nopfft/2)))
alphacos3x = np.empty((nosteps, int(nopfft/2)))
alphay = np.empty((nosteps, int(nopfft/2)))
alphasin2y = np.empty((nosteps, int(nopfft/2)))
alphacos2y = np.empty((nosteps, int(nopfft/2)))
alphasin3y = np.empty((nosteps, int(nopfft/2)))
alphacos3y = np.empty((nosteps, int(nopfft/2)))
alphaz = np.empty((nosteps, int(nopfft/2)))
alphasin2z = np.empty((nosteps, int(nopfft/2)))
alphacos2z = np.empty((nosteps, int(nopfft/2)))
alphasin3z = np.empty((nosteps, int(nopfft/2)))
alphacos3z = np.empty((nosteps, int(nopfft/2)))
lambdau = np.empty((nosteps, int(nopfft/2), 3, 3), dtype=complex)
lambday = np.empty((nosteps, int(nopfft/2), 3, 3), dtype=complex)
lambdaurot = np.empty((nosteps, int(nopfft/2), 2), dtype=complex)
alphax[KK, :] = np.sqrt(ematspec[KK, :, 0, 0])
alphacos2x[KK, :] = (np.real(ematspec[KK, :, 0, 1]) /
np.sqrt(ematspec[KK, :, 0, 0]))
alphasin2x[KK, :] = (-np.imag(ematspec[KK, :, 0, 1]) /
np.sqrt(ematspec[KK, :, 0, 0]))
alphacos3x[KK, :] = (np.real(ematspec[KK, :, 0, 2]) /
np.sqrt(ematspec[KK, :, 0, 0]))
alphasin3x[KK, :] = (-np.imag(ematspec[KK, :, 0, 2]) /
np.sqrt(ematspec[KK, :, 0, 0]))
lambdau[KK, :, 0, 0] = alphax[KK, :]
lambdau[KK, :, 0, 1] = (alphacos2x[KK, :] +
1j * alphasin2x[KK, :])
lambdau[KK, :, 0, 2] = (alphacos3x[KK, :] +
1j * alphasin3x[KK, :])
alphay[KK, :] = np.sqrt(ematspec[KK, :, 1, 1])
alphacos2y[KK, :] = (np.real(ematspec[KK, :, 1, 0]) /
np.sqrt(ematspec[KK, :, 1, 1]))
alphasin2y[KK, :] = (-np.imag(ematspec[KK, :, 1, 0]) /
np.sqrt(ematspec[KK, :, 1, 1]))
alphacos3y[KK, :] = (np.real(ematspec[KK, :, 1, 2]) /
np.sqrt(ematspec[KK, :, 1, 1]))
alphasin3y[KK, :] = (-np.imag(ematspec[KK, :, 1, 2]) /
np.sqrt(ematspec[KK, :, 1, 1]))
lambdau[KK, :, 1, 0] = alphay[KK, :]
lambdau[KK, :, 1, 1] = (alphacos2y[KK, :] +
1j * alphasin2y[KK, :])
lambdau[KK, :, 1, 2] = (alphacos3y[KK, :] +
1j * alphasin3y[KK, :])
alphaz[KK, :] = np.sqrt(ematspec[KK, :, 2, 2])
alphacos2z[KK, :] = (np.real(ematspec[KK, :, 2, 0]) /
np.sqrt(ematspec[KK, :, 2, 2]))
alphasin2z[KK, :] = (-np.imag(ematspec[KK, :, 2, 0]) /
np.sqrt(ematspec[KK, :, 2, 2]))
alphacos3z[KK, :] = (np.real(ematspec[KK, :, 2, 1]) /
np.sqrt(ematspec[KK, :, 2, 2]))
alphasin3z[KK, :] = (-np.imag(ematspec[KK, :, 2, 1]) /
np.sqrt(ematspec[KK, :, 2, 2]))
lambdau[KK, :, 2, 0] = alphaz[KK, :]
lambdau[KK, :, 2, 1] = (alphacos2z[KK, :] +
1j * alphasin2z[KK, :])
lambdau[KK, :, 2, 2] = (alphacos3z[KK, :] +
1j * alphasin3z[KK, :])
for k in range(int(nopfft/2)):
for k1 in range(3):
upper = np.sum(2*np.real(lambdau[KK, k, k1, 0:3]) *
np.imag(lambdau[KK, k, k1, 0:3]))
la2 = np.imag(lambdau[KK, k, k1, 0:3])**2
lower = np.sum(np.real(lambdau[KK, k, k1, 0:3])**2 - la2)
gammay = np.nan
if np.isfinite(upper) and np.isfinite(lower):
if upper > 0.0:
gammay = atan2c(upper, lower)
else:
gammay = (2*np.pi + atan2c(upper, lower))
lambday[KK, k, k1, :] = (np.exp((0.0 - 1j*0.5*gammay)) *
lambdau[KK, k, k1, :])
lay2 = np.imag(lambday[KK, k, k1, 0:3])**2
helicity[KK, k, k1] = (1 /
(np.sqrt(np.real(lambday[KK, k, k1, 0])**2 +
np.real(lambday[KK, k, k1, 1])**2 +
np.real(lambday[KK, k, k1, 2])**2) /
np.sqrt(np.sum(lay2))))
uppere = (np.imag(lambday[KK, k, k1, 0]) *
np.real(lambday[KK, k, k1, 0]) +
np.imag(lambday[KK, k, k1, 1]) *
np.real(lambday[KK, k, k1, 1]))
lowere = (-np.imag(lambday[KK, k, k1, 0])**2 +
np.real(lambday[KK, k, k1, 0])**2 -
np.imag(lambday[KK, k, k1, 1])**2 +
np.real(lambday[KK, k, k1, 1])**2)
gammarot = np.nan
if np.isfinite(uppere) and np.isfinite(lowere):
if uppere > 0.0:
gammarot = atan2c(uppere, lowere)
else:
gammarot = 2*np.pi + atan2c(uppere, lowere)
lam = lambday[KK, k, k1, 0:2]
lambdaurot[KK, k, :] = np.exp(0 - 1j*0.5*gammarot) * lam[:]
ellip[KK, k, k1] = (np.sqrt(np.imag(lambdaurot[KK, k, 0])**2 +
np.imag(lambdaurot[KK, k, 1])**2) /
np.sqrt(np.real(lambdaurot[KK, k, 0])**2 +
np.real(lambdaurot[KK, k, 1])**2))
ellip[KK, k, k1] = (-ellip[KK, k, k1] *
(np.imag(ematspec[KK, k, 0, 1]) *
np.sin(waveangle[KK, k])) /
np.abs(np.imag(ematspec[KK, k, 0, 1]) *
np.sin(waveangle[KK, k])))
elliptict0 = np.empty((int(nopfft/2)))
helict0 = np.empty((int(nopfft/2)))
# Average over helicity and ellipticity results.
elliptict0 = (ellip[KK, :, 0]+ellip[KK, :, 1]+ellip[KK, :, 2])/3
helict0 = (helicity[KK, :, 0]+helicity[KK, :, 1]+helicity[KK, :, 2])/3
return (helict0, elliptict0)
def wavpol(ct, bx, by, bz,
nopfft=256,
steplength=-1,
bin_freq=3):
"""
Perform polarisation analysis of Bx, By, Bz time series data.
Parameters
----------
ct : list of float
Time.
    bx : list of float
        Bx field.
    by : list of float
        By field.
    bz : list of float
        Bz field.
nopfft : int, optional
Number of points in FFT. The default is 256.
steplength : int, optional
The amount of overlap between successive FFT intervals.
The default is -1 which means nopfft/2.
bin_freq : int, optional
Number of bins in frequency domain. The default is 3.
Returns
-------
result: tuple with 9 items
timeline : list of float
Times.
freqline : list of float
Frequencies.
powspec : 2-dim array of float
Wave power.
degpol : 2-dim array of float
Degree of Polarisation.
waveangle : 2-dim array of float
Wavenormal Angle.
elliptict : 2-dim array of float
Ellipticity.
helict : 2-dim array of float
Helicity.
pspec3 : 3-dim array of float
Power spectra.
    err_flag : bool
        Error flag. The default is 0.
        Set to 1, and the computation aborts, if there is a large number of batches.
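    Example (illustrative sketch added for clarity; it simply unpacks the tuple
    documented above):
        (timeline, freqline, powspec, degpol, waveangle,
         elliptict, helict, pspec3, err_flag) = wavpol(ct, bx, by, bz, nopfft=256)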
"""
# Default values.
if nopfft < 0:
nopfft = 256
if steplength < 0:
steplength = nopfft / 2
if bin_freq < 0:
bin_freq = 3
# Convert to numpy arrays.
ct = np.array(ct, np.float64)
bx = np.array(bx, np.float64)
    by = np.array(by, np.float64)
    bz = np.array(bz, np.float64)

#!/usr/bin/env python
# coding=UTF-8
"""
@Author: Linna
@LastEditors: Linna
@Description:
@Date: 2019-04-26 11:00:10
@LastEditTime: 2019-04-26
"""
import numpy as np
import torch
from torch.autograd import Variable
from EvalBox.Attack.AdvAttack.attack import Attack
class BA(Attack):
def __init__(self, model=None, device=None, IsTargeted=None, **kwargs):
"""
@description: The Boundary Attack
@param {
model:
device:
kwargs:
}
@return: None
"""
super(BA, self).__init__(model, device, IsTargeted)
# self.criterion = torch.nn.CrossEntropyLoss()
self._parse_params(**kwargs)
def _parse_params(self, **kwargs):
"""
@description:
@param {
epsilon:
eps_iter:
num_steps:
}
@return: adversarial_samples
"""
        # Step-size coefficient of the perturbation
        self.epsilon = float(kwargs.get("epsilon", 0.01))
        # Scale of the rescaled perturbation
        self.delta = float(kwargs.get("delta", 0.01))
        # Lower and upper bounds of the normalized data
        self.lower_bound = float(kwargs.get("lower_bound", 0.0))
        self.upper_bound = float(kwargs.get("upper_bound", 1.0))
        # Maximum number of inner iterations for updating the perturbed sample
        self.max_iter = int(kwargs.get("max_iter", 10))
        # Number of iterations used to search for a suitable epsilon
        self.binary_search_steps = int(kwargs.get("binary_search_steps", 20))
        # Batch size for a single pass
        self.batch_size = int(kwargs.get("batch_size", 8))
        # Update factor used to adapt the delta coefficient
        self.step_adapt = float(kwargs.get("step_adapt", 0.9))
        # Number of candidate perturbed samples drawn at each step
        self.sample_size = int(kwargs.get("sample_size", 80))
        # Number of random samples used for initialization
        self.init_size = int(kwargs.get("init_size", 200))
    # Compute the distance between two samples
def get_diff(self, sample1, sample2):
return np.linalg.norm((sample1 - sample2).astype(np.float32))
    # Draw Gaussian noise samples
def gaussian_sample_noise(self, epsilon, imageshape, bounds):
min_, max_ = bounds
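        # epsilon / sqrt(3) gives the Gaussian the same standard deviation as a uniform
        # perturbation of half-width w = epsilon * (max_ - min_), since Var(U[-w, w]) = w**2 / 3.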
std = epsilon / np.sqrt(3) * (max_ - min_)
noise = np.random.normal(scale=std, size=imageshape)
noise = noise.astype(np.float32)
return noise
    # Draw uniformly distributed noise samples
def unifom_sample_noise(self, epsilon, imageshape, bounds):
min_, max_ = bounds
w = epsilon * (max_ - min_)
noise = np.random.uniform(-w, w, size=imageshape)
noise = noise.astype(np.float32)
return noise
    # Compute the L2 distance between samples
def get_dist(self, xs, x2s):
l2dist = torch.sum((xs - x2s) ** 2, [1, 2, 3])
return l2dist
def _perturb(self, x, y, y_p):
clip_min, clip_max = self.classifier.clip_values
# First, create an initial adversarial sample
initial_sample = self._init_sample(x, y, y_p, clip_min, clip_max)
# If an initial adversarial example is not found, then return the original image
if initial_sample is None:
return x
        # If an initial adversarial example is found, then run the boundary attack
if self.targeted:
x_adv = self._attack(
initial_sample, x, y, self.delta, self.epsilon, clip_min, clip_max
)
else:
x_adv = self._attack(
initial_sample, x, y_p, self.delta, self.epsilon, clip_min, clip_max
)
        return x_adv
    # Initialize a random adversarial sample
def _init_sample(self, x, y, targeted, clip_min, clip_max):
nprd = np.random.RandomState()
initial_sample = None
if targeted:
# Attack satisfied
# Attack unsatisfied yet
for _ in range(self.init_size):
random_img_numpy = nprd.uniform(
clip_min, clip_max, size=x.shape
).astype(x.dtype)
random_img = np.expand_dims(random_img_numpy, axis=0)
tensor_random_img = Variable(
torch.from_numpy(random_img).to(self.device)
)
output = self.model(tensor_random_img)
random_class = torch.argmax(output, 1)
random_class = random_class.data.cpu().numpy()
if random_class[0] == y:
initial_sample = random_img_numpy
break
else:
for _ in range(self.init_size):
# random_img_numpy = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
mean_, std_ = np.mean(x), np.std(x)
random_img_numpy = nprd.normal(
loc=mean_, scale=2 * std_, size=x.shape
).astype(x.dtype)
random_img = np.expand_dims(random_img_numpy, axis=0)
tensor_random_img = Variable(
torch.from_numpy(random_img).to(self.device)
)
output = self.model(tensor_random_img)
random_class = torch.argmax(output, 1)
random_class = random_class.data.cpu().numpy()
initial_sample = random_img_numpy
if random_class[0] != y:
initial_sample = random_img_numpy
break
return initial_sample
    # Generate an orthogonal perturbation
def _orthogonal_perturb(self, delta, current_sample, original_sample):
perturb = np.random.randn(
original_sample.shape[0], original_sample.shape[1], original_sample.shape[2]
)
# Rescale the perturbation
perturb /= np.linalg.norm(perturb)
perturb *= delta * np.linalg.norm(original_sample - current_sample)
        # Project the perturbation onto the hyperplane orthogonal to the direction back to
        # the original sample (the direction must be normalized for the projection
        # perturb -= <perturb, direction> * direction to be valid).
        direction = original_sample - current_sample
        direction /= np.linalg.norm(direction)
        perturb = np.swapaxes(perturb, 0, 0 - 1)
        direction = np.swapaxes(direction, 0, 0 - 1)
        vdot = np.vdot(perturb, direction)
        perturb -= vdot * direction
        perturb = np.swapaxes(perturb, 0, 0 - 1)
return perturb
def compare(object1, object2, target_flag):
return object1 == object2 if target_flag else object1 != object2
def generate(self, xs=None, ys=None, target_flag=False):
"""
@description:
@param {
xs:
ys:
device:
}
@return: adv_xs{numpy.ndarray}
"""
device = self.device
targeted = self.IsTargeted
var_xs, var_ys = Variable(xs.to(device)), Variable(ys.to(device))
with torch.no_grad():
outputs = self.model(var_xs)
preds = torch.argmax(outputs, 1)
preds = preds.data.cpu().numpy()
labels = ys.cpu().numpy()
n_xs = var_xs.cpu().numpy()
epsilon_list = [self.epsilon] * self.batch_size
delta_list = [self.delta] * self.batch_size
        # Note: this is a copy, not a direct assignment
adversarial_samples = n_xs.copy()
        # get the first step of the boundary as init parameter and input
adversarial_sample = n_xs[0]
numbers = n_xs.shape[0]
rangenumbers = 0
if numbers <= self.batch_size:
rangenumbers = numbers
else:
rangenumbers = self.batch_size
for i in range(rangenumbers):
origin_sample = n_xs[i]
# Move to the first boundary
adversarial_sample = self._init_sample(
origin_sample, preds[i], target_flag, 0, 1
)
for search_for_epsilon in range(self.binary_search_steps):
for iteration_times in range(self.max_iter):
potential_perturbed_images = []
for _ in range(self.sample_size):
perturbed_image = adversarial_sample + self._orthogonal_perturb(
delta_list[i], adversarial_sample, origin_sample
)
perturbed_image = np.array(perturbed_image)
perturbed_image = np.clip(
perturbed_image, self.lower_bound, self.upper_bound
)
potential_perturbed_images.append(perturbed_image)
                    # Stack the candidate perturbed images into a single batch
var_images = Variable(
                        torch.from_numpy(np.array(potential_perturbed_images)).to(self.device)
                    )

import numpy
import random
import matplotlib.pyplot
import pickle
import time
import warnings
class GA:
supported_int_types = [int, numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64, numpy.uint, numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64]
supported_float_types = [float, numpy.float, numpy.float16, numpy.float32, numpy.float64]
supported_int_float_types = supported_int_types + supported_float_types
def __init__(self,
num_generations,
num_parents_mating,
fitness_func,
initial_population=None,
sol_per_pop=None,
num_genes=None,
init_range_low=-4,
init_range_high=4,
gene_type=float,
parent_selection_type="sss",
keep_parents=-1,
K_tournament=3,
crossover_type="single_point",
crossover_probability=None,
mutation_type="random",
mutation_probability=None,
mutation_by_replacement=False,
mutation_percent_genes='default',
mutation_num_genes=None,
random_mutation_min_val=-1.0,
random_mutation_max_val=1.0,
gene_space=None,
allow_duplicate_genes=True,
on_start=None,
on_fitness=None,
on_parents=None,
on_crossover=None,
on_mutation=None,
callback_generation=None,
on_generation=None,
on_stop=None,
delay_after_gen=0.0,
save_best_solutions=False,
save_solutions=False,
suppress_warnings=False,
stop_criteria=None):
"""
The constructor of the GA class accepts all parameters required to create an instance of the GA class. It validates such parameters.
num_generations: Number of generations.
num_parents_mating: Number of solutions to be selected as parents in the mating pool.
fitness_func: Accepts a function that must accept 2 parameters (a single solution and its index in the population) and return the fitness value of the solution. Available starting from PyGAD 1.0.17 until 1.0.20 with a single parameter representing the solution. Changed in PyGAD 2.0.0 and higher to include the second parameter representing the solution index.
initial_population: A user-defined initial population. It is useful when the user wants to start the generations with a custom initial population. It defaults to None which means no initial population is specified by the user. In this case, PyGAD creates an initial population using the 'sol_per_pop' and 'num_genes' parameters. An exception is raised if the 'initial_population' is None while any of the 2 parameters ('sol_per_pop' or 'num_genes') is also None.
sol_per_pop: Number of solutions in the population.
num_genes: Number of parameters in the function.
init_range_low: The lower value of the random range from which the gene values in the initial population are selected. It defaults to -4. Available in PyGAD 1.0.20 and higher.
        init_range_high: The upper value of the random range from which the gene values in the initial population are selected. It defaults to +4. Available in PyGAD 1.0.20 and higher.
# It is OK to set the value of any of the 2 parameters ('init_range_low' and 'init_range_high') to be equal, higher or lower than the other parameter (i.e. init_range_low is not needed to be lower than init_range_high).
gene_type: The type of the gene. It is assigned to any of these types (int, float, numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64, numpy.uint, numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64, numpy.float, numpy.float16, numpy.float32, numpy.float64) and forces all the genes to be of that type.
parent_selection_type: Type of parent selection.
        keep_parents: If 0, this means no parent in the current population will be used in the next population. If -1, this means all parents in the current population will be used in the next population. If set to a value > 0, then the specified value refers to the number of parents in the current population to be used in the next population. For some parent selection operators like rank selection, the parents are of high quality and it is beneficial to keep them in the next generation. In some other parent selection operators like roulette wheel selection (RWS), it is not guaranteed that the parents will be of high quality and thus keeping the parents might degrade the quality of the population.
K_tournament: When the value of 'parent_selection_type' is 'tournament', the 'K_tournament' parameter specifies the number of solutions from which a parent is selected randomly.
        crossover_type: Type of the crossover operator. If crossover_type=None, then the crossover step is bypassed which means no crossover is applied and thus no offspring will be created in the next generations. The next generation will use the solutions in the current population.
crossover_probability: The probability of selecting a solution for the crossover operation. If the solution probability is <= crossover_probability, the solution is selected. The value must be between 0 and 1 inclusive.
        mutation_type: Type of the mutation operator. If mutation_type=None, then the mutation step is bypassed which means no mutation is applied and thus no changes are applied to the offspring created using the crossover operation. The offspring will be used unchanged in the next generation.
mutation_probability: The probability of selecting a gene for the mutation operation. If the gene probability is <= mutation_probability, the gene is selected. It accepts either a single value for fixed mutation or a list/tuple/numpy.ndarray of 2 values for adaptive mutation. The values must be between 0 and 1 inclusive. If specified, then no need for the 2 parameters mutation_percent_genes and mutation_num_genes.
mutation_by_replacement: An optional bool parameter. It works only when the selected type of mutation is random (mutation_type="random"). In this case, setting mutation_by_replacement=True means replace the gene by the randomly generated value. If False, then it has no effect and random mutation works by adding the random value to the gene.
mutation_percent_genes: Percentage of genes to mutate which defaults to the string 'default' which means 10%. This parameter has no action if any of the 2 parameters mutation_probability or mutation_num_genes exist.
mutation_num_genes: Number of genes to mutate which defaults to None. If the parameter mutation_num_genes exists, then no need for the parameter mutation_percent_genes. This parameter has no action if the mutation_probability parameter exists.
random_mutation_min_val: The minimum value of the range from which a random value is selected to be added to the selected gene(s) to mutate. It defaults to -1.0.
random_mutation_max_val: The maximum value of the range from which a random value is selected to be added to the selected gene(s) to mutate. It defaults to 1.0.
gene_space: It accepts a list of all possible values of the gene. This list is used in the mutation step. Should be used only if the gene space is a set of discrete values. No need for the 2 parameters (random_mutation_min_val and random_mutation_max_val) if the parameter gene_space exists. Added in PyGAD 2.5.0. In PyGAD 2.11.0, the gene_space can be assigned a dict.
on_start: Accepts a function to be called only once before the genetic algorithm starts its evolution. This function must accept a single parameter representing the instance of the genetic algorithm. Added in PyGAD 2.6.0.
on_fitness: Accepts a function to be called after calculating the fitness values of all solutions in the population. This function must accept 2 parameters: the first one represents the instance of the genetic algorithm and the second one is a list of all solutions' fitness values. Added in PyGAD 2.6.0.
on_parents: Accepts a function to be called after selecting the parents that mates. This function must accept 2 parameters: the first one represents the instance of the genetic algorithm and the second one represents the selected parents. Added in PyGAD 2.6.0.
on_crossover: Accepts a function to be called each time the crossover operation is applied. This function must accept 2 parameters: the first one represents the instance of the genetic algorithm and the second one represents the offspring generated using crossover. Added in PyGAD 2.6.0.
on_mutation: Accepts a function to be called each time the mutation operation is applied. This function must accept 2 parameters: the first one represents the instance of the genetic algorithm and the second one represents the offspring after applying the mutation. Added in PyGAD 2.6.0.
callback_generation: Accepts a function to be called after each generation. This function must accept a single parameter representing the instance of the genetic algorithm. If the function returned "stop", then the run() method stops without completing the other generations. Starting from PyGAD 2.6.0, the callback_generation parameter is deprecated and should be replaced by the on_generation parameter.
on_generation: Accepts a function to be called after each generation. This function must accept a single parameter representing the instance of the genetic algorithm. If the function returned "stop", then the run() method stops without completing the other generations. Added in PyGAD 2.6.0.
on_stop: Accepts a function to be called only once exactly before the genetic algorithm stops or when it completes all the generations. This function must accept 2 parameters: the first one represents the instance of the genetic algorithm and the second one is a list of fitness values of the last population's solutions. Added in PyGAD 2.6.0.
delay_after_gen: Added in PyGAD 2.4.0. It accepts a non-negative number specifying the number of seconds to wait after a generation completes and before going to the next generation. It defaults to 0.0 which means no delay after the generation.
save_best_solutions: Added in PyGAD 2.9.0 and its type is bool. If True, then the best solution in each generation is saved into the 'best_solutions' attribute. Use this parameter with caution as it may cause memory overflow when either the number of generations or the number of genes is large.
save_solutions: Added in PyGAD 2.15.0 and its type is bool. If True, then all solutions in each generation are saved into the 'solutions' attribute. Use this parameter with caution as it may cause memory overflow when either the number of generations, number of genes, or number of solutions in population is large.
suppress_warnings: Added in PyGAD 2.10.0 and its type is bool. If True, then no warning messages will be displayed. It defaults to False.
allow_duplicate_genes: Added in PyGAD 2.13.0. If True, then a solution/chromosome may have duplicate gene values. If False, then each gene will have a unique value in its solution.
stop_criteria: Added in PyGAD 2.15.0. It is assigned to some criteria to stop the evolution if at least one criterion holds.
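
        Example (illustrative sketch, not part of the original docstring; it assumes the usual PyGAD workflow in which run() executes the evolution):

            def fitness_func(solution, solution_idx):
                return -numpy.sum(numpy.array(solution) ** 2)

            ga = GA(num_generations=50,
                    num_parents_mating=4,
                    fitness_func=fitness_func,
                    sol_per_pop=8,
                    num_genes=5)
            ga.run()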
"""
        # Validate the suppress_warnings parameter (must be a bool).
if type(suppress_warnings) is bool:
self.suppress_warnings = suppress_warnings
else:
self.valid_parameters = False
raise TypeError("The expected type of the 'suppress_warnings' parameter is bool but {suppress_warnings_type} found.".format(suppress_warnings_type=type(suppress_warnings)))
# Validating mutation_by_replacement
if not (type(mutation_by_replacement) is bool):
self.valid_parameters = False
raise TypeError("The expected type of the 'mutation_by_replacement' parameter is bool but ({mutation_by_replacement_type}) found.".format(mutation_by_replacement_type=type(mutation_by_replacement)))
self.mutation_by_replacement = mutation_by_replacement
# Validate gene_space
self.gene_space_nested = False
if type(gene_space) is type(None):
pass
elif type(gene_space) in [list, tuple, range, numpy.ndarray]:
if len(gene_space) == 0:
self.valid_parameters = False
raise TypeError("'gene_space' cannot be empty (i.e. its length must be >= 0).")
else:
for index, el in enumerate(gene_space):
if type(el) in [list, tuple, range, numpy.ndarray]:
if len(el) == 0:
self.valid_parameters = False
raise TypeError("The element indexed {index} of 'gene_space' with type {el_type} cannot be empty (i.e. its length must be >= 0).".format(index=index, el_type=type(el)))
else:
for val in el:
if not (type(val) in [type(None)] + GA.supported_int_float_types):
raise TypeError("All values in the sublists inside the 'gene_space' attribute must be numeric of type int/float/None but ({val}) of type {typ} found.".format(val=val, typ=type(val)))
self.gene_space_nested = True
elif type(el) == type(None):
pass
# self.gene_space_nested = True
elif type(el) is dict:
if len(el.items()) == 2:
if ('low' in el.keys()) and ('high' in el.keys()):
pass
else:
self.valid_parameters = False
raise TypeError("When an element in the 'gene_space' parameter is of type dict, then it can have the keys 'low', 'high', and 'step' (optional) but the following keys found: {gene_space_dict_keys}".format(gene_space_dict_keys=el.keys()))
elif len(el.items()) == 3:
if ('low' in el.keys()) and ('high' in el.keys()) and ('step' in el.keys()):
pass
else:
self.valid_parameters = False
raise TypeError("When an element in the 'gene_space' parameter is of type dict, then it can have the keys 'low', 'high', and 'step' (optional) but the following keys found: {gene_space_dict_keys}".format(gene_space_dict_keys=el.keys()))
else:
self.valid_parameters = False
raise TypeError("When an element in the 'gene_space' parameter is of type dict, then it must have only 2 items but ({num_items}) items found.".format(num_items=len(el.items())))
self.gene_space_nested = True
elif not (type(el) in GA.supported_int_float_types):
self.valid_parameters = False
raise TypeError("Unexpected type {el_type} for the element indexed {index} of 'gene_space'. The accepted types are list/tuple/range/numpy.ndarray of numbers, a single number (int/float), or None.".format(index=index, el_type=type(el)))
elif type(gene_space) is dict:
if len(gene_space.items()) == 2:
if ('low' in gene_space.keys()) and ('high' in gene_space.keys()):
pass
else:
self.valid_parameters = False
raise TypeError("When the 'gene_space' parameter is of type dict, then it can have only the keys 'low', 'high', and 'step' (optional) but the following keys found: {gene_space_dict_keys}".format(gene_space_dict_keys=gene_space.keys()))
elif len(gene_space.items()) == 3:
if ('low' in gene_space.keys()) and ('high' in gene_space.keys()) and ('step' in gene_space.keys()):
pass
else:
self.valid_parameters = False
raise TypeError("When the 'gene_space' parameter is of type dict, then it can have only the keys 'low', 'high', and 'step' (optional) but the following keys found: {gene_space_dict_keys}".format(gene_space_dict_keys=gene_space.keys()))
else:
self.valid_parameters = False
raise TypeError("When the 'gene_space' parameter is of type dict, then it must have only 2 items but ({num_items}) items found.".format(num_items=len(gene_space.items())))
else:
self.valid_parameters = False
raise TypeError("The expected type of 'gene_space' is list, tuple, range, or numpy.ndarray but ({gene_space_type}) found.".format(gene_space_type=type(gene_space)))
self.gene_space = gene_space
# Validate init_range_low and init_range_high
if type(init_range_low) in GA.supported_int_float_types:
if type(init_range_high) in GA.supported_int_float_types:
self.init_range_low = init_range_low
self.init_range_high = init_range_high
else:
self.valid_parameters = False
raise ValueError("The value passed to the 'init_range_high' parameter must be either integer or floating-point number but the value ({init_range_high_value}) of type {init_range_high_type} found.".format(init_range_high_value=init_range_high, init_range_high_type=type(init_range_high)))
else:
self.valid_parameters = False
raise ValueError("The value passed to the 'init_range_low' parameter must be either integer or floating-point number but the value ({init_range_low_value}) of type {init_range_low_type} found.".format(init_range_low_value=init_range_low, init_range_low_type=type(init_range_low)))
# Validate random_mutation_min_val and random_mutation_max_val
if type(random_mutation_min_val) in GA.supported_int_float_types:
if type(random_mutation_max_val) in GA.supported_int_float_types:
if random_mutation_min_val == random_mutation_max_val:
if not self.suppress_warnings: warnings.warn("The values of the 2 parameters 'random_mutation_min_val' and 'random_mutation_max_val' are equal and this causes a fixed change to all genes.")
else:
self.valid_parameters = False
raise TypeError("The expected type of the 'random_mutation_max_val' parameter is numeric but ({random_mutation_max_val_type}) found.".format(random_mutation_max_val_type=type(random_mutation_max_val)))
else:
self.valid_parameters = False
raise TypeError("The expected type of the 'random_mutation_min_val' parameter is numeric but ({random_mutation_min_val_type}) found.".format(random_mutation_min_val_type=type(random_mutation_min_val)))
self.random_mutation_min_val = random_mutation_min_val
self.random_mutation_max_val = random_mutation_max_val
# Validate gene_type
if gene_type in GA.supported_int_float_types:
self.gene_type = [gene_type, None]
self.gene_type_single = True
# A single data type of float with precision.
elif len(gene_type) == 2 and gene_type[0] in GA.supported_float_types and (type(gene_type[1]) in GA.supported_int_types or gene_type[1] is None):
self.gene_type = gene_type
self.gene_type_single = True
elif type(gene_type) in [list, tuple, numpy.ndarray]:
if not len(gene_type) == num_genes:
self.valid_parameters = False
raise TypeError("When the parameter 'gene_type' is nested, then it can be either [float, int<precision>] or with length equal to the value passed to the 'num_genes' parameter. Instead, value {gene_type_val} with len(gene_type) ({len_gene_type}) != len(num_genes) ({num_genes}) found.".format(gene_type_val=gene_type, len_gene_type=len(gene_type), num_genes=num_genes))
for gene_type_idx, gene_type_val in enumerate(gene_type):
if gene_type_val in GA.supported_float_types:
# If the gene type is float and no precision is passed, set it to None.
gene_type[gene_type_idx] = [gene_type_val, None]
elif gene_type_val in GA.supported_int_types:
gene_type[gene_type_idx] = [gene_type_val, None]
elif type(gene_type_val) in [list, tuple, numpy.ndarray]:
# A float type is expected in a list/tuple/numpy.ndarray of length 2.
if len(gene_type_val) == 2:
if gene_type_val[0] in GA.supported_float_types:
if type(gene_type_val[1]) in GA.supported_int_types:
pass
else:
self.valid_parameters = False
raise ValueError("In the 'gene_type' parameter, the precision for float gene data types must be an integer but the element {gene_type_val} at index {gene_type_idx} has a precision of {gene_type_precision_val} with type {gene_type_type} .".format(gene_type_val=gene_type_val, gene_type_precision_val=gene_type_val[1], gene_type_type=gene_type_val[0], gene_type_idx=gene_type_idx))
else:
self.valid_parameters = False
raise ValueError("In the 'gene_type' parameter, a precision is expected only for float gene data types but the element {gene_type} found at index {gene_type_idx}. Note that the data type must be at index 0 followed by precision at index 1.".format(gene_type=gene_type_val, gene_type_idx=gene_type_idx))
else:
self.valid_parameters = False
raise ValueError("In the 'gene_type' parameter, a precision is specified in a list/tuple/numpy.ndarray of length 2 but value ({gene_type_val}) of type {gene_type_type} with length {gene_type_length} found at index {gene_type_idx}.".format(gene_type_val=gene_type_val, gene_type_type=type(gene_type_val), gene_type_idx=gene_type_idx, gene_type_length=len(gene_type_val)))
else:
self.valid_parameters = False
raise ValueError("When a list/tuple/numpy.ndarray is assigned to the 'gene_type' parameter, then its elements must be of integer, floating-point, list, tuple, or numpy.ndarray data types but the value ({gene_type_val}) of type {gene_type_type} found at index {gene_type_idx}.".format(gene_type_val=gene_type_val, gene_type_type=type(gene_type_val), gene_type_idx=gene_type_idx))
self.gene_type = gene_type
self.gene_type_single = False
else:
self.valid_parameters = False
raise ValueError("The value passed to the 'gene_type' parameter must be either a single integer, floating-point, list, tuple, or numpy.ndarray but ({gene_type_val}) of type {gene_type_type} found.".format(gene_type_val=gene_type, gene_type_type=type(gene_type)))
# Build the initial population
if initial_population is None:
if (sol_per_pop is None) or (num_genes is None):
self.valid_parameters = False
raise ValueError("Error creating the initail population\n\nWhen the parameter initial_population is None, then neither of the 2 parameters sol_per_pop and num_genes can be None at the same time.\nThere are 2 options to prepare the initial population:\n1) Create an initial population and assign it to the initial_population parameter. In this case, the values of the 2 parameters sol_per_pop and num_genes will be deduced.\n2) Allow the genetic algorithm to create the initial population automatically by passing valid integer values to the sol_per_pop and num_genes parameters.")
elif (type(sol_per_pop) is int) and (type(num_genes) is int):
# Validating the number of solutions in the population (sol_per_pop)
if sol_per_pop <= 0:
self.valid_parameters = False
raise ValueError("The number of solutions in the population (sol_per_pop) must be > 0 but ({sol_per_pop}) found. \nThe following parameters must be > 0: \n1) Population size (i.e. number of solutions per population) (sol_per_pop).\n2) Number of selected parents in the mating pool (num_parents_mating).\n".format(sol_per_pop=sol_per_pop))
                # Validating the number of genes.
if (num_genes <= 0):
self.valid_parameters = False
raise ValueError("The number of genes cannot be <= 0 but ({num_genes}) found.\n".format(num_genes=num_genes))
# When initial_population=None and the 2 parameters sol_per_pop and num_genes have valid integer values, then the initial population is created.
# Inside the initialize_population() method, the initial_population attribute is assigned to keep the initial population accessible.
self.num_genes = num_genes # Number of genes in the solution.
# In case the 'gene_space' parameter is nested, then make sure the number of its elements equals to the number of genes.
if self.gene_space_nested:
if len(gene_space) != self.num_genes:
self.valid_parameters = False
raise TypeError("When the parameter 'gene_space' is nested, then its length must be equal to the value passed to the 'num_genes' parameter. Instead, length of gene_space ({len_gene_space}) != num_genes ({num_genes})".format(len_gene_space=len(gene_space), num_genes=self.num_genes))
self.sol_per_pop = sol_per_pop # Number of solutions in the population.
self.initialize_population(self.init_range_low, self.init_range_high, allow_duplicate_genes, True, self.gene_type)
else:
self.valid_parameters = False
raise TypeError("The expected type of both the sol_per_pop and num_genes parameters is int but ({sol_per_pop_type}) and {num_genes_type} found.".format(sol_per_pop_type=type(sol_per_pop), num_genes_type=type(num_genes)))
elif numpy.array(initial_population).ndim != 2:
self.valid_parameters = False
raise ValueError("A 2D list is expected to the initail_population parameter but a ({initial_population_ndim}-D) list found.".format(initial_population_ndim=numpy.array(initial_population).ndim))
else:
# Forcing the initial_population array to have the data type assigned to the gene_type parameter.
if self.gene_type_single == True:
if self.gene_type[1] == None:
self.initial_population = numpy.array(initial_population, dtype=self.gene_type[0])
else:
self.initial_population = numpy.round(
                        numpy.array(initial_population, dtype=self.gene_type[0]),
                        self.gene_type[1])

#!/usr/bin/python
from collections import defaultdict
import os.path
import os, sys
import json
import numpy as np
import re
from scipy.spatial import distance
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
"""
Student Name: <NAME>
Student Number: 20236454
Github Repo: https://github.com/cbrandon20/ARC
"""
"""
Summary Reflection:
Numpy and Scipy were the only non-standard libraries used:
Scipy's distance module was used to calculate the euclidean distance between coloured points in the matrix in order to identify clusters,
and Numpy was used for matrix and vector manipulation.
The approach taken to the problem was to view each grid as a coordinate system from (0,0) to (grid_height, grid_width) and then iterate over it
cell by cell (this was somewhat counter-intuitive as the coordinates take the format (y,x) rather than (x,y)).
It also led to a lot of nested loops, which isn't the ideal way to work with numpy, but it yielded adequate results.
The first step in each of the solutions was to identify the background colour; to do this the assumption was made that the most frequently occurring colour is the background.
This assumption held true for each of the problems attempted here, but there would definitely be exceptions in some ARC problems, so a more sophisticated algorithm would be needed for a general solution.
Once the background was found, the second common step was to find the clusters of coloured points. A cluster in this case is a set of non-background coloured points that each have at least one other
non-background point in the set within a euclidean distance of sqrt(2). Each of the problems required some sort of "image" translation, such as reflecting across an axis, scaling, etc.,
and in the main they required mapping/translating between various cartesian coordinate systems. Finally, when it came to generating the output matrix the same paradigm was used:
a "colour_map" dictionary with key->[tuple(i,j)] value->[colour_number] pairs was created, a matrix with the correct output shape was generated and then iterated through; if the (i,j) coordinate was a key in
the colour map the value in the matrix was set to that key's value, otherwise it was set to the background colour.
6b9890af's unique challenge was distinguishing between the "shape" and the "frame", then setting the frame to the global coordinate system and scaling the shape up
(through a simple form of interpolation) to the correct proportions of the frame such that its edges touch the interior of the frame. b775ac94's was breaking the clusters into sub-clusters,
finding the point on the full shape that was closest to each of the single-colour points, then finding where the points were relative to each other.
This mapping was used -> (0: topleft, 1: topright, 2: bottomleft, 3: bottomright), which then guided which translation needed to be applied to the original shape.
Finally, with 0a938d79 the unique challenge was finding which edges (top|bottom, left|right) the points were on, then finding their orientation relative to each other, which enabled the generation of the new lines
with the correct displacement.
One observation was made during the undertaking of this assignment: when sandboxing solutions I began writing a function that compared the input matrices to the output matrices
to give binary answers to questions such as "does the background stay the same?" or "does the matrix shape change?", which could then guide the solve methods' flow and which of the transformation functions were used.
However, it quickly became apparent how many tests were required, and abstracting some input->output feature mappings into a binary question was increasingly complex depending on the task, sometimes requiring
several binary questions. This was for a set of only 3 of the problems in ARC, and hand coding these tests was somewhat infeasible in the time frame; for this problem there was little to gain, so it was abandoned, although it was valuable because through the process I gained
some insight into the difficulty of this problem. Based on this anecdotal evidence, coming up with a general AI solution for all the puzzles in ARC would seem incredible: without any human insight about the problem domains built into the system,
I can't see it learning all the relationships from the limited training sets given. This would indicate that we're a long way from truly Abstract Reasoning through AI at this point.
"""
# get_background and identify_clusters used by all solve_* functions
def get_background(x):
### The assumption is made that the most prevalent colour is the background
    # This gets all the unique colours in the matrix and their counts
x_palette, x_palette_counts = np.unique(x, return_counts=True)
x_background = x_palette[np.argmax(x_palette_counts)]
#returns the colour with the largest count value
return x_background
def identify_clusters(x, b_colour):
    # Recursively traverses the graph of overlapping clusters, producing one group for each
    # set of clusters that needs to be combined
def dfs(adj_list, visited, vertex, result, key):
visited.add(vertex)
result[key].append(vertex)
for neighbor in adj_list[vertex]:
if neighbor not in visited:
dfs(adj_list, visited, neighbor, result, key)
    # Scans the image row by row from left to right; the first non-background colour encountered
    # has its coordinates added to a dict under the key (cluster_id) 0.
    # From then on, each non-background point whose euclidean distance to a point of an existing
    # cluster is <= sqrt(2) is appended to that cluster; otherwise a new cluster is created.
coord_dict = {}
cluster_no = 0
for i in range(x.shape[0]):
for j in range(x.shape[1]):
if x[i][j] != b_colour:
if len(coord_dict) == 0:
coord_dict[cluster_no] = list()
coord_dict[cluster_no].append(tuple([i, j]))
cluster_no += 1
else:
in_a_cluster = False
for cd in coord_dict.keys():
if len([distance.euclidean(coord, (i, j)) for coord in coord_dict[cd] if
distance.euclidean(coord, (i, j)) <= np.sqrt(2)]) > 0:
coord_dict[cd].append(tuple([i, j]))
in_a_cluster = True
break
if not in_a_cluster:
coord_dict[cluster_no] = list()
coord_dict[cluster_no].append(tuple([i, j]))
cluster_no += 1
    # As the scan proceeds row by row, U-shapes and similar are initially split into separate clusters,
    # so it is necessary to find overlaps between this first pass of clusters.
refine_cluster = {}
for i in range(len(coord_dict.values()) - 1):
for j in range(i + 1, len(coord_dict.values())):
distance_list = [len([distance.euclidean(coord, coord_) for coord_ in (list(coord_dict.values())[j]) if
distance.euclidean(coord, coord_) <= np.sqrt(2)]) for coord in
(list(coord_dict.values())[i])]
distance_list = [d for d in distance_list if d > 0]
if len(distance_list) > 0:
refine_cluster[(i, j)] = len(distance_list)
#The previous step finds overlaps between found clusters
#This step groups together clusters that all share overlaps
edges = list(refine_cluster.keys())
adj_list = defaultdict(list)
for x, y in edges:
adj_list[x].append(y)
adj_list[y].append(x)
result = defaultdict(list)
visited = set()
for vertex in adj_list:
if vertex not in visited:
dfs(adj_list, visited, vertex, result, vertex)
x,y = list(coord_dict.keys()),list(result.values())
    # Clusters that did not overlap any other cluster are now added alongside the merged clusters
    # created in the previous step, and subsequently returned.
for v in x:
included = False
for xt in y:
if v in xt:
included = True
if not included:
y.append([v])
final_clusters = {}
cluster_index = 0
    # This creates the final set of clusters by combining the overlapping clusters and also adding
    # the clusters that were already correct to the dict final_clusters
for sl in y:
temp_list = list()
for l in sl:
temp_list = temp_list + (coord_dict[l])
temp_list = sorted(temp_list)
final_clusters[cluster_index] = temp_list
cluster_index += 1
return final_clusters
"""
b775ac94 required transformations:
Identify the clusters of non-background colours.
Each cluster contains a single colour that makes up the full shape to be translated.
There are then either 2 or 3 other single coloured points joined to the full shape.
Depending on the single point's position relative to the full shape, it indicates a reflection
of the full shape across the x or y axis or both, in the colour of the single point.
Get single-colour shape -> copy -> reflect across axis (based on adjoining coloured dots) -> repeat for all coloured dots.
This implementation solves all the training and testing data correctly.
"""
def solve_b775ac94(x):
background = get_background(x)
clu = identify_clusters(x, background)
for k, v in clu.items():
clu[k] = get_subsections(v, x)
colour_map = {}
    # This dictionary is used in conjunction with the position codes (0: topleft, 1: topright, 2: bottomleft, 3: bottomright) to apply the reflection across the appropriate axis.
    # For instance, if the translation is from 2 (bottomleft) to 3 (bottomright), the key is 2 + 3 = 5 -> [0, 1], a reflection across the y axis.
axis_translating = {2: np.array([1, 0]), 4: np.array([1, 0]), 1: np.array([0, 1]), 5: np.array([0, 1]),
3: np.array([1, 1])}
# Data structure for values in clu:
# the dict key is the cluster id
# the value is a tuple: index 0 is a dict of subclusters, index 1 is the key of the main subcluster to be reflected
# the subcluster dict is keyed by colour
# each subcluster value's index 0 is a list of the subcluster points and index -1 is its position code; for the main subcluster index 1 is the pivot point
for k, v in clu.items():
translated_subcluster = {}
for k_, v_ in v[0].items():
if k_ != v[1]:
tran_const = axis_translating[v_[-1] + v[0][v[1]][-1]]
# Get the midpoint between the two pivot points,
# get the distance between each point and the midpoint,
# and divide by 0.5, the step size; this gives the translation
midpoint = (np.array(v[0][v[1]][1]) + v_[0][0]) / 2
translated_coordinates = []
# Iterates through each point and calculates the number of steps to the midpoint
# tran_const sets the irrelevant steps to 0, i.e. if translating across the x axis, it sets the y steps to zero
for v_c in v[0][v[1]][0]:
steps = ((midpoint - np.array(v_c)) / 0.5) * tran_const
translated_coordinates.append(tuple((np.array(v_c) + steps).astype(int)))
translated_subcluster[k_] = translated_coordinates
else:
# This is the original shape that does not need to be translated; it is copied directly
translated_subcluster[k_] = v[0][v[1]][0]
pass
# Flattens the dictionary into colour_map, with the keys being the coordinates and the values the colour at that coordinate
for k, v in translated_subcluster.items():
for v_ in v:
colour_map[v_] = k
#Creates the output matrix, with the correct shape but filled with ones
#then fills in the colours correctly using the colour_map dictionary
y = np.ones(shape=x.shape)
for i in range(y.shape[0]):
for j in range(y.shape[1]):
if (i, j) in colour_map.keys():
y[i][j] = y[i][j] * colour_map[(i, j)]
else:
y[i][j] = background
return y.astype(int)
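# Worked example of the reflection arithmetic above (illustrative helper, not part
# of the original solver): a point p reflected across the midpoint m along the
# unmasked axes is p + 2 * (m - p), which is what
# steps = ((midpoint - p) / 0.5) * tran_const computes. For p = (2, 3),
# m = (2, 5.5) and tran_const = [0, 1] (a reflection across the y axis) the
# result is (2, 8).
def _reflect_point(point, midpoint, tran_const):
    steps = ((np.array(midpoint) - np.array(point)) / 0.5) * np.array(tran_const)
    return tuple((np.array(point) + steps).astype(int))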
def get_subsections(cluster, x):
sub_cluster = {}
for v in cluster:
if x[v[0]][v[1]] in sub_cluster.keys():
sub_cluster[x[v[0]][v[1]]].append(v)
else:
sub_cluster[x[v[0]][v[1]]] = list()
sub_cluster[x[v[0]][v[1]]].append(v)
main_shape = None
for k, v in sub_cluster.items():
if len(v) > 1:
main_shape = k
break
min_distance = 100
pivot_point = None
for k, v in sub_cluster.items():
if k != main_shape:
min_distance = 100
for v_ in sub_cluster[main_shape]:
if distance.euclidean(v, v_) < min_distance:
min_distance = distance.euclidean(v, v_)
pivot_point = v_
# These are relative-position mappings used in conjunction with the corner encoding (0: topleft, 1: topright, 2: bottomleft, 3: bottomright)
rel = {(1, 0): -2, (-1, 0): 2, (0, 1): -1, (0, -1): 1}
rel_corners = {(1, 1): 3, (-1, 1): 1, (1, -1): 2, (-1, -1): 0}
# Needed if there is no coloured point on the diagonal to the main shape
if_no_foil = {(-1): 0, (1): 1, (-2): 0, (2): 2, (-2, 1): 2, (2, 1): 0, (-2, -1): 1, (2, 1): 3}
relative_pos = []
pivot_has_foil = False
for k, v in sub_cluster.items():
if k != main_shape:
rel_pos = tuple(np.array(pivot_point) - np.array(v[0]))
if rel_pos in rel_corners.keys():
sub_cluster[main_shape] = (sub_cluster[main_shape], pivot_point, rel_corners[rel_pos])
rel_pos_ = tuple(np.array(v[0]) - np.array(pivot_point))
sub_cluster[k] = (v, rel_corners[rel_pos_])
pivot_has_foil = True
else:
relative_pos.append((k, rel[rel_pos]))
if not pivot_has_foil:
sub_cluster[main_shape] = ([main_shape], pivot_point, if_no_foil[tuple([x[1] for x in relative_pos])])
# Needed if there is no coloured point on the diagonal to the main shape
for k, v in relative_pos:
sub_cluster[k] = (sub_cluster[k], sub_cluster[main_shape][-1] + v)
# sub_cluster is a dictionary: its keys are the colours, its values are a tuple whose first entry is the points for the subcluster,
# and whose second entry is the subcluster's location relative to the others (0: topleft, 1: topright, 2: bottomleft, 3: bottomright)
return (sub_cluster, main_shape)
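# Illustrative check of the corner encoding used above: with the main shape's
# pivot point at (4, 4) and a single coloured dot at (3, 3), pivot - dot = (1, 1),
# which rel_corners maps to 3 (bottomright), while the dot itself gets the
# opposite corner, rel_corners[(-1, -1)] = 0 (topleft).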
"""
There is a red frame and a shape in each problem.
The red frame becomes the global coordinates and the shape needs to be interpolated to fully occupy the interior of the red frame
the goal is essentially zooming in on the shape, with the magnification being relative to the difference in size between the shape and the frame
Get Frame -> Get Shape -> Set output to Frame -> Scale Shape to fit Frame -> Center Shape in Frame
This implementation solves all the training and testing data correctly
"""
def solve_6b9890af(x):
background = get_background(x)
clu = identify_clusters(x, background)
frame, shape = None, None
container_shape = []
# Iterates over the clusters and identifies which is the shape and which is the frame
for k, v in clu.items():
container_shape.append(extract_frame(v, x))
# Unpack each entry into either the frame or the shape, keeping its cluster and original coordinates
for k, v, c_, og in container_shape:
if k == True:
frame = v
frame_cluster = c_
frame_og_co = og
else:
shape = v
original_coordinates = og
shape_cluster = c_
# This returns the shape with a frame; the final step is to colour in the frame
Y = matrix_scaler(get_scale(frame, shape), shape, shape_cluster, original_coordinates, 0, x)
#This colours in the frame, as it was added as 2 rows, 2 columns of 1's using np's hstack and vstack
x = colour_frame(frame_cluster, frame_og_co, Y, x)
return x.astype(int)
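# Illustrative sketch of the zoom idea (an assumption for exposition, not the
# actual get_scale/matrix_scaler helpers defined elsewhere in this file):
# enlarging a grid by an integer factor k can be done with a Kronecker product,
# turning every cell into a k x k block of the same colour.
def _zoom_grid(grid, k):
    return np.kron(grid, np.ones((k, k), dtype=int))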
def extract_frame(cluster, x):
#Specific to 6b9890af it decides which cluster is the shape and which is the red frame
cluster_ = np.array(cluster)
# Gets the extrema of the cluster
i_min, i_max = np.min(cluster_[:, 0]), np.max(cluster_[:, 0])
j_min, j_max =
|
np.min(cluster_[:, 1])
|
numpy.min
|
import numpy as np
import tensorflow as tf
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
'''Preprocess true boxes to training input format
Parameters
----------
true_boxes: array, shape=(m, T, 5)
Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
input_shape: array-like, hw, multiples of 32
anchors: array, shape=(N, 2), wh
num_classes: integer
Returns
-------
y_true: list of array, shape like yolo_outputs, xywh are relative values
'''
assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors)//3 # default setting
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]
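# e.g. (illustrative numbers, not from the original): with input_shape = (416, 416),
# a box (x_min=10, y_min=20, x_max=50, y_max=60) has centre (30, 40) and size
# (40, 40), which becomes roughly (0.072, 0.096) and (0.096, 0.096) after
# dividing by input_shape[::-1] = (w, h).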
m = true_boxes.shape[0]
grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
dtype='float32') for l in range(num_layers)]
# Expand dim to apply broadcasting.
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
valid_mask = boxes_wh[..., 0]>0
for b in range(m):
# Discard zero rows.
wh = boxes_wh[b, valid_mask[b]]
if len(wh)==0: continue
# Expand dim to apply broadcasting.
wh =
|
np.expand_dims(wh, -2)
|
numpy.expand_dims
|
"""
Helper functions to create a mashup dataset (Potrait dataset * Indoor dataset). This combined dataset will be used for training.
This file is copied from https://github.com/geek101/focal/blob/master/data/portrait_indoor_aug.py and modified as per requirements
"""
import scipy.ndimage
import skimage.color
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import warp, AffineTransform
import random
from skimage import io
from skimage import img_as_uint
random.seed(42)
def warp_helper(img, affine_t):
return warp(img, affine_t, mode="constant", cval=0.0, preserve_range=True).astype(
np.uint8
)
def random_affine_helper(
img,
img_mask,
intensity=1.0,
rotation_disabled=True,
shear_disabled=True,
scale_disabled=True,
):
if rotation_disabled:
rotation = None
else:
rotation = random.uniform(-0.15 * intensity, 0.15 * intensity)
if shear_disabled:
shear = None
else:
shear = random.uniform(-0.15 * intensity, 0.15 * intensity)
if scale_disabled:
scale = None
else:
scale_rnd = random.uniform(0.9, 1.1)
scale = (scale_rnd, scale_rnd)
affine_t = AffineTransform(rotation=rotation, shear=shear, scale=scale)
return warp_helper(img, affine_t), warp_helper(img_mask, affine_t)
def shift_helper(image, shift):
return scipy.ndimage.shift(image, shift=shift, mode="constant", cval=0)
def shift_mask_corners(portrait_image, portrait_mask, shift_arg="none"):
"""
Given a resized portrait image and its mask, shift the mask
and the image to the right or left corner, with some randomness
to enable partial occlusion of the left or right side of the body
and/or face.
shift_arg options:
['none', 'left_corner', 'right_corner', 'left_random', 'right_random']
"""
aw = np.argwhere(portrait_mask != 0)
if len(aw) == 0:
return None, None
aw_col1 = aw[:, 1:]
# print(aw_col1.reshape(aw_col1.shape[0]))
# print('Min: {}'.format(aw_col1.min()))
# print('Max: {}'.format(aw_col1.max()))
shift_param = shift_arg.strip().lower()
col1_min = aw_col1.min()
if shift_param == "left_corner":
shift = [0, -col1_min, 0]
elif shift_param == "right_corner":
shift = [0, col1_min, 0]
elif shift_param == "left_random":
shift = [0, random.randint(int(-col1_min / 2), int(-col1_min / 4)), 0]
elif shift_param == "right_random":
shift = [0, random.randint(int(col1_min / 4), int(col1_min / 2)), 0]
elif shift_param == "none":
shift = [0, 0, 0]
else:
raise Exception(
"Invalid shift arg: {}, allow params: {}".format(
shift_arg,
["none", "left_corner", "right_corner", "left_random", "right_random"],
)
)
return shift_helper(portrait_image, shift), shift_helper(portrait_mask, shift[:2])
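# Minimal usage sketch (assumed shapes): portrait_image is H x W x 3 and
# portrait_mask is H x W with non-zero pixels marking the subject.
# 'left_corner' slides both so the left-most mask column touches column 0;
# if the mask is empty the function returns (None, None).
# shifted_img, shifted_mask = shift_mask_corners(img, mask, shift_arg="left_corner")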
def convert_to_color_safe(input_image):
if len(input_image.shape) == 2 or input_image.shape[2] == 1:
return skimage.color.grey2rgb(input_image).astype(dtype=np.uint8)
else:
return input_image.astype(dtype=np.uint8)
def diff_pad(diff):
flip = random.randint(0, 1) == 1
pad = (0, 0)
if diff > 0:
if flip:
pad = (diff - int(diff / 2), int(diff / 2))
else:
pad = (int(diff / 2), diff - int(diff / 2))
return pad
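# e.g. (illustrative): diff_pad(5) returns either (2, 3) or (3, 2) at random,
# splitting the 5 leftover pixels between the two sides; diff_pad(0) gives (0, 0).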
def embed_helper(extra_padded, extra_padded_mask, target_image_input):
target_image = convert_to_color_safe(target_image_input)
pasted_image =
|
np.zeros(target_image.shape, dtype="uint8")
|
numpy.zeros
|
#!/usr/bin/env python3
from shared_setting import *
import numpy
import csv
#soma exc synapse
EEmax=18.0
EEwidth=5.0
#som<-som
x=numpy.arange(NE)
y=numpy.arange(NE)
z=numpy.meshgrid(x,y)
dif=numpy.abs(z[1]-z[0])
WEE=EEmax*numpy.exp(-0.5*(dif/EEwidth)**2)
WEE[WEE<0.0]=0.0
WEE[
|
numpy.eye(NE, dtype=bool)
|
numpy.eye
|
#!/usr/bin/env python3
"""Trains LightGBM models on various features, data splits. Dumps models and predictions"""
import pickle
import numpy as np
import lightgbm as lgb
from sklearn.metrics import accuracy_score
from utils import load_data
from os.path import join, exists
from os import makedirs
import argparse
CROP_SIZES = [400, 650]
SCALES = [0.5]
NN_MODELS = ["ResNet", "Inception", "VGG"]
AUGMENTATIONS_PER_IMAGE = 50
NUM_CLASSES = 4
RANDOM_STATE = 1
N_SEEDS = 5
VERBOSE_EVAL = False
with open("data/folds-10.pkl", "rb") as f:
FOLDS = pickle.load(f)
LGBM_MODELS_ROOT = "models/LGBMs"
CROSSVAL_PREDICTIONS_ROOT = "predictions"
DEFAULT_PREPROCESSED_ROOT = "data/preprocessed/train"
def _mean(x, mode="arithmetic"):
"""
Calculates mean probabilities across augmented data
# Arguments
x: Numpy 3D array of probability scores, (N, AUGMENTATIONS_PER_IMAGE, NUM_CLASSES)
mode: type of averaging, can be "arithmetic" or "geometric"
# Returns
Mean probabilities 2D array (N, NUM_CLASSES)
"""
assert mode in ["arithmetic", "geometric"]
if mode == "arithmetic":
x_mean = x.mean(axis=1)
else:
x_mean = np.exp(np.log(x + 1e-7).mean(axis=1))
x_mean = x_mean / x_mean.sum(axis=1, keepdims=True)
return x_mean
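# Minimal usage sketch (illustrative values): scores for 1 image over 2
# augmentations and 4 classes; both modes return a (1, 4) array of class
# probabilities that sums to 1.
# _mean(np.array([[[0.7, 0.1, 0.1, 0.1],
#                  [0.5, 0.3, 0.1, 0.1]]]), mode="geometric")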
if __name__ == "__main__":
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg("--features",
required=False,
default=DEFAULT_PREPROCESSED_ROOT,
metavar="feat_dir",
help="Feature root dir. Default: data/preprocessed/train")
args = parser.parse_args()
PREPROCESSED_ROOT = args.features
learning_rate = 0.1
num_round = 70
param = {
"objective": "multiclass",
"num_class": NUM_CLASSES,
"metric": ["multi_logloss", "multi_error"],
"verbose": 0,
"learning_rate": learning_rate,
"num_leaves": 191,
"feature_fraction": 0.46,
"bagging_fraction": 0.69,
"bagging_freq": 0,
"max_depth": 7,
}
for SCALE in SCALES:
print("SCALE:", SCALE)
for NN_MODEL in NN_MODELS:
print("NN_MODEL:", NN_MODEL)
for CROP_SZ in CROP_SIZES:
print("PATCH_SZ:", CROP_SZ)
INPUT_DIR = join(PREPROCESSED_ROOT, "{}-{}-{}".format(NN_MODEL, SCALE, CROP_SZ))
acc_all_seeds = []
for seed in range(N_SEEDS):
accuracies = []
for fold in range(len(FOLDS)):
feature_fraction_seed = RANDOM_STATE + seed * 10 + fold
bagging_seed = feature_fraction_seed + 1
param.update({"feature_fraction_seed": feature_fraction_seed, "bagging_seed": bagging_seed})
print("Fold {}/{}, seed {}".format(fold + 1, len(FOLDS), seed))
x_train, y_train, x_test, y_test = load_data(INPUT_DIR, FOLDS, fold)
train_data = lgb.Dataset(x_train, label=y_train)
test_data = lgb.Dataset(x_test, label=y_test)
gbm = lgb.train(param, train_data, num_round, valid_sets=[test_data], verbose_eval=VERBOSE_EVAL)
# pickle model
model_file = "lgbm-{}-{}-{}-f{}-s{}.pkl".format(NN_MODEL, SCALE, CROP_SZ, fold, seed)
model_root = join(LGBM_MODELS_ROOT, NN_MODEL)
if not exists(model_root):
makedirs(model_root)
with open(join(model_root, model_file), "wb") as f:
pickle.dump(gbm, f)
scores = gbm.predict(x_test)
scores = scores.reshape(-1, AUGMENTATIONS_PER_IMAGE, NUM_CLASSES)
preds = {
"files": FOLDS[fold]["test"]["x"],
"y_true": y_test,
"scores": scores,
}
preds_file = "lgbm_preds-{}-{}-{}-f{}-s{}.pkl".format(NN_MODEL, SCALE, CROP_SZ,
fold, seed)
preds_root = join(CROSSVAL_PREDICTIONS_ROOT, NN_MODEL)
if not exists(preds_root):
makedirs(preds_root)
with open(join(preds_root, preds_file), "wb") as f:
pickle.dump(preds, f)
mean_scores = _mean(scores, mode="arithmetic")
y_pred = np.argmax(mean_scores, axis=1)
y_true = y_test[::AUGMENTATIONS_PER_IMAGE]
acc = accuracy_score(y_true, y_pred)
print("Accuracy:", acc)
accuracies.append(acc)
acc_seed =
|
np.array(accuracies)
|
numpy.array
|
from queue import PriorityQueue
import numpy as np
def heuristic(n1, n2):
return np.linalg.norm(
|
np.array(n2)
|
numpy.array
|
# added for girder interaction as plugin arbor task
from girder_worker.app import app
from girder_worker.utils import girder_job
from tempfile import NamedTemporaryFile
# declared for subprocess to do GPU stuff. Package 'billiard' comes with celery
# and is a workaround for subprocess limitations on 'daemonic' processes.
import billiard as multiprocessing
from billiard import Queue, Process
import json
#-------------------------------------------
@girder_job(title='inferWSI')
@app.task(bind=True)
def infer_wsi(self,image_file,**kwargs):
#print(" input image filename = {}".format(image_file))
# setup the GPU environment for pytorch
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
DEVICE = 'cuda'
print('perform forward inferencing')
subprocess = False
if (subprocess):
# declare a subprocess that does the GPU allocation to keep the GPU memory from leaking
msg_queue = Queue()
gpu_process = Process(target=start_inference, args=(msg_queue,image_file))
gpu_process.start()
predict_image = msg_queue.get()
gpu_process.join()
else:
predict_image = start_inference_mainthread(image_file)
predict_bgr = cv2.cvtColor(predict_image,cv2.COLOR_RGB2BGR)
print('output conversion and inferencing complete')
# generate unique names for multiple runs. Add extension so it is easier to use
outname = NamedTemporaryFile(delete=False).name+'.png'
# write the output object using openCV
print('writing output')
cv2.imwrite(outname,predict_bgr)
print('writing completed')
# new output of segmentation statistics in a string
statistics = generateStatsString(predict_image)
# generate unique names for multiple runs. Add extension so it is easier to use
statoutname = NamedTemporaryFile(delete=False).name+'.json'
open(statoutname,"w").write(statistics)
# return the name of the output file and the stats
return outname,statoutname
import random
#import argparse
import torch
import torch.nn as nn
import cv2
import os, glob
import numpy as np
from skimage.io import imread, imsave
from skimage import filters
from skimage.color import rgb2gray
import gc
# from github.com/girder/large_image
import large_image
from PIL import Image, ImageColor
Image.MAX_IMAGE_PIXELS = None
import albumentations as albu
import segmentation_models_pytorch as smp
ml = nn.Softmax(dim=1)
NE = 50
ST = 100
ER = 150
AR = 200
PRINT_FREQ = 20
BATCH_SIZE = 80
ENCODER = 'efficientnet-b4'
ENCODER_WEIGHTS = 'imagenet'
ACTIVATION = None
DEVICE = 'cuda'
# the weights file is in the same directory, so make this path reflect that. If this is
# running in a docker container, then we should assume the weights are at the toplevel
# directory instead
if os.getenv('DOCKER') == 'True':
WEIGHT_PATH = '/'
else:
WEIGHT_PATH = '/'
# these aren't used in the girder version, no files are directly written out
# by the routines written by FNLCR (Hyun Jung)
WSI_PATH = '.'
PREDICTION_PATH = '.'
IMAGE_SIZE = 384
IMAGE_HEIGHT = 384
IMAGE_WIDTH = 384
CHANNELS = 3
NUM_CLASSES = 5
CLASS_VALUES = [0, 50, 100, 150, 200]
BLUE = [0, 0, 255] # ARMS: 200
RED = [255, 0, 0] # ERMS: 150
GREEN = [0, 255, 0] # STROMA: 100
YELLOW = [255, 255, 0] # NECROSIS: 50
EPSILON = 1e-6
# what magnification should this pipeline run at
ANALYSIS_MAGNIFICATION = 10
THRESHOLD_MAGNIFICATION = 2.5
ASSUMED_SOURCE_MAGNIFICATION = 20.0
rot90 = albu.Rotate(limit=(90, 90), always_apply=True)
rotn90 = albu.Rotate(limit=(-90, -90), always_apply=True)
rot180 = albu.Rotate(limit=(180, 180), always_apply=True)
rotn180 = albu.Rotate(limit=(-180, -180), always_apply=True)
rot270 = albu.Rotate(limit=(270, 270), always_apply=True)
rotn270 = albu.Rotate(limit=(-270, -270), always_apply=True)
hflip = albu.HorizontalFlip(always_apply=True)
vflip = albu.VerticalFlip(always_apply=True)
tpose = albu.Transpose(always_apply=True)
pad = albu.PadIfNeeded(p=1.0, min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=0, value=(255, 255, 255), mask_value=0)
# supporting subroutines
#-----------------------------------------------------------------------------
from numpy import asarray
def _generate_th(source, height,width):
# extract a low-res version of the entire image for tissue-detection threshold processing
myRegion = {'top': 0, 'left': 0, 'width': width, 'height': height}
threshold_source_image, mimetype = source.getRegion(format=large_image.tilesource.TILE_FORMAT_NUMPY,
region=myRegion,
scale={'magnification': THRESHOLD_MAGNIFICATION})
org_height = height
org_width = width
print('OTSU image')
print(type(threshold_source_image))
print(threshold_source_image.shape)
thumbnail_gray = rgb2gray(threshold_source_image)
val = filters.threshold_otsu(thumbnail_gray)
# create empty output for threshold
otsu_seg = np.zeros((threshold_source_image.shape[0],threshold_source_image.shape[1]), np.uint8)
# generate a mask=true image where the source pixels were darker than the
# # threshold value (indicating tissue instead of bright background)
otsu_seg[thumbnail_gray <= val] = 255
# OTSU algo. was applied at reduced scale, so scale image back up
aug = albu.Resize(p=1.0, height=org_height, width=org_width)
augmented = aug(image=otsu_seg, mask=otsu_seg)
otsu_seg = augmented['mask']
print('rescaled threshold shape is:',otsu_seg.shape)
#imsave('otsu.png', (otsu_seg.astype('uint8')))
print('Otsu segmentation finished')
return otsu_seg
def _infer_batch(model, test_patch):
# print('Test Patch Shape: ', test_patch.shape)
with torch.no_grad():
logits_all = model(test_patch[:, :, :, :])
logits = logits_all[:, 0:NUM_CLASSES, :, :]
prob_classes_int = ml(logits)
prob_classes_all = prob_classes_int.cpu().numpy().transpose(0, 2, 3, 1)
return prob_classes_all
def _augment(index, image):
if index == 0:
image= image
if index == 1:
augmented = rot90(image=image)
image = augmented['image']
if index ==2:
augmented = rot180(image=image)
image= augmented['image']
if index == 3:
augmented = rot270(image=image)
image = augmented['image']
if index == 4:
augmented = vflip(image=image)
image = augmented['image']
if index == 5:
augmented = hflip(image=image)
image = augmented['image']
if index == 6:
augmented = tpose(image=image)
image = augmented['image']
return image
def _unaugment(index, image):
if index == 0:
image= image
if index == 1:
augmented = rotn90(image=image)
image = augmented['image']
if index ==2:
augmented = rotn180(image=image)
image= augmented['image']
if index == 3:
augmented = rotn270(image=image)
image = augmented['image']
if index == 4:
augmented = vflip(image=image)
image = augmented['image']
if index == 5:
augmented = hflip(image=image)
image = augmented['image']
if index == 6:
augmented = tpose(image=image)
image = augmented['image']
return image
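# Note (illustrative): _augment and _unaugment are paired so that
# _unaugment(i, _augment(i, img)) returns the original image for every index --
# each rotation is undone by the opposite rotation, and the flips and transpose
# are their own inverse.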
def _gray_to_color(input_probs):
index_map = (np.argmax(input_probs, axis=-1)*50).astype('uint8')
height = index_map.shape[0]
width = index_map.shape[1]
heatmap = np.zeros((height, width, 3), np.float32)
# Background
heatmap[index_map == 0, 0] = input_probs[:, :, 0][index_map == 0]
heatmap[index_map == 0, 1] = input_probs[:, :, 0][index_map == 0]
heatmap[index_map == 0, 2] = input_probs[:, :, 0][index_map == 0]
# Necrosis
heatmap[index_map==50, 0] = input_probs[:, :, 1][index_map==50]
heatmap[index_map==50, 1] = input_probs[:, :, 1][index_map==50]
heatmap[index_map==50, 2] = 0.
# Stroma
heatmap[index_map==100, 0] = 0.
heatmap[index_map==100, 1] = input_probs[:, :, 2][index_map==100]
heatmap[index_map==100, 2] = 0.
# ERMS
heatmap[index_map==150, 0] = input_probs[:, :, 3][index_map==150]
heatmap[index_map==150, 1] = 0.
heatmap[index_map==150, 2] = 0.
# ARMS
heatmap[index_map==200, 0] = 0.
heatmap[index_map==200, 1] = 0.
heatmap[index_map==200, 2] = input_probs[:, :, 4][index_map==200]
heatmap[np.average(heatmap, axis=-1)==0, :] = 1.
return heatmap
# return a string identifier of the basename of the current image file
def returnIdentifierFromImagePath(impath):
# get the full name of the image
file = os.path.basename(impath)
# strip off the extension
base = file.split('.')[0]
return(base)
def displayTileMetadata(tile,region,i,j):
if (tile.shape[0] != 384 or tile.shape[1] != 384):
print('i,j:',i,j)
print('tile sizeX:',tile.shape[0],'sizeY:',tile.shape[1])
print('top:',region['top'],'left:',region['left'],'width:',region['width'],'height:',region['height'])
def isNotANumber(variable):
# this try clause will work for integers and float values, since floats can be cast. If the
# variable is any other type (include None), the clause will cause an exception and we will return False
try:
tmp = int(variable)
return False
except:
return True
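# e.g. (illustrative): isNotANumber(None) and isNotANumber('abc') return True,
# while isNotANumber(20), isNotANumber(3.7) and isNotANumber('20') return False.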
#---------------- main inferencing routine ------------------
def _inference(model, image_path, BATCH_SIZE, num_classes, kernel, num_tta=1):
model.eval()
# open an access handler on the large image
source = large_image.getTileSource(image_path)
# print image metadata
metadata = source.getMetadata()
print(metadata)
print('sizeX:', metadata['sizeX'], 'sizeY:', metadata['sizeY'], 'levels:', metadata['levels'])
# figure out the size of the actual image and the size that this analysis
# processing will run at. The size calculations are made in two steps to make sure the
# rescaled threshold image size and the analysis image size match without rounding error
height_org = metadata['sizeY']
width_org = metadata['sizeX']
# if we are processing using a reconstructed TIF from VIPS, there will not be a magnification value.
# So we will assume 20x as the native magnification, which matches the source data the
# IVG has provided.
if isNotANumber(metadata['magnification']):
print('warning: No magnfication value in source image. Assuming the source image is at ',
ASSUMED_SOURCE_MAGNIFICATION,' magnification')
metadata['magnification'] = ASSUMED_SOURCE_MAGNIFICATION
assumedMagnification = True
else:
assumedMagnification = False
# the theoretical adjustment for the magnification would be as below:
# height_proc = int(height_org * (ANALYSIS_MAGNIFICATION/metadata['magnification']))
# width_proc = int(width_org * (ANALYSIS_MAGNIFICATION/metadata['magnification']))
height_proc = int(height_org * THRESHOLD_MAGNIFICATION/metadata['magnification'])*int(ANALYSIS_MAGNIFICATION/THRESHOLD_MAGNIFICATION)
width_proc = int(width_org * THRESHOLD_MAGNIFICATION/metadata['magnification'])*int(ANALYSIS_MAGNIFICATION/THRESHOLD_MAGNIFICATION)
print('analysis image size :',height_proc, width_proc)
basename_string = os.path.splitext(os.path.basename(image_path))[0]
print('Basename String: ', basename_string)
# generate a binary mask for the image
height_otsu = int(height_proc * THRESHOLD_MAGNIFICATION/ANALYSIS_MAGNIFICATION)
width_otsu = int(width_proc * THRESHOLD_MAGNIFICATION / ANALYSIS_MAGNIFICATION)
print('size of threshold mask:',height_otsu,width_otsu)
myRegion = {'top': 0, 'left': 0, 'width': width_org, 'height': height_org}
if assumedMagnification:
# we have to manage the downsizing to the threshold magnification.
threshold_source_image, mimetype = source.getRegion(format=large_image.tilesource.TILE_FORMAT_NUMPY,
region=myRegion,output={'maxWidth':width_otsu,'maxHeight':height_otsu})
print('used maxOutput for threshold size')
else:
threshold_source_image, mimetype = source.getRegion(format=large_image.tilesource.TILE_FORMAT_NUMPY,
region=myRegion,
scale={'magnification': THRESHOLD_MAGNIFICATION})
print('OTSU image')
print(type(threshold_source_image))
print(threshold_source_image.shape)
thumbnail_gray = rgb2gray(threshold_source_image)
val = filters.threshold_otsu(thumbnail_gray)
# create empty output for threshold
otsu_seg = np.zeros((threshold_source_image.shape[0], threshold_source_image.shape[1]), np.uint8)
# generate a mask=true image where the source pixels were darker than the
# # threshold value (indicating tissue instead of bright background)
otsu_seg[thumbnail_gray <= val] = 255
# OTSU algo. was applied at reduced scale, so scale image back up
aug = albu.Resize(p=1.0, height=height_proc, width=width_proc)
augmented = aug(image=otsu_seg, mask=otsu_seg)
otsu_org = augmented['mask'] // 255
print('rescaled threshold shape is:', otsu_org.shape)
#imsave('otsu.png', (augmented['mask'] .astype('uint8')))
print('Otsu segmentation finished')
#otsu_org = _generate_th(source,height_org,width_org) // 255
# initialize the output probability map
prob_map_seg_stack = np.zeros((height_proc, width_proc, num_classes), dtype=np.float32)
for b in range(num_tta):
height = height_proc
width = width_proc
PATCH_OFFSET = IMAGE_SIZE // 2
SLIDE_OFFSET = IMAGE_SIZE // 2
# these are the counts in the x and y direction. i.e. how many samples across the image.
# the divisor is SLIDE_OFFSET because this is how much the window is moved each time
heights = (height + PATCH_OFFSET * 2 - IMAGE_SIZE) // SLIDE_OFFSET +1
widths = (width + PATCH_OFFSET * 2 - IMAGE_SIZE) // SLIDE_OFFSET +1
print('heights,widths:',heights,widths)
heights_v2 = (height + PATCH_OFFSET * 2) // (SLIDE_OFFSET)
widths_v2 = (width + PATCH_OFFSET * 2) // (SLIDE_OFFSET)
print('heights_v2,widths_v2',heights_v2,widths_v2)
# extend the size to allow for the whole actual image to be processed without actual
# pixels being at a tile boundary.
height_ext = SLIDE_OFFSET * heights + PATCH_OFFSET * 2
width_ext = SLIDE_OFFSET * widths + PATCH_OFFSET * 2
print('height_ext,width_ext:',height_ext,width_ext)
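# e.g. (illustrative): with IMAGE_SIZE = 384 both offsets are 192, so a
# 1000-pixel-tall analysis image gives heights = (1000 + 384 - 384) // 192 + 1 = 6
# and height_ext = 192 * 6 + 384 = 1536, enough for half-overlapping 384x384
# tiles to cover every analysis pixel.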
org_slide_ext = np.ones((height_ext, width_ext, 3), np.uint8) * 255
otsu_ext = np.zeros((height_ext, width_ext), np.uint8)
prob_map_seg = np.zeros((height_ext, width_ext, num_classes), dtype=np.float32)
weight_sum = np.zeros((height_ext, width_ext, num_classes), dtype=np.float32)
#org_slide_ext[PATCH_OFFSET: PATCH_OFFSET + height, PATCH_OFFSET:PATCH_OFFSET + width, 0:3] = image_working[:, :,
# 0:3]
# load the otsu results
otsu_ext[PATCH_OFFSET: PATCH_OFFSET + height, PATCH_OFFSET:PATCH_OFFSET + width] = otsu_org[:, :]
linedup_predictions = np.zeros((heights * widths, IMAGE_SIZE, IMAGE_SIZE, num_classes), dtype=np.float32)
linedup_predictions[:, :, :, 0] = 1.0
test_patch_tensor = torch.zeros([BATCH_SIZE, 3, IMAGE_SIZE, IMAGE_SIZE], dtype=torch.float).cuda(
non_blocking=True)
# get an identifier for the patch files to be written out as debugging
unique_identifier = returnIdentifierFromImagePath(image_path)
patch_iter = 0
inference_index = []
position = 0
stopcounter = 0
for i in range(heights-2):
for j in range(widths-2):
#test_patch = org_slide_ext[i * SLIDE_OFFSET: i * SLIDE_OFFSET + IMAGE_SIZE,
# j * SLIDE_OFFSET: j * SLIDE_OFFSET + IMAGE_SIZE, 0:3]
# specify the region to extract and pull it at the proper magnification. If a region is outside
# of the image boundary, the returned tile will be padded with white pixels (background). The region
# coordinates are in the coordinate frame of the original, full-resolution image, so we need to calculate
# them from the analytical coordinates
top_in_orig = int(i * SLIDE_OFFSET * metadata['magnification']/ANALYSIS_MAGNIFICATION)
left_in_orig = int(j * SLIDE_OFFSET * metadata['magnification'] / ANALYSIS_MAGNIFICATION)
image_size_in_orig = int(IMAGE_SIZE* metadata['magnification'] / ANALYSIS_MAGNIFICATION)
myRegion = {'top': top_in_orig, 'left': left_in_orig, 'width': image_size_in_orig, 'height': image_size_in_orig}
rawtile, mimetype = source.getRegion(format=large_image.tilesource.TILE_FORMAT_NUMPY,
region=myRegion, scale={'magnification': ANALYSIS_MAGNIFICATION},
fill="white",output={'maxWidth':IMAGE_SIZE,'maxHeight':IMAGE_SIZE})
test_patch = rawtile[:,:,0:3]
#displayTileMetadata(test_patch,myRegion,i,j)
otsu_patch = otsu_ext[i * SLIDE_OFFSET: i * SLIDE_OFFSET + IMAGE_SIZE,
j * SLIDE_OFFSET: j * SLIDE_OFFSET + IMAGE_SIZE]
if np.sum(otsu_patch) > int(0.05 * IMAGE_SIZE * IMAGE_SIZE):
inference_index.append(patch_iter)
test_patch_tensor[position, :, :, :] = torch.from_numpy(test_patch.transpose(2, 0, 1)
.astype('float32') / 255.0)
position += 1
patch_iter += 1
if position == BATCH_SIZE:
batch_predictions = _infer_batch(model, test_patch_tensor)
for k in range(BATCH_SIZE):
linedup_predictions[inference_index[k], :, :, :] = batch_predictions[k, :, :, :]
position = 0
inference_index = []
# save data to look at
#if (temp_i>100) and (temp_i<400):
if (False):
np.save('hyun-patch-'+unique_identifier+'-'+str(temp_i)+'_'+str(temp_j)+'.npy', test_patch)
print('test_patch shape:', test_patch.shape, 'i:',temp_i,' j:',temp_j)
np.save('hyun-tensor-' + unique_identifier + '-' + str(temp_i) + '_' + str(temp_j) + '.npy', test_patch_tensor.cpu())
print('test_tensor shape:', test_patch.shape, 'i:', temp_i, ' j:', temp_j)
from PIL import Image
im = Image.fromarray(test_patch)
im.save('hyun-patch-'+str(temp_i)+'_'+str(temp_j)+'.jpeg')
# Very last part of the region. This is if there is a partial batch of tiles left at the
# end of the image.
batch_predictions = _infer_batch(model, test_patch_tensor)
for k in range(position):
linedup_predictions[inference_index[k], :, :, :] = batch_predictions[k, :, :, :]
print('GPU inferencing complete. Constructing out image from patches')
patch_iter = 0
for i in range(heights - 2):
for j in range(widths-2):
prob_map_seg[i * SLIDE_OFFSET: i * SLIDE_OFFSET + IMAGE_SIZE,
j * SLIDE_OFFSET: j * SLIDE_OFFSET + IMAGE_SIZE,:] \
+= np.multiply(linedup_predictions[patch_iter, :, :, :], kernel)
weight_sum[i * SLIDE_OFFSET: i * SLIDE_OFFSET + IMAGE_SIZE,
j * SLIDE_OFFSET: j * SLIDE_OFFSET + IMAGE_SIZE,:] \
+= kernel
patch_iter += 1
#np.save("prob_map_seg.npy",prob_map_seg)
#np.save('weight_sum.npy',weight_sum)
prob_map_seg = np.true_divide(prob_map_seg, weight_sum)
prob_map_valid = prob_map_seg[PATCH_OFFSET:PATCH_OFFSET + height, PATCH_OFFSET:PATCH_OFFSET + width, :]
prob_map_valid = _unaugment(b, prob_map_valid)
prob_map_seg_stack += prob_map_valid / num_tta
pred_map_final =
|
np.argmax(prob_map_seg_stack, axis=-1)
|
numpy.argmax
|
import numpy as np
import random
import math
import os
import datetime
from expert_data import store_saved_data_into_replay
import glob
import matplotlib.pyplot as plt
import utils
def split_replay(replay_buffer=None,save_dir=None):
print("In Split replay")
if replay_buffer is None:
replay_buffer = utils.ReplayBuffer_Queue(82, 4, 10100)
# Default expert pid file path
expert_file_path = "./expert_replay_data/Expert_data_11_18_20_0253/"
print("Expert PID filepath: ", expert_file_path)
replay_buffer = store_saved_data_into_replay(replay_buffer, expert_file_path)
# Check and create directory
expert_replay_saving_dir = "./expert_replay_data_split"
if not os.path.isdir(expert_replay_saving_dir):
os.mkdir(expert_replay_saving_dir)
# Size of replay buffer data subset
subset_size = 100
# If number of episodes does not split evenly into 100, round up
num_subsets = math.ceil(replay_buffer.replay_ep_num / subset_size)
print("in SPLIT REPLAY, replay_buffer.replay_ep_num: ",replay_buffer.replay_ep_num)
print("in SPLIT REPLAY, num_subsets: ",num_subsets)
for idx in range(int(num_subsets)):
print("**** idx: ",idx)
print("idx + subset_size: ",idx + subset_size)
# Get the final subset of episodes (even if it is a bit smaller than the subset size)
if replay_buffer.replay_ep_num < idx + subset_size:
subset_size = replay_buffer.replay_ep_num - idx
# Get the beginning timestep index and the ending timestep index within an episode
selected_indexes = np.arange(0, idx*subset_size)
# Get subset batch of replay buffer data
state_subset = [replay_buffer.state[x] for x in selected_indexes]
action_subset = [replay_buffer.action[x] for x in selected_indexes]
next_state_subset = [replay_buffer.next_state[x] for x in selected_indexes]
reward_subset = [replay_buffer.reward[x] for x in selected_indexes]
not_done_subset = [replay_buffer.not_done[x] for x in selected_indexes]
# Set filename for subset
state_filename = "state_" + str(idx) + ".npy"
action_filename = "action_" + str(idx) + ".npy"
next_state_filename = "next_state_" + str(idx) + ".npy"
reward_filename = "reward_" + str(idx) + ".npy"
        not_done_filename = "not_done_" + str(idx) + ".npy"
curr_save_dir = "Expert_data_" + datetime.datetime.now().strftime("%m_%d_%y_%H%M")
if not os.path.exists(os.path.join(expert_replay_saving_dir, curr_save_dir)):
os.makedirs(os.path.join(expert_replay_saving_dir, curr_save_dir))
save_filepath = expert_replay_saving_dir + "/" + curr_save_dir + "/"
print("save_filepath: ", save_filepath)
np.save(save_filepath + state_filename, state_subset)
np.save(save_filepath + action_filename, action_subset)
np.save(save_filepath + next_state_filename, next_state_subset)
np.save(save_filepath + reward_filename, reward_subset)
np.save(save_filepath + not_done_filename, not_done_subset)
np.save(save_filepath + "episodes", replay_buffer.episodes) # Keep track of episode start/finish indexes
np.save(save_filepath + "episodes_info", [replay_buffer.max_episode, replay_buffer.size, replay_buffer.episodes_count, replay_buffer.replay_ep_num])
return save_filepath
def load_split_replay(replay_buffer=None, filepath=None):
print("#### Getting SPLIT expert replay buffer from SAVED location: ",filepath)
expert_state = []
expert_action = []
expert_next_state = []
expert_reward = []
expert_not_done = []
for state_subset in glob.glob(filepath + '/state_*.npy'):
state_subset = filepath + '/' + os.path.basename(state_subset)
print("getting state file: ", state_subset)
state = np.load(state_subset, allow_pickle=True).tolist()
print("state: ", state)
expert_state.append(state)
print("expert_state: ",expert_state)
for action_subset in glob.glob(filepath + '/action_*.npy'):
expert_action.append(np.load(action_subset, allow_pickle=True).tolist())
for next_state_subset in glob.glob(filepath + '/next_state_*.npy'):
expert_next_state.append(np.load(next_state_subset, allow_pickle=True).tolist())
for reward_subset in glob.glob(filepath + '/reward_*.npy'):
expert_reward.append(np.load(reward_subset, allow_pickle=True).tolist())
for not_done_subset in glob.glob(filepath + '/not_done_*.npy'):
expert_not_done.append(np.load(not_done_subset, allow_pickle=True).tolist())
expert_episodes = np.load(filepath + "/episodes.npy", allow_pickle=True).tolist() # Keep track of episode start/finish indexes
expert_episodes_info =
|
np.load(filepath + "/episodes_info.npy", allow_pickle=True)
|
numpy.load
|
"""
@package ion_functions.test.test_wav_functions
@file ion_functions/test/test_wav_functions.py
@author <NAME>
@brief Unit tests for wav_functions module
"""
import numpy as np
from ion_functions.test.base_test import BaseUnitTestCase
from nose.plugins.attrib import attr
from ion_functions.data import wav_functions as wv
from ion_functions.utils import fill_value as vfill
@attr('UNIT', group='func')
class TestWAVFunctionsUnit(BaseUnitTestCase):
def setUp(self):
# repeat count for testing multiple record cases
self.nrep = 10
# for calculating dataproducts from # of values, initial value, and interval
self.nvalue = np.array([5])
self.value0 = np.array([0.01])
self.deltav = np.array([0.001])
# resulting values are (2D; row vector)
self.ivalue = np.array([[0.010, 0.011, 0.012, 0.013, 0.014]])
# for testing the correction of buoy displacement values for magnetic declination
# (some values taken from the unit test module test_adcp_functions.py)
self.lat = np.array([50.0, 45.0])
self.lon = np.array([-145.0, -128.0])
self.ntp = np.array([3545769600.0, 3575053740.0])
self.xx = np.array([[0.2175, -0.2814, -0.1002, 0.4831, 1.2380],
[0.2455, 0.6218, -0.1807, 0.0992, -0.9063]])
self.yy = np.array([[-0.3367, -0.1815, -1.0522, -0.8676, -0.8919],
[0.2585, -0.8497, -0.0873, 0.3073, 0.5461]])
# set expected results -- magnetic variation correction applied
# (computed in Matlab using above values and mag_var.m)
self.xx_cor = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243],
[0.3087, 0.3555, -0.1980, 0.1822, -0.7144]])
self.yy_cor = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140],
[0.1783, -0.9911, -0.0325, 0.2666, 0.7805]])
def test_wav_triaxys_nondir_freq(self):
"""
Tests calculation of non-directional wave frequency bin values for WAVSS instruments.
Values were not defined in DPS, are created above.
OOI (2012). Data Product Specification for Wave Statistics. Document Control
Number 1341-00450. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00450_Data_Product_WAVE_STATISTICS_OOI.pdf)
2014-04-08: <NAME>. Initial code.
2015-04-27: <NAME>. Verified that code is compliant with time-vectorized inputs.
"""
# the single input record case
desired = self.ivalue
actual = wv.wav_triaxys_nondir_freq(self.nvalue, self.value0, self.deltav)
# test
np.testing.assert_allclose(actual, desired, rtol=1e-8, atol=0)
#print self.nvalue.shape, self.value0.shape, self.deltav.shape
#print actual.shape, desired.shape
# the multi-record case -- inputs
nvalues = np.repeat(self.nvalue, self.nrep)
value0s = np.repeat(self.value0, self.nrep)
deltavs = np.repeat(self.deltav, self.nrep)
# the multi-record case -- outputs
desired = np.tile(self.ivalue, (self.nrep, 1))
actual = wv.wav_triaxys_nondir_freq(nvalues, value0s, deltavs)
# test
np.testing.assert_allclose(actual, desired, rtol=1e-8, atol=0)
#print nvalues.shape, value0s.shape, deltavs.shape
#print actual.shape, desired.shape
def test_wav_triaxys_dir_freq(self):
"""
Tests calculation of directional wave frequency bin values for WAVSS instruments.
Values were not defined in DPS, are created here and above.
OOI (2012). Data Product Specification for Wave Statistics. Document Control
Number 1341-00450. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00450_Data_Product_WAVE_STATISTICS_OOI.pdf)
2014-04-08: <NAME>. Initial code.
2015-04-27: <NAME>. Verified that code is compliant with time-vectorized inputs.
"""
# the single input record case
desired = self.ivalue
actual = wv.wav_triaxys_dir_freq(self.nvalue, self.nvalue, self.value0, self.deltav)
# test
np.testing.assert_allclose(actual, desired, rtol=1e-8, atol=0)
#print self.nvalue.shape, self.value0.shape, self.deltav.shape
#print actual.shape, desired.shape
# the multi-record case -- all nvalues_dir are equal -- inputs
nvalues = np.repeat(self.nvalue, self.nrep)
value0s = np.repeat(self.value0, self.nrep)
deltavs = np.repeat(self.deltav, self.nrep)
# the multi-record case -- all nvalues_dir are equal -- outputs
desired = np.tile(self.ivalue, (self.nrep, 1))
actual = wv.wav_triaxys_dir_freq(nvalues, nvalues, value0s, deltavs)
# test
np.testing.assert_allclose(actual, desired, rtol=1e-8, atol=0)
#print nvalues.shape, value0s.shape, deltavs.shape
#print actual.shape, desired.shape
# the multi-record case -- all nvalues_dir are not the same -- inputs
nvalues_nondir = np.repeat(self.nvalue, self.nrep)
nvalues_dir = np.array([4, 5, 3, 5, 4, 5, 1, 2, 5, 3])
value0s = np.repeat(self.value0, self.nrep)
deltavs = np.repeat(self.deltav, self.nrep)
# the multi-record case -- all nvalues_dir are not the same -- outputs
desired = np.array([[0.010, 0.011, 0.012, 0.013, vfill],
[0.010, 0.011, 0.012, 0.013, 0.014],
[0.010, 0.011, 0.012, vfill, vfill],
[0.010, 0.011, 0.012, 0.013, 0.014],
[0.010, 0.011, 0.012, 0.013, vfill],
[0.010, 0.011, 0.012, 0.013, 0.014],
[0.010, vfill, vfill, vfill, vfill],
[0.010, 0.011, vfill, vfill, vfill],
[0.010, 0.011, 0.012, 0.013, 0.014],
[0.010, 0.011, 0.012, vfill, vfill]])
#print nvalues_nondir.shape, nvalues_dir.shape, value0s.shape, deltavs.shape
actual = wv.wav_triaxys_dir_freq(nvalues_nondir, nvalues_dir, value0s, deltavs)
# test
np.testing.assert_allclose(actual, desired, rtol=1e-8, atol=0)
#print nvalues_nondir.shape, nvalues_dir.shape, value0s.shape, deltavs.shape
#print actual.shape, desired.shape
def test_wav_triaxys_buoymotion_time(self):
"""
Tests calculation of times corresponding to (x,y,z) buoy displacement measurements
for WAVSS instruments.
Values were not defined in DPS, are created above.
OOI (2012). Data Product Specification for Wave Statistics. Document Control
Number 1341-00450. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00450_Data_Product_WAVE_STATISTICS_OOI.pdf)
2014-04-08: <NAME>. Initial code.
2015-04-30: <NAME>. Revised multi-record case to use time-vectorized
ntp_timestamps; the DPA algorithm itself
(wav_triaxys_buoymotion_time) was correctly coded
and did not need to be modified.
"""
# the single input record case
ntp_timestamp = np.array([3176736750.736])
desired = self.ivalue + ntp_timestamp
actual = wv.wav_triaxys_buoymotion_time(ntp_timestamp, self.nvalue, self.value0, self.deltav)
# test to msec
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.001)
#print ntp_timestamp.shape, self.nvalue.shape, self.value0.shape, self.deltav.shape
#print actual.shape, desired.shape
# the multi-record case -- inputs
t_rep = 3
nvalues = np.repeat(self.nvalue, t_rep)
value0s = np.repeat(self.value0, t_rep)
deltavs = np.repeat(self.deltav, t_rep)
# time vectorize ntp_timestamp, too; pick a 1 hr sampling interval
ntp_timestamp = ntp_timestamp + np.array([0.0, 3600.0, 7200.0])
# the multi-record case -- outputs
# reshape timestamp array into a column vector then tile to number of ivalue columns
ntp_2D = np.tile(ntp_timestamp.reshape(-1, 1), (1, self.ivalue.shape[-1]))
desired = ntp_2D + np.tile(self.ivalue, (t_rep, 1))
actual = wv.wav_triaxys_buoymotion_time(ntp_timestamp, nvalues, value0s, deltavs)
#print ntp_timestamp.shape, nvalues.shape, value0s.shape, deltavs.shape
#ntp_floor = np.floor(ntp_timestamp[0])
#print actual.shape, actual - ntp_floor
#print desired.shape, desired - ntp_floor
# test to msec
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.001)
def test_wav_triaxys_correct_mean_wave_direction(self):
"""
Tests magnetic declination correction of mean wave direction WAVSTAT-D_L0
from WAVSS instruments.
Values were not defined in DPS. The values for the magnetic declination
test are calculated directly from the magnetic_declination function in
ion_functions/data/generic_functions.py.
OOI (2012). Data Product Specification for Wave Statistics. Document Control
Number 1341-00450. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00450_Data_Product_WAVE_STATISTICS_OOI.pdf)
2014-04-08: <NAME>. Initial code.
2015-04-27: <NAME>. Verified that code is compliant with time-vectorized inputs.
"""
# the single input record case
dir_raw = np.array([50.0])
lat = np.array([45.0])
lon = np.array([-128.0])
ntp_ts = np.array([3575053740.0])
desired = np.array([50.0 + 16.461005])
actual = wv.wav_triaxys_correct_mean_wave_direction(dir_raw, lat, lon, ntp_ts)
# test to first decimal place, in case model changes
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.1)
#print dir_raw.shape, lat.shape, lon.shape, ntp_ts.shape
#print actual.shape, desired.shape
# the multi-record case -- inputs
# test "going around the corner" in both directions.
dir_raw = np.array([50.0, 350.0, 1.0])
lat = np.array([45.0, 45.0, 80.0])
lon = np.array([-128.0, -128.0, 0.0])
ntp_ts = np.array([3575053740.0, 3575053740.0, 3471292800.0])
# the multi-record case -- outputs
desired = np.array([66.461005, 366.461005 - 360.0, 1.0 - 6.133664 + 360.0])
actual = wv.wav_triaxys_correct_mean_wave_direction(dir_raw, lat, lon, ntp_ts)
# test
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.1)
#print dir_raw.shape, lat.shape, lon.shape, ntp_ts.shape
#print actual.shape, desired.shape
def test_wav_triaxys_correct_directional_wave_direction(self):
"""
Tests magnetic declination correction of directional wave directions WAVSTAT-DDS_L0
from WAVSS instruments.
Values were not defined in DPS. The values for the magnetic declination
test are calculated directly from the magnetic_declination function in
ion_functions/data/generic_functions.py.
OOI (2012). Data Product Specification for Wave Statistics. Document Control
Number 1341-00450. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00450_Data_Product_WAVE_STATISTICS_OOI.pdf)
2014-04-10: <NAME>. Initial code.
2015-04-27: <NAME>. Verified that code is compliant with time-vectorized inputs.
"""
# the single input record case - no fill values (nfreq_dir = nfreq_nondir)
dir_raw = np.array([[50.0, 1.0, 359.0, 180.0, 245.0]])
lat = np.array([45.0])
lon = np.array([-128.0])
ntp_ts = np.array([3575053740.0])
# outputs
desired = np.array([[66.461, 17.461, 15.461, 196.461, 261.461]])
actual = wv.wav_triaxys_correct_directional_wave_direction(dir_raw, lat, lon, ntp_ts)
# test to first decimal place, in case model changes
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.1)
#print dir_raw.shape, lat.shape, lon.shape, ntp_ts.shape
#print actual.shape, desired.shape
# the single input record case - with fill values (nfreq_dir < nfreq_nondir)
dir_raw = np.array([[50.0, 1.0, 359.0, vfill, vfill]])
lat = np.array([45.0])
lon = np.array([-128.0])
ntp_ts = np.array([3575053740.0])
# outputs
desired = np.array([[66.461, 17.461, 15.461, vfill, vfill]])
actual = wv.wav_triaxys_correct_directional_wave_direction(dir_raw, lat, lon, ntp_ts)
# test to first decimal place, in case model changes
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.1)
#print dir_raw.shape, lat.shape, lon.shape, ntp_ts.shape
#print actual.shape, desired.shape
# the multi-record case -- inputs
# test "going around the corner" in both directions.
dir_raw = np.array([[50.0, 350.0, 1.0, 170.0, 240.0, vfill],
[150.0, 250.0, 11.0, vfill, vfill, vfill],
[50.0, 350.0, 1.0, 170.0, vfill, vfill]])
lat = np.array([45.0, 45.0, 80.0])
lon = np.array([-128.0, -128.0, 0.0])
ntp_ts = np.array([3575053740.0, 3575053740.0, 3471292800.0])
# the multi-record case -- outputs
desired = np.array([[66.461, 6.461, 17.461, 186.461, 256.461, vfill],
[166.461, 266.461, 27.461, vfill, vfill, vfill],
[43.866, 343.866, 354.866, 163.866, vfill, vfill]])
actual = wv.wav_triaxys_correct_directional_wave_direction(dir_raw, lat, lon, ntp_ts)
# test
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.1)
#print dir_raw.shape, lat.shape, lon.shape, ntp_ts.shape
#print actual.shape, desired.shape
def test_wav_triaxys_magcor_buoymotion_x(self):
"""
Tests calculation of magnetic corrections to eastward buoy displacements for WAVSS instruments.
Values were not defined in DPS, are created as documented in setup module above.
OOI (2012). Data Product Specification for Wave Statistics. Document Control
Number 1341-00450. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00450_Data_Product_WAVE_STATISTICS_OOI.pdf)
2014-04-10: <NAME>. Initial code.
2015-04-27: <NAME>. Verified that code is compliant with time-vectorized inputs.
"""
# the single input record case
lat = self.lat[[0]]
lon = self.lon[[0]]
ntp = self.ntp[[0]]
xx = self.xx[[0], :]
yy = self.yy[[0], :]
# outputs
desired = self.xx_cor[[0], :]
actual = wv.wav_triaxys_magcor_buoymotion_x(xx, yy, lat, lon, ntp)
# test
np.testing.assert_allclose(actual, desired, rtol=0, atol=0.0001)
#print xx.shape, yy.shape, lat.shape, lon.shape, ntp.shape
#print actual.shape, desired.shape
# multiple records
desired =
|
np.array(self.xx_cor)
|
numpy.array
|
from collections import OrderedDict
from taps.db import Database
import sqlite3
from scipy.spatial import KDTree
import time
import numpy as np
from numpy import concatenate as cat
from numpy import newaxis as nax
from taps.utils.antenna import packing
from taps.projectors import Projector
class ImageDatabase(Database):
"""
Database for image
TODO: Make it handle Coordinate class
"""
entries=OrderedDict(
coord='blob',
label='text',
status='text',
start_time='real',
potential='blob',
potentials='blob',
gradients='blob',
finish_time='real',
positions='blob',
forces='blob'
)
def __init__(self, data_ids=None, data_bounds=None, table_name='image',
prj=None, timeout=60, **kwargs):
self.data_ids = data_ids or list()
self.data_bounds = data_bounds or {}
self.table_name = table_name
self.prj = prj or Projector()
self.timeout = timeout
super().__init__(**kwargs)
def read(self, ids, prj=None, table_name=None, entries=None, **kwargs):
prj = prj or self.prj
entries = entries or self.entries
table_name = table_name or self.table_name
data = super().read(ids,
table_name=table_name, entries=entries, **kwargs)
return data
def read_all(self, prj=None, table_name=None, entries=None, **kwargs):
prj = prj or self.prj
entries = entries or self.entries
table_name = table_name or self.table_name
data = super().read_all(table_name=table_name, entries=entries,
**kwargs)
return data
def write(self, data, prj=None, table_name=None, entries=None, **kwargs):
prj = prj or self.prj
entries = entries or self.entries
table_name = table_name or self.table_name
return super().write(data, table_name=table_name,
entries=entries, **kwargs)
def update(self, ids, data, prj=None, table_name=None, entries=None,
**kwargs):
prj = prj or self.prj
entries = entries or self.entries
table_name = table_name or self.table_name
super().update(ids, data, table_name=table_name, entries=entries,
**kwargs)
def delete(self, ids, prj=None, table_name=None, entries=None, **kwargs):
prj = prj or self.prj
entries = entries or self.entries
table_name = table_name or self.table_name
super().delete(ids, table_name=table_name, entries=entries, **kwargs)
def get_image_data(self, paths, prj=None, data_ids=None, data_bounds=None):
"""
data_ids : dictionary of lists
{'image': [...], 'descriptor': [...]}
for each key, return
'coords' -> 'X'; D x M
'potential' -> 'V'; M
'gradient' -> 'F'; D x M
return : dictionary containing X, V, F
{'X' : np.array(D x M), 'V': np.array(M), }
"""
prj = prj or self.prj
data_ids = data_ids or self.data_ids
data_bounds = data_bounds or getattr(self, 'data_bounds', {})
pcoords = prj.x(paths.coords(index=[0]))
shape = pcoords.shape[:-1]
# Initial state
if self._cache.get('old_data_ids') is None:
shape_raw = paths.coords(index=[0]).shape[:-1]
self._cache['old_data_ids'] = []
self._cache['data'] = {
'coords': np.zeros((*shape, 0), dtype=float),
'potential': np.zeros(0, dtype=float),
'gradients': np.zeros((*shape, 0), dtype=float),
'coords_raw': np.zeros((*shape_raw, 0), dtype=float),
'gradients_raw': np.zeros((*shape_raw, 0), dtype=float),
'data_ids': [],
'changed': True,
}
if self._cache.get('old_data_ids') == data_ids:
self._cache['data']['changed'] = False
return self._cache['data']
else:
self._cache['data']['changed'] = True
new_data_ids = []
for id in data_ids:
if id not in self._cache['old_data_ids']:
new_data_ids.append(id)
self._cache['data']['data_ids'].append(id)
new_data = self.read(new_data_ids)
M = len(new_data_ids)
data = self._cache['data']
keys = list(new_data[0].keys())
if 'coord' in keys:
coords_raw = []
coords_prj = []
for i in range(M):
coord_raw = new_data[i]['coord'][..., nax]
coord_prj = prj._x(new_data[i]['coord'][..., nax])
coords_raw.append(coord_raw)
coords_prj.append(coord_prj)
if M != 0:
new_coords_raw = cat(coords_raw, axis=-1)
new_coords_prj = cat(coords_prj, axis=-1)
data['coords'] = cat([data['coords'],
new_coords_prj], axis=-1)
data['coords_raw'] = cat([data['coords_raw'],
new_coords_raw], axis=-1)
if 'potential' in keys:
potential = []
for i in range(M):
new_pot = new_data[i]['potential']
# Bounds for pot
if data_bounds.get('potential') is not None:
ub = data_bounds['potential'].get('upperbound')
lb = data_bounds['potential'].get('lowerbound')
if ub is not None and new_pot[0] > ub:
print('Potential ub fix')
new_pot = [ub]
elif lb is not None and new_pot[0] < lb:
print('Potential lb fix')
new_pot = [lb]
potential.append(new_pot)
if M != 0:
new_potential = np.concatenate(potential, axis=-1)
data['potential'] = np.concatenate([data['potential'],
new_potential], axis=-1)
if 'gradients' in keys:
gradients_raws = []
gradients_prjs = []
for i in range(M):
coords_raw = new_data[i]['coord'][..., nax]
gradients_raw = new_data[i]['gradients'].copy()
gradients_prj, _ = prj.f(gradients_raw,
pcoords.similar(coords_raw))
if data_bounds.get('gradients') is not None:
ub = data_bounds['gradients'].get('upperbound')
lb = data_bounds['gradients'].get('lowerbound')
if ub is not None:
if np.any(gradients_raw > ub):
print('Gradients ub fix')
gradients_raw[gradients_raw > ub] = ub
gradients_prj[gradients_prj > ub] = ub
elif lb is not None:
if np.any(gradients_raw < lb):
print('Gradients lb fix')
gradients_raw[gradients_raw < lb] = lb
gradients_prj[gradients_prj < lb] = lb
gradients_raws.append(gradients_raw)
gradients_prjs.append(gradients_prj)
if M != 0:
new_grad_raw = cat(gradients_raws, axis=-1)
new_grad_prj =
|
cat(gradients_prjs, axis=-1)
|
numpy.concatenate
|
from __future__ import division
import numpy as np
import netCDF4
from collections import OrderedDict
from kid_readout.analysis.resonator import Resonator
class Dataset(object):
def __init__(self, filename):
self.ds = netCDF4.Dataset(filename)
def sweep(self, sweep_name):
"""
Return the netCDF4.Group object corresponding to sweep_name.
"""
match = [group for name, group in self.ds.groups['sweeps'].groups.items()
if sweep_name in name]
if not match:
raise ValueError("No sweep names contain {0}".format(sweep_name))
elif len(match) > 1:
raise ValueError("Multiple sweep names contain {0}".format(sweep_name))
else:
return match.pop()
def sweep_names(self):
return self.ds.groups['sweeps'].groups.keys()
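# Minimal usage sketch (hypothetical file path): open a readout file and fetch the
# unique sweep group whose name contains a given substring.
# ds = Dataset('/data/readout/2014-01-01_120000.nc')
# grp = ds.sweep('power_sweep')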
class CoarseSweep(object):
"""
This interface is going to change soon -- it will figure out
everything from the netCDF group, like the FineSweep.
"""
def __init__(self, f, s21, coarse_resolution, n_subsweeps):
self.f = f
self.s21 = s21
self.coarse_resolution = coarse_resolution
self.n_subsweeps = n_subsweeps
self.resolution = self.coarse_resolution / self.n_subsweeps
def nth_subsweep(self, n):
f_int =
|
np.round((self.f-self.f[0])/self.resolution)
|
numpy.round
|
import numpy as np
# region helper functions
# Coefficient matrix for the RGB to XYZ colour-space conversion
M = np.array([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]])
# im_channel value range: [0, 1]
def f(im_channel):
return np.power(im_channel, 1 / 3) if im_channel > 0.008856 else 7.787 * im_channel + 0.137931
def anti_f(im_channel):
return np.power(im_channel, 3) if im_channel > 0.206893 else (im_channel - 0.137931) / 7.787
# endregion
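# Sanity note (illustrative): the two branches of f meet at the CIE threshold
# 0.008856, where both t ** (1 / 3) and 7.787 * t + 0.137931 are about 0.2069;
# anti_f is the inverse mapping with the matching threshold 0.206893.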
# region RGB to Lab
# Convert an RGB pixel value to XYZ space, pixel format: (R, G, B)
# Returns the value in XYZ space
def __rgb2xyz__(pixel):
r, g, b = pixel[0], pixel[1], pixel[2]
rgb = np.array([r, g, b])
# rgb = rgb / 255.0
# RGB = np.array([gamma(c) for c in rgb])
XYZ = np.dot(M, rgb.T)
XYZ = XYZ / 255.0
return (XYZ[0] / 0.95047, XYZ[1] / 1.0, XYZ[2] / 1.08883)
def __xyz2lab__(xyz):
"""
    Convert XYZ space to Lab space
    :param xyz: pixel value in XYZ space
    :return: value in Lab space
"""
F_XYZ = [f(x) for x in xyz]
L = 116 * F_XYZ[1] - 16 if xyz[1] > 0.008856 else 903.3 * xyz[1]
a = 500 * (F_XYZ[0] - F_XYZ[1])
b = 200 * (F_XYZ[1] - F_XYZ[2])
return (L, a, b)
def RGB2Lab(pixel):
"""
    Convert RGB space to Lab space
    :param pixel: pixel value in RGB space, format: [G, B, R]
    :return: value in Lab space
"""
xyz = __rgb2xyz__(pixel)
Lab = __xyz2lab__(xyz)
return tuple(Lab)
# endregion
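# Quick sanity check (illustrative): pure white in 8-bit RGB should map to roughly
# L ~ 100, a ~ 0, b ~ 0, e.g. RGB2Lab((255, 255, 255)).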
# region Lab -> RGB
def __lab2xyz__(Lab):
fY = (Lab[0] + 16.0) / 116.0
fX = Lab[1] / 500.0 + fY
fZ = fY - Lab[2] / 200.0
x = anti_f(fX)
y = anti_f(fY)
z = anti_f(fZ)
x = x * 0.95047
y = y * 1.0
    z = z * 1.08883
return (x, y, z)
def __xyz2rgb(xyz):
    xyz = np.array(xyz)
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import random
from functools import partial
import numpy as np
import pytest
from pde import CartesianGrid, UnitGrid
from pde.grids.boundaries import Boundaries, DomainError, PeriodicityError
def _get_cartesian_grid(dim=2, periodic=True):
""" return a random Cartesian grid of given dimension """
bounds = [[0, 1 + np.random.random()] for _ in range(dim)]
shape = np.random.randint(32, 64, size=dim)
return CartesianGrid(bounds, shape, periodic=periodic)
def test_degenerated_grid():
""" test degenerated grids """
with pytest.raises(ValueError):
UnitGrid([])
with pytest.raises(ValueError):
CartesianGrid([], 1)
def test_generic_cartesian_grid():
""" test generic cartesian grid functions """
for dim in (1, 2, 3):
periodic = random.choices([True, False], k=dim)
shape = np.random.randint(2, 8, size=dim)
a = np.random.random(dim)
b = a + np.random.random(dim)
cases = [
UnitGrid(shape, periodic=periodic),
CartesianGrid(np.c_[a, b], shape, periodic=periodic),
]
for grid in cases:
assert grid.dim == dim
dim_axes = len(grid.axes) + len(grid.axes_symmetric)
assert dim_axes == dim
vol = np.prod(grid.discretization) * np.prod(shape)
assert grid.volume == pytest.approx(vol)
assert grid.uniform_cell_volumes
# random points
points = [
[np.random.uniform(a[i], b[i]) for i in range(dim)] for _ in range(10)
]
c = grid.point_to_cell(points)
p = grid.cell_to_point(c)
np.testing.assert_array_equal(c, grid.point_to_cell(p))
assert grid.contains_point(grid.get_random_point())
w = 0.499 * (b - a).min()
assert grid.contains_point(grid.get_random_point(w))
assert "laplace" in grid.operators
@pytest.mark.parametrize("periodic", [True, False])
def test_unit_grid_1d(periodic):
""" test 1D grids """
grid = UnitGrid(4, periodic=periodic)
assert grid.dim == 1
assert grid.numba_type == "f8[:]"
assert grid.volume == 4
np.testing.assert_array_equal(grid.discretization, np.ones(1))
dist, angle = grid.polar_coordinates_real(0, ret_angle=True)
if periodic:
np.testing.assert_allclose(dist, [0.5, 1.5, 1.5, 0.5])
else:
np.testing.assert_allclose(dist, np.arange(4) + 0.5)
assert angle.shape == (4,)
grid = UnitGrid(8, periodic=periodic)
assert grid.dim == 1
assert grid.volume == 8
norm_numba = grid.make_normalize_point_compiled(reflect=False)
def norm_numba_wrap(x):
y = np.array([x])
norm_numba(y)
return y
for normalize in [partial(grid.normalize_point, reflect=False), norm_numba_wrap]:
if periodic:
np.testing.assert_allclose(normalize(-1e-10), 8 - 1e-10)
np.testing.assert_allclose(normalize(1e-10), 1e-10)
np.testing.assert_allclose(normalize(8 - 1e-10), 8 - 1e-10)
np.testing.assert_allclose(normalize(8 + 1e-10), 1e-10)
else:
for x in [-1e-10, 1e-10, 8 - 1e-10, 8 + 1e-10]:
np.testing.assert_allclose(normalize(x), x)
grid = UnitGrid(8, periodic=periodic)
# test conversion between polar and Cartesian coordinates
c1 = grid.cell_coords
p = np.random.random(1) * grid.shape
d, a = grid.polar_coordinates_real(p, ret_angle=True)
c2 = grid.from_polar_coordinates(d, a, p)
assert np.allclose(grid.distance_real(c1, c2), 0)
# test boundary points
np.testing.assert_equal(grid._boundary_coordinates(0, False), np.array([0]))
np.testing.assert_equal(grid._boundary_coordinates(0, True), np.array([8]))
def test_unit_grid_2d():
""" test 2D grids """
# test special case
grid = UnitGrid([4, 4], periodic=True)
assert grid.dim == 2
assert grid.numba_type == "f8[:, :]"
assert grid.volume == 16
np.testing.assert_array_equal(grid.discretization, np.ones(2))
assert grid.get_image_data(np.zeros(grid.shape))["extent"] == [0, 4, 0, 4]
for _ in range(10):
p = np.random.randn(2)
assert np.all(grid.polar_coordinates_real(p) < np.sqrt(8))
large_enough = grid.polar_coordinates_real((0, 0)) > np.sqrt(4)
assert np.any(large_enough)
periodic = random.choices([True, False], k=2)
grid = UnitGrid([4, 4], periodic=periodic)
assert grid.dim == 2
assert grid.volume == 16
assert grid.polar_coordinates_real((1, 1)).shape == (4, 4)
grid = UnitGrid([4, 8], periodic=periodic)
assert grid.dim == 2
assert grid.volume == 32
assert grid.polar_coordinates_real((1, 1)).shape == (4, 8)
# test conversion between polar and Cartesian coordinates
c1 = grid.cell_coords
p = np.random.random(2) * grid.shape
d, a = grid.polar_coordinates_real(p, ret_angle=True)
c2 = grid.from_polar_coordinates(d, a, p)
assert np.allclose(grid.distance_real(c1, c2), 0)
# test boundary points
np.testing.assert_equal(
grid._boundary_coordinates(0, False),
np.c_[np.full(8, 0), np.linspace(0.5, 7.5, 8)],
)
np.testing.assert_equal(
grid._boundary_coordinates(0, True),
np.c_[np.full(8, 4), np.linspace(0.5, 7.5, 8)],
)
np.testing.assert_equal(
grid._boundary_coordinates(1, False),
np.c_[np.linspace(0.5, 3.5, 4), np.full(4, 0)],
)
np.testing.assert_equal(
grid._boundary_coordinates(1, True),
np.c_[np.linspace(0.5, 3.5, 4), np.full(4, 8)],
)
def test_unit_grid_3d():
""" test 3D grids """
grid = UnitGrid([4, 4, 4])
assert grid.dim == 3
assert grid.numba_type == "f8[:, :, :]"
assert grid.volume == 64
np.testing.assert_array_equal(grid.discretization, np.ones(3))
assert grid.get_image_data(np.zeros(grid.shape))["extent"] == [0, 4, 0, 4]
assert grid.polar_coordinates_real((1, 1, 3)).shape == (4, 4, 4)
periodic = random.choices([True, False], k=3)
grid = UnitGrid([4, 6, 8], periodic=periodic)
assert grid.dim == 3
assert grid.volume == 192
assert grid.polar_coordinates_real((1, 1, 2)).shape == (4, 6, 8)
grid = UnitGrid([4, 4, 4], periodic=True)
assert grid.dim == 3
assert grid.volume == 64
for _ in range(10):
p = np.random.randn(3)
not_too_large = grid.polar_coordinates_real(p) < np.sqrt(12)
assert np.all(not_too_large)
large_enough = grid.polar_coordinates_real((0, 0, 0)) > np.sqrt(6)
assert np.any(large_enough)
# test boundary points
for bndry in grid._iter_boundaries():
assert grid._boundary_coordinates(*bndry).shape == (4, 4, 3)
def test_rect_grid_1d():
""" test 1D grids """
grid = CartesianGrid([32], 16, periodic=False)
assert grid.dim == 1
assert grid.volume == 32
assert grid.typical_discretization == 2
np.testing.assert_array_equal(grid.discretization, np.full(1, 2))
assert grid.polar_coordinates_real(0).shape == (16,)
grid = CartesianGrid([[-16, 16]], 8, periodic=True)
assert grid.cuboid.pos == [-16]
assert grid.shape == (8,)
assert grid.dim == 1
assert grid.volume == 32
assert grid.typical_discretization == 4
assert grid.polar_coordinates_real(1).shape == (8,)
np.testing.assert_allclose(grid.normalize_point(-16 - 1e-10), 16 - 1e-10)
np.testing.assert_allclose(grid.normalize_point(-16 + 1e-10), -16 + 1e-10)
np.testing.assert_allclose(grid.normalize_point(16 - 1e-10), 16 - 1e-10)
np.testing.assert_allclose(grid.normalize_point(16 + 1e-10), -16 + 1e-10)
for periodic in [True, False]:
a, b = np.random.random(2)
grid = CartesianGrid([[a, a + b]], 8, periodic=periodic)
# test conversion between polar and Cartesian coordinates
c1 = grid.cell_coords
p = np.random.random(1) * grid.shape
d, a = grid.polar_coordinates_real(p, ret_angle=True)
c2 = grid.from_polar_coordinates(d, a, p)
assert np.allclose(grid.distance_real(c1, c2), 0)
def test_rect_grid_2d():
""" test 2D grids """
grid = CartesianGrid([[2], [2]], 4, periodic=True)
assert grid.get_image_data(np.zeros(grid.shape))["extent"] == [0, 2, 0, 2]
for _ in range(10):
p = np.random.randn(2)
assert np.all(grid.polar_coordinates_real(p) < np.sqrt(2))
periodic = random.choices([True, False], k=2)
grid = CartesianGrid([[4], [4]], 4, periodic=periodic)
assert grid.dim == 2
assert grid.volume == 16
np.testing.assert_array_equal(grid.discretization, np.ones(2))
assert grid.typical_discretization == 1
assert grid.polar_coordinates_real((1, 1)).shape == (4, 4)
grid = CartesianGrid([[-2, 2], [-2, 2]], [4, 8], periodic=periodic)
assert grid.dim == 2
assert grid.volume == 16
assert grid.typical_discretization == 0.75
assert grid.polar_coordinates_real((1, 1)).shape == (4, 8)
# test conversion between polar and Cartesian coordinates
c1 = grid.cell_coords
p = np.random.random(2) * grid.shape
d, a = grid.polar_coordinates_real(p, ret_angle=True)
c2 = grid.from_polar_coordinates(d, a, p)
assert np.allclose(grid.distance_real(c1, c2), 0)
def test_rect_grid_3d():
""" test 3D grids """
grid = CartesianGrid([4, 4, 4], 4)
assert grid.dim == 3
assert grid.volume == 64
assert grid.typical_discretization == 1
np.testing.assert_array_equal(grid.discretization, np.ones(3))
assert grid.polar_coordinates_real((1, 1, 3)).shape == (4, 4, 4)
bounds = [[-2, 2], [-2, 2], [-2, 2]]
grid = CartesianGrid(bounds, [4, 6, 8])
assert grid.dim == 3
np.testing.assert_allclose(grid.axes_bounds, bounds)
assert grid.volume == 64
assert grid.typical_discretization == pytest.approx(0.7222222222222)
assert grid.polar_coordinates_real((1, 1, 2)).shape == (4, 6, 8)
grid = CartesianGrid([[2], [2], [2]], 4, periodic=True)
for _ in range(10):
p = np.random.randn(3)
assert np.all(grid.polar_coordinates_real(p) < np.sqrt(3))
@pytest.mark.parametrize("periodic", [True, False])
def test_unit_rect_grid(periodic):
"""test whether the rectangular grid behaves like a unit grid in special cases"""
dim = random.randrange(1, 4)
    shape = np.random.randint(2, 10, size=dim)
#-------------------------------------------------------------------------------
# Copyright 2016 - 2021 Esri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A copy of the license and additional notices are located with the
# source distribution at:
#
# http://github.com/Esri/lerc/
#
# Contributors: <NAME>
#
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#
# How to run the test:
#
# You need 2 files, this file "_lerc.py" and the Lerc dll (for Windows) or the
# Lerc .so file (for Linux) next to it. Then
#
# >>> import _lerc
# >>> _lerc.test()
#
#-------------------------------------------------------------------------------
import numpy as np
import ctypes as ct
from timeit import default_timer as timer
import platform
import os
def _get_lib():
dir_path = os.path.dirname(os.path.realpath(__file__))
if platform.system() == "Windows":
lib = os.path.join(dir_path, 'Lerc.dll')
elif platform.system() == "Linux":
lib = os.path.join(dir_path, 'Lerc.so')
elif platform.system() == "Darwin":
lib = os.path.join(dir_path, 'Lerc.dylib')
else:
lib = None
if not lib or not os.path.exists(lib):
import ctypes.util
lib = ctypes.util.find_library('Lerc')
return lib
lercDll = ct.CDLL(_get_lib())
del _get_lib
#-------------------------------------------------------------------------------
# helper functions:
# data types supported by Lerc, all little endian byte order
def getLercDatatype(npDtype):
switcher = {
np.dtype('b'): 0, # char or int8
np.dtype('B'): 1, # byte or uint8
np.dtype('h'): 2, # short or int16
np.dtype('H'): 3, # ushort or uint16
np.dtype('i'): 4, # int or int32
np.dtype('I'): 5, # uint or uint32
np.dtype('f'): 6, # float or float32
np.dtype('d'): 7 # double or float64
}
return switcher.get(npDtype, -1)
#-------------------------------------------------------------------------------
# Lerc expects an image of size nRows x nCols.
# Optionally, it allows multiple values per pixel (e.g. [RGB, RGB, RGB, ...] or any array of values per pixel) as a 3rd dimension.
# Optionally, it allows multiple bands as an outer 3rd or 4th dimension.
def getLercShape(npArr, nValuesPerPixel):
nBands = 1
dim = npArr.ndim
npShape = npArr.shape
if nValuesPerPixel == 1:
if dim == 2:
(nRows, nCols) = npShape
elif dim == 3:
(nBands, nRows, nCols) = npShape # or band interleaved
elif nValuesPerPixel > 1:
if dim == 3:
(nRows, nCols, nValpp) = npShape # or pixel interleaved
elif dim == 4:
(nBands, nRows, nCols, nValpp) = npShape # 4D array
if nValpp != nValuesPerPixel:
return (0, 0, 0)
return (nBands, nRows, nCols)
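# Illustrative shapes (sketch): a (512, 512) array with nValuesPerPixel=1 is a single band;
# a (512, 512, 3) array with nValuesPerPixel=3 is one band of RGB triplets; and a
# (4, 512, 512) array with nValuesPerPixel=1 is 4 bands of 512 x 512 pixels.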
#-------------------------------------------------------------------------------
def findMaxZError(npArr1, npArr2):
npDiff = npArr2 - npArr1
yMin = np.amin(npDiff)
yMax = np.amax(npDiff)
return max(abs(yMin), abs(yMax))
#-------------------------------------------------------------------------------
def findDataRange(npArr, bHasMask, npValidMask, nBands, printInfo = False):
start = timer()
if not bHasMask:
zMin = np.amin(npArr)
zMax = np.amax(npArr)
else:
if nBands == 1 or npValidMask.ndim == 3: # one mask per band
zMin = np.amin(npArr[npValidMask])
zMax = np.amax(npArr[npValidMask])
elif nBands > 1: # same mask for all bands
zMin = float("inf")
zMax = -zMin
for m in range(nBands):
zMin = min(np.amin(npArr[m][npValidMask]), zMin)
zMax = max(np.amax(npArr[m][npValidMask]), zMax)
end = timer()
if printInfo:
print('time findDataRange() = ', (end - start))
return (zMin, zMax)
#-------------------------------------------------------------------------------
# see include/Lerc_c_api.h
lercDll.lerc_computeCompressedSize.restype = ct.c_uint
lercDll.lerc_computeCompressedSize.argtypes = (ct.c_void_p, ct.c_uint, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_char_p, ct.c_double, ct.POINTER(ct.c_uint))
lercDll.lerc_encode.restype = ct.c_uint
lercDll.lerc_encode.argtypes = (ct.c_void_p, ct.c_uint, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_char_p, ct.c_double, ct.c_char_p, ct.c_uint, ct.POINTER(ct.c_uint))
lercDll.lerc_getBlobInfo.restype = ct.c_uint
lercDll.lerc_getBlobInfo.argtypes = (ct.c_char_p, ct.c_uint, ct.POINTER(ct.c_uint), ct.POINTER(ct.c_double), ct.c_int, ct.c_int)
lercDll.lerc_getDataRanges.restype = ct.c_uint
lercDll.lerc_getDataRanges.argtypes = (ct.c_char_p, ct.c_uint, ct.c_int, ct.c_int, ct.POINTER(ct.c_double), ct.POINTER(ct.c_double))
lercDll.lerc_decode.restype = ct.c_uint
lercDll.lerc_decode.argtypes = (ct.c_char_p, ct.c_uint, ct.c_int, ct.c_char_p, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_uint, ct.c_void_p)
#-------------------------------------------------------------------------------
# npArr can be 2D, 3D, or 4D array. See also getLercShape() above.
#
# npValidMask can be None (bHasMask == False), 2D byte array, or 3D byte array (bHasMask == True).
# if 2D or [nRows, nCols], it is one mask for all bands. 1 means pixel is valid, 0 means invalid.
# if 3D or [nBands, nRows, nCols], it is one mask PER band.
# note that an array of values per pixel is either all valid or all invalid.
# the case that such inner array values are only partially valid is not yet representable
# by a mask; a noData value would have to be used instead.
#
# nBytesHint can be
# 0 - compute num bytes needed for output buffer, but do not encode it (faster than encode)
# 1 - do both, compute exact buffer size needed and encode (slower than encode alone)
# > 1 - create buffer of that given size and encode, if buffer too small encode will fail.
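# Typical two-pass use (sketch, mirroring test() below): call encode() with nBytesHint = 0 to
# compute the compressed size, then call it again with that size to actually produce the blob.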
def encode(npArr, nValuesPerPixel, bHasMask, npValidMask, maxZErr, nBytesHint, printInfo = False):
global lercDll
dataType = getLercDatatype(npArr.dtype)
if dataType == -1:
print('Error in encode(): unsupported numpy data type.')
return (-1, 0)
(nBands, nRows, nCols) = getLercShape(npArr, nValuesPerPixel)
if nBands == 0:
print('Error in encode(): unsupported numpy array shape.')
return (-1, 0)
nMasks = 0
if bHasMask:
(nMasks, nRows2, nCols2) = getLercShape(npValidMask, 1)
if not(nMasks == 0 or nMasks == 1 or nMasks == nBands) or not(nRows2 == nRows and nCols2 == nCols):
print('Error in encode(): unsupported mask array shape.')
return (-1, 0)
if printInfo:
print('dataType = ', dataType)
print('nBands = ', nBands)
print('nRows = ', nRows)
print('nCols = ', nCols)
print('nValuesPerPixel = ', nValuesPerPixel)
print('nMasks = ', nMasks)
byteArr = npArr.tobytes('C') # C order
cpData = ct.cast(byteArr, ct.c_void_p)
if bHasMask:
npValidBytes = npValidMask.astype('B')
validArr = npValidBytes.tobytes('C')
cpValidArr = ct.cast(validArr, ct.c_char_p)
else:
cpValidArr = None
ptr = ct.cast((ct.c_uint * 1)(), ct.POINTER(ct.c_uint))
if nBytesHint == 0 or nBytesHint == 1:
start = timer()
result = lercDll.lerc_computeCompressedSize(cpData, dataType, nValuesPerPixel, nCols, nRows, nBands, nMasks, cpValidArr, maxZErr, ptr)
nBytesNeeded = ptr[0]
end = timer()
if result > 0:
print('Error in encode(): lercDll.lerc_computeCompressedSize() failed with error code = ', result)
return (result, 0)
if printInfo:
print('time lerc_computeCompressedSize() = ', (end - start))
else:
nBytesNeeded = nBytesHint
if nBytesHint > 0:
outBytes = ct.create_string_buffer(nBytesNeeded)
cpOutBuffer = ct.cast(outBytes, ct.c_char_p)
start = timer()
result = lercDll.lerc_encode(cpData, dataType, nValuesPerPixel, nCols, nRows, nBands, nMasks, cpValidArr, maxZErr, cpOutBuffer, nBytesNeeded, ptr)
nBytesWritten = ptr[0]
end = timer()
if result > 0:
print('Error in encode(): lercDll.lerc_encode() failed with error code = ', result)
return (result, 0)
if printInfo:
print('time lerc_encode() = ', (end - start))
if nBytesHint == 0:
return (result, nBytesNeeded)
else:
return (result, nBytesWritten, outBytes)
#-------------------------------------------------------------------------------
def getLercBlobInfo(lercBlob, printInfo = False):
global lercDll
info = ['version', 'data type', 'nValuesPerPixel', 'nCols', 'nRows', 'nBands', 'nValidPixels', 'blob size', 'nMasks']
dataRange = ['zMin', 'zMax', 'maxZErrorUsed']
nBytes = len(lercBlob)
len0 = len(info)
len1 = len(dataRange)
p0 = ct.cast((ct.c_uint * len0)(), ct.POINTER(ct.c_uint))
p1 = ct.cast((ct.c_double * len1)(), ct.POINTER(ct.c_double))
cpBytes = ct.cast(lercBlob, ct.c_char_p)
result = lercDll.lerc_getBlobInfo(cpBytes, nBytes, p0, p1, len0, len1)
if result > 0:
print('Error in getLercBlobInfo(): lercDLL.lerc_getBlobInfo() failed with error code = ', result)
return (result, 0,0,0,0,0,0,0,0,0,0,0,0)
if printInfo:
for i in range(len0):
print(info[i], p0[i])
for i in range(len1):
print(dataRange[i], p1[i])
return (result, p0[0], p0[1], p0[2], p0[3], p0[4], p0[5], p0[6], p0[7], p0[8], p1[0], p1[1], p1[2])
#-------------------------------------------------------------------------------
def getLercDataRanges(lercBlob, nDim, nBands, printInfo = False):
global lercDll
nBytes = len(lercBlob)
    len0 = nDim * nBands
cpBytes = ct.cast(lercBlob, ct.c_char_p)
mins = ct.create_string_buffer(len0 * 8)
maxs = ct.create_string_buffer(len0 * 8)
cpMins = ct.cast(mins, ct.POINTER(ct.c_double))
cpMaxs = ct.cast(maxs, ct.POINTER(ct.c_double))
start = timer()
result = lercDll.lerc_getDataRanges(cpBytes, nBytes, nDim, nBands, cpMins, cpMaxs)
end = timer()
if result > 0:
print('Error in getLercDataRanges(): lercDLL.lerc_getDataRanges() failed with error code = ', result)
return (result)
if printInfo:
print('time lerc_getDataRanges() = ', (end - start))
print('data ranges per band and depth:')
for i in range(nBands):
for j in range(nDim):
print('band', i, 'depth', j, ': [', cpMins[i * nDim + j], ',', cpMaxs[i * nDim + j], ']')
npMins = np.frombuffer(mins, 'd')
npMaxs = np.frombuffer(maxs, 'd')
npMins.shape = (nBands, nDim)
npMaxs.shape = (nBands, nDim)
return (result, npMins, npMaxs)
#-------------------------------------------------------------------------------
def decode(lercBlob, printInfo = False):
global lercDll
(result, version, dataType, nValuesPerPixel, nCols, nRows, nBands, nValidPixels, blobSize, nMasks, zMin, zMax, maxZErrUsed) = getLercBlobInfo(lercBlob, printInfo)
if result > 0:
print('Error in decode(): getLercBlobInfo() failed with error code = ', result)
return result
# convert Lerc dataType to np data type
npDtArr = ['b', 'B', 'h', 'H', 'i', 'I', 'f', 'd']
npDtype = npDtArr[dataType]
# convert Lerc shape to np shape
if nBands == 1:
if nValuesPerPixel == 1:
shape = (nRows, nCols)
elif nValuesPerPixel > 1:
shape = (nRows, nCols, nValuesPerPixel)
elif nBands > 1:
if nValuesPerPixel == 1:
shape = (nBands, nRows, nCols)
elif nValuesPerPixel > 1:
shape = (nBands, nRows, nCols, nValuesPerPixel)
# create empty buffer for decoded data
dataSize = [1, 1, 2, 2, 4, 4, 4, 8]
nBytes = nBands * nRows * nCols * nValuesPerPixel * dataSize[dataType]
dataBuf = ct.create_string_buffer(nBytes)
cpData = ct.cast(dataBuf, ct.c_void_p)
cpBytes = ct.cast(lercBlob, ct.c_char_p)
# create empty buffer for valid pixels masks, if needed
cpValidArr = None
if nMasks > 0:
validBuf = ct.create_string_buffer(nMasks * nRows * nCols)
cpValidArr = ct.cast(validBuf, ct.c_char_p)
# call decode
start = timer()
result = lercDll.lerc_decode(cpBytes, len(lercBlob), nMasks, cpValidArr, nValuesPerPixel, nCols, nRows, nBands, dataType, cpData)
end = timer()
if result > 0:
print('Error in decode(): lercDll.lerc_decode() failed with error code = ', result)
return result
if printInfo:
print('time lerc_decode() = ', (end - start))
# return result, np data array, and np valid pixels array if there
npArr = np.frombuffer(dataBuf, npDtype)
npArr.shape = shape
if nMasks > 0:
npValidBytes = np.frombuffer(validBuf, dtype='B')
if nMasks == 1:
npValidBytes.shape = (nRows, nCols)
else:
npValidBytes.shape = (nMasks, nRows, nCols)
npValidMask = (npValidBytes != 0)
return (result, npArr, npValidMask)
else:
return (result, npArr, None)
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
def test():
# data types supported by Lerc, all little endian byte order
# 'b', 'B', 'h', 'H', 'i', 'I', 'f', 'd'
print(' -------- encode test 1 -------- ')
nBands = 1
nRows = 256
nCols = 256
    nValuesPerPixel = 3 # values or array per pixel, could be RGB values or a hyperspectral image
npArr = np.zeros((nRows, nCols, nValuesPerPixel), 'f', 'C') # data type float, C order
#npValidMask = np.full((nRows, nCols), True) # set all pixels valid
npValidMask = None # same as all pixels valid
maxZErr = 0.001
# fill it with something
for i in range(nRows):
for j in range(nCols):
for k in range(nValuesPerPixel):
npArr[i][j][k] = 0.001 * i * j + k
# call with buffer size 0 to only compute compressed size, optional
numBytesNeeded = 0
(result, numBytesNeeded) = encode(npArr, nValuesPerPixel, False, npValidMask, maxZErr, numBytesNeeded, True)
if result > 0:
print('Error in test(): error code = ', result)
return result
print('computed compressed size = ', numBytesNeeded)
# encode with numBytesNeeded from above or big enough estimate
(result, numBytesWritten, outBuffer) = encode(npArr, nValuesPerPixel, False, npValidMask, maxZErr, numBytesNeeded, True)
if result > 0:
print('Error in test(): error code = ', result)
return result
print('num bytes written to buffer = ', numBytesWritten)
# decode again
(result, npArrDec, npValidMaskDec) = decode(outBuffer, True)
if result > 0:
print('Error in test(): decode() failed with error code = ', result)
return result
# evaluate the difference to orig (assuming no mask all valid)
maxZErrFound = findMaxZError(npArr, npArrDec)
print('maxZErr found = ', maxZErrFound)
# find the range [zMin, zMax] in the numpy array
(zMin, zMax) = findDataRange(npArrDec, False, None, nBands, True)
print('data range found = ', zMin, zMax)
print(' -------- encode test 2 -------- ')
nBands = 3
nRows = 256
nCols = 256
nValuesPerPixel = 1
    npArr = np.zeros((nBands, nRows, nCols), 'f', 'C')
import os
import numpy as np
import pandas as pd
import xarray as xr
from xclim import run_length as rl
TESTS_HOME = os.path.abspath(os.path.dirname(__file__))
TESTS_DATA = os.path.join(TESTS_HOME, "testdata")
K2C = 273.15
class TestRLE:
def test_dataarray(self):
values = np.zeros(365)
time = pd.date_range(
"7/1/2000", periods=len(values), freq=pd.DateOffset(days=1)
)
values[1:11] = 1
da = xr.DataArray(values, coords={"time": time}, dims="time")
v, l, p = rl.rle_1d(da != 0)
class TestLongestRun:
nc_pr = os.path.join(TESTS_DATA, "NRCANdaily", "nrcan_canada_daily_pr_1990.nc")
def test_simple(self):
values = np.zeros(365)
time = pd.date_range(
"7/1/2000", periods=len(values), freq=pd.DateOffset(days=1)
)
values[1:11] = 1
da = xr.DataArray(values != 0, coords={"time": time}, dims="time")
lt = da.resample(time="M").apply(rl.longest_run_ufunc)
assert lt[0] == 10
np.testing.assert_array_equal(lt[1:], 0)
# n-dim version versus ufunc
da3d = xr.open_dataset(self.nc_pr).pr[:, 40:50, 50:68] != 0
lt_orig = da3d.resample(time="M").apply(rl.longest_run_ufunc)
# override 'auto' usage of ufunc for small number of gridpoints
lt_Ndim = da3d.resample(time="M").apply(
rl.longest_run, dim="time", ufunc_1dim=False
)
np.testing.assert_array_equal(lt_orig, lt_Ndim)
def test_start_at_0(self):
        values = np.zeros(365)
#!/usr/bin/env python
"""
logtools is a Python port of the Control File Functions of
Logtools, the Logger Tools Software of <NAME>, MPI-BGC Jena, (c) 2012.
Some functions are renamed compared to the original logger tools:
`chs` -> `varchs`
`add` -> `varadd`
`sub` -> `varsub`
`mul` -> `varmul`
`div` -> `vardiv`
`sqr` -> `varsqr`/`varsqrt`
`exp` -> `varexp`
`log` -> `varlog`
`pot` -> `varpot`
Not all functions are implemented (yet). Missing functions are:
`varset`
`met_torad`
`met_psy_rh`
`met_dpt_rh`
`write`
Some functions are slightly enhanced, which is reflected in the
documentation of the individual functions.
All functions have an additional keyword `undef`, which defaults to -9999.:
elements are excluded from the calculations if any of the inputs equals `undef`.
Only bit_test and the if-statements `ifeq`, `ifne`, `ifle`, `ifge`, `iflt`, `ifgt`
do not have the `undef` keyword.
The Logger Tools control functions are:
1. Assignment # not implemented
2. Change sign
x = varchs(a) means x = -a, where a is a variable or a number.
def varchs(var1, undef=-9999.):
3. Addition
x = varadd(a, b) means x = a + b, where a and b are ndarray or float.
def varadd(var1, var2, undef=-9999.):
4. Subtraction
x = varsub(a, b) means x = a - b, where a and b are ndarray or float.
def varsub(var1, var2, undef=-9999.):
5. Multiplication
x = varmul(a, b) means x = a * b, where a and b are ndarray or float.
def varmul(var1, var2, undef=-9999.):
6. Division
x = vardiv(a, b) means x = a/b, where a and b are ndarray or float.
def vardiv(var1, var2, undef=-9999.):
7. Square root
x = varsqr(a) means x = sqrt(a), where a is a variable or a number.
x = varsqrt(a) means x = sqrt(a), where a is a variable or a number.
def varsqr(var1, undef=-9999.):
def varsqrt(var1, undef=-9999.):
8. Exponentiation of e
x = varexp(a) means x = exp(a), where a is a variable or a number.
def varexp(var1, undef=-9999.):
9. Natural logarithm
x = varlog(a) means x = ln(a), where a is a variable or a number.
def varlog(var1, undef=-9999.):
10. Exponentiation
x = varpot(a, b) means x = a**b, where a and b are ndarray or float.
def varpot(var1, var2, undef=-9999.):
11. Apply linear function
x = lin(y, a0, a1) means x = a0 + a1 * y,
where a0 and a1 are ndarray or float.
def lin(var1, a, b, undef=-9999.):
12. Apply 2nd order function
x=quad(y,a0,a1,a2) means x = a0 +a1*y + a2*y**2,
where a0, a1 and a2 are ndarray or float.
def quad(var1, a, b, c, undef=-9999.):
13. Apply 3rd order function
x=cubic(y,a0,a1,a2,a3) means x = a0 +a1*y+a2*y**2+a3*y**3,
where a0, a1, a2 and a3 are ndarray or float.
def cubic(var1, a, b, c, d, undef=-9999.):
14. Calculate fraction of day from hours, minutes and seconds
x = hms(h, m, s) means x = (h + m/60 + s/3600)/24,
where h, m and s (hours, minutes and seconds) are ndarray or float.
def hms(h, m, s, undef=-9999.):
15. Bitwise test
    x = bit_test(y, b, start=0) means x = 1 if bit b is set in y otherwise x = 0.
    Returns a list if b is an array.
    Counting of b starts at start.
    For the behaviour of the original logger tools, set start=1.
    Negative b's are not implemented.
    def bit_test(var1, var2, start=0):
16. Replacement of underflows by new value
    x = setlow(y,lo,ln=None) means IF (y < lo) THEN x = ln ELSE x = y,
where lo and ln are ndarray or float.
ln is optional. If not given lo will be used.
This function may be used to adjust small negative values of short wave radiation
during nighttime to zero values.
def setlow(dat, low, islow=None, undef=-9999.):
17. Replacement of overflows by new value
    x = sethigh(y,lo,ln=None) means IF (y > lo) THEN x = ln ELSE x = y,
where lo and ln are ndarray or float.
ln is optional. If not given lo will be used.
This function may be used to adjust relative humidity values of a little bit more than 100 % to 100 %.
def sethigh(dat, high, ishigh=None, undef=-9999.):
18. Replacement of underflows or overflows by the undef
x = limits(y, ll, lh) means
    IF (y < ll) OR (y > lh) THEN x = undef ELSE x = y,
where ll and lh are ndarray or float.
This function may be used to check values lying in between certain limits.
If one of the limits is exceeded the value is set to undef.
def limits(dat, mini, maxi, undef=-9999.):
19. Calculation of mean value
x = mean(y1, y2, ..., yn) means x = (y1 + y2 + ... + yn)/n,
where y1, y2, ..., yn are ndarray or float.
def mean(var1, axis=None, undef=-9999.):
20. Calculation of minimum value
x = mini(y1,y2,...,yn) means x = min(y1,y2,...,yn),
where y1, y2, ..., yn are ndarray or float.
def mini(var1, axis=None, undef=-9999.):
21. Calculation of maximum value
x = maxi(y1,y2,...,yn) means x = max(y1,y2,...,yn),
where y1, y2, ..., yn are ndarray or float.
def maxi(var1, axis=None, undef=-9999.):
22. Calculation of total radiation from net radiometer # not implemented
23. Calculation of long wave radiation from net radiometer
x = met_lwrad(y, Tp) where
y is the output voltage of the net radiometer in mV,
Tp is the temperature of the net radiometer body in degC.
The total radiation in W m-2 is calculated according to the following formula:
    x = y*fl + sigma*(Tp + 273.16)**4
    where sigma = 5.67051 * 10**-8 W m-2 K-4 is the Stefan-Boltzmann constant and
fl is the factor for long wave radiation (reciprocal value of sensitivity) in W m-2 per mV.
The function assumes that fl was already applied before.
All parameters may be ndarray or float.
def met_lwrad(dat, tpyr, undef=-9999.): # assumes that dat was already multiplied with calibration factor
24. Calculation of radiation temperature from long wave radiation
x = met_trad(Rl, epsilon) where
Rl is the long wave radiation in W m-2,
epsilon is the long wave emissivity of the surface (between 0 and 1).
The radiation temperature in degC is calculated according to the following formula:
    x = sqrt4(Rl/(sigma*epsilon)) - 273.16
    where sigma = 5.67051 * 10**-8 W m-2 K-4 is the Stefan-Boltzmann constant.
Both parameters may be ndarray or float.
def met_trad(dat, eps, undef=-9999.):
25. Calculation of albedo from short wave downward and upward radiation
x = met_alb(Rsd, Rsu) where
Rsd is the short wave downward radiation in Wm-2, Rsu is the short wave upward radiation in Wm-2,
The albedo in % is calculated according to the following formula:
x = 100 * ( Rsu / Rsd )
    If Rsd < 50 W m-2 or Rsu < 10 W m-2 the result is undef.
Both parameters may be ndarray or float.
def met_alb(swd, swu, swdmin=50., swumin=10., undef=-9999.):
26. Calculation of albedo from short wave downward and upward radiation with limits
x=met_albl(Rsd,Rsu,Rsd_limit,Rsu_limit)where
Rsd is the short wave downward radiation in Wm-2,
Rsu is the short wave upward radiation in Wm-2,
Rsd_limit is the short wave downward radiation limit in Wm-2,
Rsu_limit is the short wave upward radiation limit in Wm-2,
The albedo in % is calculated according to the following formula:
x = 100 * ( Rsu / Rsd )
    If Rsd < Rsd_limit or Rsu < Rsu_limit the result is undef.
All four parameters may be ndarray or float.
def met_albl(swd, swu, swdmin, swumin, undef=-9999.):
27. Calculation of saturation water vapour pressure
x = met_vpmax(T) where
T is the air temperature in degC.
The saturation water vapour pressure in mbar (hPa) is calculated according to the following formula:
x = 6.1078 * exp(17.08085 * T / (234.175 + T))
The parameter may be a variable or a number.
def met_vpmax(temp, undef=-9999.):
28. Calculation of actual water vapour pressure
x = met_vpact(T,rh) where T is the air temperature in degC, rh is the relative humidity in %.
    The actual water vapour pressure in mbar (hPa) is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
x = Es * rh/100
Both parameters may be ndarray or float.
def met_vpact(temp, rh, undef=-9999.):
29. Calculation of water vapour pressure deficit
x = met_vpdef(T, rh) where T is the air temperature in degC, rh is the relative humidity in %.
    The water vapour pressure deficit in mbar (hPa) is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
x = Es - E
Both parameters may be ndarray or float.
def met_vpdef(temp, rh, undef=-9999.):
30. Calculation of specific humidity
x = met_sh(T, rh, p) where
T is the air temperature in degC,
rh is the relative humidity in %,
p is the air pressure in mbar (hPa).
The specific humidity in g kg-1 is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
x = 622 * E/(p-0.378*E)
All parameters may be ndarray or float.
def met_sh(temp, rh, p, undef=-9999.):
31. Calculation of potential temperature
x = met_tpot(T, p) where
T is the air temperature in degC,
p is the air pressure in mbar (hPa).
The potential temperature in K is calculated according to the following formula:
x = (T + 273.16) * (1000/p)**0.286
Both parameters may be ndarray or float.
def met_tpot(temp, p, undef=-9999.):
32. Calculation of air density
x = met_rho(T, rh, p) where
T is the air temperature in degC,
rh is the relative humidity in %,
p is the air pressure in mbar (hPa).
The air density in kg m-3 is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
sh = 622 * E/(p-0.378*E)
Tv = ((T + 273.16) * (1 + 0.000608 * sh)) - 273.16
x = p * 100 / (287.05 * (Tv + 273.16))
All parameters may be ndarray or float.
def met_rho(temp, rh, p, undef=-9999.):
33. Calculation of dew point temperature
x = met_dpt(T, rh) where
T is the air temperature in degC, rh is the relative humidity in %.
The dew point temperature in degC is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/(234.175 + T))
E = Es * rh/100
x = 234.175 * ln(E/6.1078)/(17.08085 - ln(E/6.1078))
Both parameters may be ndarray or float.
def met_dpt(temp, rh, undef=-9999.):
34. Calculation of water vapour concentration
x = met_h2oc(T, rh, p) where T is the air temperature in degC,
rh is the relative humidity in %,
p is the air pressure in mbar (hPa).
    The water vapour concentration in mmol mol-1 is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
x = 0.1 * E /(0.001*p*100*0.001)
All parameters may be ndarray or float.
def met_h2oc(temp, rh, p, undef=-9999.):
35. Calculation of relative humidity from dry and wet bulb temperature # not implemented
36. Calculation of relative humidity from dew point temperature # not implemented
37. Calculation of relative humidity from water vapour concentration
x = met_h2oc_rh(T, [H2O], p) where
T is the air temperature in degC,
[H2O] is the water vapour concentration in mmolmol-1, p is the air pressure in mbar (hPa).
The relative humidity in % is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/(234.175 + T))
E = 10 * [H2O] * 0.001 * p * 100 * 0.001
x = 100 * E / Es
All parameters may be ndarray or float.
def met_h2oc_rh(temp, h, p, undef=-9999.):
38. Rotation of wind direction
x = met_wdrot(wd, a) where
wd is the wind direction in degree,
a is the rotation angle in degree (positive is clockwise).
The rotated wind direction is calculated according to the following formulas:
x = wd + a
    IF x < 0 THEN x = x + 360
IF x >= 360 THEN x = x - 360
Both parameters may be ndarray or float.
def met_wdrot(wd, a, undef=-9999.):
39. Rotation of u-component of wind vector
x = met_urot(u, v, a) where
u is the u-component of the wind vector,
v is the v-component of the wind vector,
a is the rotation angle in degree (positive is clockwise).
The rotated u-component is calculated according to the following formula:
x = u * cos (a) + v * sin (a)
All three parameters may be ndarray or float.
def met_urot(u, v, a, undef=-9999.):
40. Rotation of v-component of wind vector
x = met_vrot(u, v, a) where
u is the u-component of the wind vector,
v is the v-component of the wind vector,
a is the rotation angle in degree (positive is clockwise).
The rotated v-component is calculated according to the following formula:
x = -u * sin (a) + v * cos (a)
All three parameters may be ndarray or float.
def met_vrot(u, v, a, undef=-9999.):
41. Calculation of wind velocity from u- and v-component of wind vector
x = met_uv_wv(u, v) where
u is the u-component of the wind vector, v is the v-component of the wind vector.
The horizontal wind velocity is calculated according to the following formula:
x = sqrt(u**2 + v**2)
Both parameters may be ndarray or float.
def met_uv_wv(u, v, undef=-9999.):
42. Calculation of wind direction from u- and v-component of wind vector
x = met_uv_wd(u, v) where
u is the u-component of the wind vector, v is the v-component of the wind vector.
The horizontal wind velocity is calculated according to the following formulas:
IF u = 0 AND v = 0 THEN x = 0
IF u = 0 AND v > 0 THEN x = 360
IF u = 0 AND v < 0 THEN x = 180
IF u < 0 THEN x = 270 - arctan(v/u)
IF u > 0 THEN x = 90 - arctan(v/u)
Both parameters may be ndarray or float.
def met_uv_wd(u, v, undef=-9999.):
43. Calculation of u-component of wind vector from wind velocity and wind direction
x = met_wvwd_u(wv, wd) where wv is the horizontal wind velocity, wd is the horizontal wind direction.
The u-component of the wind vector is calculated according to the following formula:
x = -wv * sin (wd)
Both parameters may be ndarray or float.
def met_wvwd_u(wv, wd, undef=-9999.):
44. Calculation of v-component of wind vector from wind velocity and wind direction
x = met_wvwd_v(wv, wd) where wv is the horizontal wind velocity, wd is the horizontal wind direction.
The v-component of the wind vector is calculated according to the following formula:
x = -wv * cos (wd)
Both parameters may be ndarray or float.
def met_wvwd_v(wv, wd, undef=-9999.):
45. If-statements
x = ifeq(y,a0,a1,a2) means IF y == a0 THEN x = a1 ELSE x = a2
x = ifne(y,a0,a1,a2) means IF y != a0 THEN x = a1 ELSE x = a2
x = ifle(y,a0,a1,a2) means IF y <= a0 THEN x = a1 ELSE x = a2
x = ifge(y,a0,a1,a2) means IF y >= a0 THEN x = a1 ELSE x = a2
    x = iflt(y,a0,a1,a2) means IF y < a0 THEN x = a1 ELSE x = a2
    x = ifgt(y,a0,a1,a2) means IF y > a0 THEN x = a1 ELSE x = a2
All parameters may be ndarray or float.
def ifeq(var1, iif, ithen, ielse):
def ifne(var1, iif, ithen, ielse):
def ifle(var1, iif, ithen, ielse):
def ifge(var1, iif, ithen, ielse):
def iflt(var1, iif, ithen, ielse):
def ifgt(var1, iif, ithen, ielse):
46. Write variables to a file # not implemented
This module was written by <NAME> while at Department of
Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany, and continued while at Institut
National de Recherche pour l'Agriculture, l'Alimentation et
l'Environnement (INRAE), Nancy, France.
Copyright (c) 2014-2020 <NAME> - mc (at) macu (dot) de
Released under the MIT License; see LICENSE file for details.
* Written Jun-Dec 2014 by <NAME> (mc (at) macu (dot) de)
* Corrected type in met_tpot, Jun 2014, Corinna Rebmann
* Changed to Sphinx docstring and numpydoc, May 2020, <NAME>
.. moduleauthor:: <NAME>
.. autosummary::
varchs
varadd
varsub
varmul
vardiv
varsqr
varsqrt
varexp
varlog
varpot
lin
quad
cubic
hms
bit_test
setlow
sethigh
limits
mean
mini
maxi
met_lwrad
met_trad
met_alb
met_albl
met_vpmax
met_vpact
met_vpdef
met_sh
met_tpot
met_rho
met_dpt
met_h2oc
met_h2oc_rh
met_wdrot
met_urot
met_vrot
met_uv_wv
met_uv_wd
met_wvwd_u
met_wvwd_v
ifeq
ifne
ifle
ifge
iflt
ifgt
"""
from __future__ import division, absolute_import, print_function
import numpy as np
try: # import package
from ..division import division
from ..esat import esat
from ..const import sigma, T0
except:
try: # e.g. python module.py at main package level
from division import division
from esat import esat
from const import sigma, T0
except: # python logtools.py
division = _div
esat = _esat
sigma = 5.67e-08 # Stefan-Boltzmann constant [W m^-2 K^-4]
T0 = 273.15 # Celcius <-> Kelvin [K]
__all__ = ['varchs', 'varadd', 'varsub', 'varmul', 'vardiv', 'varsqr',
'varsqrt', 'varexp', 'varlog', 'varpot', 'lin', 'quad',
'cubic', 'hms', 'bit_test', 'setlow', 'sethigh', 'limits',
'mean', 'mini', 'maxi', 'met_lwrad', 'met_trad', 'met_alb',
'met_albl', 'met_vpmax', 'met_vpact', 'met_vpdef', 'met_sh',
'met_tpot', 'met_rho', 'met_dpt', 'met_h2oc', 'met_h2oc_rh',
'met_wdrot', 'met_urot', 'met_vrot', 'met_uv_wv', 'met_uv_wd',
'met_wvwd_u', 'met_wvwd_v', 'ifeq', 'ifne', 'ifle', 'ifge',
'iflt', 'ifgt']
# Not implemented: varset
def varchs(a, undef=-9999.):
"""
Change sign:
x = varchs(a) means x = -a, where a is ndarray or float.
Parameters
----------
a : ndarray
input variable
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Changed sign
History
-------
Written, <NAME>, Dec 2014
"""
return np.where(a==undef, undef, -a)
def varadd(a, b, undef=-9999.):
"""
Addition:
x = varadd(a, b) means x = a + b, where a and b are ndarray or float.
Parameters
----------
a : ndarray
input variable 1
b : ndarray
input variable 2
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Addition
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef) | (b==undef), undef, a + b)
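# Illustrative example of the undef convention described in the module docstring:
#   varadd(np.array([1., -9999., 3.]), 2.)  # -> array([ 3., -9999., 5.])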
def varsub(a, b, undef=-9999.):
"""
Subtraction:
x = varsub(a, b) means x = a - b, where a and b are ndarray or float.
Parameters
----------
a : ndarray
input variable 1
b : ndarray
input variable 2
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Subtraction
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef) | (b==undef), undef, a - b)
def varmul(a, b, undef=-9999.):
"""
Multiplication:
x = varmul(a, b) means x = a * b, where a and b are ndarray or float.
Parameters
----------
a : ndarray
input variable 1
b : ndarray
input variable 2
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Multiplication
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef) | (b==undef), undef, a * b)
def vardiv(a, b, undef=-9999.):
"""
Division:
x = vardiv(a, b) means x = a/b, where a and b are ndarray or float.
Parameters
----------
a : ndarray
dividend
b : ndarray
divisor
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Division
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef) | (b==undef), undef, division(a, b, undef))
def varsqr(a, undef=-9999.):
"""
Square root:
x = varsqr(a) means x = sqrt(a), where a is ndarray or float.
Parameters
----------
a : ndarray
input variable
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Square root
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef), undef, np.sqrt(a))
def varsqrt(a, undef=-9999.):
"""
Square root:
x = varsqrt(a) means x = sqrt(a), where a is ndarray or float.
Parameters
----------
a : ndarray
input variable
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Square root
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef), undef, np.sqrt(a))
def varexp(a, undef=-9999.):
"""
Exponentiation of e:
x = varexp(a) means x = exp(a), where a is ndarray or float.
Parameters
----------
a : ndarray
exponent
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Exponentiation
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef), undef, np.exp(a))
def varlog(a, undef=-9999.):
"""
Natural logarithm:
x = varlog(a) means x = ln(a), where a is ndarray or float.
Parameters
----------
a : ndarray
input variable
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Natural logarithm
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef), undef, np.log(a))
def varpot(a, b, undef=-9999.):
"""
Exponentiation:
x = varpot(a, b) means x = a**b, where a and b are ndarray or float.
Parameters
----------
a : ndarray
base
b : ndarray
exponent
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
Exponentiation
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((a==undef) | (b==undef), undef, a**b)
def lin(y, a0, a1, undef=-9999.):
"""
Apply linear function:
x = lin(y, a0, a1) means x = a0 + a1 * y
Parameters
----------
y : ndarray
input variable
a0 : ndarray or float
parameter 1
a1 : ndarray or float
parameter 2
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
linear function
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((y==undef), undef, a0 + a1*y)
def quad(y, a0, a1, a2, undef=-9999.):
"""
Apply 2nd order function:
x=quad(y,a0,a1,a2) means x = a0 + a1*y + a2*y**2
Parameters
----------
y : ndarray
input variable
a0 : ndarray or float
parameter 1
a1 : ndarray or float
        parameter 2
    a2 : ndarray or float
        parameter 3
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
2nd order function
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((y==undef), undef, a0 + a1*y + a2*y*y)
def cubic(y, a0, a1, a2, a3, undef=-9999.):
"""
Apply 3rd order function:
x=cubic(y,a0,a1,a2,a3) means x = a0 + a1*y + a2*y**2 + a3*y**3
Parameters
----------
y : ndarray
input variable
a0 : ndarray or float
parameter 1
a1 : ndarray or float
parameter 2
a2 : ndarray or float
parameter 3
a3 : ndarray or float
parameter 4
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
3rd order function
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((y==undef), undef, a0 + a1*y + a2*y*y + a3*y*y*y)
def hms(h, m, s, undef=-9999.):
"""
Calculate fraction of day from hours, minutes and seconds:
x = hms(h, m, s) means x = (h + m/60 + s/3600)/24
Parameters
----------
h : ndarray
hour
m : ndarray
minute
s : ndarray
second
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
fraction of day
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((h==undef) | (m==undef) | (s==undef), undef, (h+m/60.+s/3600.)/24.)
def bit_test(y, b, start=0):
"""
Bitwise test:
x = bit_test(y, b, start=0) means x = 1 if bit b is set in y otherwise x = 0.
Returns a list if b is an array.
Counting of b starts at start.
For the behaviour of the original logger tools, set start=1.
    Negative b's are not implemented.
Parameters
----------
y : ndarray
input variable 1
b : int or ndarray
input variable 2
start : int, optional
Counting of `b` starts at start (default: 0)
Returns
-------
int or list
Bitwise test
History
-------
Written, <NAME>, Jun 2014
"""
if np.size(b) > 1:
return [ (y >> i+start)%2 for i in b ]
else:
return (y >> b+start)%2
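# Illustrative check: 5 == 0b101, so bit_test(5, [0, 1, 2]) -> [1, 0, 1]
# and bit_test(5, 2) -> 1 (with the default start=0).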
def setlow(y, low, islow=None, undef=-9999.):
"""
Replacement of underflows by new value:
x = setlow(y,low,islow) means IF (y < low) THEN x = islow ELSE x = y
islow is optional. If not given low will be used.
This function may be used to adjust small negative values of short wave radiation
during nighttime to zero values.
Parameters
----------
y : ndarray
input variable
low : ndarray
lower threshold
islow : None or ndarray, optional
if not None, use islow in case of y < low
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
underflows replaced by new value
History
-------
Written, <NAME>, Jun 2014
"""
if islow is None:
out = np.maximum(y, low)
else:
out = np.where(y < low, islow, y)
return np.where(y == undef, undef, out)
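# Illustrative example: setlow(np.array([-0.5, 3., -9999.]), 0.) -> [0., 3., -9999.]
# (small negative values are clipped to 0.; undef entries pass through unchanged).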
def sethigh(y, high, ishigh=None, undef=-9999.):
"""
Replacement of overflows by new value:
x = sethigh(y,high,ishigh) means IF (y > high) THEN x = ishigh ELSE x = y
ishigh is optional. If not given high will be used.
This function may be used to adjust relative humidity values of a little bit more than 100 % to 100 %.
Parameters
----------
y : ndarray
input variable
high : ndarray
upper threshold
ishigh : None or ndarray, optional
if not None, use ishigh in case of y > high
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
overflows replaced by new value
History
-------
Written, <NAME>, Jun 2014
"""
if ishigh is None:
out = np.minimum(y, high)
else:
out = np.where(y > high, ishigh, y)
return np.where(y == undef, undef, out)
def limits(y, mini, maxi, undef=-9999.):
"""
Replacement of underflows or overflows by undef:
x = limits(y, mini, maxi) means IF (y < mini) OR (y > maxi) THEN x = undef ELSE x = y
This function may be used to check values lying in between certain limits.
If one of the limits is exceeded the value is set to undef.
Parameters
----------
y : ndarray
input variable
mini : ndarray
lower threshold
maxi : ndarray
upper threshold
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
underflows or overflows replaced by `undef`
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((y >= mini) & (y <= maxi), y, undef)
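# Illustrative example: limits(np.array([1., 5., 20.]), 2., 10.) -> [-9999., 5., -9999.]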
def mean(y, axis=None, undef=-9999.):
"""
Calculation of mean value:
x = mean(y) means x = (y[0] + y[1] + ... + y[n-1])/n
Parameters
----------
y : ndarray
input variable
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
mean value
History
-------
Written, <NAME>, Jun 2014
"""
return np.ma.mean(np.ma.array(y, mask=(y==undef)), axis=axis).filled(undef)
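# Illustrative example: mean(np.array([[1., -9999.], [3., 5.]]), axis=0) -> [2., 5.]
# (undef entries are masked out before averaging).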
def mini(y, axis=None, undef=-9999.):
"""
Calculation of minimum value:
x = mini(y) means x = min(y[0],y[1],...,y[n-1])
Parameters
----------
y : ndarray
input variable
axis : None or int or tuple of ints, optional
        Axis or axes along which the minimum is computed.
The default is to compute the minimum of the flattened array.
If this is a tuple of ints, a minimum is performed over multiple axes,
instead of a single axis or all the axes as before.
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
minimum value
History
-------
Written, <NAME>, Jun 2014
"""
return np.ma.amin(np.ma.array(y, mask=(y==undef)), axis=axis).filled(undef)
def maxi(y, axis=None, undef=-9999.):
"""
Calculation of maximum value:
x = maxi(y) means x = max(y[0],y[1],...,y[n-1])
Parameters
----------
y : ndarray
input variable
axis : None or int or tuple of ints, optional
        Axis or axes along which the maximum is computed.
The default is to compute the maximum of the flattened array.
If this is a tuple of ints, a maximum is performed over multiple axes,
instead of a single axis or all the axes as before.
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
maximum value
History
-------
Written, <NAME>, Jun 2014
"""
return np.ma.amax(np.ma.array(y, mask=(y==undef)), axis=axis).filled(undef)
# Not implemented: met_torad
def met_lwrad(y, Tp, undef=-9999.): # assumes that y was already multiplied with calibration factor
"""
Calculation of long wave radiation from net radiometer:
x = met_lwrad(y, Tp)
The total radiation in W m-2 is calculated according to the following formula:
x = y*fl + sigma*(Tp+T0)**4
    where sigma = 5.67051 * 10**-8 W m-2 K-4 is the Stefan-Boltzmann constant and
fl is the factor for long wave radiation (reciprocal value of sensitivity) in W m-2 per mV.
The function assumes that fl was already applied before.
Parameters
----------
y : ndarray
output voltage of the net radiometer [mV]
Tp : ndarray
pyranometer temperature, i.e. the temperature of the net radiometer body [degC]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
total radiation in W m-2
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((y==undef) | (Tp==undef), undef, y + sigma * (Tp+T0)**4)
def met_trad(Rl, epsilon, undef=-9999.):
"""
Calculation of radiation temperature from long wave radiation:
x = met_trad(Rl, epsilon)
The radiation temperature in degC is calculated according to the following formula:
x= sqrt4(Rl/(sigma*epsilon)) - T0
    where sigma = 5.67051 * 10**-8 W m-2 K-4 is the Stefan-Boltzmann constant.
Parameters
----------
Rl : ndarray
longwave radiation [W m-2]
epsilon : ndarray
long wave emissivity of the surface [0-1]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
radiation temperature in degC
History
-------
Written, <NAME>, Jun 2014
"""
const = 1./(epsilon*sigma)
trad = np.ma.sqrt(np.ma.sqrt(const*np.ma.array(Rl, mask=(Rl==undef)))) - T0
return trad.filled(undef)
def met_alb(swd, swu, swdmin=50., swumin=10., undef=-9999.):
"""
Calculation of albedo from short wave downward and upward radiation:
x = met_alb(swd, swu)
The albedo in % is calculated according to the following formula:
x = 100 * ( swu / swd )
If swd < swdmin (50 W m-2) or swu < swumin (10 W m-2) the result is undef.
Parameters
----------
swd : ndarray
shortwave downward radiation [W m-2]
swu : ndarray
shortwave upward radiation [W m-2]
swdmin : float, optional
If `swd` < `swdmin` the result is undef (default: 50).
swumin : float, optional
If `swu` < `swumin` the result is undef (default: 10).
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
albedo in %
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((swd==undef) | (swu==undef) | (swd<swdmin) | (swu<swumin),
undef, division(swu*100., swd, undef))
def met_albl(swd, swu, swdmin, swumin, undef=-9999.):
"""
Calculation of albedo from short wave downward and upward radiation with limits:
x=met_albl(swd,swu,swdmin,swumin)
The albedo in % is calculated according to the following formula:
x = 100 * ( swu / swd )
If swd < swdmin or swu < swumin the result is `undef`.
Parameters
----------
swd : ndarray
shortwave downward radiation [W m-2]
swu : ndarray
shortwave upward radiation [W m-2]
swdmin : float
If `swd` < `swdmin` the result is undef.
swumin : float
If `swu` < `swumin` the result is undef.
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
albedo in %
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((swd==undef) | (swu==undef) | (swd<swdmin) | (swu<swumin),
undef, division(swu*100., swd, undef))
def met_vpmax(temp, undef=-9999.):
"""
Calculation of saturation water vapour pressure:
x = met_vpmax(T)
The saturation water vapour pressure in mbar (hPa) is calculated according to the following formula:
x = 6.1078 * exp(17.08085 * T / (234.175 + T))
Parameters
----------
temp : ndarray
air temperature [degC]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
saturation water vapour pressure in mbar (hPa)
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=(temp==undef)))*0.01
return es.filled(undef)
def met_vpact(temp, rh, undef=-9999.):
"""
Calculation of actual water vapour pressure:
x = met_vpact(T,rh)
The actual water vapour pressure in mbar (hPa) is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
x = Es * rh/100
Parameters
----------
temp : ndarray
air temperature [degC]
rh : ndarray
relative humidity [%]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
actual water vapour pressure in mbar (hPa)
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=((temp==undef)|(rh==undef))))*0.01
ea = es*rh*0.01
return ea.filled(undef)
def met_vpdef(temp, rh, undef=-9999.):
"""
Calculation of water vapour pressure deficit:
x = met_vpdef(T, rh)
The water vapour pressure deficit in mbar (hPa) is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
x = Es - E
Parameters
----------
temp : ndarray
air temperature [degC]
rh : ndarray
relative humidity [%]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
water vapour pressure deficit in mbar (hPa)
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=((temp==undef)|(rh==undef))))*0.01
ea = es*rh*0.01
vpd = es - ea
return vpd.filled(undef)
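# Hedged worked example (not part of the original module): the Magnus-type
# formula quoted in the docstrings above, written out directly. The module's
# esat() may use a slightly different fit, so the numbers are only indicative.
import numpy as np
T = 25.0                                              # air temperature [degC]
rh = 60.0                                             # relative humidity [%]
Es = 6.1078 * np.exp(17.08085 * T / (234.175 + T))    # saturation vapour pressure [hPa]
E = Es * rh / 100.                                    # actual vapour pressure [hPa]
vpd = Es - E                                          # vapour pressure deficit [hPa]
# Es ~ 31.7 hPa, E ~ 19.0 hPa, vpd ~ 12.7 hPa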
def met_sh(temp, rh, p, undef=-9999.):
"""
Calculation of specific humidity:
x = met_sh(T, rh, p)
The specific humidity in g kg-1 is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
x = 622 * E/(p-0.378*E)
Parameters
----------
temp : ndarray
air temperature [degC]
rh : ndarray
relative humidity [%]
p : ndarray
air pressure [hPa = mbar]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
specific humidity in g kg-1
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=((temp==undef)|(rh==undef)|(p==undef))))*0.01
ea = es*rh*0.01
sh = division(622.*ea, (p-0.378*ea), undef)
return sh.filled(undef)
def met_tpot(temp, p, undef=-9999.):
"""
Calculation of potential temperature:
x = met_tpot(T, p)
The potential temperature in K is calculated according to the following formula:
x = (T + T0) * (1000/p)**0.286
Parameters
----------
temp : ndarray
air temperature [degC]
p : ndarray
air pressure [hPa = mbar]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
potential temperature in K
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((temp==undef) | (p==undef), undef, (temp+T0)*division(1000.,p)**0.286)
def met_rho(temp, rh, p, undef=-9999.):
"""
Calculation of air density:
x = met_rho(T, rh, p)
The air density in kg m-3 is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
sh = 622 * E/(p-0.378*E)
Tv = ((T + T0) * (1 + 0.000608 * sh)) - T0
x = p * 100 / (287.05 * (Tv + T0))
Parameters
----------
temp : ndarray
air temperature [degC]
rh : ndarray
relative humidity [%]
p : ndarray
air pressure [hPa = mbar]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
air density in kg m-3
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=((temp==undef)|(rh==undef)|(p==undef))))*0.01
ea = es*rh*0.01
sh = division(622.*ea, (p-0.378*ea), undef)
Tv = ((temp+T0)*(1+0.000608*sh)) - T0
rho = division(p*100., (287.05*(Tv+T0)), undef)
return rho.filled(undef)
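# Hedged worked example (not part of the original module): the air-density
# chain documented above, with T0 assumed to be 273.15 K.
import numpy as np
T, rh, p = 20.0, 50.0, 1013.25                        # [degC], [%], [hPa]
_T0 = 273.15
Es = 6.1078 * np.exp(17.08085 * T / (234.175 + T))    # saturation vapour pressure [hPa]
E = Es * rh / 100.                                    # actual vapour pressure [hPa]
sh = 622. * E / (p - 0.378 * E)                       # specific humidity [g kg-1]
Tv = (T + _T0) * (1. + 0.000608 * sh) - _T0           # virtual temperature [degC]
rho = p * 100. / (287.05 * (Tv + _T0))                # air density [kg m-3]
# rho is about 1.20 kg m-3 for these inputs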
def met_dpt(temp, rh, undef=-9999.):
"""
Calculation of dew point temperature:
x = met_dpt(T, rh)
The dew point temperature in degC is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/(234.175 + T))
E = Es * rh/100
x = 234.175 * ln(E/6.1078)/(17.08085 - ln(E/6.1078))
Parameters
----------
temp : ndarray
air temperature [degC]
rh : ndarray
relative humidity [%]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
dew point temperature in degC
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=((temp==undef)|(rh==undef))))*0.01
ea = es*rh*0.01
dpt = 234.175 * np.ma.log(ea/6.1078) / (17.08085 - np.ma.log(ea/6.1078))
return dpt.filled(undef)
def met_h2oc(temp, rh, p, undef=-9999.):
"""
Calculation of water vapour concentration:
x = met_h2oc(T, rh, p)
The water vapour concentration in mmol mol-1 is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/ (234.175 + T))
E = Es * rh/100
x = 0.1 * E /(0.001*p*100*0.001)
Parameters
----------
temp : ndarray
air temperature [degC]
rh : ndarray
relative humidity [%]
p : ndarray
air pressure [hPa = mbar]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
water vapour concentration in mmol mol-1
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=((temp==undef)|(rh==undef)|(p==undef))))*0.01
ea = es*rh*0.01
c = division(1000.*ea, p, undef)
return c.filled(undef)
# Not implemented: met_psy_rh
# Not implemented: met_dpt_rh
def met_h2oc_rh(temp, h, p, undef=-9999.):
"""
Calculation of relative humidity from water vapour concentration:
x = met_h2oc_rh(T, [H2O], p)
The relative humidity in % is calculated according to the following formulas:
Es = 6.1078*exp(17.08085*T/(234.175 + T))
E = 10 * [H2O] * 0.001 * p * 100 * 0.001
x = 100 * E / Es
Parameters
----------
temp : ndarray
air temperature [degC]
h : ndarray
water vapour concentration [mmol mol-1]
p : ndarray
air pressure [hPa = mbar]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
relative humidity in %
History
-------
Written, <NAME>, Jun 2014
"""
es = esat(np.ma.array(temp+T0, mask=((temp==undef)|(h==undef)|(p==undef))))*0.01
ea = 0.001 * h * p
c = 100.*ea/es
return c.filled(undef)
def met_wdrot(wd, a, undef=-9999.):
"""
Rotation of wind direction:
x = met_wdrot(wd, a)
The rotated wind direction is calculated according to the following formulas:
x = wd + a
IF x < 0 THEN x = x + 360
IF x >= 360 THEN x = x - 360
Parameters
----------
wd : ndarray
wind direction [degree]
a : ndarray
rotation angle (positive is clockwise) [degree]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
rotated wind direction
History
-------
Written, <NAME>, Jun 2014
"""
rot = np.ma.array(wd+a, mask=((wd==undef)|(a==undef)))
rot = np.ma.where(rot < 0., rot+360., rot)
rot = np.ma.where(rot >= 360., rot-360., rot)
return rot.filled(undef)
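# Hedged usage sketch (not part of the original module): rotated wind
# directions wrap back into [0, 360) and undef inputs stay undef.
import numpy as np
wd = np.array([350., 10., -9999.])
a = np.array([20., -30., 20.])
rot = met_wdrot(wd, a)                    # -> [10., 340., -9999.]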
def met_urot(u, v, a, undef=-9999.):
"""
Rotation of u-component of wind vector:
x = met_urot(u, v, a)
The rotated u-component is calculated according to the following formula:
x = u * cos (a) + v * sin (a)
Parameters
----------
u : ndarray
u-component of the wind vector
v : ndarray
v-component of the wind vector
a : ndarray
rotation angle (positive is clockwise) [degree]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
rotated u-component
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((u==undef) | (v==undef) | (a==undef), undef, u*np.cos(np.deg2rad(a)) + v*np.sin(np.deg2rad(a)))
def met_vrot(u, v, a, undef=-9999.):
"""
Rotation of v-component of wind vector:
x = met_vrot(u, v, a)
The rotated v-component is calculated according to the following formula:
x = -u * sin (a) + v * cos (a)
Parameters
----------
u : ndarray
u-component of the wind vector
v : ndarray
v-component of the wind vector
a : ndarray
rotation angle (positive is clockwise) [degree]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
rotated v-component
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((u==undef) | (v==undef) | (a==undef), undef, -u*np.sin(np.deg2rad(a)) + v*np.cos(np.deg2rad(a)))
def met_uv_wv(u, v, undef=-9999.):
"""
Calculation of wind velocity from u- and v-component of wind vector:
x = met_uv_wv(u, v)
The horizontal wind velocity is calculated according to the following formula:
x = sqrt(u**2 + v**2)
Parameters
----------
u : ndarray
u-component of the wind vector
v : ndarray
v-component of the wind vector
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
horizontal wind velocity
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((u==undef) | (v==undef), undef, np.sqrt(u*u + v*v))
def met_uv_wd(u, v, undef=-9999.):
"""
Calculation of wind direction from u- and v-component of wind vector:
x = met_uv_wd(u, v)
The horizontal wind direction is calculated according to the following formulas:
IF u = 0 AND v = 0 THEN x = 0
IF u = 0 AND v < 0 THEN x = 360
IF u = 0 AND v > 0 THEN x = 180
IF u > 0 THEN x = 270 - arctan(v/u)
IF u < 0 THEN x = 90 - arctan(v/u)
Parameters
----------
u : ndarray
u-component of the wind vector
v : ndarray
v-component of the wind vector
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
horizontal wind direction
History
-------
Written, <NAME>, Jun 2014
"""
wd = np.ma.zeros(u.shape)
wd.mask = (u==undef) | (v==undef)
wd = np.ma.where((u==0.) & (v==0.), 0., wd)
wd = np.ma.where((u==0.) & (v<0.), 360., wd)
wd = np.ma.where((u==0.) & (v>0.), 180., wd)
wd = np.ma.where((u>0.), 270.-np.rad2deg(np.ma.arctan(v/u)), wd)
wd = np.ma.where((u<0.), 90.-np.rad2deg(np.ma.arctan(v/u)), wd)
return wd.filled(undef)
def met_wvwd_u(wv, wd, undef=-9999.):
"""
Calculation of u-component of wind vector from wind velocity and wind direction:
x = met_wvwd_u(wv, wd)
The u-component of the wind vector is calculated according to the following formula:
x = -wv * sin (wd)
Parameters
----------
wv : ndarray
horizontal wind velocity [m s-1]
wd : ndarray
horizontal wind direction [degree]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
u-component of the wind vector
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((wv==undef) | (wd==undef), undef, -wv*np.sin(np.deg2rad(wd)))
def met_wvwd_v(wv, wd, undef=-9999.):
"""
Calculation of v-component of wind vector from wind velocity and wind direction:
x = met_wvwd_v(wv, wd)
The v-component of the wind vector is calculated according to the following formula:
x = -wv * cos (wd)
Parameters
----------
wv : ndarray
horizontal wind velocity [m s-1]
wd : ndarray
horizontal wind direction [degree]
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
v-component of the wind vector
History
-------
Written, <NAME>, Jun 2014
"""
return np.where((wv==undef) | (wd==undef), undef, -wv*np.cos(np.deg2rad(wd)))
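# Hedged consistency check (not part of the original module): converting wind
# speed/direction to u,v components and back should reproduce the inputs.
import numpy as np
wv = np.array([5., 12.])
wd = np.array([45., 270.])
u = met_wvwd_u(wv, wd)                    # -wv*sin(wd)
v = met_wvwd_v(wv, wd)                    # -wv*cos(wd)
wv2 = met_uv_wv(u, v)                     # back to speed     -> ~[5., 12.]
wd2 = met_uv_wd(u, v)                     # back to direction -> ~[45., 270.]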
def ifeq(y, a0, a1, a2, undef=-9999.):
"""
If-statements:
x = ifeq(y,a0,a1,a2) means IF y == a0 THEN x = a1 ELSE x = a2
Parameters
----------
y : ndarray
input variable
a0 : ndarray
compare to input `y == a0`
a1 : ndarray
result if `y == a0`
a2 : ndarray
result if `y != a0`
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
IF y == a0 THEN x = a1 ELSE x = a2
History
-------
Written, <NAME>, Jun 2014
"""
out = np.where(y == a0, a1, a2)
return np.where((y==undef) | (a0==undef) | (a1==undef) | (a2==undef), undef, out)
def ifne(y, a0, a1, a2, undef=-9999.):
"""
If-statements:
x = ifne(y,a0,a1,a2) means IF y != a0 THEN x = a1 ELSE x = a2
Parameters
----------
y : ndarray
input variable
a0 : ndarray
compare to input `y != a0`
a1 : ndarray
result if `y != a0`
a2 : ndarray
result if `y == a0`
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
IF y != a0 THEN x = a1 ELSE x = a2
History
-------
Written, <NAME>, Jun 2014
"""
out = np.where(y != a0, a1, a2)
return np.where((y==undef) | (a0==undef) | (a1==undef) | (a2==undef), undef, out)
def ifle(y, a0, a1, a2, undef=-9999.):
"""
If-statements:
x = ifle(y,a0,a1,a2) means IF y <= a0 THEN x = a1 ELSE x = a2
Parameters
----------
y : ndarray
input variable
a0 : ndarray
compare to input `y <= a0`
a1 : ndarray
result if `y <= a0`
a2 : ndarray
result if `y > a0`
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
IF y <= a0 THEN x = a1 ELSE x = a2
History
-------
Written, <NAME>, Jun 2014
"""
out = np.where(y <= a0, a1, a2)
return np.where((y==undef) | (a0==undef) | (a1==undef) | (a2==undef), undef, out)
def ifge(y, a0, a1, a2, undef=-9999.):
"""
If-statements:
x = ifge(y,a0,a1,a2) means IF y >= a0 THEN x = a1 ELSE x = a2
Parameters
----------
y : ndarray
input variable
a0 : ndarray
compare to input `y >= a0`
a1 : ndarray
result if `y >= a0`
a2 : ndarray
result if `y < a0`
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
IF y >= a0 THEN x = a1 ELSE x = a2
History
-------
Written, <NAME>, Jun 2014
"""
out = np.where(y >= a0, a1, a2)
return np.where((y==undef) | (a0==undef) | (a1==undef) | (a2==undef), undef, out)
def iflt(y, a0, a1, a2, undef=-9999.):
"""
If-statements:
x = iflt(y,a0,a1,a2) means IF y < a0 THEN x = a1 ELSE x = a2
Parameters
----------
y : ndarray
input variable
a0 : ndarray
compare to input `y < a0`
a1 : ndarray
result if `y < a0`
a2 : ndarray
result if `y >= a0`
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
IF y < a0 THEN x = a1 ELSE x = a2
History
-------
Written, <NAME>, Jun 2014
"""
out = np.where(y < a0, a1, a2)
return np.where((y==undef) | (a0==undef) | (a1==undef) | (a2==undef), undef, out)
def ifgt(y, a0, a1, a2, undef=-9999.):
"""
If-statements:
x = ifgt(y,a0,a1,a2) means IF y > a0 THEN x = a1 ELSE x = a2
Parameters
----------
y : ndarray
input variable
a0 : ndarray
compare to input `y > a0`
a1 : ndarray
result if `y > a0`
a2 : ndarray
result if `y <= a0`
undef : float, optional
elements are excluded from the calculations if any of the inputs equals `undef` (default: -9999.)
Returns
-------
ndarray
IF y > a0 THEN x = a1 ELSE x = a2
History
-------
Written, <NAME>, Jun 2014
"""
out = np.where(y > a0, a1, a2)
return np.where((y==undef) | (a0==undef) | (a1==undef) | (a2==undef), undef, out)
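# Hedged usage sketch (not part of the original module): the if-family applies
# an elementwise condition and propagates undef from any input.
import numpy as np
y = np.array([1., 5., -9999.])
out = iflt(y, 3., 100., 200.)             # IF y < 3 THEN 100 ELSE 200
# -> [100., 200., -9999.]  (the undef element stays undef)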
# Not implemented: write
#
# Local replacement functions if helper functions do not exist in library
#
def _div(a, b, otherwise=np.nan, prec=0.):
"""
Divide two arrays, return `otherwise` if division by 0.
Copy of ..division.py
Parameters
----------
a : array_like
numerator
b : array_like
denominator
otherwise : float
value to return if `b=0` (default: `np.nan`)
prec : float
if |b|<|prec| then `otherwise`
Returns
-------
ratio : numpy array or masked array
a/b if |b| > |prec|
otherwise if |b| <= |prec|
Output is numpy array. It is a masked array if at least one
of `a` or `b` is a masked array.
"""
oldsettings = np.geterr()
np.seterr(divide='ignore')
if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):
out = np.ma.where(np.ma.abs(np.ma.array(b)) > np.abs(prec), np.ma.array(a)/np.ma.array(b), otherwise)
else:
out = np.where(np.abs(np.array(b)) > np.abs(prec), np.array(a)/np.array(b), otherwise)
|
np.seterr(**oldsettings)
|
numpy.seterr
|
#!/usr/bin/env python3
import numpy as np
from car import Car
import utils
from utils import MAX
from optimized import update_car_sensing, compute_sensed_density
from optimized import get_cells_in_range
#############################################################
class Fleet():
def __init__(self, graph, ncars, moveinterval,
sensorrad, splinterval, sensortpr, sensornfp, rng,
loglevel, wpspaths,
maxdist, waypointskdtree, waypointsidx):
self.log = utils.get_multiprocessing_logger(loglevel)
self.graph = graph
self.splinterval = splinterval
self.sensortpr = sensortpr
self.sensornfp = sensornfp
self.moveinterval = moveinterval
self.lastid = -1
self.pedscount = np.full(graph.nnodes, 0.0)
self.samplesz =
|
np.full(graph.nnodes, 0.0)
|
numpy.full
|
from Beam import Beam
#from Empty import Optical_element
from OpticalElement import Optical_element
from SurfaceConic import SurfaceConic
from Shape import BoundaryRectangle
import numpy as np
from SurfaceConic import SurfaceConic
import matplotlib.pyplot as plt
from Vector import Vector
class CompoundOpticalElement(object):
def __init__(self,oe_list=[],oe_name=""):
self.oe = oe_list
self.type = oe_name
def append_oe(self,oe):
self.oe.append(oe)
def oe_number(self):
return len(self.oe)
def reset_oe_list(self):
self.oe = []
def set_type(self,name):
self.type = name
@classmethod
def initialiaze_as_wolter_1(cls,p1,q1,z0):
theta1 = 0.
alpha1 = 0.
print(q1)
print(2*z0)
print("dof=%f" %(2*z0-q1))
#oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p1,0.,theta1,alpha1,"p",2*z0-q1)
oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p1, 0., theta1, alpha1, "p", 2*z0-q1)
#oe2 = Optical_element.initialize_my_hyperboloid(p=0.,q=q1,theta=90*np.pi/180,alpha=0,wolter=1, z0=z0, distance_of_focalization=2*z0-q1)
oe2 = Optical_element.initialize_my_hyperboloid(p=0., q=q1, theta=90 * np.pi / 180, alpha=0, wolter=1, z0=z0,distance_of_focalization=2*z0-q1)
return CompoundOpticalElement(oe_list=[oe1,oe2],oe_name="<NAME>")
@classmethod
def initialiaze_as_wolter_1_with_two_parameters(cls,p1, R, theta):
cp1 = -2 * R / np.tan(theta)
cp2 = 2 * R * np.tan(theta)
cp = max(cp1, cp2)
f = cp / 4
print("focal=%f" % (f))
oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p=p1, q=f, theta=0., alpha=0.,infinity_location="p")
s1 = R / np.tan(2 * theta)
s2 = R / np.tan(4 * theta)
c = (s1 - s2) / 2
z0 = f + c
b1 = np.sqrt(
0.5 * c ** 2 + 0.5 * R ** 2 + 0.5 * R ** 4 / cp ** 2 - R ** 2 * z0 / cp + 0.5 * z0 ** 2 - 0.5 / cp ** 2 * np.sqrt(
(
-c ** 2 * cp ** 2 - cp ** 2 * R ** 2 - R ** 4 + 2 * cp * R ** 2 * z0 - cp ** 2 * z0 ** 2) ** 2 - 4 * cp ** 2 * (
c ** 2 * R ** 4 - 2 * c ** 2 * cp * R ** 2 * z0 + c ** 2 * cp ** 2 * z0 ** 2)))
b2 = np.sqrt(
0.5 * c ** 2 + 0.5 * R ** 2 + 0.5 * R ** 4 / cp ** 2 - R ** 2 * z0 / cp + 0.5 * z0 ** 2 + 0.5 / cp ** 2 * np.sqrt(
(
-c ** 2 * cp ** 2 - cp ** 2 * R ** 2 - R ** 4 + 2 * cp * R ** 2 * z0 - cp ** 2 * z0 ** 2) ** 2 - 4 * cp ** 2 * (
c ** 2 * R ** 4 - 2 * c ** 2 * cp * R ** 2 * z0 + c ** 2 * cp ** 2 * z0 ** 2)))
b = min(b1, b2)
a = np.sqrt(c ** 2 - b ** 2)
ccc = np.array(
[-1 / a ** 2, -1 / a ** 2, 1 / b ** 2, 0., 0., 0., 0., 0., -2 * z0 / b ** 2, z0 ** 2 / b ** 2 - 1])
oe2 = Optical_element.initialize_as_surface_conic_from_coefficients(ccc)
oe2.set_parameters(p=0., q=z0+c, theta=90*np.pi/180, alpha=0., type="My hyperbolic mirror")
#oe2.type = "My hyperbolic mirror"
#oe2.p = 0.
#oe2.q = z0 + c
#oe2.theta = 90 * np.pi / 180
#oe2.alpha = 0.
return CompoundOpticalElement(oe_list=[oe1,oe2],oe_name="Wolter 1")
@classmethod
def initialiaze_as_wolter_2(cls,p1,q1,z0):
#q1 = - q1
focal = q1+2*z0
print("focal=%f" %(focal))
theta1 = 0.
alpha1 = 0.
oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p1,0.,theta1,alpha1,"p", focal)
oe2 = Optical_element.initialize_my_hyperboloid(p=0. ,q=-(focal-2*z0), theta=90*np.pi/180, alpha=0, wolter=2, z0=z0, distance_of_focalization=focal)
return CompoundOpticalElement(oe_list=[oe1,oe2],oe_name="Wolter 2")
@classmethod
def initialiaze_as_wolter_12(cls,p1,q1,focal_parabola,Rmin):
focal = focal_parabola
d = q1 - focal_parabola
z0 = focal_parabola + d/2
print("focal=%f" %(focal))
theta1 = 0.
alpha1 = 0.
oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p1,0.,theta1,alpha1,"p", focal)
ccc = oe1.ccc_object.get_coefficients()
cp = -ccc[8]
print("R=%f, d=%f, cp=%f, z0=%f" %(Rmin,d,cp,z0))
#b1 = np.sqrt(0.125*d**2+Rmin+2*Rmin**4/cp**2-2*Rmin**2*z0/cp+0.5*z0**2-0.125/cp**2*np.sqrt((-cp**2*d**2-8*cp**2*Rmin-16*Rmin**4+16*cp*Rmin**2*z0-4*cp*z0**2)**2-16*cp**2*(4*d**2*Rmin**4-4*cp*d**2*Rmin**2*z0+cp**2*d**2*z0**2)))
#b2 = np.sqrt(0.125*d**2+Rmin+2*Rmin**4/cp**2-2*Rmin**2*z0/cp+0.5*z0**2+0.125/cp**2*np.sqrt((-cp**2*d**2-8*cp**2*Rmin-16*Rmin**4+16*cp*Rmin**2*z0-4*cp*z0**2)**2-16*cp**2*(4*d**2*Rmin**4-4*cp*d**2*Rmin**2*z0+cp**2*d**2*z0**2)))
p1 = -cp ** 2 * d ** 2 - 8 * cp ** 2 * Rmin - 16 * Rmin ** 4 + 16 * cp * Rmin ** 2 * z0 - 4 * cp ** 2 * z0 ** 2
p1 = p1**2
p2 = 16 * cp ** 2 * (4 * d ** 2 * Rmin ** 4 - 4 * cp * d ** 2 * Rmin ** 2 * z0 + cp ** 2 * d ** 2 * z0 ** 2)
sp = 0.125/cp**2*np.sqrt(p1-p2)
sp0 = 0.125*d**2+Rmin+2*Rmin**4/cp**2-2*Rmin**2*z0/cp+0.5*z0**2
b = np.sqrt(sp0-sp)
a = np.sqrt(d**2/4-b**2)
print("a=%f, b=%f" %(a,b))
#oe2 = Optical_element.initialize_my_hyperboloid(p=0. ,q=-(focal-2*z0), theta=90*np.pi/180, alpha=0, wolter=1.1, z0=z0, distance_of_focalization=focal)
cc = np.array([-1/a**2, -1/a**2, 1/b**2, 0., 0., 0., 0., 0., -2*z0/b**2, (z0/b)**2-1])
oe2 = Optical_element.initialize_as_surface_conic_from_coefficients(cc)
oe2.type = "My hyperbolic mirror"
oe2.set_parameters(p=0., q=q1, theta=90.*np.pi/180, alpha=0.)
return CompoundOpticalElement(oe_list=[oe1,oe2],oe_name="Wolter 1.2")
@classmethod
def initialize_as_wolter_3(cls, p, q, distance_between_the_foci):
f=-q
oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p=p, q=f, theta=0., alpha=0., infinity_location="p")
#c = z0+np.abs(f)
c = distance_between_the_foci/2
z0 = np.abs(c)-np.abs(f)
b = c+100
a = np.sqrt((b**2-c**2))
ccc = np.array([1/a**2, 1/a**2, 1/b**2, 0., 0., 0., 0., 0., -2*z0/b**2, z0**2/b**2-1])
oe2 = Optical_element.initialize_as_surface_conic_from_coefficients(ccc)
oe2.set_parameters(p=0., q=z0+z0+np.abs(q), theta=90*np.pi/180)
return CompoundOpticalElement(oe_list=[oe1,oe2],oe_name="Wolter 3")
@classmethod
def wolter_for_japanese(cls,p,q,d,q1,theta1,theta2):
############## ellipse ####################################################################################
ae = (p+q1)/2
be = np.sqrt(p*q1)*np.cos(theta1)
f = np.sqrt(ae**2-be**2)
beta = np.arccos((p**2+4*f**2-q1**2)/(4*p*f))
ccc1 = np.array([1. / be ** 2, 1. / be ** 2, 1 / ae ** 2, 0., 0., 0., 0., 0., 0., -1])
y = - p * np.sin(beta)
z = f - p * np.cos(beta)
oe1 = Optical_element.initialize_as_surface_conic_from_coefficients(ccc1)
oe1.set_parameters(p=p, q=q1, theta=theta1)
############## hyperbola ####################################################################################
p1 = q1 - d
ah = (p1 - q)/2
bh = np.sqrt(p1*q)*np.cos(theta2)
z0 = np.sqrt(ae**2-be**2) - np.sqrt(ah**2+bh**2)
print("z0 = %f" %z0)
ccc2 = np.array([-1. / ah ** 2, -1. / ah ** 2, 1 / bh ** 2, 0., 0., 0., 0., 0., 2 * z0 / bh ** 2, z0 ** 2 / bh ** 2 - 1])
oe2 = Optical_element.initialize_as_surface_conic_from_coefficients(ccc2)
oe2.set_parameters(p=p1, q=q, theta=theta2)
return CompoundOpticalElement(oe_list=[oe1, oe2], oe_name="Wolter for japanese")
@classmethod
def initialize_as_kirkpatrick_baez(cls, p, q, separation, theta, bound1, bound2):
p1 = p - 0.5 * separation
q1 = p - p1
q2 = q - 0.5 * separation
p2 = q - q2
f1p = p1
f1q = p+ q - p1
f2q = q2
f2p = p + q - q2
oe1 = Optical_element.initialize_as_surface_conic_ellipsoid_from_focal_distances(p= f1p, q= f1q, theta= theta, alpha=0., cylindrical=1)
#oe1.bound = bound1
oe1.set_bound(bound1)
oe1.p = p1
oe1.q = q1
oe2 = Optical_element.initialize_as_surface_conic_ellipsoid_from_focal_distances(p= f2p, q= f2q, theta= theta, alpha=90.*np.pi/180, cylindrical=1)
#oe2.bound = bound2
oe2.set_bound(bound2)
oe2.p = p2
oe2.q = q2
return CompoundOpticalElement(oe_list=[oe1,oe2],oe_name="<NAME>")
@classmethod
def initialize_as_montel_parabolic(cls, p, q, theta, bound1, bound2, distance_of_the_screen=None, angle_of_mismatch=0.):
beta = (90. - angle_of_mismatch)*np.pi/180 #### angle between the two mirrors; if angle_of_mismatch is > 0 the two mirrors are closer
oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p=p, q=q, theta=theta, alpha=0., infinity_location='p', focal=q, cylindrical=1)
oe1.set_bound(bound1)
oe2 = oe1.duplicate()
oe2.rotation_surface_conic(beta, 'y')
oe2.set_bound(bound2)
if distance_of_the_screen is None:
distance_of_the_screen = q
ccc = np.array([0., 0., 0., 0., 0., 0., 0., 1., 0., -distance_of_the_screen])
screen = Optical_element.initialize_as_surface_conic_from_coefficients(ccc)
screen.set_parameters(p, q, 0., 0., "Surface conical mirror")
return CompoundOpticalElement(oe_list=[oe1, oe2, screen], oe_name="Montel parabolic")
@classmethod
def initialize_as_montel_ellipsoid(cls, p, q, theta, bound1, bound2, distance_of_the_screen=None, angle_of_mismatch=0.):
beta = (90.- angle_of_mismatch)*np.pi/180 #### angle between the two mirrors
oe1 = Optical_element.initialize_as_surface_conic_ellipsoid_from_focal_distances(p=p, q=q, theta=theta, alpha=0., cylindrical=1)
oe1.set_bound(bound1)
oe2 = oe1.duplicate()
oe2.rotation_surface_conic(beta, 'y')
oe2.set_bound(bound2)
if distance_of_the_screen is None:
distance_of_the_screen = q
print(distance_of_the_screen)
ccc = np.array([0., 0., 0., 0., 0., 0., 0., 1., 0., -distance_of_the_screen])
screen = Optical_element.initialize_as_surface_conic_from_coefficients(ccc)
screen.set_parameters(p, q, 0., 0., "Surface conical mirror")
return CompoundOpticalElement(oe_list=[oe1, oe2, screen], oe_name="Montel ellipsoid")
def compound_specification_after_oe(self, oe):
if self.type == "Wolter 1":
if oe.type == "Surface conical mirror":
#oe.q = 0.
#oe.theta = 90.*np.pi/180
oe.set_parameters(p=None, q=0., theta=90.*np.pi/180)
elif oe.type == "My hyperbolic mirror":
#oe.theta = 0.*np.pi/180
oe.set_parameters(p=None, q=None, theta=0.)
if self.type == "Wolter 1.2":
if oe.type == "Surface conical mirror":
#oe.q = 0.
#oe.theta = 90.*np.pi/180
oe.set_parameters(p=None, q=0., theta=90.*np.pi/180)
elif oe.type == "My hyperbolic mirror":
#oe.theta = 0.*np.pi/180
oe.set_parameters(p=None, q=None, theta=0.)
if self.type == "Wolter 2":
if oe.type == "Surface conical mirror":
#oe.q = 0.
#oe.theta = 90.*np.pi/180
oe.set_parameters(p=None, q=0., theta=90.*np.pi/180)
elif oe.type == "My hyperbolic mirror":
#oe.theta = 0.*np.pi/180
oe.set_parameters(p=None, q=None, theta=0.)
if self.type == "Wolter 3":
if np.abs(oe.theta) < 1e-10:
#oe.q = 0.
#oe.theta = 90.*np.pi/180
oe.set_parameters(p=None, q=0., theta=90.*np.pi/180)
else:
#oe.theta = 0.*np.pi/180
oe.set_parameters(p=None, q=None, theta=0.)
def compound_specification_after_screen(self, oe, beam):
if self.type == "Wolter 1":
if oe.type == "My hyperbolic mirror":
oe.output_frame_wolter(beam)
if self.type == "Wolter 2":
if oe.type == "My hyperbolic mirror":
oe.output_frame_wolter(beam)
if self.type == "Wolter 3":
if oe.theta < 1e-10:
oe.output_frame_wolter(beam)
#todo: check this part carefully
x = beam.x
z = beam.z
vx = beam.vx
vz = beam.vz
beam.x = z
beam.z = x
beam.vx = vz
beam.vz = vx
def trace_compound(self,beam1):
beam=beam1.duplicate()
for i in range (self.oe_number()):
print("Iteration number %d" %(i+1))
self.oe[i].effect_of_optical_element(beam)
self.compound_specification_after_oe(oe = self.oe[i])
self.oe[i].effect_of_the_screen(beam)
self.compound_specification_after_screen(oe = self.oe[i], beam = beam)
return beam
def info(self):
txt = ("\nThe optical element of the %s system are:\n" %(self.type))
for i in range (self.oe_number()):
txt += ("\nThe %d optical element:\n\n" %(i+1))
txt += self.oe[i].info()
return txt
def trace_good_rays(self, beam1):
beam11=beam1.duplicate()
beam = beam1.duplicate()
self.oe[0].rotation_to_the_optical_element(beam11)
self.oe[0].translation_to_the_optical_element(beam11)
b1=beam11.duplicate()
b2=beam11.duplicate()
[b1, t1] = self.oe[0].intersection_with_optical_element(b1)
[b2, t2] = self.oe[1].intersection_with_optical_element(b2)
indices = np.where(beam.flag>=0)
beam.flag[indices] = beam.flag[indices] + 1
if self.type == "Wolter 1":
indices = np.where (t1>=t2)
elif self.type == "Wolter 2":
indices = np.where (t1<=t2)
beam.flag[indices] = -1*beam.flag[indices]
print(beam.flag)
print("Trace indices")
indices = np.where(beam.flag>=0)
print(indices)
#beam.plot_good_xz(0)
beam = beam.good_beam()
beam.plot_good_xz()
plt.title("Good initial rays")
l = beam.number_of_good_rays()
print(l)
if l >0:
beam = self.trace_compound(beam)
else:
print(">>>>>>NO GOOD RAYS")
print("Number of good rays=%f" %(beam.number_of_good_rays()))
return beam
def rotation_traslation_montel(self, beam):
theta = self.oe[0].theta
p = self.oe[0].p
theta = np.pi / 2 - theta
vector = Vector(0., 1., 0.)
vector.rotation(-theta, 'x')
ny = -vector.z / np.sqrt(vector.y ** 2 + vector.z ** 2)
nz = vector.y / np.sqrt(vector.y ** 2 + vector.z ** 2)
n = Vector(0, ny, nz)
vrot = vector.rodrigues_formula(n, -theta)
vrot.normalization()
#########################################################################################################################
position = Vector(beam.x, beam.y, beam.z)
mod_position = position.modulus()
velocity = Vector(beam.vx, beam.vy, beam.vz)
position.rotation(-theta, 'x')
velocity.rotation(-theta, 'x')
position = position.rodrigues_formula(n, -theta)
velocity = velocity.rodrigues_formula(n, -theta)
velocity.normalization()
#position.normalization()
position.x = position.x #* mod_position
position.y = position.y #* mod_position
position.z = position.z #* mod_position
[beam.x, beam.y, beam.z] = [position.x, position.y, position.z]
[beam.vx, beam.vy, beam.vz] = [velocity.x, velocity.y, velocity.z]
####### translation ###################################################################################################
vector_point = Vector(0, p, 0)
vector_point.rotation(-theta, "x")
vector_point = vector_point.rodrigues_formula(n, -theta)
vector_point.normalization()
beam.x = beam.x - vector_point.x * p
beam.y = beam.y - vector_point.y * p
beam.z = beam.z - vector_point.z * p
return beam
def time_comparison(self, beam1, elements):
origin = np.ones(beam1.N)
tf = 1e35 * np.ones(beam1.N)
for i in range (0, len(elements)):
beam = beam1.duplicate()
[beam, t] = self.oe[elements[i]-1].intersection_with_optical_element(beam)
indices = np.where(beam.flag<0)
t[indices] = 1e30
tf = np.minimum(t, tf)
indices = np.where(t == tf)
origin[indices] = elements[i]
return origin
def trace_montel(self,beam):
beam = self.rotation_traslation_montel(beam)
beam.plot_xz()
origin = self.time_comparison(beam, elements = [1, 2, 3])
indices = np.where(origin == 1)
beam1 = beam.part_of_beam(indices)
indices =
|
np.where(origin == 2)
|
numpy.where
|
__version__ = "0.0.1 (2017/07/24)"
__authors__ = ['<NAME> <<EMAIL>>']
__email__ = "<EMAIL>"
import matplotlib
##fixes multiprocess issue (scrambled text in images)
matplotlib.use('agg',warn=False,force=True)
import sys
try:
import sunpy.map
from sunpy.cm import cm
except ImportError:
sys.stdout.write("sunpy not installed, use pip install sunpy --upgrade")
from matplotlib.transforms import Bbox
import matplotlib.dates as mdates
import os
import numpy as np
from datetime import datetime
from datetime import timedelta as dt
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.inset_locator import inset_axes
from astropy.table import vstack,Table,join
from sunpy.instr.aia import aiaprep as ap
class aia_mkimage:
def __init__(self,dayarray,sday=False,eday=False,w0=1900,h0=1144,dpi=100.,sc=1.,
goes=False,goesdat=False,ace=False,aceadat=False,single=True,panel=False,
color3=False,time_stamp=True,odir='working/',cutout=False,
img_scale=None,wavelet=False,
#xlim=None,ylim=None,
synoptic=False,cx=0.,cy=0.,rot_time=None,aia_prep=False,skip_short=False):
"""
Class to create a single image for input file or file array
Parameters
----------
dayarray : string or list
The files used to create a single image. The argument may be a string or list.
If dayarray is a string then the program assumes you are creating a single image
at a single wavelength. If dayarray is a list with length 3 or 4 the code respectively
assumes a 3 color or 4 panel image. The order for creating a 3 color array is RGB,
while the order for a 4 panel image is top left, top right, bottom left, bottom right.
sday : single string or datetime object
The start time over which to plot the goes and ace data.
The argument may be a string or datetime object.
If start is a string then the string must be in
the same form as the dfmt parameter (dfmt parameter
default = %Y/%m/%d %H:%M:%S)
eday : single string or datetime object
The end time over which to plot the goes and ace data.
The argument may be a string or datetime object.
If end is a string then the string must be in
the same form as the dfmt parameter (dfmt parameter
default = %Y/%m/%d %H:%M:%S)
w0: int or float, optional
Width of the movie in pixels. If the height (h0) is larger than
w0 the program will switch the two parameters on output.
However, it will also transpose the x and y axes, which allows
for rotated images and movies. Default = 1900
h0: int or float, optional
Height of the movie in pixels. If h0 is larger than the
width (w0) the program will switch the two parameters on
output. However, it will also transpose the x and y axes,
which allows for rotated images and movies. Default = 1144
dpi : float or integer, optional
The dots per inch of the output image. Default = 100
sc : float or integer, optional
The factor by which to up-sample or down-sample the image. Values larger than 1 up-sample,
while values less than 1 down-sample the images. The default is no
change (1).
goes : boolean, optional
Whether to plot GOES X-ray fluxes on the plots. Only works with single images.
Default = False
goesdat: astropy Table, optional
An astropy Table containing GOES X-ray fluxes from the NOAA archive. Best when
used in conjunction with aia_mkmovie. Default = False
ace : boolean, optional
Whether to plot ACE solar wind data on the plots. Only works with single images.
Default = False
aceadat: astropy Table, optional
An astropy Table containing ACE solar wind parameters from the NOAA archive. Best when
used in conjunction with aia_mkmovie. Default = False
single: boolean, optional
Make a single image. Default = True but resets depending on the input list.
panel: boolean, optional
Make a 4 panel plot. If panel set to True then color3 must be
False and the wavelength list must be 4 wavelengths long.
The wav list has the following format [top right, top left,
bottom right, bottom left]. Default = False
color3 : boolean, optional
Create a 3 color image. If color3 set to True panel must be
False and the wavelength list must be 3 wavelengths long.
The wav list has the following format [R, G, B]. Default =
False.
time_stamp: boolean, optional
Include time stamp in images. Default = True
odir : str, optional
Output directory for png files. Default = 'working/'.
cutout : boolean, optional
Use a subsection of the aia images for processing. Default = False
img_scale: dictionary, optional
Pass a dictionary where the key is a 4 character wavelength string with left padded 0s
in Angstroms and the values are a list. The first element in the list is a color map.
By default the first element contains the color map given by sunpy for a given wavelength
(e.g. for 131 the color map is cm.sdoaia131). The second and third element are respectively
the minimum and maximum color map values. The minimum and maximum assume an arcsinh
transformation and exposure-normalized values. The program uses arcsinh for all image
scaling because the arcsinh function behaves like a log transformation at large
values but does not error at negative values. If the user gives no image scale
then a default image scale loads. The default color table works well for single
and panel images but not for 3 color images.
wavelet: boolean, optional
Use a wavelet filter to sharpen the image (Default = False)
synoptic: boolean, optional
Check using synoptic parameters or not (synoptic images are 1024x1024).
Default = False.
cx : float or int, optional
Center of the field of view for creating images. If cx is set
then the image is assumed to be a cutout. Selecting in prompt
overrides cx. Default = 0.0.
cy : float or int, optional
Center of the field of view for creating images. If cy is set
then the image is assumed to be a cutout. Selecting in prompt
overrides cy. Default = 0.0.
rot_time : string or datetime object, optional
The time cx and cy are measured. Can be set in prompt or manually.
If manually set then the rot_time must be a datetime object or
a string with format dfmt. Default = None.
aia_prep : boolean, optional
Use aia_prep from sunpy when making the image. Default = False.
skip_short: boolean, optional
Skip exposures shorter than 1.85 s. Default = False
"""
#check format of input day array
if isinstance(dayarray,list):
self.dayarray = dayarray
if len(dayarray) == 3: color3 = True #automatically assume rgb creation if 3
elif len(dayarray) == 4: panel = True #automatically assume panel creation if 4
elif len(dayarray) == 1: color3 = False #force color 3 to be false if length 1 array
else:
sys.stdout.write('dayarray must be length 1 (single), 3 (rgb), or 4 (panel)')
sys.exit(1)
#if just a string turn the file string into a list
elif isinstance(dayarray,str):
self.dayarray = [dayarray]
else:
sys.stdout.write('dayarray must be a list or string')
sys.exit(1)
#check if ace flag is set
if isinstance(ace,bool):
self.ace = ace
if self.ace: goes = True #if ace is set goes must also be set
else:
sys.stdout.write('ace must be a boolean')
sys.exit(1)
#check if synoptic flag is set
if isinstance(synoptic,bool):
self.synoptic = synoptic
else:
sys.stdout.write('synoptic must be a boolean')
sys.exit(1)
#check if aiaprep flag is set
if isinstance(aia_prep,bool):
self.aia_prep = aia_prep
else:
sys.stdout.write('aia_prep must be boolean (Default = False)')
sys.exit(1)
#check if goes flag is set
if isinstance(goes,bool):
self.goes = goes
else:
sys.stdout.write('goes must be a boolean')
sys.exit(1)
#check if timestamp flag is set (Default = True)
if isinstance(time_stamp,bool):
self.timestamp = time_stamp
else:
sys.stdout.write('timestamp must be a boolean')
sys.exit(1)
#check if wavelet is set (Default = False)
if isinstance(wavelet,bool):
self.wavelet = wavelet
else:
sys.stdout.write('wavelet must be a boolean')
sys.exit(1)
#check output directory
if isinstance(odir,str):
self.odir = odir
else:
sys.stdout.write('odir must be a string')
sys.exit(1)
#format and create output directory
if self.odir[-1] != '/': self.odir=self.odir+'/'
if not os.path.isdir(self.odir): os.mkdir(self.odir)
#check format of acedat Table if it exits
if isinstance(aceadat,Table):
self.aceadat = aceadat
elif ace == False:
self.aceadat = [] #do not plot goes data
elif isinstance(aceadat,list):
self.aceadat = [] #do not plot goes data
else:
sys.stdout.write('acedat must be a astropy table')
sys.exit(1)
#if goes is set you must give the plot a start and end date for plotting the goes xray flux
if self.goes:
#check inserted end time
if isinstance(sday,datetime):
self.sday = sday
elif isinstance(sday,str):
self.sday = datetime.strptime(sday,dfmt)
else:
sys.stdout.write('sday must be a datetime object or formatted string')
sys.exit(1)
#check inserted end time
if isinstance(eday,datetime):
self.eday = eday
elif isinstance(eday,str):
self.eday = datetime.strptime(eday,dfmt)
else:
sys.stdout.write('eday must be a datetime object or formatted string')
sys.exit(1)
#check format of goesdat Table if it exits
if isinstance(goesdat,Table):
self.goesdat = goesdat
elif goes == False:
self.goesdat = [] #do not plot goes data
elif isinstance(goesdat,list):
self.goesdat = []
else:
sys.stdout.write('goesdat must be a astropy table')
#check image height
if isinstance(h0,(int,float)):
self.h0 = h0
else:
sys.stdout.write('h0 must be an integer or float')
sys.exit(1)
#check image width
if isinstance(w0,(int,float)):
self.w0 = w0
else:
sys.stdout.write('w0 must be an integer or float')
sys.exit(1)
#rotate image if h0 > w0
self.flip_image = False
#Can do without checking since we already checked w0,h0 are numbers
if h0 > w0:
self.h0 = w0
self.w0 = h0
self.flip_image = True
#check if cutout flag is set (Default = False)
if isinstance(cutout,bool):
self.cutout = cutout
else:
sys.stdout.write('cutout must be a boolean')
sys.exit(1)
#check inserted rot_time time
if isinstance(rot_time,datetime):
self.rot_time = rot_time
self.rotation = True
elif isinstance(rot_time,str):
self.rot_time = datetime.strptime(rot_time,dfmt)
self.rotation = True
elif rot_time is None:
self.rot_time = rot_time
self.rotation = False
else:
sys.stdout.write('rot_time must be datetime object or formatted string')
sys.exit(1)
#check image x center
if isinstance(cx,(int,float)):
self.cx = cx
#set variable for rotation in case need for np.rot90
if self.flip_image:
if self.cx > 0:
self.k = 3
else:
self.k = 1
else:
self.k = 0
else:
sys.stdout.write('cx must be an integer or float (Assuming 0)')
self.cx = 0.0
#set variable for rotation in case need for np.rot90
self.k = 0
#check image y center
if isinstance(cy,(int,float)):
self.cy = cy
else:
sys.stdout.write('cy must be an integer or float (Assuming 0)')
self.cy = 0.0
#check dpi
if isinstance(dpi,(int,float)):
self.dpi = dpi
else:
sys.stdout.write('dpi must be an integer or float')
sys.exit(1)
#check sc
if isinstance(sc,(int,float)):
self.sc = sc
else:
sys.stdout.write('sc must be an integer or float')
sys.exit(1)
#check if single wavelength flag is set
if isinstance(single,bool):
self.single = single
else:
sys.stdout.write('single must be a boolean')
sys.exit(1)
#create a panel movie
if isinstance(panel,bool):
self.panel = panel
else:
sys.stdout.write('panel must be a boolean')
sys.exit(1)
#create 3 color image (default = False)
if isinstance(color3,bool):
self.color3 = color3
else:
sys.stdout.write('color3 must be a boolean')
sys.exit(1)
#skip short exposures
if isinstance(skip_short,bool):
self.skip_short = skip_short
else:
sys.stdout.write('skip_short must be a boolean')
sys.exit(1)
#list of acceptable wavelengths
self.awavs = ['0094','0131','0171','0193','0211','0304','0335','1600','1700']
#Dictionary for vmax, vmin, and color
if img_scale is None:
#self.img_scale = {'0094':[cm.sdoaia94 ,np.arcsinh(1.),np.arcsinh(150.)],
# '0131':[cm.sdoaia131 ,np.arcsinh(1.),np.arcsinh(500.)],
# '0171':[cm.sdoaia171 ,np.arcsinh(10.),np.arcsinh(2500.)],
# '0193':[cm.sdoaia193 ,np.arcsinh(100.),np.arcsinh(4500.)],
# '0211':[cm.sdoaia211 ,np.arcsinh(10.),np.arcsinh(4000.)],
# '0304':[cm.sdoaia304 ,np.arcsinh(2.),np.arcsinh(300.)],
# '0335':[cm.sdoaia335 ,np.arcsinh(1.),np.arcsinh(100.)],
# '1600':[cm.sdoaia1600,np.arcsinh(20.),np.arcsinh(500.)],
# '1700':[cm.sdoaia1700,np.arcsinh(200.),np.arcsinh(4000.)]}
self.img_scale = {'0094':[cm.sdoaia94 ,np.arcsinh(1.),np.arcsinh(150.)],
'0131':[cm.sdoaia131 ,np.arcsinh(1.),np.arcsinh(500.)],
'0171':[cm.sdoaia171 ,np.arcsinh(10.),np.arcsinh(2500.)],
'0193':[cm.sdoaia193 ,np.arcsinh(10.),np.arcsinh(4500.)],
'0211':[cm.sdoaia211 ,np.arcsinh(10.),np.arcsinh(4000.)],
'0304':[cm.sdoaia304 ,np.arcsinh(2.),np.arcsinh(300.)],
'0335':[cm.sdoaia335 ,np.arcsinh(1.),np.arcsinh(100.)],
'1600':[cm.sdoaia1600,np.arcsinh(20.),np.arcsinh(500.)],
'1700':[cm.sdoaia1700,np.arcsinh(200.),np.arcsinh(4000.)]}
elif isinstance(img_scale,dict):
self.img_scale = img_scale
else:
sys.stdout.write('img_scale must be a dictionary with color map, min value, max value')
sys.exit(1)
#Removed logic to check x and y limits <NAME>. (2017/09/06)
## #check proposed x and y limits
## if ((xlim is None) & (ylim is None) & (not self.rotation)):
## self.cutout = False
## #if you are rotating assume a cut out (no reason to rotate with full sun)
## elif (self.rotation):
## self.cutout = True
## #make sure
## #make sure
## elif ((xlim is not None) & (isinstance(xlim,(np.ndarray,list))) & (ylim is not None) & (isinstance(ylim,(np.ndarray,list)))):
## for i in xlim:
## if not isinstance(i,(float,int)):
## sys.stdout.write('Individual x values must be float or int')
## sys.exit(1)
## #if passes set xlim
## #self.xlim = xlim
## for i in ylim:
## if not isinstance(i,(float,int)):
## sys.stdout.write('Individual y values must be float or int')
## sys.exit(1)
## #if passes set ylim
## #self.ylim = ylim
## else:
## sys.stdout.write('X and Y limits must be empty, lists, or numpy arrays')
## sys.exit(1)
#create window for plotting
def sub_window(self):
#3 color image
if self.color3:
self.scale = [self.img.scale[0].value,self.img.scale[1].value] # get x, y image scale
#single color image
else:
self.scale = [self.img.scale[0].value,self.img.scale[1].value] # get x, y image scale
#if rotation set get modify cx and cy values
if self.rotation:
#make rotation stable across different sunpy version
#try:
# from sunpy.physics.differential_rotation import rot_hpc
#except ImportError:
#forcing sunpy > 8.0
from sunpy.physics.differential_rotation import solar_rotate_coordinate
#use astropy SkyCoord
from astropy.coordinates import SkyCoord
#get frame for coordinates
from sunpy.coordinates import frames
import astropy.units as u
#create Sky Coord class with intial values
c = SkyCoord(self.cx*u.arcsec,self.cy*u.arcsec,obstime=self.rot_time,frame=frames.Helioprojective)
#rotate start points
nc = solar_rotate_coordinate(c,self.obs_time)
#update with new rotation values
self.cx, self.cy = nc.Tx.value,nc.Ty.value
#set new plot limits
#flip x and y values if h0>w0
if self.flip_image:
self.xlim = [self.cy-(self.scale[0]*self.w0/2.),self.cy+(self.scale[0]*self.w0/2.)]
self.ylim = [self.cx-(self.scale[1]*self.h0/2.),self.cx+(self.scale[1]*self.h0/2.)]
if self.k == 3:
self.xlim = self.xlim[::-1]
if self.k == 1:
self.ylim = self.ylim[::-1]
#self.xlim = self.xlim[::-1]
#if self.k == 1:
# self.xlim = [self.cy+(self.scale[0]*self.w0/2.),self.cy-(self.scale[0]*self.w0/2.)]
# self.ylim = [self.cx+(self.scale[1]*self.h0/2.),self.cx-(self.scale[1]*self.h0/2.)]
else:
self.xlim = [self.cx-(self.scale[0]*self.w0/2.),self.cx+(self.scale[0]*self.w0/2.)]
self.ylim = [self.cy-(self.scale[1]*self.h0/2.),self.cy+(self.scale[1]*self.h0/2.)]
#for j,i in enumerate(dayarray):
#reformat file to be in 1900x1200 array and contain timetext
def format_img(self):
"""
Formats image and writes image to png file.
"""
#input fits file
self.filep = self.dayarray
#check image quality
check, img = self.qual_check()
#check the if wavelet flag is set and if so run wavelet processing
if self.wavelet:
img = self.apply_wavelet(img)
#return image wavelength
#if isinstance(img,list):
if self.color3:
img3d = np.zeros((img[0].data.shape[0],img[0].data.shape[1],3))
for j,i in enumerate(img):
#set normalized scaling for every observation
ivmin = self.img_scale[self.wav[j]][1]
ivmax = self.img_scale[self.wav[j]][2]
prelim = (np.arcsinh(i.data/i.exposure_time.value)-ivmin)/ivmax
#replace out of bounds points
prelim[prelim < 0.] = 0.
prelim[prelim > 1.] = 1.
#if flipped image flip the x,y values in prelim
if self.flip_image:
img3d[:,:,j] = np.rot90(prelim,k=self.k)
else:
img3d[:,:,j] = prelim
#output png file
outfi = self.odir+'AIA_{0}_'.format(img[0].date.strftime('%Y%m%d_%H%M%S'))+'{0}_{1}_{2}.png'.format(*self.wav)
#observed time
self.obs_time = img[0].date
#set scale for plotting
self.scale = [self.img.scale[0].value,self.img.scale[1].value] # get x, y image scale
#set up panel plot parameters
elif self.panel:
ivmin = {}
ivmax = {}
icmap = {}
#put parameters in a series of dictionaries
for j,i in enumerate(img):
icmap[self.wav[j]] = self.img_scale[self.wav[j]][0]
ivmin[self.wav[j]] = self.img_scale[self.wav[j]][1]
ivmax[self.wav[j]] = self.img_scale[self.wav[j]][2]
outfi = self.odir+'AIA_{0}_'.format(img[0].date.strftime('%Y%m%d_%H%M%S'))+'{0}_{1}_{2}_{3}.png'.format(*self.wav)
#set scale for plotting
self.scale = [self.img.scale[0].value,self.img.scale[1].value] # get x, y image scale
#observed time
self.obs_time = img[0].date
else:
#use default color tables
icmap = self.img_scale[self.wav][0]
ivmin = self.img_scale[self.wav][1]
ivmax = self.img_scale[self.wav][2]
outfi = self.odir+'AIA_{0}_'.format(img.date.strftime('%Y%m%d_%H%M%S'))+'{0}.png'.format(self.wav)
#set scale for plotting
self.img = img
self.scale = [self.img.scale[0].value,self.img.scale[1].value] # get x, y image scale
#observed time
self.obs_time = img.date
#set up subwindow limits if cutout set
if self.cutout: self.sub_window()
#see if output file already exists
test = os.path.isfile(outfi)
#test to see if png file already exists and passes quality tests
if ((test == False) & (check)):
print('Modifying file '+outfi)
# <NAME> 2016/10/06
#Block add <NAME> (2016/10/06) to give physical coordinate values
#set up extra in stuff in plot if panel set
if self.panel:
fig,ax = plt.subplots(figsize=(self.sc*float(self.w0)/float(self.dpi),self.sc*float(self.h0)/float(self.dpi)),nrows=2,ncols=2)
#remove space between plots
fig.subplots_adjust(wspace=0.0001,hspace=0.0001)
#make axis object a 1D array
ax = ax.ravel()
img = sunpy.map.Map(*self.filep)
#set up dictionary for plotting data
img_dict = {}
for l,p in enumerate(self.wav): img_dict[p] = img[l]
#single image properties
else:
fig,ax = plt.subplots(figsize=(self.sc*float(self.w0)/float(self.dpi),self.sc*float(self.h0)/float(self.dpi)))
img = sunpy.map.Map(*self.filep)
ax.set_axis_off()
#universal image properties
fig.set_dpi(self.dpi)
fig.subplots_adjust(left=0,bottom=0,right=1,top=1)
#return extent of image
#use the first image in the list if it is a composite image to get the image boundaries
if isinstance(img,list):
maxx,minx,maxy,miny = self.img_extent(img[0])
#else use the only image
else:
maxx,minx,maxy,miny = self.img_extent(img)
#set text location
if ((self.w0 > self.h0) & (not self.cutout)):
txtx = -(self.w0-self.h0)
txty = (maxy-miny)*0.01
elif ((self.w0 < self.h0) & (not self.cutout)):
txty = -(self.h0-self.w0)
txtx = (maxx-minx)*0.01
elif ((self.w0 == self.h0) & (not self.cutout)):
txtx = (maxx-minx)*0.01
txty = (maxy-miny)*0.01
elif ((self.cutout) | (self.panel)):
txtx = (self.xlim[1]-self.xlim[0])*0.01+(min(self.xlim)-minx)
txty = (self.ylim[1]-self.ylim[0])*0.01+(min(self.ylim)-miny)
#set the origin location
origin = 'lower'
#if self.flip_image:
# origin = 'upper'
#plot the image in matplotlib
#use color composite image if color3 set
if self.color3:
ax.imshow(img3d,interpolation='none',origin=origin,extent=[minx,maxx,miny,maxy],aspect='auto')
ax.text(0.01,0.02,
'AIA {0}/{1}/{2}'.format(*self.wav)+'- {0}Z'.format(img[0].date.strftime('%Y/%m/%d - %H:%M:%S')),
color='white',fontsize=36,zorder=5000,fontweight='bold',transform=ax.transAxes)
#loop through axis objects if panel
elif self.panel:
#see if image is flipped
if self.flip_image:
for l,p in enumerate(self.wav):
ax[l].imshow(np.arcsinh(np.rot90(img_dict[p].data/img_dict[p].exposure_time.value,k=self.k)),
interpolation='none',cmap=icmap[p],origin=origin,vmin=ivmin[p],vmax=ivmax[p],extent=[minx,maxx,miny,maxy],aspect='auto')
else:
for l,p in enumerate(self.wav):
ax[l].imshow(
|
np.arcsinh(img_dict[p].data/img_dict[p].exposure_time.value)
|
numpy.arcsinh
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import gym
import numpy as np
from gym import spaces
from gym.envs.registration import EnvSpec
from mujoco_py import load_model_from_path, MjSim, MjViewer, MjRenderContextOffscreen
class PendulumWithGoals(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30
}
def __init__(self, goal_reaching_thresholds=np.array([0.075, 0.075, 0.75]),
goal_not_reached_penalty=-1, goal_reached_reward=0, terminate_on_goal_reaching=True,
time_limit=1000, frameskip=1, random_goals_instead_of_standing_goal=False,
polar_coordinates: bool=False):
super().__init__()
dir = os.path.dirname(__file__)
model = load_model_from_path(dir + "/pendulum_with_goals.xml")
self.sim = MjSim(model)
self.viewer = None
self.rgb_viewer = None
self.frameskip = frameskip
self.goal = None
self.goal_reaching_thresholds = goal_reaching_thresholds
self.goal_not_reached_penalty = goal_not_reached_penalty
self.goal_reached_reward = goal_reached_reward
self.terminate_on_goal_reaching = terminate_on_goal_reaching
self.time_limit = time_limit
self.current_episode_steps_counter = 0
self.random_goals_instead_of_standing_goal = random_goals_instead_of_standing_goal
self.polar_coordinates = polar_coordinates
# spaces definition
self.action_space = spaces.Box(low=-self.sim.model.actuator_ctrlrange[:, 1],
high=self.sim.model.actuator_ctrlrange[:, 1],
dtype=np.float32)
if self.polar_coordinates:
self.observation_space = spaces.Dict({
"observation": spaces.Box(low=np.array([-np.pi, -15]),
high=np.array([np.pi, 15]),
dtype=np.float32),
"desired_goal": spaces.Box(low=np.array([-np.pi, -15]),
high=np.array([np.pi, 15]),
dtype=np.float32),
"achieved_goal": spaces.Box(low=np.array([-np.pi, -15]),
high=np.array([np.pi, 15]),
dtype=np.float32)
})
else:
self.observation_space = spaces.Dict({
"observation": spaces.Box(low=np.array([-1, -1, -15]),
high=np.array([1, 1, 15]),
dtype=np.float32),
"desired_goal": spaces.Box(low=
|
np.array([-1, -1, -15])
|
numpy.array
|
import tensorflow as tf
import numpy as np
from scipy import signal
from scipy.ndimage import gaussian_filter
from PIL import Image, ImageDraw
import random
import glob, os
import csv
from multiprocessing import Pool
import subprocess
import time
width = 512
height = 512
scale = 2
np.random.seed(os.getpid() + int(time.time()))
random.seed(os.getpid() + int(time.time()))
class BaseData:
def __init__(self):
self.load_idmap()
def load_idmap(self):
self.glyph_id = {}
self.glyphs = {}
self.glyph_type = {}
self.glyph_id[''] = 0
self.glyphs[0] = ''
with open(os.path.join('data','codepoints.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
codehex = row[1]
if len(codehex) > 7:
code = eval('"' + ''.join(['\\u' + codehex[i*4:i*4+4] for i in range(len(codehex) // 4)]) + '"')
else:
code = chr(int(codehex, 16))
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
with open(os.path.join('data','id_map.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
code = bytes.fromhex(row[2]).decode()
if code in self.glyph_id:
k = self.glyph_id[code]
else:
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
k = i
self.glyph_type[k] = int(row[3])
self.id_count = len(self.glyph_id)
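# Hedged illustration (not part of the original loader): a glyph id is the
# character's UTF-32LE bytes read as a little-endian integer, so a
# single-codepoint glyph maps directly to its Unicode code point, e.g.
#   int.from_bytes('A'.encode('utf-32le'), 'little') == ord('A') == 65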
def sub_load(args):
exe = os.path.join('data','load_font','load_font.exe')
if not os.path.exists(exe):
exe = os.path.join('data','load_font','load_font')
proc = subprocess.Popen([
exe,
args[0],
'128',
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ret = {}
for c in args[1]:
if len(c) == 1:
charbuf = c.encode("utf-32-le")
proc.stdin.write(charbuf[:4])
proc.stdin.flush()
result = proc.stdout.read(32)
code = result[:4]
rows = int.from_bytes(result[4:8], 'little')
width = int.from_bytes(result[8:12], 'little')
boundingWidth = int.from_bytes(result[12:16], 'little', signed=True)
boundingHeight = int.from_bytes(result[16:20], 'little', signed=True)
horiBearingX = int.from_bytes(result[20:24], 'little', signed=True)
horiBearingY = int.from_bytes(result[24:28], 'little', signed=True)
horiAdvance = int.from_bytes(result[28:32], 'little', signed=True)
if rows * width == 0:
continue
assert(charbuf == code)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
horiBearingX = horiBearingX / 64
horiBearingY = horiBearingY / 64
horiAdvance = horiAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': horiAdvance,
'image': img,
}
}
result = proc.stdout.read(28)
rows = int.from_bytes(result[:4], 'little')
width = int.from_bytes(result[4:8], 'little')
boundingWidth = int.from_bytes(result[8:12], 'little', signed=True)
boundingHeight = int.from_bytes(result[12:16], 'little', signed=True)
vertBearingX = int.from_bytes(result[16:20], 'little', signed=True)
vertBearingY = int.from_bytes(result[20:24], 'little', signed=True)
vertAdvance = int.from_bytes(result[24:28], 'little', signed=True)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
vertBearingX = vertBearingX / 64
vertBearingY = vertBearingY / 64
vertAdvance = vertAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'vertBearingX': vertBearingX,
'vertBearingY': vertBearingY,
'vertAdvance': vertAdvance,
'image': img,
}
ret[(args[0],c)] = value
else:
pass
proc.stdin.close()
return ret
def sub_load_image(path):
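    # Load handwritten glyph PNGs: each subdirectory is named by the
    # hex-encoded UTF-8 of its character. The ink bounding box is cropped,
    # the image inverted, and FreeType-like metrics are synthesized; an
    # optional 'vert' subdirectory supplies vertical-writing variants.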
dirnames = glob.glob(os.path.join(path, '*'))
ret = {}
for d in dirnames:
c_code = os.path.basename(d)
char = str(bytes.fromhex(c_code), 'utf-8')
count = 0
for f in glob.glob(os.path.join(d, '*.png')):
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%count,char)] = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': 96.0,
'image': img,
},
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
}
count += 1
vert_imgs = glob.glob(os.path.join(d, 'vert', '*.png'))
if 0 < len(vert_imgs) <= count:
for i in range(count):
f = vert_imgs[i % len(vert_imgs)]
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%i,char)]['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
elif 0 < len(vert_imgs):
vcount = 0
for f in vert_imgs:
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%vcount,char)] = {
'horizontal': ret[('hand%06d'%(vcount % count),char)]['horizontal'],
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
                        'vertBearingX': vertBearingX,
                        'vertBearingY': vertBearingY,
'vertAdvance': 96.0,
'image': img,
}
}
vcount += 1
return ret
def gaussian_kernel(kernlen=7, xstd=1., ystd=1.):
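    # 2-D anisotropic Gaussian kernel built as the outer product of two 1-D
    # Gaussian windows (ystd along rows, xstd along columns).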
    gkern1dx = signal.windows.gaussian(kernlen, std=xstd).reshape(kernlen, 1)
    gkern1dy = signal.windows.gaussian(kernlen, std=ystd).reshape(kernlen, 1)
gkern2d = np.outer(gkern1dy, gkern1dx)
return gkern2d
def apply_random_filter(images):
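    # Random photometric degradation: 25% Gaussian blur, 25% unsharp masking,
    # otherwise the images are returned unchanged.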
p = np.random.uniform(0., 1.)
if p < 0.25:
sigma = np.random.uniform(0., 1.75)
return gaussian_filter(images, sigma=sigma)
if p < 0.5:
sigma = np.random.uniform(0., 6.)
gauss = gaussian_filter(images, sigma=sigma)
gain = np.random.uniform(0., 5.)
return (1 + gain) * images - gain * gauss
return images
def is_Font_match(font, target):
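    # 'hand'-prefixed keys (handwritten samples) all count as the same font;
    # real font paths must match exactly.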
if target.startswith('hand'):
return font.startswith('hand')
else:
return font == target
class FontData(BaseData):
def __init__(self):
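        # Build the glyph image cache from handwritten samples plus the
        # English and Japanese font directories, then precompute the sampling
        # probabilities and the train/test key splits.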
super().__init__()
self.img_cache = {}
print('loading handwrite image')
self.img_cache.update(sub_load_image(os.path.join('data','handwritten')))
print('loading enfont')
enfont_files = sorted(glob.glob(os.path.join('data','enfont','*.ttf')) + glob.glob(os.path.join('data','enfont','*.otf')))
en_glyphs = [self.glyphs[key] for key in self.glyphs.keys() if self.glyph_type.get(key,-1) in [0,1,2,6]]
items = [(f, en_glyphs) for f in enfont_files]
total = len(enfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
print('loading jpfont')
jpfont_files = sorted(glob.glob(os.path.join('data','jpfont','*.ttf')) + glob.glob(os.path.join('data','jpfont','*.otf')))
items = [(f, list(self.glyphs.values())) for f in jpfont_files]
total = len(jpfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
for key in self.img_cache:
i = self.glyph_id[key[1]]
if i not in self.glyph_type:
self.glyph_type[i] = type_count_max
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
gtype_count = [0 for _ in range(type_count_max)]
type_count = [0 for _ in range(type_count_max)]
for key in self.img_cache:
t = self.glyph_type[self.glyph_id[key[1]]]
type_count[t] += 1
for k in self.glyph_type:
gtype_count[self.glyph_type[k]] += 1
self.image_keys = list(self.img_cache.keys())
self.test_keys = self.get_test_keys()
self.train_keys = self.get_train_keys()
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1, 1.0]
self.prob_map = [p/t for p,t in zip(self.prob_map, type_count)]
self.random_probs_train = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.random_probs_test = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_kanji = [0, 0, 0, 0, 0, 1.0, 0, 0, 1.0, 1.0, 0.5, 0]
self.prob_map_kanji = [p/t for p,t in zip(self.prob_map_kanji, type_count)]
self.kanji_probs_train = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.kanji_probs_test = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_num = [1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_num = [p/t for p,t in zip(self.prob_map_num, type_count)]
self.num_probs_train = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.num_probs_test = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_alpha = [0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_alpha = [p/t for p,t in zip(self.prob_map_alpha, type_count)]
self.alpha_probs_train = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.alpha_probs_test = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_hira = [0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_hira = [p/t for p,t in zip(self.prob_map_hira, type_count)]
self.hira_probs_train = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.hira_probs_test = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
self.train_keys_num = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.train_num_fonts = list(set([key[0] for key in self.train_keys_num]))
self.test_keys_num = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.test_num_fonts = list(set([key[0] for key in self.test_keys_num]))
self.train_keys_capital = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.train_capital_fonts = list(set([key[0] for key in self.train_keys_capital]))
self.test_keys_capital = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.test_capital_fonts = list(set([key[0] for key in self.test_keys_capital]))
self.train_keys_small = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.train_small_fonts = list(set([key[0] for key in self.train_keys_small]))
self.test_keys_small = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.test_small_fonts = list(set([key[0] for key in self.test_keys_small]))
self.train_keys_alpha = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.train_alpha_fonts = list(set([key[0] for key in self.train_keys_alpha]))
self.test_keys_alpha = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.test_alpha_fonts = list(set([key[0] for key in self.test_keys_alpha]))
self.train_keys_jp = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.test_keys_jp = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.train_jp_fonts = list(set([key[0] for key in self.train_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_jp_fonts])
self.train_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_jp_fonts]
self.test_jp_fonts = list(set([key[0] for key in self.test_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_jp_fonts])
self.test_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_jp_fonts]
self.train_keys_hira = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.test_keys_hira = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.train_hira_fonts = list(set([key[0] for key in self.train_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_hira_fonts])
self.train_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_hira_fonts]
self.test_hira_fonts = list(set([key[0] for key in self.test_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_hira_fonts])
self.test_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_hira_fonts]
self.train_keys_jpnum = [x for x in self.train_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.train_jp_fonts)]
self.test_keys_jpnum = [x for x in self.test_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.test_jp_fonts)]
self.train_jpnum_fonts = list(set([key[0] for key in self.train_keys_jpnum]))
self.train_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.train_jpnum_fonts]
self.test_jpnum_fonts = list(set([key[0] for key in self.test_keys_jpnum]))
self.test_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.test_jpnum_fonts]
self.prob_map_clustering = [
gtype_count[0] / type_count[0],
gtype_count[1] / type_count[1],
gtype_count[2] / type_count[2],
gtype_count[3] / type_count[3],
gtype_count[4] / type_count[4],
gtype_count[5] / type_count[5],
gtype_count[6] / type_count[6],
0.,
0.,
0.,
0.,
0.
]
self.random_background = glob.glob(os.path.join('data','background','*'))
self.max_std = 8.0
self.min_ker = 4
def get_test_keys(self):
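        # Hold out every Noto font as the test split.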
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
test_keys = [k for k in keys if fontname(k[0]).startswith('Noto')]
return test_keys
def get_train_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
train_keys = [k for k in keys if not fontname(k[0]).startswith('Noto')]
return train_keys
def load_background_images(self):
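        # Pick a random background photo, scale and crop it to the canvas,
        # then apply random flips, brightness and contrast jitter.
        # The result is returned in the [-1, 1] range.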
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency (http://stackoverflow.com/a/1963146)
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
alpha = im.convert('RGBA').getchannel('A')
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
# (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
im_file = random.choice(self.random_background)
im = Image.open(im_file)
im = remove_transparency(im).convert('RGB')
scale_min = max(width / im.width, height / im.height)
scale_max = max(scale_min + 0.5, 1.5)
s = np.random.uniform(scale_min, scale_max)
im = im.resize((int(im.width * s)+1, int(im.height * s)+1))
x1 = np.random.randint(0, im.width - width)
y1 = np.random.randint(0, im.height - height)
im_crop = im.crop((x1, y1, x1 + width, y1 + height))
img = np.asarray(im_crop).astype(np.float32)
img = img / 128. - 1.
if np.random.uniform() < 0.5:
img = img[::-1,:,:]
if np.random.uniform() < 0.5:
img = img[:,::-1,:]
brightness = np.random.uniform(-1.0, 1.0)
brightness = np.array([brightness,brightness,brightness])
img += brightness[None,None,:]
contrast = np.random.uniform(0.2, 1.8)
contrast = np.array([contrast,contrast,contrast])
img = img * contrast[None,None,:]
img = np.clip(img, -1.0, 1.0)
return img
def tateyokotext_images(self, keys, fonts, font_p):
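        # Vertical text page in which short numeral runs are set to read
        # horizontally (tate-chu-yoko). Produces the rendered image plus the
        # label planes and per-pixel character ids.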
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] == 0 else 0. for key in keys]
selection2 = [key for key in random.choices(keys, k=max_count*2, weights=probs2)]
base_line = width - text_size // 2
line_space = int(text_size * random.uniform(1.05, 2.0))
line_start = 0
line_end = 0
isnum = -1
i = 0
for key in selection:
if isnum < 0 or isnum > 1:
if np.random.uniform() < 0.1:
isnum = 0
else:
isnum = -1
if isnum < 0:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
horiBearingX = 0
else:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
key = selection2[i]
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
vertBearingX = -text_size * 0.5
vertBearingY = 0
vertAdvance = text_size
if line_end + vertAdvance >= height:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size / 2 < 0:
break
line_start = 0
line_end = 0
if isnum >= 0:
t = (line_end + vertBearingY + text_size * 0.75 - horiBearingY) / height
else:
t = (line_end + vertBearingY) / height
if isnum > 0:
l = (base_line + horiBearingX) / width
else:
l = (base_line + vertBearingX + horiBearingX) / width
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
if isnum > 0:
l = int(np.clip(base_line + horiBearingX, 0, width - w))
else:
l = int(np.clip(base_line + vertBearingX + horiBearingX, 0, width - w))
if isnum >= 0:
t = int(np.clip(line_end + vertBearingY + text_size * 0.75 - horiBearingY, 0, height - h))
else:
t = int(np.clip(line_end + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
if isnum != 0:
line_end += vertAdvance
if isnum >= 0:
isnum += 1
i += 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
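        # Label planes: centre heatmap, log-scaled char width/height,
        # x/y offsets toward the char centre, text-line mask, separator mask.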
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
def yoko_images(self, keys, fonts, font_p):
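        # Horizontal text laid out in up to two side-by-side columns split at
        # line_break; text baselines are drawn into `lines`, the column gap
        # into `seps`.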
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
base_line += line_space
if base_line + text_size >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
def tate_images(self, keys, fonts, font_p):
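        # Vertical text in columns flowing right to left, split into an upper
        # and a lower block at line_break; the drawn lines mark each text
        # column and `seps` marks the horizontal gap between the blocks.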
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = width - line_space + text_size // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
base_line -= line_space
if base_line - text_size / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
                line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
def tatefurigana_images(self, keys, fonts, font_p):
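        # Vertical body text at double size (text_size2) with furigana:
        # half-size kana drawn in a narrow column to the right of each body
        # column.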
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size2 * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = width - line_space + text_size2 // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
                    # furigana (ruby text) handling
base_line2 = base_line + text_size2 // 2 + text_size // 2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
                            # leave this position empty
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
line_start2 += int(vertAdvance)
line_start2p = line_start2
continue
                        # place a furigana glyph here
l = (base_line2 + vertBearingX) / width
t = (line_start2 + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line2 + vertBearingX, 0, width - w))
t = int(np.clip(line_start2 + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start2 += int(vertAdvance)
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size2 / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size2 // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
                line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size2 // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
def yokofurigana_images(self, keys, fonts, font_p):
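        # Horizontal counterpart of tatefurigana_images: body text at double
        # size with half-size kana intended as furigana above each line.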
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size2 * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
# furigana (ruby text) handling
base_line2 = base_line - text_size2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
# leave this spot empty
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line // scale),
(line_start // scale, base_line // scale)), fill=255, width=3)
line_start2 += int(horiAdvance)
line_start2p = line_start2
continue
# place furigana here
l = (line_start2 + horiBearingX) / width
t = (base_line2 - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key2[1]]  # use the furigana glyph's key (key2), not the outer-loop key
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line2 - horiBearingY, 0, height - h))
left = int(np.clip(line_start2 + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start2 += int(horiAdvance)
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line // scale),
(line_start // scale, base_line // scale)), fill=255, width=3)
base_line += line_space
if base_line + text_size2 >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
def null_images(self):
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
labels = np.stack([keymap, xsizes, ysizes, offsetx, offsety, lines, seps], -1)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_random_line(self):
images =
|
np.zeros([height, width], dtype=np.float32)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
ClaSP (Classification Score Profile) Segmentation.
Notes
-----
As described in
@inproceedings{clasp2021,
title={ClaSP - Time Series Segmentation},
author={<NAME> and Ermshaus, Arik and <NAME>},
booktitle={CIKM},
year={2021}
}
"""
from sktime.annotation.base import BaseSeriesAnnotator
__author__ = ["ermshaua", "patrickzib"]
__all__ = ["ClaSPSegmentation", "find_dominant_window_sizes"]
from queue import PriorityQueue
import numpy as np
import pandas as pd
from sktime.transformations.series.clasp import ClaSPTransformer
from sktime.utils.validation.series import check_series
def find_dominant_window_sizes(X, offset=0.05):
"""Determine the Window-Size using dominant FFT-frequencies.
Parameters
----------
X : array-like, shape=[n]
a single univariate time series of length n
offset : float
Upper bound on the window size, expressed as a fraction of the series length n
Returns
-------
window_size : int
Half of the dominant period length (in samples)
"""
fourier = np.absolute(np.fft.fft(X))
freq = np.fft.fftfreq(X.shape[0], 1)
coefs = []
window_sizes = []
for coef, freq in zip(fourier, freq):
if coef and freq > 0:
coefs.append(coef)
window_sizes.append(1 / freq)
coefs = np.array(coefs)
window_sizes = np.asarray(window_sizes, dtype=np.int64)
idx = np.argsort(coefs)[::-1]
for window_size in window_sizes[idx]:
if window_size not in range(20, int(X.shape[0] * offset)):
continue
return int(window_size / 2)
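# Illustrative usage sketch (added; not part of the original module). For a roughly
# periodic series, the returned value is about half the dominant period length:
#
#   rng = np.random.default_rng(0)
#   ts = np.sin(np.linspace(0, 100 * np.pi, 5000)) + 0.1 * rng.standard_normal(5000)
#   find_dominant_window_sizes(ts, offset=0.05)   # ~50, half of the 100-sample period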
def _is_trivial_match(candidate, change_points, n_timepoints, exclusion_radius=0.05):
"""Check if a candidate change point is in close proximity to other change points.
Parameters
----------
candidate : int
A single candidate change point. It will be chosen if it is a non-trivial
match based on exclusion_radius.
change_points : list, dtype=int
List of change points chosen so far
n_timepoints : int
Total length
exclusion_radius : int
Exclusion Radius for change points to be non-trivial matches
Returns
-------
trivial_match: bool
If the 'candidate' change point is a trivial match to the ones in change_points
"""
change_points = [0] + change_points + [n_timepoints]
exclusion_radius = np.int64(n_timepoints * exclusion_radius)
for change_point in change_points:
left_begin = max(0, change_point - exclusion_radius)
right_end = min(n_timepoints, change_point + exclusion_radius)
if candidate in range(left_begin, right_end):
return True
return False
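# Hedged example (added): with n_timepoints=1000 and the default exclusion_radius of
# 0.05, any candidate within 50 samples of an existing change point (or of the series
# boundaries 0 and n_timepoints) is rejected as trivial:
#
#   _is_trivial_match(480, [500], 1000)   # True  (within 50 samples of 500)
#   _is_trivial_match(300, [500], 1000)   # False (far from 0, 500 and 1000)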
def _segmentation(X, clasp, n_change_points=None, exclusion_radius=0.05):
"""Segments the time series by extracting change points.
Parameters
----------
X : array-like, shape=[n]
the univariate time series of length n to be segmented
clasp :
the transformer
n_change_points : int
the number of change points to find
exclusion_radius :
the exclusion zone
Returns
-------
Tuple (array-like, array-like, array-like):
(predicted_change_points, clasp_profiles, scores)
"""
period_size = clasp.window_length
queue = PriorityQueue()
# compute global clasp
profile = clasp.transform(X)
queue.put(
(
-np.max(profile),
[np.arange(X.shape[0]).tolist(), np.argmax(profile), profile],
)
)
profiles = []
change_points = []
scores = []
for idx in range(n_change_points):
# should not happen ... safety first
if queue.empty() is True:
break
# get profile with highest change point score
priority, (profile_range, change_point, full_profile) = queue.get()
change_points.append(change_point)
scores.append(-priority)
profiles.append(full_profile)
if idx == n_change_points - 1:
break
# create left and right local range
left_range = np.arange(profile_range[0], change_point).tolist()
right_range = np.arange(change_point, profile_range[-1]).tolist()
for ranges in [left_range, right_range]:
# create and enqueue left local profile
if len(ranges) > period_size:
profile = clasp.transform(X[ranges])
change_point =
|
np.argmax(profile)
|
numpy.argmax
|
import numpy as np
from scipy.integrate import odeint
def dpendulum(state, t=0, pivot_x=0.0, pivot_y=0.0, is_acceleration=False, l=1.0, g=9.8, d=0.0, h=1e-4):
"""Returns the dynamical equation of a non inertial pendulum
:param state: the state (angle, angular speed)
:param t: the time
:param l: the pendulum's length
:param g: the local acceleration of gravity
:param d: the damping constant
:param pivot_x: the horizontal position of the pivot
:type pivot_x: function of time or constant
:param pivot_y: the vertical position of the pivot
:type pivot_y: function of time or constant
:param is_acceleration: set to True to input pivot accelerations instead of positions
:type is_acceleration: boolean
:param h: numerical step for computing numerical derivatives
:returns: the time derivative (dydt)
"""
## Avoid wrong inputs
if (l <= 0.0): # Negative or zero lengths don't make sense
raise ValueError('Wrong pendulum length (l). Expected positive float')
if (d < 0.0): # A negative damping constant doesn't make sense
raise ValueError('Wrong damping constant (d). Expected zero or positive float')
if (h <= 0.0): # The numerical step for differentiation has to be positive
raise ValueError('Wrong numerical step (h). Expected a positive float')
## Flexible input interpretation
accel_x, accel_y = _format_accelerations(pivot_x, pivot_y, is_acceleration, h)
## Dynamical equation (see drafts/Derivation ni_pendulum.pdf)
th, w = state
dydt = [w,
-g/l * np.sin(th) - d * w - accel_x(t) * np.cos(th) / l - accel_y(t) *
|
np.sin(th)
|
numpy.sin
|
import numpy as np
from scipy.linalg import expm
from emlp.utils import Named, export
import jax
import jax.numpy as jnp
from emlp.reps.linear_operators import LazyShift, SwapMatrix, Rot90, LazyKron, LazyKronsum, LazyPerm, I
from jax import jit, vmap
def rel_err(A, B):
return jnp.mean(jnp.abs(A - B)) / (jnp.mean(jnp.abs(A)) + jnp.mean(jnp.abs(B)) + 1e-6)
@export
class Group(object, metaclass=Named):
""" Abstract Group Object which new groups should inherit from. """
lie_algebra = NotImplemented #: The continuous generators
discrete_generators = NotImplemented #: The discrete generators
z_scale = None # For scale noise for sampling elements
is_orthogonal = None
is_permutation = None
d = NotImplemented #: The dimension of the base representation
def __init__(self, *args, **kwargs):
# get the dimension of the base group representation
if self.d is NotImplemented:
if self.lie_algebra is not NotImplemented and len(self.lie_algebra):
self.d = self.lie_algebra[0].shape[-1]
if self.discrete_generators is not NotImplemented and len(self.discrete_generators):
self.d = self.discrete_generators[0].shape[-1]
if self.lie_algebra is NotImplemented:
self.lie_algebra = np.zeros((0, self.d, self.d))
if self.discrete_generators is NotImplemented:
self.discrete_generators = np.zeros((0, self.d, self.d))
self.args = args
if isinstance(self.lie_algebra, np.ndarray): self.lie_algebra = jax.device_put(self.lie_algebra)
if isinstance(self.discrete_generators, np.ndarray): self.discrete_generators = jax.device_put(
self.discrete_generators)
# Set orthogonal flag automatically if not specified
if self.is_permutation: self.is_orthogonal = True
if self.is_orthogonal is None:
self.is_orthogonal = True
if len(self.lie_algebra) != 0:
A_dense = jnp.stack([Ai @ jnp.eye(self.d) for Ai in self.lie_algebra])
self.is_orthogonal &= rel_err(-A_dense.transpose((0, 2, 1)), A_dense) < 1e-6
if len(self.discrete_generators) != 0:
h_dense = jnp.stack([hi @ jnp.eye(self.d) for hi in self.discrete_generators])
self.is_orthogonal &= rel_err(h_dense.transpose((0, 2, 1)) @ h_dense, jnp.eye(self.d)) < 1e-6
# Set regular flag automatically if not specified
if self.is_orthogonal and (self.is_permutation is None):
self.is_permutation = True
self.is_permutation &= (len(self.lie_algebra) == 0)  # no infinitesimal generators and all rows have one 1
if len(self.discrete_generators) != 0:
h_dense = jnp.stack([hi @ jnp.eye(self.d) for hi in self.discrete_generators])
self.is_permutation &= ((h_dense == 1).astype(int).sum(-1) == 1).all()
def exp(self, A):
""" Matrix exponential """
return expm(A)
def num_constraints(self):
return len(self.lie_algebra) + len(self.discrete_generators)
def sample(self):
"""Draw a sample from the group (not necessarily Haar measure)"""
return self.samples(1)[0]
def samples(self, N):
""" Draw N samples from the group (not necessarily Haar measure)"""
A_dense = jnp.stack([Ai @ jnp.eye(self.d) for Ai in self.lie_algebra]) if len(self.lie_algebra) else jnp.zeros(
(0, self.d, self.d))
h_dense = jnp.stack([hi @ jnp.eye(self.d) for hi in self.discrete_generators]) if len(
self.discrete_generators) else jnp.zeros((0, self.d, self.d))
z = np.random.randn(N, A_dense.shape[0])
if self.z_scale is not None:
z *= self.z_scale
k = np.random.randint(-5, 5, size=(N, h_dense.shape[0], 3))
jax_seed = np.random.randint(100)
return noise2samples(z, k, A_dense, h_dense, jax_seed)
def check_valid_group_elems(self, g):
return True
def __str__(self):
return repr(self)
def __repr__(self):
outstr = f"{self.__class__}"
if self.args:
outstr += '(' + ''.join(repr(arg) for arg in self.args) + ')'
return outstr
def __eq__(self, G2): # TODO: more permissive by checking that spans are equal?
return repr(self) == repr(G2)
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
""" For sorting purposes only """
return hash(self) < hash(other)
def __mul__(self, other):
return DirectProduct(self, other)
@jit
def matrix_power_simple(M, n):
out = jnp.eye(M.shape[-1])
body = lambda Mn: jax.lax.fori_loop(0, Mn[1], lambda i, g: Mn[0] @ g, out)
out = jax.lax.cond(n < 0, (jnp.linalg.inv(M), -n), body, (M, n), body)
return out
@jit
def noise2sample(z, ks, lie_algebra, discrete_generators, seed=0):
""" [zs (D,)] [ks (M,K)] [lie_algebra (D,d,d)] [discrete_generators (M,d,d)]
Here K is the number of repeats for a given discrete generator."""
g = jnp.eye(lie_algebra.shape[-1])
if lie_algebra.shape[0]:
A = (z[:, None, None] * lie_algebra).sum(0)
g = g @ jax.scipy.linalg.expm(A)
key = jax.random.PRNGKey(seed)
M, K = ks.shape
if M == 0: return g
for k in range(K): # multiple rounds of discrete generators
key, pkey = jax.random.split(key)
for i in jax.random.permutation(pkey, M): # Randomize the order of generators
g = g @ matrix_power_simple(discrete_generators[i],
ks[i, k]) # jnp.linalg.matrix_power(discrete_generators[i],ks[i])
return g
@jit
def noise2samples(zs, ks, lie_algebra, discrete_generators, seed=0):
return vmap(noise2sample, (0, 0, None, None, None), 0)(zs, ks, lie_algebra, discrete_generators, seed)
@export
class Trivial(Group):
""" The trivial group G={I} in n dimensions. If you want to see how the
inductive biases of EMLP perform without any symmetry, use Trivial(n)"""
def __init__(self, n):
self.d = n
super().__init__(n)
@export
class SO(Group):
""" The special orthogonal group SO(n) in n dimensions"""
def __init__(self, n):
self.lie_algebra = np.zeros(((n * (n - 1)) // 2, n, n))
k = 0
for i in range(n):
for j in range(i):
self.lie_algebra[k, i, j] = 1
self.lie_algebra[k, j, i] = -1
k += 1
super().__init__(n)
@export
class O(SO):
""" The Orthogonal group O(n) in n dimensions"""
def __init__(self, n):
self.discrete_generators = np.eye(n)[None]
self.discrete_generators[0, 0, 0] = -1
super().__init__(n)
@export
class C(Group):
""" The Cyclic group Ck in 2 dimensions"""
def __init__(self, k):
theta = 2 * np.pi / k
self.discrete_generators = np.zeros((1, 2, 2))
self.discrete_generators[0, :, :] = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])
super().__init__(k)
@export
class D(C):
""" The Dihedral group Dk in 2 dimensions"""
def __init__(self, k):
super().__init__(k)
self.discrete_generators = np.concatenate((self.discrete_generators, np.array([[[-1, 0], [0, 1]]])))
@export
class Scaling(Group):
""" The scaling group in n dimensions"""
def __init__(self, n):
self.lie_algebra = np.eye(n)[None]
super().__init__(n)
class Parity(Group): # """ The spacial parity group in 1+3 dimensions"""
discrete_generators = -np.eye(4)[None]
discrete_generators[0, 0, 0] = 1
class TimeReversal(Group): # """ The time reversal group in 1+3 dimensions"""
discrete_generators = np.eye(4)[None]
discrete_generators[0, 0, 0] = -1
@export
class SO13p(Group):
""" The component of Lorentz group connected to identity"""
lie_algebra = np.zeros((6, 4, 4))
lie_algebra[3:, 1:, 1:] = SO(3).lie_algebra
for i in range(3):
lie_algebra[i, 1 + i, 0] = lie_algebra[i, 0, 1 + i] = 1.
# Adjust variance for samples along boost generators. For equivariance checks
# the exps for high order tensors can get very large numbers
z_scale = np.array([.3, .3, .3, 1, 1, 1]) # can get rid of now
@export
class SO13(SO13p):
discrete_generators = -np.eye(4)[None]
@export
class O13(SO13p):
""" The full lorentz group (including Parity and Time reversal)"""
discrete_generators = np.eye(4)[None] + np.zeros((2, 1, 1))
discrete_generators[0] *= -1
discrete_generators[1, 0, 0] = -1
@export
class Lorentz(O13): pass
@export
class SO11p(Group):
""" The identity component of O(1,1) (Lorentz group in 1+1 dimensions)"""
lie_algebra = np.array([[0., 1.], [1., 0.]])[None]
@export
class O11(SO11p):
""" The Lorentz group O(1,1) in 1+1 dimensions """
discrete_generators = np.eye(2)[None] + np.zeros((2, 1, 1))
discrete_generators[0] *= -1
discrete_generators[1, 0, 0] = -1
@export
class Sp(Group):
""" Symplectic group Sp(m) in 2m dimensions (sometimes referred to
instead as Sp(2m) )"""
def __init__(self, m):
self.lie_algebra = np.zeros((m * (2 * m + 1), 2 * m, 2 * m))
k = 0
for i in range(m): # block diagonal elements
for j in range(m):
self.lie_algebra[k, i, j] = 1
self.lie_algebra[k, m + j, m + i] = -1
k += 1
for i in range(m):
for j in range(i + 1):
self.lie_algebra[k, m + i, j] = 1
self.lie_algebra[k, m + j, i] = 1
k += 1
self.lie_algebra[k, i, m + j] = 1
self.lie_algebra[k, j, m + i] = 1
k += 1
super().__init__(m)
@export
class Z(Group):
r""" The cyclic group Z_n (discrete translation group) of order n.
Features a regular base representation."""
def __init__(self, n):
self.discrete_generators = [LazyShift(n)]
super().__init__(n)
@export
class S(Group): # The permutation group
r""" The permutation group S_n with an n dimensional regular representation."""
def __init__(self, n):
# Here we choose n-1 generators consisting of swaps between the first element
# and every other element
perms = np.arange(n)[None] + np.zeros((n - 1, 1)).astype(int)
perms[:, 0] = np.arange(1, n)
perms[np.arange(n - 1), np.arange(1, n)[None]] = 0
self.discrete_generators = [LazyPerm(perm) for perm in perms]
super().__init__(n)
# We could also have chosen the 2-generator solution described in the paper, but
# adding superfluous extra generators can, surprisingly, sometimes actually *decrease*
# the runtime of the iterative Krylov solver by improving the conditioning
# of the constraint matrix
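# Concrete illustration (added): for n = 4 the construction above yields the three
# transpositions swapping element 0 with each other element,
#   [[1, 0, 2, 3],
#    [2, 1, 0, 3],
#    [3, 1, 2, 0]]
# which together generate all of S_4.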
@export
class SL(Group):
""" The special linear group SL(n) in n dimensions"""
def __init__(self, n):
self.lie_algebra = np.zeros((n * n - 1, n, n))
k = 0
for i in range(n):
for j in range(n):
if i == j: continue # handle diag elements separately
self.lie_algebra[k, i, j] = 1
k += 1
for l in range(n - 1):
self.lie_algebra[k, l, l] = 1
self.lie_algebra[k, -1, -1] = -1
k += 1
super().__init__(n)
@export
class GL(Group):
""" The general linear group GL(n) in n dimensions"""
def __init__(self, n):
self.lie_algebra = np.zeros((n * n, n, n))
k = 0
for i in range(n):
for j in range(n):
self.lie_algebra[k, i, j] = 1
k += 1
super().__init__(n)
@export
class U(Group): # Of dimension n^2
""" The unitary group U(n) in n dimensions (complex)"""
def __init__(self, n):
lie_algebra_real = np.zeros((n ** 2, n, n))
lie_algebra_imag = np.zeros((n ** 2, n, n))
k = 0
for i in range(n):
for j in range(i):
# Antisymmetric real generators
lie_algebra_real[k, i, j] = 1
lie_algebra_real[k, j, i] = -1
k += 1
# symmetric imaginary generators
lie_algebra_imag[k, i, j] = 1
lie_algebra_imag[k, j, i] = 1
k += 1
for i in range(n):
# diagonal imaginary generators
lie_algebra_imag[k, i, i] = 1
k += 1
self.lie_algebra = lie_algebra_real + lie_algebra_imag * 1j
super().__init__(n)
@export
class SU(Group): # Of dimension n^2-1
""" The special unitary group SU(n) in n dimensions (complex)"""
def __init__(self, n):
if n == 1: return Trivial(1)
lie_algebra_real = np.zeros((n ** 2 - 1, n, n))
lie_algebra_imag = np.zeros((n ** 2 - 1, n, n))
k = 0
for i in range(n):
for j in range(i):
# Antisymmetric real generators
lie_algebra_real[k, i, j] = 1
lie_algebra_real[k, j, i] = -1
k += 1
# symmetric imaginary generators
lie_algebra_imag[k, i, j] = 1
lie_algebra_imag[k, j, i] = 1
k += 1
for i in range(n - 1):
# diagonal traceless imaginary generators
lie_algebra_imag[k, i, i] = 1
for j in range(n):
if i == j: continue
lie_algebra_imag[k, j, j] = -1 / (n - 1)
k += 1
self.lie_algebra = lie_algebra_real + lie_algebra_imag * 1j
super().__init__(n)
@export
class Cube(Group):
""" A discrete version of SO(3) including all 90 degree rotations in 3d space
Implements a 6 dimensional representation on the faces of a cube"""
def __init__(self):
# order = np.arange(6) # []
Fperm = np.array([4, 1, 0, 3, 5, 2])
Lperm = np.array([3, 0, 2, 5, 4, 1])
self.discrete_generators = [LazyPerm(perm) for perm in [Fperm, Lperm]]
super().__init__()
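# Illustration (added, assuming the same U,F,R,B,L,D face ordering used by RubiksCube
# below): each generator permutes the 6 face labels, with position i of the rotated
# cube receiving the face at index Fperm[i] of the original ordering:
#
#   faces = np.array(['U', 'F', 'R', 'B', 'L', 'D'])
#   faces[np.array([4, 1, 0, 3, 5, 2])]   # -> ['L', 'F', 'U', 'B', 'D', 'R']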
def pad(permutation):
assert len(permutation) == 48
padded = np.zeros((6, 9)).astype(permutation.dtype)
padded[:, :4] = permutation.reshape(6, 8)[:, :4]
padded[:, 5:] = permutation.reshape(6, 8)[:, 4:]
return padded
def unpad(padded_perm):
return np.concatenate([padded_perm[:, :4], padded_perm[:, 5:]], -1).reshape(-1)
@export
class RubiksCube(Group): # 3x3 rubiks cube
r""" The Rubiks cube group G<S_48 consisting of all valid 3x3 Rubik's cube transformations.
Generated by a quarter turn about each of the faces."""
def __init__(self):
# Faces are ordered U,F,R,B,L,D (the net of the cube) # B
order = np.arange(48) # L U R
order_padded = pad(order) # include a center element # F
# Compute permutation for Up quarter turn # D
order_padded[0, :] = np.rot90(order_padded[0].reshape(3, 3), 1).reshape(9) # Rotate top face
FRBL = np.array([1, 2, 3, 4])
order_padded[FRBL, :3] = order_padded[np.roll(FRBL, 1), :3] # F <- L,R <- F,B <- R,L <- B
Uperm = unpad(order_padded)
# Now form all other generators by using full rotations of the cube by 90 clockwise about a given face
RotFront = pad(np.arange(48)) # rotate full cube so that Left face becomes Up, Up becomes Right, Right becomes Down, Down becomes Left
URDL = np.array([0, 2, 5, 4])
RotFront[URDL, :] = RotFront[
|
np.roll(URDL, 1)
|
numpy.roll
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
last mod 8/17/19
testing pieces of lidar processing code
tests segmentation and rectangle generation
"""
import numpy as np
from imageio import imread
from cv2 import imshow, waitKey, destroyWindow
from segment import lasers2use, segmentPoints, getGrndHeight
from plotStuff import base_image, plotRectangle, plotPoints, drawLine
from plotStuff import plotImgKitti, addRect2KittiImg, hsv2Rgba
from calibs import calib_extrinsics, calib_projections, view_by_day
from trackinginfo import sceneranges
from trackinginfo import calib_map_training as calib_map
from analyzeGT import readGroundTruthFileTracking
lidar_files = '/home/m2/Data/kitti/tracking_velodyne/training/{:04d}/{:06d}.bin'
img_files = '/home/m2/Data/kitti/tracking_image/training/{:04d}/{:06d}.png'
gt_files = '/home/m2/Data/kitti/tracking_gt/{:04d}.txt'
ground_files = '/home/m2/Data/kitti/tracking_ground/training/{:02d}f{:06d}.npy'
scene_idx = 9#1
startfileidx, endfileidx = sceneranges[scene_idx]
startfileidx = 0#368
endfileidx = 3#369
def clear(): destroyWindow('a')
calib_idx = calib_map[scene_idx]
calib_extrinsic = calib_extrinsics[calib_idx].copy()
calib_projection = calib_projections[calib_idx]
calib_intrinsic = calib_projection.dot(np.linalg.inv(calib_extrinsic))
calib_extrinsic[2,3] += 1.65
view_angle = view_by_day[calib_idx]
view = view_angle + .1
bevTM = np.array(((-320./30,0,640), (0,-320./30,320)))
bevTM = bevTM.dot(calib_extrinsic[[0,1,3],:]) # lidar xyz to pixels
with open(gt_files.format(scene_idx), 'r') as fd: gtfilestr = fd.read()
gt_all, gtdc = readGroundTruthFileTracking(gtfilestr, ('Car', 'Van'))
for fileidx in range(startfileidx, endfileidx):
img = imread(img_files.format(scene_idx, fileidx))[:,:,::-1]
img = (img.astype(float)*.9).astype(np.uint8)
img[:,:,1:] += 25
data = np.fromfile(lidar_files.format(scene_idx, fileidx),
dtype=np.float32).reshape((-1,4))[:,:3]
gt = gt_all[fileidx]
ground = np.load(ground_files.format(scene_idx, fileidx))
starts = np.where(np.diff(np.sign(data[:,1])) > 0)[0]
starts = np.concatenate(([0], starts+1, [len(data)]))
true_starts = np.append(np.diff(starts) > 2, [True])
starts = starts[true_starts]
assert starts.shape[0] > 55
lidar = []
for laser in lasers2use[::-1]:
pts = data[starts[laser]:starts[laser+1]]
include = pts[:,0] > 0
include &= abs(pts[:,1]) < pts[:,0]*view + 2.
include &= pts.dot(calib_extrinsic[2,:3])+calib_extrinsic[2,3] > -.3
pts = pts[include]
swap_idx = np.where(np.diff(np.arctan2(pts[:,1],pts[:,0]))<-.05)[0]
assert len(swap_idx) <= 1
if len(swap_idx) == 1:
swap_idx = swap_idx[0] + 1
pts = np.append(pts[swap_idx:], pts[:swap_idx], axis=0)
lidar.append(pts.copy())
plotimg1 = plotImgKitti(view_angle)
plotimg2 = plotImgKitti(view_angle)
# draw object
for gtobj in gt:
box = np.array(gtobj['box'])
if gtobj['scored']:
addRect2KittiImg(plotimg1, box, (0,0,210*.9,.5))
else:
addRect2KittiImg(plotimg1, box, (30*.9,80*.9,255*.9,.5))
# draw laser points
for lidaridx, pts in enumerate(lidar):
pts2plot = pts.dot(bevTM[:,:3].T) + bevTM[:,3]
include_scatter = ((pts2plot[:,0] > 0) & (pts2plot[:,0] < 639) &
(pts2plot[:,1] > 0) & (pts2plot[:,1] < 639))
pts2plot = pts2plot[include_scatter].astype(int)
plotPoints(plotimg1, pts2plot[:,0], pts2plot[:,1], ((0,0),), (0.,0.,0.,1.))
ptsheight = pts.dot(calib_extrinsic[2,:3])+calib_extrinsic[2,3]
groundptangles = np.arctan2(pts[ptsheight < .1,1], pts[ptsheight < .1,0])
groundptangles = np.sort(groundptangles)  # ndarray.sort() sorts in place and returns None; keep a sorted copy instead
segments = segmentPoints(pts)
for segidx, seg in enumerate(segments):
segmiddle =
|
np.mean(seg,axis=0)
|
numpy.mean
|
from random import sample
import cv2
import gym
import numpy as np
from numpy.linalg import norm
from sortedcontainers import SortedDict
from growspace.defaults import (
DEFAULT_RES,
LIGHT_DIFFUSION,
FIRST_BRANCH_HEIGHT,
LIGHT_WIDTH,
EPSILON,
BRANCH_THICCNESS,
POINT_RADIUS,
LIGHT_COLOR,
POINT_COLOR,
PLANT_COLOR,
LIGHT_STEP,
MAX_GROW_DIST,
MIN_GROW_DIST,
BRANCH_LENGTH,
MAX_BRANCHES,
)
from growspace.utils import ir
class GrowSpaceSortedEnv(gym.Env):
def __init__(self, width=DEFAULT_RES, height=DEFAULT_RES, light_dif=LIGHT_DIFFUSION):
self.width = width
self.height = height
self.seed()
self.light_dif = light_dif
self.action_space = gym.spaces.Discrete(3) # L, R, keep of light paddle
self.observation_space = gym.spaces.Box(0, 255, shape=(height, width, 3), dtype=np.uint8)
self.steps = 0
# data format for branches: they are indexed/sorted by x_end position and each
# key has a list of values that are [y_end, x_start, y_start, children]
self.branches = SortedDict()
self.points = SortedDict()
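# Hedged illustration (added) of the branch storage format described above, using
# made-up coordinates: the key is x_end and the value is [y_end, x_start, y_start, children]:
#
#   example = SortedDict()
#   example[0.52] = [0.35, 0.50, 0.20, 0]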
def seed(self, seed=None):
return [np.random.seed(seed)]
def light_move_R(self):
if np.around(self.light_left, 1) >= 1 - LIGHT_WIDTH - LIGHT_STEP: # limit of coordinates
self.light_left = 1 - LIGHT_WIDTH # stay put
else:
self.light_left += LIGHT_STEP # move by .1 right
def light_move_L(self):
if np.around(self.light_left, 1) <= LIGHT_STEP: # limit of coordinates
self.light_left = 0
else:
self.light_left -= LIGHT_STEP # move by .1 left
def find_closest_branch(self, point_x, branches):
branch_names = []
branch_distances = []
# prefilter by x
if len(branches) > MAX_BRANCHES:
branches_trimmed = sample(branches, MAX_BRANCHES)
else:
branches_trimmed = branches
for branch in branches_trimmed:
dist_x = branch - point_x
if np.abs(dist_x) <= MAX_GROW_DIST:
# we got a potential candidate - now let's check Y
dist_y = self.branches[branch][0] - self.points[point_x]
if np.abs(dist_y) <= MAX_GROW_DIST:
dist = norm((dist_x, dist_y))
if dist <= MAX_GROW_DIST:
branch_names.append(branch)
branch_distances.append(dist)
if len(branch_distances) == 0:
return None, None
argmin =
|
np.argmin(branch_distances)
|
numpy.argmin
|
import numpy as np
import pytest
import matplotlib
matplotlib.use('agg')
from magnet.utils.statistical import find_outliers, smoothen
class TestRemoveOutlier:
def test_data_can_be_1d(self):
find_outliers(np.zeros(5))
def test_data_can_be_linear(self):
find_outliers(np.linspace(0, 5, 100))
def test_cannot_send_none(self):
with pytest.raises(Exception):
find_outliers(None)
def test_cannot_send_empty(self):
with pytest.raises(ValueError):
find_outliers([])
with pytest.raises(ValueError):
find_outliers(np.array([]))
def test_threshold_not_negative(self):
with pytest.raises(ValueError):
find_outliers(np.zeros(5), -1)
def test_threshold_not_none(self):
with pytest.raises(TypeError):
find_outliers(np.zeros(5), None)
def test_threshold_not_inf(self):
with pytest.raises(ValueError):
find_outliers(np.zeros(5), np.inf)
def test_window_fraction_is_fraction(self):
window_fraction = 2
with pytest.raises(ValueError):
find_outliers(
|
np.zeros(5)
|
numpy.zeros
|
#
"""
Useful python tools for working with the MIRI MRS.
This contains cdp8b specific code.
This version of the tools uses the JWST pipeline implementation
of the distortion solution to do the transformations,
and hooks into offline versions of the CRDS reference
files contained within this github repository.
Convert JWST v2,v3 locations (in arcsec) to MIRI MRS SCA x,y pixel locations.
Note that the pipeline uses a 0-indexed detector pixel (1032x1024) convention while
SIAF uses a 1-indexed detector pixel convention. The CDP files define
the origin such that (0,0) is the middle of the lower-left pixel
(1032x1024) - note that this is a CHANGE of convention from earlier CDP!
Author: <NAME> (<EMAIL>)
REVISION HISTORY:
10-Oct-2018 Written by <NAME> (<EMAIL>)
"""
import os as os
import numpy as np
import pdb as pdb
from astropy.modeling import models
from asdf import AsdfFile
from jwst import datamodels
from jwst.assign_wcs import miri
#############################
# Return the tools version
def version():
return 'cdp8b'
#############################
# Set the relevant CRDS distortion file based on channel (e.g., '1A')
def get_fitsreffile(channel):
rootdir=os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
rootdir=os.path.join(rootdir,'data/crds/')
wavefile=rootdir+'jwst_miri_mrs_wavelengthrange_cdp8b.asdf'
# Channel should be of the form (e.g.) '1A', '3C', etc
# See https://jwst-crds.stsci.edu//display_result/52cef902-ad77-4792-9964-d26a0a8a96a8
if ((channel == '1A')or(channel == '2A')):
distfile=rootdir+'jwst_miri_mrs12A_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs12A_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs12A_specwcs_cdp8b.asdf'
elif ((channel == '3A')or(channel == '4A')):
distfile=rootdir+'jwst_miri_mrs34A_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs34A_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs34A_specwcs_cdp8b.asdf'
elif ((channel == '1B')or(channel == '2B')):
distfile=rootdir+'jwst_miri_mrs12B_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs12B_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs12B_specwcs_cdp8b.asdf'
elif ((channel == '3B')or(channel == '4B')):
distfile=rootdir+'jwst_miri_mrs34B_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs34B_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs34B_specwcs_cdp8b.asdf'
elif ((channel == '1C')or(channel == '2C')):
distfile=rootdir+'jwst_miri_mrs12C_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs12C_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs12C_specwcs_cdp8b.asdf'
elif ((channel == '3C')or(channel == '4C')):
distfile=rootdir+'jwst_miri_mrs34C_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs34C_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs34C_specwcs_cdp8b.asdf'
else:
print('Failure!')
refs={'distortion': distfile, 'regions':regfile, 'specwcs':specfile, 'wavelengthrange':wavefile}
return refs
#############################
# Convenience function to turn '1A' type name into '12' and 'SHORT' type names
def bandchan(channel):
# Channel should be of the form (e.g.) '1A', '3C', etc
if ((channel == '1A')or(channel == '2A')):
newband='SHORT'
newchannel='12'
elif ((channel == '3A')or(channel == '4A')):
newband='SHORT'
newchannel='34'
elif ((channel == '1B')or(channel == '2B')):
newband='MEDIUM'
newchannel='12'
elif ((channel == '3B')or(channel == '4B')):
newband='MEDIUM'
newchannel='34'
elif ((channel == '1C')or(channel == '2C')):
newband='LONG'
newchannel='12'
elif ((channel == '3C')or(channel == '4C')):
newband='LONG'
newchannel='34'
else:
newband='FAIL'
newchannel='FAIL'
return newband,newchannel
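# Usage sketch (added): convert a '1A'-style name into the band/channel pair that the
# pipeline expects:
#
#   bandchan('1A')   # -> ('SHORT', '12')
#   bandchan('3C')   # -> ('LONG', '34')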
#############################
# Convenience function to turn '12A' type name into '1A' and '2A' type names
def channel(detband):
if (detband == '12A'):
ch1='1A'
ch2='2A'
elif (detband == '12B'):
ch1='1B'
ch2='2B'
elif (detband == '12C'):
ch1='1C'
ch2='2C'
elif (detband == '34A'):
ch1='3A'
ch2='4A'
elif (detband == '34B'):
ch1='3B'
ch2='4B'
elif (detband == '34C'):
ch1='3C'
ch2='4C'
else:
ch1='FAIL'
ch2='FAIL'
return ch1,ch2
#############################
# Convenience function to return the rough middle wavelength of a given channel
# Note that this ISNT exact, just some valid value
def midwave(channel):
if (channel == '1A'):
thewave=5.32
elif (channel == '1B'):
thewave=6.145
elif (channel == '1C'):
thewave=7.09
elif (channel == '2A'):
thewave=8.135
elif (channel == '2B'):
thewave=9.395
elif (channel == '2C'):
thewave=10.85
elif (channel == '3A'):
thewave=12.505
elif (channel == '3B'):
thewave=14.5
elif (channel == '3C'):
thewave=16.745
elif (channel == '4A'):
thewave=19.29
elif (channel == '4B'):
thewave=22.47
elif (channel == '4C'):
thewave=26.2
return thewave
#############################
# Convenience function to return model distortion object
# for the x,y to alpha,beta,lam transform
def xytoablmodel(channel,**kwargs):
# Construct the reference data model in general JWST imager type
input_model = datamodels.ImageModel()
# Convert input of type '1A' into the band and channel that pipeline needs
theband,thechan=bandchan(channel)
# Set the filter in the data model meta header
input_model.meta.instrument.band = theband
input_model.meta.instrument.channel = thechan
# If passed input refs keyword, unpack and use it
if ('refs' in kwargs):
therefs=kwargs['refs']
# Otherwise use default reference files
else:
therefs=get_fitsreffile(channel)
distortion = miri.detector_to_abl(input_model, therefs)
# Return the distortion object that can then be queried
return distortion
#############################
# Convenience function to return model distortion object
# for the alpha,beta to v2,v3 transform
def abtov2v3model(channel,**kwargs):
# Construct the reference data model in general JWST imager type
input_model = datamodels.ImageModel()
# Convert input of type '1A' into the band and channel that pipeline needs
theband,thechan=bandchan(channel)
# Set the filter in the data model meta header
input_model.meta.instrument.band = theband
input_model.meta.instrument.channel = thechan
# If passed input refs keyword, unpack and use it
if ('refs' in kwargs):
therefs=kwargs['refs']
# Otherwise use default reference files
else:
therefs=get_fitsreffile(channel)
# The pipeline transform actually uses the triple
# (alpha,beta,lambda) -> (v2,v3,lambda)
basedistortion = miri.abl_to_v2v3l(input_model, therefs)
distortion = basedistortion
# Therefore we need to hack a reasonable wavelength onto our input, run transform,
# then hack it back off again
thewave=midwave(channel)
# Duplicate the beta value at first, then replace with wavelength value
map=models.Mapping((0,1,1)) | models.Identity(1) & models.Identity(1) & models.Const1D(thewave)
map.inverse=models.Mapping((0,1),n_inputs=3)
allmap= map | distortion | map.inverse
allmap.inverse= map | distortion.inverse | map.inverse
# Return the distortion object that can then be queried
return allmap
#############################
# MRS test reference data
# Provided by Polychronis 5/9/19
mrs_ref_data = {
'1A': {'x': np.array([76.0,354.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([0.05765538365149925,-0.017032619150995743]),
'beta': np.array([-0.17721014379699995,-1.240471006579]),
'lam': np.array([5.348546577257886,5.5136420569934925]),
'v2': np.array([-503.57285226785064,-503.4979806620663]),
'v3': np.array([-318.5749892859028,-317.5090073056335]),
},
'1B': {'x': np.array([76.0,355.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([-0.012990737471741731,0.10766447914943456]),
'beta': np.array([-0.17720417669099997,-1.240429236837]),
'lam': np.array([6.168310398808807,6.358007642348213]),
'v2': np.array([-503.643100332753,-503.37069816112813]),
'v3': np.array([-318.72773306477103,-317.6938248759762]),
},
'1C': {'x': np.array([78.0,356.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([0.02871804339196271,-0.028315822861031847]),
'beta': np.array([-0.17720218765499984,-1.240415313585]),
'lam': np.array([7.006608159574103,7.218455147089075]),
'v2': np.array([-503.5598371896608,-503.45975848303885]),
'v3': np.array([-318.4367657801553,-317.3779485524358]),
},
'2A': {'x': np.array([574.0,719.0]),
'y': np.array([512.0,700.0]),
's':
|
np.array([10,4])
|
numpy.array
|
#!/usr/bin/env python3
import sys
import numpy as np
from interfaceBuilder import utils as ut
def len2mat(vec, ang):
"""
Transforms cell lengths and angles to cell vectors.
vec in order [a, b, c]
ang in order [alpha, beta, gamma], (conventionally defined)
"""
"""Round it to have e.g. cos(90) be 0 and not XE-17"""
prec = 10
"""M = [A, B, C]"""
mat = np.zeros((3, 3))
"""A = [ax; 0; 0]"""
mat[0, 0] = vec[0]
"""B = [bx; by; 0]"""
mat[0, 1] = vec[1] * np.round(np.cos(np.deg2rad(ang[2])), prec)
mat[1, 1] = vec[1] * np.round(np.sin(np.deg2rad(ang[2])), prec)
"""C = [cx; cy; cz]"""
mat[0, 2] = vec[2] * np.round(np.cos(np.deg2rad(ang[1])), prec)
mat[1, 2] = vec[2] * np.round((np.cos(np.deg2rad(ang[0])) - \
np.cos(np.deg2rad(ang[2])) * \
np.cos(np.deg2rad(ang[1]))) / \
np.sin(np.deg2rad(ang[2])), prec)
mat[2, 2] = np.round(np.sqrt(vec[2]**2 - mat[0, 2]**2 - mat[1, 2]**2), prec)
return mat
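# Minimal sketch (added): for an orthorhombic cell (all angles 90 degrees) the result
# reduces to a diagonal matrix of the cell lengths:
#
#   len2mat([3.0, 4.0, 5.0], [90.0, 90.0, 90.0])
#   # -> [[3, 0, 0],
#   #     [0, 4, 0],
#   #     [0, 0, 5]]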
def mat2LammpsBox(mat):
"""Function for transforming a set of basis vectors to
a lammps simulation box"""
lx = mat[0, 0]
ly = mat[1, 1]
lz = mat[2, 2]
xy = mat[0, 1]
xz = mat[0, 2]
yz = mat[1, 2]
x_lo_b = np.min([0, lx]) + np.min([0, xy, xz, xy + xz])
x_hi_b = np.max([0, lx]) + np.max([0, xy, xz, xy + xz])
y_lo_b = np.min([0, ly]) + np.min([0, yz])
y_hi_b = np.max([0, ly]) + np.max([0, yz])
z_lo_b = np.min([0, lz])
z_hi_b = np.max([0, lz])
box = [x_lo_b, x_hi_b, y_lo_b, y_hi_b, z_lo_b, z_hi_b]
return box
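# Hedged example (added): for the same orthorhombic cell the LAMMPS box bounds are
# simply [0, lx, 0, ly, 0, lz]:
#
#   mat2LammpsBox(len2mat([3.0, 4.0, 5.0], [90.0, 90.0, 90.0]))
#   # -> [0.0, 3.0, 0.0, 4.0, 0.0, 5.0]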
def writeKPTS(cell, density = 2, N1 = None, N2 = None, N3 = None, version = "Gamma",\
S1 = 0, S2 = 0, S3 = 0, verbose = 1):
"""Function to write a vasp KPOINTS file based on cell parameters"""
"""Calculate the x-y area of the cell"""
area = np.linalg.norm(np.cross(cell[:, 0], cell[:, 1]))
"""Forms a vasp reciprical cell, without 2*pi and not Transposed
vs. the deafult cartesian cell used in the structure class"""
r_cell = np.linalg.inv(cell)
r_area = np.linalg.norm(np.cross(r_cell[0, :], r_cell[1, :]))
total_kpts = np.ceil(r_area * density)
"""The rows make up the lattice vectors"""
r_norm = np.linalg.norm(r_cell, axis = 1)
if version.lower()[0] == "g":
cmp = 0
elif version.lower()[0] == "m":
cmp = 1
if N1 is None:
N1 = np.ceil(r_norm[0] * density)
if N1 % 2 == cmp:
N1 += 1
if N2 is None:
N2 = np.ceil(r_norm[1] * density)
if N2 % 2 == cmp:
N2 += 1
if N3 is None:
N3 = np.ceil(r_norm[2] * density)
if N3 % 2 == cmp:
N3 += 1
if verbose > 0:
string = "Writing kpts as %s: %i %i %i" % (version, N1, N2, N3)
ut.infoPrint(string)
with open("KPOINTS", 'w') as k:
"""Comment line"""
k.write("Generated using file_io\n")
"""Auto generation"""
k.write("%i\n" % 0)
"""Type"""
k.write("%s\n" % version)
"""Write KPOINTS"""
k.write("%2i %2i %2i\n" % (N1, N2, N3))
"""Write shift"""
k.write("%2i %2i %2i\n" % (S1, S2, S3))
def readEON(filename, verbose = 1):
"""Load EON geometry file"""
if verbose > 0:
string = "Reading file: %s, format: EON" % filename
ut.infoPrint(string)
"""Lines to skip"""
skip = [0, 1, 4, 5, 10]
"""Open and read the file"""
with open(filename, 'r') as f:
for i, line in enumerate(f):
if i in skip:
continue
if i == 2:
"""Read cell lengths"""
vec = np.array([float(x) for x in line.split()])
elif i == 3:
"""Angles in order alpha (yz), beta (xz), gamma (xy)"""
ang = np.array([float(x) for x in line.split()[::-1]])
elif i == 6:
"""Nr of diffefrent species"""
M = int(line)
elif i == 7:
"""Number of each species"""
N = np.array([int(x) for x in line.split()], dtype = int)
Nt = N.cumsum()
n = 0; nt = 0
pos = np.zeros((Nt[-1], 3))
idx = np.zeros(Nt[-1])
mass = np.ones(Nt[-1])
elif i == 8:
"""Mass"""
masses = np.array([float(x) for x in line.split()])
elif i == 9:
"""Atom type of species 1"""
t = np.chararray(Nt[-1], itemsize = 2)
t[:Nt[nt]] = line
mass[:Nt[nt]] = masses[nt]
elif i > 10:
l = line.split()
if len(l) == 1:
t[Nt[nt] : Nt[nt + 1]] = line
mass[Nt[nt] : Nt[nt + 1]] = masses[nt + 1]
nt += 1
elif len(l) == 4:
continue
else:
pos[n, :] = [float(x) for x in l[:3]]
idx[n] = float(l[4])
n += 1
"""Convert lengths and angles to cell vectors"""
mat = len2mat(vec, ang)
return mat, pos, t, idx, mass
def writeEON(filename, atoms, sd = False, verbose = 1):
"""Write EON pos.con file
sd = bool, use atoms.frozen parameter to freeze atoms
which have any of the 3 dimentions specified as frozen
"""
if verbose > 0:
string = "Writing file: %s, format: EON" % filename
ut.infoPrint(string)
"""Change to cartesian coordinates"""
atoms.dir2car()
"""Get cell lengths from the lattice vectors"""
a = np.linalg.norm(atoms.cell[:, 0])
b = np.linalg.norm(atoms.cell[:, 1])
c = np.linalg.norm(atoms.cell[:, 2])
"""Get the cell angles (in degrees) from the lattice vectors"""
alpha = np.rad2deg(np.arccos(
|
np.dot(atoms.cell[:, 1], atoms.cell[:, 2])
|
numpy.dot
|
import numpy as np
import sys
import os
import pickle
import random
from datasets.gigadataset import GigaDataset, GigaDataset_gpu
import torch
def param_size(model):
""" Compute parameter size in MB """
n_params = sum(
np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head'))
return n_params / 1024. / 1024.
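# Rough usage sketch (added; assumes torch.nn is available). Note the function divides
# the raw parameter count by 1024**2, i.e. it reports "Mi" parameters, not bytes:
#
#   import torch.nn as nn
#   model = nn.Linear(128, 10)     # 128*10 + 10 = 1290 parameters
#   param_size(model)              # ~0.00123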
def blockPrint():
sys.stdout = open(os.devnull, 'w')
def enablePrint():
sys.stdout = sys.__stdout__
def get_data_eeg(args,fold_idx):
seed = args.seed
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
x_data = np.load(args.data_root + '/x_data_raw.npy', mmap_mode='r')
y_data = np.load(args.data_root + '/y_data_raw.npy')
in_chans = x_data.shape[2]
input_time_length = x_data.shape[3]
x_data = x_data.reshape(108, -1, in_chans, input_time_length)
y_data = y_data.reshape(108, -1)
args.n_class = len(np.unique(y_data))
args.n_ch = x_data.shape[2]
args.n_time = x_data.shape[3]
datasets = []
for s_id in range(0, 108):
datasets.append(GigaDataset([
|
np.expand_dims(x_data[s_id, :, :, :], axis=1)
|
numpy.expand_dims
|
#!/usr/bin/env /usr/bin/python3
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
import argparse
import pickle
from scipy.optimize import curve_fit
################################################################################
#===============================================================================
# analyse_spherical_eigen.py
#===============================================================================
################################################################################
def F(x, x0, a, b):
return np.where( (x-x0) <= b/a, 0, a-b/(x-x0) )
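# Illustration (added): F is a saturating profile that is zero up to x0 + b/a and
# approaches the plateau a for large x, so it can be fed to curve_fit directly.
# A hedged usage sketch with synthetic data:
#
#   xdata = np.linspace(0, 100, 200)
#   ydata = F(xdata, 5.0, 2.0, 10.0) + np.random.normal(0, 0.05, xdata.size)
#   popt, pcov = curve_fit(F, xdata, ydata, p0=[1.0, 1.0, 1.0])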
def plot_data (data_dir = Path('../sphere_eigenvalues'),
output_file = Path('../plots/flat_eigen_plot.svg'),
return_ax = False):
fig, ax = plt.subplots(1)
n_params =
|
np.array([24, 32, 48, 64, 128])
|
numpy.array
|
# This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the orbital.py PyRate module.
"""
import os
import shutil
import tempfile
import unittest
from itertools import product
from numpy import empty, dot, concatenate, float32
from numpy import nan, isnan, array
from os.path import join
import numpy as np
from numpy.linalg import pinv, inv
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.linalg import lstsq
from .common import small5_mock_ifgs, MockIfg
from pyrate.core import algorithm, config as cf
from pyrate.core.orbital import INDEPENDENT_METHOD, NETWORK_METHOD, PLANAR, \
QUADRATIC, PART_CUBIC
from pyrate.core.orbital import OrbitalError, _orbital_correction
from pyrate.core.orbital import get_design_matrix, get_network_design_matrix
from pyrate.core.orbital import _get_num_params, remove_orbital_error
from pyrate.core.shared import Ifg
from pyrate.core.shared import nanmedian
from tests.common import TEST_CONF_ROIPAC, IFMS16
from tests.common import SML_TEST_LEGACY_ORBITAL_DIR
from tests.common import SML_TEST_TIF, small_data_setup
from tests.common import small_ifg_file_list
#TODO: Purpose of this variable? Degrees are 1, 2 and 3 respectively
DEG_LOOKUP = {
2: PLANAR,
5: QUADRATIC,
6: PART_CUBIC}
NUM_COEF_LOOKUP = {
PLANAR: 2,
QUADRATIC: 5,
PART_CUBIC: 6}
class SingleDesignMatrixTests(unittest.TestCase):
"""
Tests to verify correctness of basic planar & quadratic design matrices (DMs).
This class serves two purposes: first, ensuring the independent method DMs are
produced correctly; secondly, these individual DMs are subsets of the larger
DM 'grid' required for the networked orbital correction method.
"""
def setUp(self):
# faked cell sizes
self.xs = 0.75
self.ys = 0.8
self.ifg = Ifg(join(SML_TEST_TIF, 'geo_060619-061002_unw.tif'))
self.ifg.open()
self.ifg.nodata_value = 0
self.m = MockIfg(self.ifg, 3, 4)
self.m.x_size = self.xs
self.m.y_size = self.ys
# tests for planar model
def test_create_planar_dm(self):
offset = False
act = get_design_matrix(self.m, PLANAR, offset)
self.assertEqual(act.shape, (self.m.num_cells, 2))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PLANAR, offset)
assert_array_equal(act, exp)
def test_create_planar_dm_offsets(self):
offset = True
act = get_design_matrix(self.m, PLANAR, offset)
self.assertEqual(act.shape, (self.m.num_cells, 3))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PLANAR, offset)
assert_array_almost_equal(act, exp)
# tests for quadratic model
def test_create_quadratic_dm(self):
offset = False
act = get_design_matrix(self.m, QUADRATIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 5))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, QUADRATIC, offset)
assert_array_equal(act, exp)
def test_create_quadratic_dm_offsets(self):
offset = True
act = get_design_matrix(self.m, QUADRATIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 6))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, QUADRATIC, offset)
assert_array_equal(act, exp)
# tests for partial cubic model
def test_create_partcubic_dm(self):
offset = False
act = get_design_matrix(self.m, PART_CUBIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 6))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PART_CUBIC, offset)
assert_array_equal(act, exp)
def test_create_partcubic_dm_offsets(self):
offset = True
act = get_design_matrix(self.m, PART_CUBIC, offset)
self.assertEqual(act.shape, (self.m.num_cells, 7))
exp = unittest_dm(self.m, INDEPENDENT_METHOD, PART_CUBIC, offset)
assert_array_equal(act, exp)
# tests for unittest_dm() assuming network method
def test_create_planar_dm_network(self):
# networked method planar version should not have offsets col
ncol_exp = 2
exp = unittest_dm(self.m, NETWORK_METHOD, PLANAR, False)
self.assertEqual(exp.shape, (self.m.num_cells, ncol_exp))
exp2 = unittest_dm(self.m, NETWORK_METHOD, PLANAR, True)
self.assertEqual(exp2.shape, (self.m.num_cells, ncol_exp))
assert_array_equal(exp, exp2)
def test_create_quadratic_dm_network(self):
# quadratic version with networked method does not have offsets col
ncol_exp = 5
exp = unittest_dm(self.m, NETWORK_METHOD, QUADRATIC, False)
self.assertEqual(exp.shape, (self.m.num_cells, ncol_exp))
exp2 = unittest_dm(self.m, NETWORK_METHOD, QUADRATIC, True)
self.assertEqual(exp2.shape, (self.m.num_cells, ncol_exp))
assert_array_equal(exp, exp2)
def test_create_partcubic_dm_network(self):
# partial cubic version with networked method does not have offsets col
ncol_exp = 6
exp = unittest_dm(self.m, NETWORK_METHOD, PART_CUBIC, False)
self.assertEqual(exp.shape, (self.m.num_cells, ncol_exp))
exp2 = unittest_dm(self.m, NETWORK_METHOD, PART_CUBIC, True)
self.assertEqual(exp2.shape, (self.m.num_cells, ncol_exp))
assert_array_equal(exp, exp2)
class IndependentCorrectionTests(unittest.TestCase):
"""Test cases for the orbital correction component of PyRate."""
def setUp(self):
self.ifgs = small5_mock_ifgs()
_add_nodata(self.ifgs)
for ifg in self.ifgs:
ifg.x_size = 90.0
ifg.y_size = 89.5
ifg.open()
def alt_orbital_correction(self, ifg, deg, offset):
data = ifg.phase_data.reshape(ifg.num_cells)
dm = get_design_matrix(ifg, deg, offset)[~isnan(data)]
fd = data[~isnan(data)].reshape((dm.shape[0], 1))
dmt = dm.T
invNbb = inv(dmt.dot(dm))
orbparams = invNbb.dot(dmt.dot(fd))
alt_params = lstsq(dm, fd)[0]
# FIXME: precision
assert_array_almost_equal(orbparams, alt_params, decimal=2)
dm2 = get_design_matrix(ifg, deg, offset)
if offset:
fullorb = np.reshape(np.dot(dm2[:, :-1], orbparams[:-1]),
ifg.phase_data.shape)
else:
fullorb = np.reshape(np.dot(dm2, orbparams), ifg.phase_data.shape)
offset_removal = nanmedian(
np.reshape(ifg.phase_data - fullorb, (1, -1)))
fwd_correction = fullorb - offset_removal
# ifg.phase_data -= (fullorb - offset_removal)
return ifg.phase_data - fwd_correction
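# Note (explanatory, not part of the original tests): alt_orbital_correction()
# above solves the same least-squares problem two ways as a cross-check. The
# normal-equations route, inv(A.T @ A) @ A.T @ d, is algebraically the ordinary
# least-squares solution, so it should match lstsq(A, d)[0] up to numerical
# precision, which is what the decimal=2 assertion (and the FIXME) is about.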
def check_correction(self, degree, method, offset, decimal=2):
orig = array([c.phase_data.copy() for c in self.ifgs])
exp = [self.alt_orbital_correction(i, degree, offset) for i in self.ifgs]
params = dict()
params[cf.ORBITAL_FIT_METHOD] = method
params[cf.ORBITAL_FIT_DEGREE] = degree
params[cf.PARALLEL] = False
params[cf.NO_DATA_VALUE] = 0
params[cf.NAN_CONVERSION] = False
for i in self.ifgs:
i.mm_converted = True
_orbital_correction(self.ifgs, params, None, offset)
corrected = array([c.phase_data for c in self.ifgs])
self.assertFalse((orig == corrected).all())
self.check_results(self.ifgs, orig) # test shape, data is non zero
# FIXME: is decimal=2 close enough?
for i, (e, a) in enumerate(zip(exp, corrected)):
assert_array_almost_equal(e, a, decimal=decimal)
def check_results(self, ifgs, corrections):
"""Helper method for result verification"""
for i, c in zip(ifgs, corrections):
ys, xs = c.shape
self.assertEqual(i.nrows, ys)
self.assertEqual(i.ncols, xs)
# ensure there is real data
self.assertFalse(isnan(i.phase_data).all())
self.assertFalse(isnan(c).all())
self.assertTrue(c.ptp() != 0) # ensure range of values in grid
def test_independent_correction_planar(self):
self.check_correction(PLANAR, INDEPENDENT_METHOD, False)
def test_independent_correction_planar_offsets(self):
self.check_correction(PLANAR, INDEPENDENT_METHOD, True)
def test_independent_correction_quadratic(self):
self.check_correction(QUADRATIC, INDEPENDENT_METHOD, False)
def test_independent_correction_quadratic_offsets(self):
self.check_correction(QUADRATIC, INDEPENDENT_METHOD, True)
def test_independent_correction_partcubic(self):
self.check_correction(PART_CUBIC, INDEPENDENT_METHOD, False)
def test_independent_correction_partcubic_offsets(self):
self.check_correction(PART_CUBIC, INDEPENDENT_METHOD, True, decimal=1)
class ErrorTests(unittest.TestCase):
"""Tests for the networked correction method"""
def test_invalid_ifgs_arg(self):
# min requirement is 1 ifg, can still subtract one epoch from the other
self.assertRaises(OrbitalError, get_network_design_matrix, [], PLANAR, True)
def test_invalid_degree_arg(self):
# test failure of a few different args for 'degree'
ifgs = small5_mock_ifgs()
for d in range(-5, 1):
self.assertRaises(OrbitalError, get_network_design_matrix, ifgs, d, True)
for d in range(4, 7):
self.assertRaises(OrbitalError, get_network_design_matrix, ifgs, d, True)
def test_invalid_method(self):
# test failure of a few different args for 'method'
ifgs = small5_mock_ifgs()
params = dict()
params[cf.ORBITAL_FIT_DEGREE] = PLANAR
params[cf.PARALLEL] = False
for m in [None, 5, -1, -3, 45.8]:
params[cf.ORBITAL_FIT_METHOD] = m
self.assertRaises(OrbitalError, _orbital_correction, ifgs, params, None)
def test_multilooked_ifgs_arg(self):
# check some bad args for network method with multilooked ifgs
ifgs = small5_mock_ifgs()
args = [[None, None, None, None, None], ["X"] * 5]
params = dict()
params[cf.ORBITAL_FIT_METHOD] = NETWORK_METHOD
params[cf.PARALLEL] = False
params[cf.ORBITAL_FIT_DEGREE] = PLANAR
for a in args:
args = (ifgs, params, a)
self.assertRaises(OrbitalError, _orbital_correction, *args)
# ensure failure if # ifgs doesn't match # mlooked ifgs
args = (ifgs, params, ifgs[:4])
self.assertRaises(OrbitalError, _orbital_correction, *args)
class NetworkDesignMatrixTests(unittest.TestCase):
"""Contains tests verifying creation of sparse network design matrix."""
def setUp(self):
self.ifgs = small5_mock_ifgs()
_add_nodata(self.ifgs)
self.nifgs = len(self.ifgs)
self.ncells = self.ifgs[0].num_cells
self.date_ids = get_date_ids(self.ifgs)
self.nepochs = len(self.date_ids)
assert self.nepochs == 6
for ifg in self.ifgs:
ifg.X_SIZE = 90.0
ifg.Y_SIZE = 89.5
def test_planar_network_dm(self):
ncoef = 2
offset = False
act = get_network_design_matrix(self.ifgs, PLANAR, offset)
self.assertEqual(act.shape, (self.ncells * self.nifgs, ncoef * self.nepochs))
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_planar_network_dm_offset(self):
ncoef = 2 # NB: doesn't include offset col
offset = True
act = get_network_design_matrix(self.ifgs, PLANAR, offset)
self.assertEqual(act.shape[0], self.ncells * self.nifgs)
self.assertEqual(act.shape[1], (self.nepochs * ncoef) + self.nifgs)
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_quadratic_network_dm(self):
ncoef = 5
offset = False
act = get_network_design_matrix(self.ifgs, QUADRATIC, offset)
self.assertEqual(act.shape, (self.ncells * self.nifgs, ncoef * self.nepochs))
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_quadratic_network_dm_offset(self):
ncoef = 5
offset = True
act = get_network_design_matrix(self.ifgs, QUADRATIC, offset)
self.assertEqual(act.shape[0], self.ncells * self.nifgs)
self.assertEqual(act.shape[1], (self.nepochs * ncoef) + self.nifgs)
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_partcubic_network_dm(self):
ncoef = 6
offset = False
act = get_network_design_matrix(self.ifgs, PART_CUBIC, offset)
self.assertEqual(act.shape, (self.ncells * self.nifgs, ncoef * self.nepochs))
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def test_partcubic_network_dm_offset(self):
ncoef = 6
offset = True
act = get_network_design_matrix(self.ifgs, PART_CUBIC, offset)
self.assertEqual(act.shape[0], self.ncells * self.nifgs)
self.assertEqual(act.shape[1], (self.nepochs * ncoef) + self.nifgs)
self.assertNotEqual(act.ptp(), 0)
self.check_equality(ncoef, act, self.ifgs, offset)
def check_equality(self, ncoef, dm, ifgs, offset):
"""
Internal test function to check subsets against network design matrix
ncoef - base number of coefficients, without extra col for offsets
dm - network design matrix to check the results
ifgs - sequence of Ifg objs
offset - boolean to include extra parameters for model offsets
"""
deg = DEG_LOOKUP[ncoef]
nparams = ncoef * self.nepochs # index of 1st offset col
for i, ifg in enumerate(ifgs):
exp = unittest_dm(ifg, NETWORK_METHOD, deg, offset)
self.assertEqual(exp.shape, (ifg.num_cells, ncoef))
ib1, ib2 = [x * self.ncells for x in (i, i+1)] # row start/end
jbm = ncoef * self.date_ids[ifg.master] # starting col index for master
jbs = ncoef * self.date_ids[ifg.slave] # col start for slave
assert_array_almost_equal(-exp, dm[ib1:ib2, jbm:jbm+ncoef])
assert_array_almost_equal( exp, dm[ib1:ib2, jbs:jbs+ncoef])
# ensure remaining rows/cols are zero for this ifg NOT inc offsets
assert_array_equal(0, dm[ib1:ib2, :jbm]) # all cols leading up to master
assert_array_equal(0, dm[ib1:ib2, jbm + ncoef:jbs]) # cols btwn mas/slv
assert_array_equal(0, dm[ib1:ib2, jbs + ncoef:nparams]) # to end of non offsets
# check offset cols for 1s and 0s
if offset is True:
ip1 = i + nparams # offset column index
assert_array_equal(1, dm[ib1:ib2, ip1])
assert_array_equal(0, dm[ib1:ib2, nparams:ip1]) # cols before offset col
assert_array_equal(0, dm[ib1:ib2, ip1 + 1:]) # cols after offset col
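# Illustrative sketch (not part of the original tests): for the PLANAR model
# (ncoef=2) with the 6 epochs used here, the network DM has 2 * 6 = 12 parameter
# columns, plus one offset column per ifg when offset=True. For an ifg linking
# epochs m and s, its block of rows holds -exp in columns [2m, 2m+2), +exp in
# columns [2s, 2s+2), zeros elsewhere, and ones in its own offset column; this
# is exactly the layout check_equality() verifies above.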
# components for network correction testing
def network_correction(ifgs, deg, off, ml_ifgs=None, tol=1e-6):
"""
Compares results of orbital_correction() to alternate implementation.
deg - PLANAR, QUADRATIC or PART_CUBIC
off - True/False to calculate correction with offsets
"""
ncells = ifgs[0].num_cells
if ml_ifgs:
ml_nc = ml_ifgs[0].num_cells
ml_data = concatenate([i.phase_data.reshape(ml_nc) for i in ml_ifgs])
dm = get_network_design_matrix(ml_ifgs, deg, off)[~isnan(ml_data)]
fd = ml_data[~isnan(ml_data)].reshape((dm.shape[0], 1))
else:
data = concatenate([i.phase_data.reshape(ncells) for i in ifgs])
dm = get_network_design_matrix(ifgs, deg, off)[~isnan(data)]
fd = data[~isnan(data)].reshape((dm.shape[0], 1))
params = pinv(dm, tol).dot(fd)
assert params.shape == (dm.shape[1], 1)
# calculate forward correction
sdm = unittest_dm(ifgs[0], NETWORK_METHOD, deg)
ncoef = _get_num_params(deg, offset=False) # NB: ignore offsets for network method
assert sdm.shape == (ncells, ncoef)
orbs = _expand_corrections(ifgs, sdm, params, ncoef, off)
# tricky: get expected result before orbital_correction() modifies ifg phase
return [i.phase_data - orb for i, orb in zip(ifgs, orbs)]
def _expand_corrections(ifgs, dm, params, ncoef, offsets):
"""
Convenience func returns model converted to data points.
dm: design matrix (do not filter/remove nan cells)
params: model parameters array from pinv() * dm
ncoef: number of model coefficients (2 planar, 5 quadratic)
offsets: True/False to calculate correction with offsets
"""
# NB: cannot work on singular ifgs due to the date ID indexing requirement
date_ids = get_date_ids(ifgs)
corrections = []
for ifg in ifgs:
jbm = date_ids[ifg.master] * ncoef # starting row index for master
jbs = date_ids[ifg.slave] * ncoef # row start for slave
par = params[jbs:jbs + ncoef] - params[jbm:jbm + ncoef]
# estimate orbital correction effects
# corresponds to "fullorb = B*parm + offset" in orbfwd.m
cor = dm.dot(par).reshape(ifg.phase_data.shape)
if offsets:
off = np.ravel(ifg.phase_data - cor)
# bring all ifgs to same base level
cor -= nanmedian(off)
corrections.append(cor)
return corrections
class NetworkCorrectionTests(unittest.TestCase):
"""Verifies orbital correction using network method and no multilooking"""
def setUp(self):
# fake some real ifg data by adding nans
self.ifgs = small5_mock_ifgs()
_add_nodata(self.ifgs)
# use different sizes to differentiate axes results
for ifg in self.ifgs:
ifg.X_SIZE = 90.0
ifg.Y_SIZE = 89.5
self.nc_tol = 1e-6
def test_offset_inversion(self):
"""
Ensure pinv(DM)*obs gives equal results given constant change to fd
"""
def get_orbital_params():
"""Returns pseudo-inverse of the DM"""
ncells = self.ifgs[0].num_cells
data = concatenate([i.phase_data.reshape(ncells) for i in self.ifgs])
dm = get_network_design_matrix(self.ifgs, PLANAR, True)[~isnan(data)]
fd = data[~isnan(data)].reshape((dm.shape[0], 1))
return dot(
|
pinv(dm, self.nc_tol)
|
numpy.linalg.pinv
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
""" Base and mixin classes for nearest neighbors.
Adapted from scikit-learn codebase at
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/base.py.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Sparseness support by <NAME>
# Multi-output support by <NAME> <<EMAIL>>
# Hubness reduction and approximate nearest neighbor support by <NAME> <<EMAIL>>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from functools import partial
import warnings
import numpy as np
from scipy.sparse import issparse, csr_matrix
from sklearn.exceptions import DataConversionWarning
from sklearn.neighbors.base import NeighborsBase as SklearnNeighborsBase
from sklearn.neighbors.base import KNeighborsMixin as SklearnKNeighborsMixin
from sklearn.neighbors.base import RadiusNeighborsMixin as SklearnRadiusNeighborsMixin
from sklearn.neighbors.base import UnsupervisedMixin, SupervisedFloatMixin
from sklearn.neighbors.base import _tree_query_radius_parallel_helper
from sklearn.neighbors.ball_tree import BallTree
from sklearn.neighbors.kd_tree import KDTree
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS, pairwise_distances_chunked
from sklearn.utils import check_array, gen_even_slices
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_is_fitted, check_X_y
from joblib import Parallel, delayed, effective_n_jobs
from tqdm.auto import tqdm
from .approximate_neighbors import ApproximateNearestNeighbor, UnavailableANN
from .hnsw import HNSW
from .lsh import FalconnLSH
from .lsh import PuffinnLSH
from .nng import NNG
from .random_projection_trees import RandomProjectionTree
from ..reduction import NoHubnessReduction, LocalScaling, MutualProximity, DisSimLocal
__all__ = ['KNeighborsMixin', 'NeighborsBase', 'RadiusNeighborsMixin',
'SupervisedFloatMixin', 'SupervisedIntegerMixin', 'UnsupervisedMixin',
'VALID_METRICS', 'VALID_METRICS_SPARSE',
]
VALID_METRICS = dict(lsh=PuffinnLSH.valid_metrics if not issubclass(PuffinnLSH, UnavailableANN) else [],
falconn_lsh=FalconnLSH.valid_metrics if not issubclass(FalconnLSH, UnavailableANN) else [],
nng=NNG.valid_metrics if not issubclass(NNG, UnavailableANN) else [],
hnsw=HNSW.valid_metrics,
rptree=RandomProjectionTree.valid_metrics,
ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(lsh=[],
falconn_lsh=[],
nng=[],
hnsw=[],
rptree=[],
ball_tree=[],
kd_tree=[],
brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys()
- {'haversine'}),
)
ALG_WITHOUT_RADIUS_QUERY = ('hnsw', 'lsh', 'rptree', 'nng', )
EXACT_ALG = ('brute', 'kd_tree', 'ball_tree', )
ANN_ALG = ('hnsw', 'lsh', 'falconn_lsh', 'rptree', 'nng', )
ANN_CLS = (HNSW, FalconnLSH, PuffinnLSH, NNG, RandomProjectionTree, )
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
----------
dist : ndarray
The input distances
weights : {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
-------
weights_arr : array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(SklearnNeighborsBase):
"""Base class for nearest neighbors estimators."""
def __init__(self, n_neighbors=None, radius=None,
algorithm='auto', algorithm_params: dict = None,
hubness: str = None, hubness_params: dict = None,
leaf_size=30, metric='minkowski', p=2, metric_params=None,
n_jobs=None, verbose: int = 0, **kwargs):
super().__init__(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
n_jobs=n_jobs)
if algorithm_params is None:
n_candidates = 1 if hubness is None else 100
algorithm_params = {'n_candidates': n_candidates,
'metric': metric}
if n_jobs is not None and 'n_jobs' not in algorithm_params:
algorithm_params['n_jobs'] = self.n_jobs
if 'verbose' not in algorithm_params:
algorithm_params['verbose'] = verbose
hubness_params = hubness_params if hubness_params is not None else {}
if 'verbose' not in hubness_params:
hubness_params['verbose'] = verbose
self.algorithm_params = algorithm_params
self.hubness_params = hubness_params
self.hubness = hubness
self.verbose = verbose
self.kwargs = kwargs
def _check_hubness_algorithm(self):
if self.hubness not in ['mp', 'mutual_proximity',
'ls', 'local_scaling',
'dsl', 'dis_sim_local',
None]:
raise ValueError(f'Unrecognized hubness algorithm: {self.hubness}')
# Users are allowed to use various identifiers for the algorithms,
# but here we normalize them to the short abbreviations used downstream
if self.hubness in ['mp', 'mutual_proximity']:
self.hubness = 'mp'
elif self.hubness in ['ls', 'local_scaling']:
self.hubness = 'ls'
elif self.hubness in ['dsl', 'dis_sim_local']:
self.hubness = 'dsl'
elif self.hubness is None:
pass
else:
raise ValueError(f'Internal error: unknown hubness algorithm: {self.hubness}')
def _check_algorithm_metric(self):
if self.algorithm not in ['auto', *EXACT_ALG, *ANN_ALG]:
raise ValueError("unrecognized algorithm: '%s'" % self.algorithm)
if self.algorithm == 'auto':
if self.metric == 'precomputed':
alg_check = 'brute'
elif (callable(self.metric) or
self.metric in VALID_METRICS['ball_tree']):
alg_check = 'ball_tree'
else:
alg_check = 'brute'
else:
alg_check = self.algorithm
if callable(self.metric):
if self.algorithm in ['kd_tree', *ANN_ALG]:
# callable metric is only valid for brute force and ball_tree
raise ValueError(f"{self.algorithm} algorithm does not support callable metric '{self.metric}'")
elif self.metric not in VALID_METRICS[alg_check]:
raise ValueError(f"Metric '{self.metric}' not valid. Use "
f"sorted(skhubness.neighbors.VALID_METRICS['{alg_check}']) "
f"to get valid options. "
f"Metric can also be a callable function.")
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = self.metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p <= 0:
raise ValueError("p must be greater than zero for minkowski metric")
def _check_algorithm_hubness_compatibility(self):
if self.hubness == 'dsl':
if self.metric in ['euclidean', 'minkowski']:
self.metric = 'euclidean' # DSL input must still be squared Euclidean
self.hubness_params['squared'] = False
if self.p != 2:
warnings.warn(f'DisSimLocal only supports squared Euclidean distances: Ignoring p={self.p}.')
elif self.metric in ['sqeuclidean']:
self.hubness_params['squared'] = True
else:
warnings.warn(f'DisSimLocal only supports squared Euclidean distances: Ignoring metric={self.metric}.')
self.metric = 'euclidean'
self.hubness_params['squared'] = True
def _set_hubness_reduction(self, X):
if self._hubness_reduction_method is None:
self._hubness_reduction = NoHubnessReduction()
else:
n_candidates = self.algorithm_params['n_candidates']
if 'include_self' in self.kwargs and self.kwargs['include_self']:
neigh_train = self.kcandidates(X, n_neighbors=n_candidates, return_distance=True)
else:
neigh_train = self.kcandidates(n_neighbors=n_candidates, return_distance=True)
# Remove self distances
neigh_dist_train = neigh_train[0] # [:, 1:]
neigh_ind_train = neigh_train[1] # [:, 1:]
if self._hubness_reduction_method == 'ls':
self._hubness_reduction = LocalScaling(**self.hubness_params)
elif self._hubness_reduction_method == 'mp':
self._hubness_reduction = MutualProximity(**self.hubness_params)
elif self._hubness_reduction_method == 'dsl':
self._hubness_reduction = DisSimLocal(**self.hubness_params)
else:
raise ValueError(f'Hubness reduction algorithm = "{self._hubness_reduction_method}" not recognized.')
self._hubness_reduction.fit(neigh_dist_train, neigh_ind_train, X=X, assume_sorted=False)
def _fit(self, X):
self._check_algorithm_metric()
self._check_hubness_algorithm()
self._check_algorithm_hubness_compatibility()
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p <= 0:
raise ValueError(f"p must be greater than one for minkowski metric, "
f"or in ]0, 1[ for fractional norms.")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
self._index = X._index
self._hubness_reduction = X._hubness_reduction
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
elif isinstance(X, ApproximateNearestNeighbor):
self._tree = None
if isinstance(X, PuffinnLSH):
self._fit_X = np.array([X.index_.get(i) for i in range(X.n_indexed_)]) * X.X_indexed_norm_
self._fit_method = 'lsh'
elif isinstance(X, FalconnLSH):
self._fit_X = X.X_train_
self._fit_method = 'falconn_lsh'
elif isinstance(X, NNG):
self._fit_X = None
self._fit_method = 'nng'
elif isinstance(X, HNSW):
self._fit_X = None
self._fit_method = 'hnsw'
elif isinstance(X, RandomProjectionTree):
self._fit_X = None
self._fit_method = 'rptree'
self._index = X
# TODO enable hubness reduction here.
# We do not store X_train in all cases atm.
# self._hubness_reduction_method = self.hubness
# self._set_hubness_reduction(self._fit_X)
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError(f"n_samples must be greater than 0 (but was {n_samples}).")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute'] \
and not callable(self.effective_metric_):
raise ValueError(f"Metric '{self.effective_metric_}' not valid for sparse input. "
f"Use sorted(sklearn.neighbors.VALID_METRICS_SPARSE['brute']) "
f"to get valid options. Metric can also be a callable function.")
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
if self.hubness is not None:
warnings.warn(f'cannot use hubness reduction with sparse data: disabling hubness reduction.')
self.hubness = None
self._hubness_reduction_method = None
self._hubness_reduction = NoHubnessReduction()
return self
self._fit_method = self.algorithm
self._fit_X = X
self._hubness_reduction_method = self.hubness
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
elif (callable(self.effective_metric_) or
self.effective_metric_ in VALID_METRICS['ball_tree']):
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
else:
self._fit_method = 'brute'
self._index = None
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
self._index = None
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
self._index = None
elif self._fit_method == 'brute':
self._tree = None
self._index = None
elif self._fit_method == 'lsh':
self._index = PuffinnLSH(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'falconn_lsh':
self._index = FalconnLSH(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'nng':
self._index = NNG(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'hnsw':
self._index = HNSW(**self.algorithm_params)
self._index.fit(X)
self._tree = None
elif self._fit_method == 'rptree':
self._index = RandomProjectionTree(**self.algorithm_params)
self._index.fit(X)
self._tree = None # because it's a tree, but not an sklearn tree...
else:
raise ValueError(f"algorithm = '{self.algorithm}' not recognized")
# Fit hubness reduction method
self._set_hubness_reduction(X)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(f"Expected n_neighbors > 0. Got {self.n_neighbors:d}")
else:
if not np.issubdtype(type(self.n_neighbors), np.integer):
raise TypeError(
f"n_neighbors does not take {type(self.n_neighbors)} value, "
f"enter integer value"
)
return self
def kcandidates(self, X=None, n_neighbors=None, return_distance=True) -> np.ndarray or (np.ndarray, np.ndarray):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from skhubness.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[0.5]]), array([[2]]))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
check_is_fitted(self, "_fit_method")
if n_neighbors is None:
try:
n_neighbors = self.algorithm_params['n_candidates']
except KeyError:
n_neighbors = 1 if self.hubness is None else 100
elif n_neighbors <= 0:
raise ValueError(f"Expected n_neighbors > 0. Got {n_neighbors}")
else:
if not np.issubdtype(type(n_neighbors), np.integer):
raise TypeError(
"n_neighbors does not take %s value, "
"enter integer value" %
type(n_neighbors))
# The number of candidates must not be less than the number of neighbors used downstream
if self.n_neighbors is not None:
if n_neighbors < self.n_neighbors:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
try:
train_size = self._fit_X.shape[0]
except AttributeError:
train_size = self._index.n_samples_fit_
if n_neighbors > train_size:
warnings.warn(f'n_candidates > n_samples. Setting n_candidates = n_samples.')
n_neighbors = train_size
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = effective_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# TODO handle sparse matrices here
reduce_func = partial(self._kneighbors_reduce_func,
n_neighbors=n_neighbors,
return_distance=return_distance)
# for efficiency, use squared euclidean distances
kwds = ({'squared': True} if self.effective_metric_ == 'euclidean'
else self.effective_metric_params_)
result = pairwise_distances_chunked(
X, self._fit_X, reduce_func=reduce_func,
metric=self.effective_metric_, n_jobs=n_jobs,
**kwds)
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
# require joblib >= 0.12
delayed_query = delayed(self._tree.query)
parallel_kwargs = {"prefer": "threads"}
result = Parallel(n_jobs, **parallel_kwargs)(
delayed_query(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
elif self._fit_method in ['lsh', 'falconn_lsh', 'rptree', 'nng', ]:
# assume joblib>=0.12
delayed_query = delayed(self._index.kneighbors)
parallel_kwargs = {"prefer": "threads"}
result = Parallel(n_jobs, **parallel_kwargs)(
delayed_query(X[s], n_candidates=n_neighbors, return_distance=True)
for s in gen_even_slices(X.shape[0], n_jobs)
)
elif self._fit_method in ['hnsw']:
# XXX nmslib supports multiple threads natively, so no joblib used here
# Must pack results into list to match the output format of joblib
result = self._index.kneighbors(X, n_candidates=n_neighbors, return_distance=True)
result = [result, ]
else:
raise ValueError(f"internal: _fit_method not recognized: {self._fit_method}.")
if return_distance:
dist, neigh_ind = zip(*result)
result = [np.atleast_2d(arr) for arr in [np.vstack(dist), np.vstack(neigh_ind)]]
else:
result = np.atleast_2d(np.vstack(result))
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates is more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs =
|
np.all(sample_mask, axis=1)
|
numpy.all
|
import numpy as np
import matplotlib.pyplot as plt
import shapely.geometry
from scipy.ndimage.morphology import binary_dilation
from scipy.ndimage import label
def voxels_to_polygon( image_stack, pixel_size, center=(0.5,0.5) ):
'''Take a stack of images and produce a stack of shapely polygons.
The images are interpreted as a solid shape with boundary along the pixel
exterior edge. Thus an image eith a single nonzero pixel will return a square
polygon with sidelength equal to the pixel_size.
IN:
image_stack: list of binary (1.0,0) numpy array 2d images each depicting
a single connected region of 1.0 surrounded by 0.0.
pixel_size: The absolute pixel size of the input images. Used to make the
output polygons coordinates real spaced.
center: the relative origin of the image, axis=0 is x and axis=1 is y
increasing with increasingf index. For instance center=(0.5,0.5)
will select the centre of the image as the orign.
OUT:
polygon_stack: list of shapely.geometry.polygons each representing the bound
of the corresponding input binary image.
'''
polygon_stack = [pixels_to_polygon(image, pixel_size, center) for image in image_stack]
return polygon_stack
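# Usage sketch (hypothetical input, for illustration only):
# img = np.zeros((5, 5)); img[2, 2] = 1.0
# polys = voxels_to_polygon([img], pixel_size=1.0)
# polys[0].area # ~1.0, i.e. roughly a unit square for a single nonzero pixel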
def check_input(image):
"""Check that the provided image consists of a single connected domain of pixels.
"""
# Check that the input image has no floating pixels.
labeled_array, num_features = label(image.astype(int)+1)
assert num_features==1, "The input image must contain a single solid domain of connected pixels but it appears to have floating pixels"
#
# Check that the input image has no holes.
s = np.sum( np.abs(image.astype(int)[1:,:]-image.astype(int)[0:-1,:]), axis=0 )
assert np.all( s <= 2 ), "The input image must contain a single solid domain of connected pixels but it appears to have holes"
#
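# Reasoning example (illustrative): a column of a solid shape crosses the
# boundary at most twice (enter and exit), so its column-wise sum of absolute
# vertical differences is at most 2. A ring-shaped image would cross four times
# in any column through its hole (s == 4), tripping the assertion above.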
def pixels_to_polygon( image, pixel_size, center=(0.5,0.5) ):
'''Take a single image and produce a shapely polygon.
'''
check_input(image)
expanded_image = expand_image(image, factor=3)
indices = get_image_boundary_index( expanded_image )
coordinates = indices_to_coordinates(indices, pixel_size/3., center, expanded_image)
polygon = shapely.geometry.Polygon(coordinates)
#show_polygon_and_image(polygon, image, pixel_size, center) #<= DEBUG
return polygon
def expand_image(image, factor):
'''Expand 2d binary image so that each pixel is split by copying
into factor x factor number of pixels.
'''
expanded_image = np.repeat(image, factor, axis=1)
expanded_image = np.repeat(expanded_image, factor, axis=0)
return expanded_image
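# Example (illustrative): with factor=3 a 2x2 image becomes 6x6, every original
# pixel being copied into a 3x3 block, e.g.
# expand_image(np.array([[1, 0], [0, 1]]), 3).shape # -> (6, 6)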
def get_image_boundary_index( image ):
'''Find the pixel indices of the boundary pixels of a binary image.
'''
boundary_image = get_boundary_image( image )
bound_indx = np.where( boundary_image==1 )
ix,iy = bound_indx[0][0],bound_indx[1][0] # starting index
indices = [(ix,iy)]
while( not len(indices)==np.sum(boundary_image) ):
# Walk around border and save boundary pixel indices
mask = np.zeros(boundary_image.shape)
mask[ np.max([0,ix-1]):ix+2, iy ] = 1
mask[ ix, np.max([0,iy-1]):iy+2 ] = 1
neighbour_indx = np.where( boundary_image*mask )
for ix,iy in zip( neighbour_indx[0], neighbour_indx[1]):
if (ix,iy) not in indices:
indices.append( (ix,iy) )
break
indices = sparse_indices( indices )
return indices
def get_boundary_image( image ):
'''Return a pixel image with 1 along the boundary of the assumed
object in the image.
'''
k = np.ones((3,3),dtype=int)
dilation = binary_dilation( image==0, k, border_value=1 )
boundary_image = dilation*image
return boundary_image
def sparse_indices( indices ):
'''Remove unnecessary nodes in the polygon (three collinear nodes are unnecessary).
'''
new_indices = []
for i in range(0, len(indices)-1):
if not (indices[i-1][0]==indices[i][0]==indices[i+1][0] or \
indices[i-1][1]==indices[i][1]==indices[i+1][1]):
new_indices.append(indices[i])
return new_indices
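# Example (illustrative): collinear nodes are removed, e.g. for the closed-loop
# index list [(0, 0), (0, 1), (0, 2), (1, 2)] the call returns [(0, 0), (0, 2)],
# because (0, 1) shares its row with both of its neighbours.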
def indices_to_coordinates(indices, pixel_size, center, image ):
'''Compute real space coordinates of the image boundary from a set of pixel indices.
'''
dx = image.shape[1]*center[0]
dy = image.shape[0]*center[1]
coordinates = []
for c in indices:
# Verified by simulated nonsymmetric grain
ycoord = pixel_size*( c[1] + 0.5 - dx + (c[1]%3 - 1)*0.5 )
xcoord = pixel_size*( -c[0] - 0.5 + dy - (c[0]%3 - 1)*0.5 )
coordinates.append( (xcoord,ycoord) )
return coordinates
def get_integral_paths( angles, ytrans, zpos, sample_polygon, show_geom=False ):
'''Compute entry-exit points for a scan range.
'''
# Instantiate lists to contain all measurements
all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines = [],[],[],[],[],[]
xray_endpoints = get_xray_endpoints( sample_polygon )
# Loop over all experimental settings
for i,(ang,dty) in enumerate( zip( angles, ytrans ) ):
# Translate and rotate the xray endpoints according to ytrans and angle
c, s = np.cos( np.radians( -ang ) ), np.sin( np.radians( -ang ) )
rotz = np.array([[c,-s],[s,c]])
rx = rotz.dot( xray_endpoints + np.array([[0,0],[dty,dty]]) )
xray_polygon = shapely.geometry.LineString( [ rx[:,0], rx[:,1] ] )
# compute the intersections between beam and sample in sample coordinates
intersection_points = get_intersection( xray_polygon, sample_polygon, zpos )
if intersection_points is None:
# If a measurement missed the sample or grazed a corner, we skip ahead
bad_lines.append(i)
continue
else:
# make a measurement at the current setting
entry, exit, nhat, L, nsegs = get_quanteties( intersection_points )
# save the measurement results in global lists
all_entry.append( entry )
all_exit.append( exit )
all_nhat.append( nhat )
all_L.append( L )
all_nsegs.append( nsegs )
if show_geom:
# Show sample and xray polygons
print('entry ',entry)
print('exit ',exit)
print('nhat ',nhat)
print('L ',L)
xc,yc = xray_polygon.xy
plt.figure(figsize=(11,8))
plt.scatter(entry[0::3], entry[1::3],c='k',zorder=200,label='entry')
plt.scatter(exit[0::3], exit[1::3],c='b',zorder=200,label='exit')
plt.plot(xc,yc,c='y',label='Beam')
plt.arrow(0, 0, 20*nhat[0],20*nhat[1], head_width=2, color='r', zorder=100,label=r'$\hat{n}$')
xc,yc = sample_polygon.exterior.xy
plt.fill(xc,yc,color='gray',label='Grain',zorder=1)
plt.title('L='+str(L)+' angle='+str(ang)+r'$^o$, dty='+str(dty)+', z_translation='+str(zpos)+ 'nsegs='+str(nsegs))
plt.axis('equal')
plt.xlabel('x')
plt.ylabel('y')
xcircle = np.linspace(-dty, dty, 100)
ycircle = np.sqrt( dty**2 - xcircle**2 )
plt.plot( xcircle, ycircle, c='g' )
plt.plot( xcircle, -ycircle, c='g', label='circle with R=dty' )
plt.grid(True)
plt.legend()
plt.show()
# repack lists of measurements into numpy arrays of desired format
entry, exit, nhat, L, nsegs = repack(all_entry, all_exit, all_nhat, all_L, all_nsegs)
return entry, exit, nhat, L, nsegs, bad_lines
def get_xray_endpoints( sample_polygon ):
'''Calculate endpoints of the xray line segment. The length of the
line segment is adapted to make sure the xray always covers the full
length of the sample.
'''
xc, yc = sample_polygon.exterior.xy
xmin = np.min( xc )
xmax = np.max( xc )
ymin = np.min( yc )
ymax = np.max( yc )
D = np.sqrt( (xmax-xmin)**2 + (ymax-ymin)**2 )
return np.array([ [-1.1*D, 1.1*D], [0,0] ])
def get_intersection( xray_polygon, sample_polygon, z ):
'''Compute the 3d coordinates of intersection between xray and
sample.
'''
intersection = sample_polygon.intersection( xray_polygon )
if intersection.is_empty or isinstance(intersection, shapely.geometry.point.Point):
# we missed the sample with the beam
intersection_points = None
elif isinstance(intersection, shapely.geometry.linestring.LineString):
# we got a single line segment intersection
intersection_points = np.zeros( (2,3) )
intersection_points[:2,:2] = np.array( intersection.xy ).T
intersection_points[:,2] = z
elif isinstance(intersection, shapely.geometry.multilinestring.MultiLineString):
# we got multiple line segments intersection
intersection_points = np.zeros( (2*len(intersection.geoms),3) )
for i,line_segment in enumerate(intersection.geoms):
intersection_points[2*i:2*(i+1),:2] = np.array( line_segment.xy ).T
intersection_points[:,2] = z
return intersection_points
def get_quanteties( intersection_points ):
nsegs = intersection_points.shape[0]//2
entry,exit = [],[]
p1 = intersection_points[0,:]
p2 = intersection_points[1,:]
nhat = list( (p2-p1)/np.linalg.norm(p2-p1) )
L = 0
for i in range( nsegs ):
p1 = intersection_points[2*i,:]
p2 = intersection_points[2*i+1,:]
entry.extend( list(p1) )
exit.extend( list(p2) )
length = np.linalg.norm(p2-p1)
L += length
return entry, exit, nhat, L, nsegs
def repack( all_entry, all_exit, all_nhat, all_L, all_nsegs ):
'''Repack global measurement list into numpy arrays of desired format.
'''
N = len( all_L )
p = max( max(all_nsegs), 1 )
nsegs = np.array(all_nsegs).reshape( 1,N )
L =
|
np.array(all_L)
|
numpy.array
|
import os
import time
import cv2
import numpy as np
class PeopleDetector:
def __init__(self, yolocfg='yolo-coco/yolov3-spp.cfg',
yoloweights='yolo-coco/yolov3-spp.weights',
labelpath='yolo-coco/coco.names',
confidence=0.5,
threshold=0.5):
self._yolocfg = yolocfg
self._yoloweights = yoloweights
self._confidence = confidence
self._threshold = threshold # NMS
self._labels = open(labelpath).read().strip().split("\n")
self._colors = np.random.randint(
0, 255, size=(len(self._labels), 3), dtype="uint8")
self._net = None
self._layer_names = None
self._boxes = []
self._confidences = []
self._classIDs = []
self._centers = []
def load_network(self):
print("loading yolov3 network\n")
self._net = cv2.dnn.readNetFromDarknet(
self._yolocfg, self._yoloweights)
self._layer_names = [self._net.getLayerNames()[i[0] - 1]
for i in self._net.getUnconnectedOutLayers()]
print("yolov3 loaded successfully\n")
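# Note (OpenCV version dependent): getUnconnectedOutLayers() returns the indices
# of the YOLO output layers. Older cv2 releases return them as one-element
# arrays, hence the i[0] - 1 indexing above; newer releases return plain ints,
# in which case the comprehension would need i - 1 instead.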
def predict(self, image, debug=False):
image = cv2.resize(image, (800, 800))
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
self._net.setInput(blob)
start = time.time()
layerOutputs = self._net.forward(self._layer_names)
for out in layerOutputs:
for detection in out:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
if confidence > self._confidence:
box = detection[0:4] *
|
np.array([W, H, W, H])
|
numpy.array
|
#!/usr/bin/env python
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
import matplotlib as mpl
sys.path.append('..')
from models.car_rear_axle import Car
from controllers.ModelPredictiveController import MPC
from trajectory_generation.CubicSpline import Spline2D
GOAL_EPS = 0.1
# Vehicle parameters
WIDTH = 1.0 # m
WHEEL_LEN = 0.3 # m
WHEEL_WIDTH = 0.2 # m
TREAD = 0.7 # m
L = 3.0 # m
def distance(a, b):
return np.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)
def find_closest_waypoint(car, cx, cy):
distances = np.sum(( np.array([[car.x], [car.y]]) -
np.stack((cx, cy)) )**2, axis=0)
idx = np.argmin(distances)
return idx, cx[idx], cy[idx]
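# Illustration (assumed values): with cx = np.array([0.0, 1.0, 2.0]) and
# cy = np.array([0.0, 0.0, 0.0]), a car at (x, y) = (1.2, 0.1) gives
# find_closest_waypoint(car, cx, cy) -> (1, 1.0, 0.0), the nearest waypoint.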
def plot_car(x, y, yaw, delta=0.0, cabcolor="-r", truckcolor="-k"):
x_f = x + np.cos(yaw) * L
y_f = y + np.sin(yaw) * L
plt.plot([x, x_f], [y, y_f], 'k')
rear_axle_x1 = x + WIDTH * np.cos(yaw - 1.57) / 2
rear_axle_y1 = y + WIDTH * np.sin(yaw - 1.57) / 2
rear_axle_x2 = x + WIDTH * np.cos(yaw + 1.57) / 2
rear_axle_y2 = y + WIDTH * np.sin(yaw + 1.57) / 2
plt.plot([rear_axle_x1, rear_axle_x2], [rear_axle_y1, rear_axle_y2], 'k')
front_axle_x1 = x_f + WIDTH * np.cos(yaw - 1.57) / 2
front_axle_y1 = y_f + WIDTH * np.sin(yaw - 1.57) / 2
front_axle_x2 = x_f + WIDTH * np.cos(yaw + 1.57) / 2
front_axle_y2 = y_f + WIDTH * np.sin(yaw + 1.57) / 2
plt.plot([front_axle_x1, front_axle_x2], [front_axle_y1, front_axle_y2], 'k')
right_rear_wheel = np.array([[WHEEL_LEN, -WHEEL_LEN, -WHEEL_LEN, WHEEL_LEN,
WHEEL_LEN],
[-WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD,
WHEEL_WIDTH - TREAD, WHEEL_WIDTH - TREAD,
-WHEEL_WIDTH - TREAD]])
right_front_wheel = np.copy(right_rear_wheel)
left_rear_wheel =
|
np.copy(right_rear_wheel)
|
numpy.copy
|
import sys
import time
sys.path.append("./src")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
update_cnt = 10
listMethods = ['iNN', 'NN', 'iODE', 'ODE', 'ENTROPY', 'RANDOM']
resultFolder = './resultsOnLambda_100/'
drawIndex = 0
iNN = np.loadtxt(resultFolder + listMethods[drawIndex] + '_MOCU.txt', delimiter = "\t")
iNN = iNN.mean(0)
iNNT = np.loadtxt(resultFolder + listMethods[drawIndex] + '_timeComplexity.txt', delimiter = "\t")
iNNT = iNNT.mean(0)
drawIndex = 1
NN = np.loadtxt(resultFolder + listMethods[drawIndex] + '_MOCU.txt', delimiter = "\t")
NN = NN.mean(0)
NNT = np.loadtxt(resultFolder + listMethods[drawIndex] + '_timeComplexity.txt', delimiter = "\t")
NNT = NNT.mean(0)
# drawIndex = 2
# iODE = np.loadtxt(resultFolder + listMethods[drawIndex] + '_MOCU.txt', delimiter = "\t")
# iODE = iODE.mean(0)
# iODET = np.loadtxt(resultFolder + listMethods[drawIndex] + '_timeComplexity.txt', delimiter = "\t")
# iODET = iODET.mean(0)
drawIndex = 3
ODE = np.loadtxt(resultFolder + listMethods[drawIndex] + '_MOCU.txt', delimiter = "\t")
ODE = ODE.mean(0)
ODET =
|
np.loadtxt(resultFolder + listMethods[drawIndex] + '_timeComplexity.txt', delimiter = "\t")
|
numpy.loadtxt
|
import numpy as np
class Data:
def __init__(self,data_dir='data/icews14/'):
# load data
self.train_data = self._load_data(data_dir, "train.txt")
self.valid_data = self._load_data(data_dir, "valid.txt")
self.test_data = self._load_data(data_dir, "test.txt")
# Put it in 1 big matrix
self.data =
|
np.vstack((self.train_data,self.test_data))
|
numpy.vstack
|
import matplotlib
import matplotlib._color_data as mcd
import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FuncFormatter
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
# matplotlib.rcParams['text.usetex'] = True
overlap = {name for name in mcd.CSS4_COLORS
if "xkcd:" + name in mcd.XKCD_COLORS}
font_size = 14 # 14
colors = {
"DBEst_1k": mcd.XKCD_COLORS['xkcd:coral'],
"DBEst_10k": mcd.XKCD_COLORS['xkcd:orange'], # blue
"DBEst_100k": mcd.XKCD_COLORS['xkcd:orangered'], # green
"DBEst_1m": mcd.XKCD_COLORS['xkcd:red'], # yellow
"BlinkDB_1k": mcd.XKCD_COLORS['xkcd:lightblue'], # red
"BlinkDB_10k": mcd.XKCD_COLORS['xkcd:turquoise'], # red
"BlinkDB_100k": mcd.XKCD_COLORS['xkcd:teal'], # cyan
"BlinkDB_1m": mcd.XKCD_COLORS['xkcd:azure'], # magenta
"BlinkDB_5m": mcd.XKCD_COLORS['xkcd:blue'], # red
"BlinkDB_26m": mcd.XKCD_COLORS['xkcd:darkblue'], # red
"green1":mcd.XKCD_COLORS['xkcd:pale green'],
"green2":mcd.XKCD_COLORS['xkcd:lime'],
"green3":mcd.XKCD_COLORS['xkcd:neon green'],
"lightgreen": mcd.XKCD_COLORS['xkcd:lightgreen'],
"green": mcd.XKCD_COLORS['xkcd:green'],
"orange": mcd.XKCD_COLORS['xkcd:orange'],
"orangered": mcd.XKCD_COLORS['xkcd:orangered'],
"red": mcd.XKCD_COLORS['xkcd:red'],
}
alpha = {
"1": 0.1,
"2": 0.3,
"3": 0.5,
"4": 0.7,
"5": 0.9,
'6': 1.0
}
def to_percent(y, pos):
return '%.1f%%' % (y * 100)
def to_percent3(y, pos):
return '%.3f%%' % (y * 100)
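# These helpers are meant to be wrapped in matplotlib's FuncFormatter (as done
# in the plotting functions below), e.g. to_percent(0.0213, None) -> '2.1%' and
# to_percent3(0.0213, None) -> '2.130%'.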
def add_value_labels(ax, spacing=5,fontsize=12,b_float=True,b_percent=False):
"""Add labels to the end of each bar in a bar chart.
Arguments:
ax (matplotlib.axes.Axes): The matplotlib object containing the axes
of the plot to annotate.
spacing (int): The distance between the labels and the bars.
"""
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# For each bar: Place a label
for rect in ax.patches:
# Get X and Y placement of label from rect.
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
# Number of points between bar and label. Change to your liking.
space = spacing
# Vertical alignment for positive values
va = 'bottom'
# If value of bar is negative: Place label below bar
if y_value < 0:
# Invert space to place label below
space *= -1
# Vertically align label at top
va = 'top'
# Use Y value as label and format number with one decimal place
if b_float:
if b_percent:
label = '%.2f%%' % (y_value * 100)
else:
label = "{:.1f}".format(y_value)
else:
label = "{:d}".format(y_value)
# Create annotation
ax.annotate(
label, # Use `label` as label
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
fontsize=fontsize) # Vertically align label differently for
# positive and negative values.
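# Usage sketch (hypothetical data): annotate a bar chart with percentage labels.
# fig, ax = plt.subplots()
# ax.bar([0, 1, 2], [0.10, 0.25, 0.15])
# add_value_labels(ax, b_float=True, b_percent=True) # labels: 10.00%, 25.00%, 15.00%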
def plt_tpcds_universal_relative_error():
plt.rcParams.update({'font.size': 12})
width = 0.2
# data = [
# [0.011825885, 0.011224583, 0.00359967, 0.008883379 ], # , 0.0335],
# [0.01995, 0.01972, 0.00079, 0.013486667], # , 0.0486],
# [0.019962, 0.020326766, 0.020300222, 0.020196329], # , 0.0182],
# ]
data = [
[0.0149, 0.0148, 0.00328, 0.0109 ], # , 0.0335],
[0.02134, 0.01999, 0.00123, 0.01418666], # , 0.0486],
[0.019962, 0.020326766, 0.020300222, 0.020196329], # , 0.0182],
]
X = np.arange(4)
fig, ax = plt.subplots()
p1 = plt.bar(
X + 0.00, data[0], color=colors["DBEst_1k"], width=width, alpha=0.9)
p2 = plt.bar(
X + width, data[1], color=colors["BlinkDB_1k"], width=width, alpha=0.5)
p3 = plt.bar(
X + 2*width, data[2], color=colors["green1"], width=width, alpha=0.7)
plt.legend((p1[0], p2[0], p3[0]),#, p4[0],p5[0]),
('DBEst++', 'DeepDB', 'VerdictDB', 'BlinkDB_100k',"haha"), loc='lower left')
plt.xticks(X + 1 * width, ("COUNT", 'SUM', 'AVG', 'OVERALL'))
ax.set_ylabel("Relative Error (%)")
# ax.set_yscale('log')
formatter = FuncFormatter(to_percent)
ax.yaxis.set_major_formatter(formatter)
plt.subplots_adjust(bottom=0.07)
# plt.subplots_adjust(left=0.23)
# plt.subplots_adjust(bottom=0.12)
add_value_labels(ax,b_float=True,b_percent=True,fontsize=6)
plt.savefig("/home/quincy/Pictures/accuracy_universal.pdf")
print("figure saved.")
plt.show()
def plt_tpcds_universal_relative_error_scalability():
plt.rcParams.update({'font.size': 12})
width = 0.2
# data = [
# [0.011013, 0.010773, 0.008883],
# [0.015733, 0.013967, 0.013487],
# [0.021001, 0.025905, 0.020196],
# ]
data = [
[0.011013, 0.010773, 0.008883],
[0.0142 , 0.013967, 0.013487],
[0.021001, 0.025905, 0.020196],
]
X = np.arange(3)
fig, ax = plt.subplots()
p1 = plt.bar(
X + 0.00, data[0], color=colors["DBEst_1k"], width=width, alpha=0.9)
p2 = plt.bar(
X + width, data[1], color=colors["BlinkDB_1k"], width=width, alpha=0.5)
p3 = plt.bar(
X + 2*width, data[2], color=colors["green1"], width=width, alpha=0.7)
plt.legend((p1[0], p2[0], p3[0]),#, p4[0],p5[0]),
('DBEst++', 'DeepDB', 'VerdictDB', 'BlinkDB_100k',"haha"), loc='lower left')
plt.xticks(X + 1 * width, ("10", '100', '1000'))
ax.set_ylabel("Relative Error (%)")
ax.set_xlabel("TPC-DS Scaling Factor")
# ax.set_yscale('log')
formatter = FuncFormatter(to_percent)
ax.yaxis.set_major_formatter(formatter)
plt.subplots_adjust(bottom=0.07)
# plt.subplots_adjust(left=0.23)
plt.subplots_adjust(bottom=0.12)
add_value_labels(ax,b_float=True,b_percent=True)
# plt.savefig("/home/quincy/Pictures/accuracy_universal_scalability.pdf")
print("figure saved.")
plt.show()
def plt_tpcds_universal_relative_error_scalability_large_number_of_groups_overall():
plt.rcParams.update({'font.size': 12})
width = 0.25
data = [
[0.1146, 0.0562, 0.0460, 0.0332, 0.0297],
[0.0394, 0.0381, 0.0375, 0.0377, 0.0374],
[0.2955, 0.2089, 0.1493, 0.1068, 0.0882],
]
X = np.arange(5)
fig, ax = plt.subplots()
p1 = plt.bar(
X + 0.00, data[0], color=colors["DBEst_1k"], width=width, alpha=0.9)
p2 = plt.bar(
X + width, data[1], color=colors["BlinkDB_1k"], width=width, alpha=0.5)
p3 = plt.bar(
X + 2*width, data[2], color=colors["green1"], width=width, alpha=0.7)
plt.legend((p1[0], p2[0], p3[0]),#, p4[0],p5[0]),
('DBEst++', 'DeepDB', 'VerdictDB', 'BlinkDB_100k',"haha"), loc='upper right')
plt.xticks(X + 1 * width, ("2.5m", '5m', '10m','20m','30m'))
ax.set_ylabel("Relative Error (%)")
ax.set_xlabel("Sample Size")
# ax.set_yscale('log')
formatter = FuncFormatter(to_percent)
ax.yaxis.set_major_formatter(formatter)
# plt.subplots_adjust(bottom=0.07)
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.12)
add_value_labels(ax,b_float=True,b_percent=True,fontsize=8)
# plt.savefig("/home/quincy/Pictures/accuracy_universal_scalability.pdf")
print("figure saved.")
plt.show()
def plt_tpcds_universal_relative_error_scalability_large_number_of_groups_count():
plt.rcParams.update({'font.size': 12})
width = 0.2
data = [
[0.1190, 0.0802, 0.0614, 0.0452, 0.0384],
[0.0547, 0.0536, 0.0525, 0.0527, 0.0523],
[0.2723, 0.1961, 0.1424, 0.1038, 0.0870],
[0.0519, 0.0517, 0.0512, float('NaN'),float('NaN')]
]
X = np.arange(5)
fig, ax = plt.subplots()
p1 = plt.bar(
X + 0.00, data[0], color=colors["DBEst_1k"], width=width, alpha=0.9)
p2 = plt.bar(
X + width, data[1], color=colors["BlinkDB_1k"], width=width, alpha=0.5)
p3 = plt.bar(
X + 2*width, data[2], color=colors["green1"], width=width, alpha=0.7)
p4 = plt.bar(
X + 3*width, data[3], color=colors["DBEst_100k"], width=width, alpha=0.4)
plt.legend((p1[0], p2[0], p3[0],p4[0]),#, p4[0],p5[0]),
('DBEst++', 'DeepDB', 'VerdictDB', 'DBEst++ with FT',"haha"), loc='upper right')
plt.xticks(X + 1 * width, ("2.5m", '5m', '10m','20m','30m'))
ax.set_ylabel("Relative Error of COUNT Queries(%)")
ax.set_xlabel("Sample Size")
# ax.set_yscale('log')
formatter = FuncFormatter(to_percent)
ax.yaxis.set_major_formatter(formatter)
# plt.subplots_adjust(bottom=0.07)
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.12)
add_value_labels(ax,b_float=True,b_percent=True,fontsize=10)
plt.savefig("/home/quincy/Pictures/large_group_count.pdf")
print("figure saved.")
plt.show()
def plt_tpcds_universal_relative_error_scalability_large_number_of_groups_sum():
plt.rcParams.update({'font.size': 12})
width = 0.2
data = [
[0.1267, 0.0802, 0.0625, 0.0460, 0.0402,],
[0.0564, 0.0537, 0.0530, 0.0533, 0.0527,],
[0.3576, 0.2527, 0.1791, 0.1259, 0.1029,],
[0.0558, 0.0544, 0.0537, float('NaN'),float('NaN')]
]
X = np.arange(5)
fig, ax = plt.subplots()
p1 = plt.bar(
X + 0.00, data[0], color=colors["DBEst_1k"], width=width, alpha=0.9)
p2 = plt.bar(
X + width, data[1], color=colors["BlinkDB_1k"], width=width, alpha=0.5)
p3 = plt.bar(
X + 2*width, data[2], color=colors["green1"], width=width, alpha=0.7)
p4 = plt.bar(
X + 3*width, data[3], color=colors["DBEst_100k"], width=width, alpha=0.4)
plt.legend((p1[0], p2[0], p3[0],p4[0]),#, p4[0],p5[0]),
('DBEst++', 'DeepDB', 'VerdictDB', 'DBEst++ with FT',"haha"), loc='upper right')
plt.xticks(X + 1 * width, ("2.5m", '5m', '10m','20m','30m'))
ax.set_ylabel("Relative Error of SUM Queries (%)")
ax.set_xlabel("Sample Size")
# ax.set_yscale('log')
formatter = FuncFormatter(to_percent)
ax.yaxis.set_major_formatter(formatter)
# plt.subplots_adjust(bottom=0.07)
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.12)
add_value_labels(ax,b_float=True,b_percent=True,fontsize=10)
plt.savefig("/home/quincy/Pictures/large_group_sum.pdf")
print("figure saved.")
plt.show()
def plt_tpcds_universal_relative_error_scalability_large_number_of_groups_avg():
plt.rcParams.update({'font.size': 12})
width = 0.25
data = [
[0.0981, 0.0083, 0.0142, 0.0085, 0.0106,],
[0.0072, 0.0071, 0.0071, 0.0071, 0.0071,],
[0.2567, 0.1780, 0.1264, 0.0907, 0.0748,],
]
X = np.arange(5)
fig, ax = plt.subplots()
p1 = plt.bar(
X + 0.00, data[0], color=colors["DBEst_1k"], width=width, alpha=0.9)
p2 = plt.bar(
X + width, data[1], color=colors["BlinkDB_1k"], width=width, alpha=0.5)
p3 = plt.bar(
X + 2*width, data[2], color=colors["green1"], width=width, alpha=0.7)
plt.legend((p1[0], p2[0], p3[0]),#, p4[0],p5[0]),
('DBEst++', 'DeepDB', 'VerdictDB', 'BlinkDB_100k',"haha"), loc='upper right')
plt.xticks(X + 1 * width, ("2.5m", '5m', '10m','20m','30m'))
ax.set_ylabel("Relative Error of AVG Queries(%)")
ax.set_xlabel("Sample Size")
# ax.set_yscale('log')
formatter = FuncFormatter(to_percent)
ax.yaxis.set_major_formatter(formatter)
# plt.subplots_adjust(bottom=0.07)
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.12)
add_value_labels(ax,b_float=True,b_percent=True,fontsize=8)
# plt.savefig("/home/quincy/Pictures/accuracy_universal_scalability.pdf")
print("figure saved.")
plt.show()
def plt_tpcds_universal_relative_error_scalability_count():
plt.rcParams.update({'font.size': 16})
width = 0.2
# data = [
# [0.011013, 0.010773, 0.008883],
# [0.015733, 0.013967, 0.013487],
# [0.021001, 0.025905, 0.020196],
# ]
data = [
[0.0149, 0.0108, 0.0118],
[0.0213, 0.0168, 0.0126],
[0.0203, 0.0259, 0.0200],
]
X = np.arange(3)
fig, ax = plt.subplots()
p1 = plt.bar(
X + 0.00, data[0], color=colors["DBEst_1k"], width=width, alpha=0.9)
p2 = plt.bar(
X + width, data[1], color=colors["BlinkDB_1k"], width=width, alpha=0.5)
p3 = plt.bar(
X + 2*width, data[2], color=colors["green1"], width=width, alpha=0.7)
plt.legend((p1[0], p2[0], p3[0]),#, p4[0],p5[0]),
('DBEst++', 'DeepDB', 'VerdictDB', 'BlinkDB_100k',"haha"), loc='lower left')
plt.xticks(X + 1 * width, ("10", '100', '1000'))
ax.set_ylabel("Relative Error (%)")
ax.set_xlabel("TPC-DS Scaling Factor")
# ax.set_yscale('log')
formatter = FuncFormatter(to_percent)
ax.yaxis.set_major_formatter(formatter)
plt.subplots_adjust(bottom=0.07)
plt.subplots_adjust(left=0.17)
plt.subplots_adjust(bottom=0.12)
add_value_labels(ax,b_float=True,b_percent=True)
plt.savefig("/home/quincy/Pictures/accuracy_universal_scalability_count.pdf")
print("figure saved.")
plt.show()
def plt_tpcds_universal_relative_error_scalability_sum():
plt.rcParams.update({'font.size': 16})
width = 0.2
# data = [
# [0.011013, 0.010773, 0.008883],
# [0.015733, 0.013967, 0.013487],
# [0.021001, 0.025905, 0.020196],
# ]
data = [
[0.0148, 0.0111, 0.0112],
[0.0200, 0.0178, 0.0125],
[0.0216, 0.0259, 0.0203],
]
X =
|
np.arange(3)
|
numpy.arange
|
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print("\nData: \n\n", data)
print("\nType of data: \n\n", type(data))
census=np.concatenate((data,new_record))
print(census.shape)
#Code starts here
age=census[:,0]
max_age=np.max(age)
min_age=np.min(age)
age_mean=np.mean(age)
age_std=
|
np.std(age)
|
numpy.std
|
import argparse
from collections import Counter
from copy import deepcopy
from math import sqrt
import sys
from time import time
import numpy as np
from typing import List, Tuple
parser = argparse.ArgumentParser()
parser.add_argument("input_file",
help="Sudoku input file to solve (CSV).")
parser.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
args = parser.parse_args()
FILENAME = args.input_file
def load_from_csv(filename):
""" Function to load input from .csv, and perform basic input checking. """
parsed = np.loadtxt(filename, delimiter=",", dtype=np.uint8)
print('Board loaded successfully.')
size = len(parsed)
if size not in (9, 16) or parsed.shape[0] != parsed.shape[1]:
print('Unsupported board size detected, exiting. (9x9 or 16x16 are supported as of now.)')
sys.exit(1)
if size == 16:
start = 1
end = 17
else:
start = 1
end = 10
dimensions = (start, end)
if args.verbose:
print(f'Board size detected: {size}x{size}')
return parsed, size, dimensions
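# Typical call (the CSV is presumed to encode empty cells as 0):
# board, size, dimensions = load_from_csv(FILENAME)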
def create_mask(board: np.ndarray, dimensions: Tuple[int, int]) -> List[List[int]]:
""" Function to create Mask of possible valid values based on the initial sudoku Board. """
mask = list(board.tolist())
counts = Counter(board.flatten())
del counts[0]
counts = [number[0] for number in counts.most_common()]
most_common_clues = counts
for clue in range(dimensions[0], dimensions[1]):
if clue not in most_common_clues:
most_common_clues.append(clue)
for i, row in enumerate(mask):
if 0 in row:
while 0 in row:
zero_index = row.index(0)
mask[i][zero_index] = []
for number in most_common_clues:
if valid(board, number, (i, zero_index), box_size):
mask[i][zero_index].append(number)
else:
for number in row:
if number != 0:
mask[i][row.index(number)] = {number}
return mask
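# NOTE: `valid` and `box_size` are used above but are not defined in this
# snippet (`box_size` would be 3 for a 9x9 board and 4 for 16x16, presumably a
# module-level value). A minimal sketch of such a validity check, written here
# as an assumption rather than the original author's implementation:
def valid(board: np.ndarray, number: int, position: Tuple[int, int], box_size: int) -> bool:
    """Return True if `number` can be placed at `position` (row, col)."""
    row, col = position
    # Row and column checks.
    if number in board[row, :] or number in board[:, col]:
        return False
    # Box check.
    box_row = (row // box_size) * box_size
    box_col = (col // box_size) * box_size
    return number not in board[box_row:box_row + box_size, box_col:box_col + box_size]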
def update_mask(board: np.ndarray, mask: List[List[int]], box_size: int) -> List[List[int]]:
""" Function to update Mask of possible valid values. """
def is_list(item):
return bool(isinstance(item, list))
for y_pos, row in enumerate(mask):
for numbers in filter(is_list, row):
x_pos = row.index(numbers)
to_remove = set()
for number in numbers:
if not valid(board, number, (y_pos, x_pos), box_size):
to_remove.add(number)
for num in to_remove:
mask[y_pos][x_pos].remove(num)
return mask
def update_board(board: np.ndarray, mask: List[List[int]]) -> (np.ndarray, [List[int]]):
""" Function to update Board based on possible values Mask. """
def is_one_element_list(item):
return bool(isinstance(item, list) and len(item) == 1)
for y_pos, row in enumerate(mask):
for number in filter(is_one_element_list, row):
x_pos = row.index(number)
num = number.pop()
board[y_pos][x_pos] = num
return board, mask
def preprocess_board(board: np.ndarray, box_size: int) -> (np.ndarray, [List[int]]):
""" Board preprocessor to reduce necessary iterations during solving. """
mask = create_mask(board, dimensions)
temp_mask = None
passes = 0
while temp_mask != mask:
passes += 1
temp_mask = deepcopy(mask)
mask = update_mask(board, mask, box_size)
board, mask = update_board(board, mask)
if args.verbose:
print(f'Preprocess passes: {passes}')
return
|
np.array(board)
|
numpy.array
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import numpy.fft as fft
import scipy.signal as sig
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import interpolate
import csv
import datetime
#design output
#v=0 critical current v stuff
#time series for quiet squid
#time series for d
# In[2]:
import time, sys
from IPython.display import clear_output
def update_progress(progress):
bar_length = 20
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
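# Illustrative usage (arbitrary loop length); relies on IPython's clear_output,
# so it is intended for notebook execution:
# for k in range(50):
#     time.sleep(0.01)              # stand-in for real work
#     update_progress((k + 1) / 50)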
# In[3]:
def noisyRK4(s,th,tau,derivsRK,par,vn10,vn20,vn11,vn21,vn12,vn22):
"""RK4 integrator modified to use noise
DEPENDENCIES
derivsRK - RHS of ODE, fn defined somewhere
INPUTS
s - state vector
th - time, theta
tau - time step size
derivsRK - RHS of ODE, fn defined somewhere
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
OUTPUTS
sout - new state vector new time
[delta_1,delta_2,ddelta_1,ddelta_2,d^2delta_1,d^2delta_2]"""
# parse out parameter array
alpha = par[0]; beta = par[1]; eta = par[2]
rho = par[3]; i = par[4]; phia = par[5]; Gamma=par[6]
betaC=par[7]; kappa=par[8]
# noisySQUIDrk(s,th,alpha,beta,eta,rho,i,phia,vn1,vn2)
half_tau = 0.5*tau
F1 = derivsRK(s,th,par,vn10,vn20) # use current voltage noise
th_half = th + half_tau
stemp = s + half_tau*F1
F2 = derivsRK(stemp,th_half,par,vn11,vn21) # use half-tau step voltage noise
stemp = s + half_tau*F2
F3 = derivsRK(stemp,th_half,par,vn11,vn21) # use half-tau step voltage noise
th_full = th + tau
stemp = s + tau*F3
F4 = derivsRK(stemp,th_full,par,vn12,vn22) # use full-tau step voltage noise
sout = s + tau/6.*(F1 + F4 + 2.*(F2 + F3))
return sout
# In[4]:
def noisySQUIDrk(s,th,par,vn1,vn2):
"""Returns RHS of ODE
DEPENDENCIES
numpy as np
INPUTS
s - state vector [del1(theta), del2(theta)]
th - time, theta
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
alpha - critical current symmetry parameter (0 to 1)
beta - inductance constant
eta - inductance symmetry parameter (0 to 1)
rho - resistance symmetry parameter (0 to 1)
i - dimensionless bias current
phia - dimensionless applied flux
Gamma - Johnson noise parameter
betaC - capacitance constant
kappa - capacitance symmetry parameter
nv1,nv2 - noise values at each junction
OUTPUTS
deriv - array
[ddel1/dth, ddel2/dth, d^2del1/dth^2, d^2del2/dth^2]"""
# parse out parameter array
alpha = par[0]; beta = par[1]; eta = par[2]
rho = par[3]; i = par[4]; phia = par[5]; Gamma=par[6]
betaC=par[7]; kappa=par[8]
#del1 = s[0] # del_1(theta)
#del2 = s[1] # del_2(theta)
j = (s[0] - s[1] - 2*np.pi*phia)/(np.pi*betaL) - eta*i/2
dddel1 = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-rho)*s[2])/((1-kappa)*betaC)
dddel2 = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+rho)*s[3])/((1+kappa)*betaC)
ddel1 = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-kappa)*betaC*dddel1)/(1-rho) + vn1 # ddel1/dth
ddel2 = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+kappa)*betaC*dddel2)/(1+rho) + vn2 # ddel2/dth
deriv = np.array([ddel1,ddel2,dddel1,dddel2])
return(deriv)
# In[5]:
def noisySQUID(nStep,tau,s,par):
"""Handles RK4 solver, returns time series sim of SQUID
DEPENDENCIES
noisySQUIDrk - modified RK4 solver
numpy as np
INPUTS
nStep - number of steps
tau - time step size
s - initial state vector
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
alpha - critical current symmetry parameter (0 to 1)
beta - inductance constant
eta - inductance symmetry parameter (0 to 1)
rho - resistance symmetry parameter (0 to 1)
i - dimensionless bias current
phia - dimensionless applied flux
Gamma - Johnson noise parameter
betaC - capacitance constant
kappa - capacitance symmetry parameter
OUTPUTS
S - time series state vector
[theta,delta_1,delta_2,j,ddel1/dth,ddel2/dth,v]"""
#parse out the parameter vector
alpha=par[0]; betaL=par[1]; eta=par[2]; rho=par[3]
i=par[4]; phia=par[5]; Gamma=par[6]; betaC=par[7]
kappa=par[8]
# change state vector s to include all the derivs
    # little s denotes a 1-d vector of current values
# big S denotes the output array of all s, a 2-d array in time
## NOISE ##
# set an appropriate variance based on Gamma.
# variance is twice normal because freq of noise
# is twice that of the sampling freq so that rk4 has
# a noise value to use at each half tau step
var = 4*Gamma/tau
sd = var**.5
# make two time series of noise voltages
# lowercase designators are current values, uppercase are arrays in time
VN1 = np.zeros(2*nStep+1)
VN2 = np.zeros(2*nStep+1)
for ist in range(2*nStep+1):
VN1[ist] = np.random.normal(0,sd)
VN2[ist] = np.random.normal(0,sd)
# DATA STRUCTURE
# S = [theta,del1,del2,ddel1,ddel2,dddel1,dddel2,j,v]
S = np.zeros([8,nStep],float)
# set initial conditions
theta = 0.
S[0,0] = theta
S[1,0] = s[0] # del1
S[2,0] = s[1] # del2
j = (s[0] - s[1] - 2*np.pi*phia)/(np.pi*betaL) - eta*i/2
S[3,0] = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-kappa)*betaC*s[4])/(1-rho) # ddel1
S[4,0] = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+kappa)*betaC*s[5])/(1+rho) # ddel2
S[5,0] = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-rho)*s[2])/((1-kappa)*betaC) # dddel1
S[6,0] = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+rho)*s[3])/((1+kappa)*betaC) # dddel2
s = np.copy(S[1:5,0])
for iStep in range(1,nStep):
vn10 = VN1[2*iStep-2]
vn20 = VN2[2*iStep-2]
vn11 = VN1[2*iStep-1]
vn21 = VN2[2*iStep-1]
vn12 = VN1[2*iStep]
vn22 = VN2[2*iStep]
# noisyRK4(s,th,alpha,beta,eta,rho,i,phia,tau,derivsRK,vn10,vn20,vn11,vn21,vn12,vn22)
s = noisyRK4(s,theta,tau,noisySQUIDrk,par,vn10,vn20,vn11,vn21,vn12,vn22)
S[0,iStep] = theta # time theta
S[1,iStep] = s[0] # del1
S[2,iStep] = s[1] # del2
S[3,iStep] = s[2] # ddel1
S[4,iStep] = s[3] # ddel2
#S[5,iStep] = # dddel1
#S[6,iStep] = # dddel2
theta = theta + tau
# S[5,:] =
# S[6,:] =
S[6] = S[3]*(1+eta)/2 + S[4]*(1-eta)/2
return(S)
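# Illustrative call (parameter values are arbitrary, not taken from the text).
# Note that the initial-condition code above indexes s[0]..s[5], so the initial
# state vector needs at least six entries.
# par0 = [0.0, 1.0, 0.0, 0.0, 2.0, 0.25, 0.05, 0.5, 0.0]  # [alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
# S = noisySQUID(5000, 0.01, np.zeros(6), par0)
# v_avg = S[6, 500:].mean()  # mean voltage after discarding the initial transient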
# In[9]:
def vj_timeseries(nStep,tau,s,par):
"""Returns time series simulation of squid, figure and csv
DEPENDENCIES
qSQUID()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
OUTPUTS
figure - plots of
voltage time series w average
circulating current time series w average
output to screen
png 'timeseriesdatetime.png' saved to parent directory
csv - time series csv file containing
theta,delta_1,delta_2,j,ddel1/dth,ddel2/dth,v
csv 'timeseriesdatetime.csv' saved to parent directory
"""
# run sim
S = noisySQUID(nStep,tau,s,par)
# chop off first 10% of time series to remove any transient
md = int(.1*len(S[0,:]))
# build figure title with parameters used
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s'% (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)))+'\n'+ r'$\rho$=%s, $i$=%s, $\phi_a$=%s' % (str(round(par[3],3)),str(round(par[4],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
# plot
fig, ax = plt.subplots(2,1,figsize=(3,7))
fig.suptitle(ti)
ax1 = plt.subplot(2,1,1)
ax1.plot(S[0,md:],S[6,md:])
ax1.hlines((sum(S[6,md:])/len(S[6,md:])),S[0,md],S[0,-1],linestyle='dotted')
ax1.set(ylabel="Voltage, v",
xticklabels=([]))
ax2 = plt.subplot(2,1,2)
ax2.plot(S[0,md:],S[3,md:])
ax2.hlines((sum(S[3,md:])/len(S[3,md:])),S[0,md],S[0,-1],linestyle='dotted')
ax2.set(ylabel="Circ Current, j",
xlabel=r"Time,$\theta$")
# create output file metadata
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
meta2 = ['# nStep=%s'%nStep,'tau=%s'%tau]
header = ['theta','delta_1','delta_2','j','ddel1/dth','ddel2/dth','v']
csvtime = datetime.datetime.now()
timestr = [datetime.datetime.strftime(csvtime, '# %Y/%m/%d, %H:%M:%S')]
timeti = str(datetime.datetime.strftime(csvtime, '%Y%m%d%H%M%S'))
csvtitle='timeseries'+timeti+'.csv'
    pngtitle='timeseries'+timeti+'.png'
Sf = np.matrix.transpose(S)
# create, write, output(close) csv file
with open(csvtitle, 'w') as csvFile:
filewr = csv.writer(csvFile,delimiter=',')
filewr.writerow(timestr)
filewr.writerow(meta1)
filewr.writerow(meta2)
filewr.writerow(header)
filewr.writerows(Sf)
csvFile.close()
# save figure
fig.savefig(pngtitle)
print('csv file written out:', csvtitle)
print('png file written out:', pngtitle)
# In[11]:
def iv_curve(nStep,tau,s,par,alpha=0,betaL=0,eta=0,rho=0,phia=0,Gamma=0,betaC=0,kappa=0):
"""Returns contour plot and data file for IV curves
DEPENDENCIES
qSQUID()
update_progress()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha, beta_L, eta, rho, i, phia]
input parameter LIST - alpha, beta, eta, rho, phia
multiple values of input parameter as list
draws contour for each
if given, overwrites value in par
if not given, value from par is used for one contour
ONLY SUPPLY maximum of one input list here
OUTPUTS
plot - IV contours at levels given in input param array
output to screen
png 'IVdatetime.png' saved to parent directory
csv - IV contours at levels given
csv 'IVdatetime.png' saved to parent directory
"""
# create currents to sweep
i = np.arange(0.,6.,.1)
ch = 0 # check for only one parameter sweeped.
k = 1 # set 0 axis dim to 1 at min
    md = int(0.1*len(i)) # cut off the first 10 percent of points in the time series
# check if an array was given for an input parameter
# k - length of input parameter array (number of contours)
# parj - build a list of parameters to pass at each array value of that parameter
# la, lc - plot label and csv header lable
# lb - rename parameter array to add in plot and header later
# ti - plot title
# meta1 - csv metadata
# ch - check value, check for only one input parameter array, or none for one contour
if alpha != 0:
alpha = np.array(alpha)
k = len(alpha)
parj = np.zeros([k,9])
la = r'$\alpha$'; lc = 'alpha'
lb = np.copy(alpha)
        ti = r'$\beta_L$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
# add input array values to iteration parameters as appropriate
for j in range(k):
parj[j,:] = np.array([alpha[j],par[1],par[2],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if betaL != 0:
betaL = np.array(betaL)
k = len(betaL)
parj = np.zeros([k,9])
la = r'$\beta_L$'; lc = 'betaL'
lb = np.copy(betaL)
        ti = r'$\alpha$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],betaL[j],par[2],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if eta != 0:
eta = np.array(eta)
k = len(eta)
parj = np.zeros([k,9])
la = r'$\eta$'; lc = 'eta'
lb = np.copy(eta)
        ti = r'$\alpha$=%s, $\beta_L$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
        meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],eta[j],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if rho != 0:
rho = np.array(rho)
k = len(rho)
parj = np.zeros([k,9])
la = r'$\rho$'; lc = 'rho'
        lb = np.copy(rho)
        ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],rho[j],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if phia != 0:
phia = np.array(phia)
k = len(phia)
parj = np.zeros([k,9])
la = r'$\phi_a$'; lc = 'phi_a'
lb = np.copy(phia)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,phia[j],par[6],par[7],par[8]])
ch = ch + 1
if Gamma != 0:
Gamma = np.array(Gamma)
k = len(Gamma)
parj = np.zeros([k,9])
la = r'$\Gamma$'; lc = 'Gamma'
lb = np.copy(Gamma)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s, $\beta_C$=%s, $\kappa$=%s' % (par[0],par[1],par[2],par[3],par[5],par[7],par[8])
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],Gamma[j],par[7],par[8]])
ch = ch + 1
if betaC != 0:
betaC = np.array(betaC)
k = len(betaC)
parj = np.zeros([k,9])
la = r'$\beta_C$'; lc = 'betaC'
lb = np.copy(betaC)
        ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\phi_a$=%s, $\Gamma$=%s, $\kappa$=%s' %(str(round(par[5],3)),str(round(par[6],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],par[6],betaC[j],par[8]])
ch = ch + 1
if kappa != 0:
kappa = np.array(kappa)
k = len(kappa)
parj = np.zeros([k,9])
la = r'$\kappa$'; lc = 'kappa'
lb = np.copy(kappa)
        ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\phi_a$=%s, $\Gamma$=%s, $\beta_C$=%s' %(str(round(par[5],3)),str(round(par[6],3)),str(round(par[7],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],par[6],par[7],kappa[j]])
ch = ch + 1
# if check value is more than one, too many input parameter arrays given
if ch > 1:
return('Please supply at most one parameter to sweep')
# if check value zero, assume plotting only one contour
if ch == 0:
parj = np.zeros([2,9])
parj[0,:] = par
parj[1,:] = par
        ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+ '\n' + r'$\phi_a$=%s, $\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' % (str(round(par[5],3)),str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
# build sim output array of appropriate size
# needs as many rows as contours determined by input parameter array
if k > 1:
V = np.zeros([k,len(i)])
else:
V = np.zeros([2,len(i)])
# cp - check progress, total outputs in V
cp = k*len(i)
# loop over k rows and len(i) colums of V
# fill V with average voltage from time series for given params
# parjj - parameter array for this time series
# S - state array output from sim
for j in range(k):
parjj = parj[j,:]
for m in range(len(i)):
parjj[4] = i[m]
S = noisySQUID(nStep,tau,s,parjj)
V[j,m] = sum(S[6,md:])/len(S[6,md:])
# new progress bar current iter/total iters
update_progress((m + j*len(i))/cp)
# fill out progress bar
update_progress(1)
# build output for csv
# join i values and average Voltage matrix
Sf = np.concatenate((np.matrix(i),V),axis=0)
# flip independent axis, i, from horizontal to vertical
Sf = np.matrix.transpose(Sf)
# convert from matrix to array to ease csv output
Sf =
|
np.array(Sf)
|
numpy.array
|
import milk.unsupervised
import numpy as np
def test_nnmf():
def test3(method):
np.random.seed(8)
X3 = np.random.rand(20,3)
X = np.c_[ X3,
X3*2+np.random.rand(20,3)/20.,
-X3*2+
|
np.random.rand(20,3)
|
numpy.random.rand
|
import os
import sys
import logging
import math
import numpy as np
from threading import Thread
from multiprocessing import Process
from scipy.io.wavfile import read
from kaldi.matrix import Matrix, Vector, SubVector, DoubleVector
from kaldi.feat.mfcc import Mfcc, MfccOptions
from kaldi.feat.functions import compute_deltas, DeltaFeaturesOptions, sliding_window_cmn, SlidingWindowCmnOptions
from kaldi.ivector import compute_vad_energy, VadEnergyOptions, estimate_ivectors_online, \
IvectorExtractor, IvectorExtractorUtteranceStats, IvectorEstimationOptions
from kaldi.gmm import DiagGmm, FullGmm
from kaldi.hmm.posterior import total_posterior
from kaldi.util.io import xopen
LOG = logging.getLogger(__name__)
def kaldi_Matrix(mat):
_mat = Matrix(mat.num_rows, mat.num_cols)
_mat.add_mat_(1, mat)
return _mat
def make_feat_pipeline(base, sliding_opts,vad_opts, delta_opts=DeltaFeaturesOptions()):
def feat_pipeline(vec, freq):
feats = base.compute_features(vec, freq, 1.0)
voice = Vector(compute_vad_energy(vad_opts, feats)) # Use origin mfcc to computed
delta_feats = compute_deltas(delta_opts, feats)
sliding_feats = Matrix(delta_feats.num_rows, delta_feats.num_cols)
sliding_window_cmn(sliding_opts, delta_feats, sliding_feats)
if not voice.sum():
LOG.warning('No features were judged as voiced for utterance')
return False
dim = int(voice.sum())
voice_feats = Matrix(dim, delta_feats.num_cols)
feats = kaldi_Matrix(sliding_feats)
index = 0
for i, sub_vec in enumerate(feats):
if voice[i] != 0 and voice[i] == 1:
voice_feats.row(index).copy_row_from_mat_(feats, i)
index += 1
        LOG.debug('Feats extract succeeded')
return voice_feats
return feat_pipeline
mfcc_opts = MfccOptions()
mfcc_opts.frame_opts.samp_freq = 16000
mfcc_opts.frame_opts.allow_downsample = True
mfcc_opts.mel_opts.num_bins = 40
mfcc_opts.num_ceps = 20
mfcc_opts.use_energy = True
mfcc = Mfcc(mfcc_opts)
sliding_opts = SlidingWindowCmnOptions()
sliding_opts.cmn_window = 300
sliding_opts.normalize_variance = False
sliding_opts.center = True
vad_opts = VadEnergyOptions()
vad_opts.vad_energy_threshold = 5.5
vad_opts.vad_energy_mean_scale = 0.5
delta_opts = DeltaFeaturesOptions()
delta_opts.window = 3
delta_opts.order = 2
feat_pipeline = make_feat_pipeline(mfcc, sliding_opts,vad_opts, delta_opts)
try:
LOG.info('Loading ubm...')
if not os.path.exists('app/extractor/final.ubm'):
LOG.error('Not Found extractor/final.ubm, please recheck file')
exit(1)
with xopen('app/extractor/final.ubm') as ki:
fgmm = FullGmm()
fgmm.read(ki.stream(), ki.binary)
gmm = DiagGmm()
gmm.copy_from_full(fgmm)
if not os.path.exists('app/extractor/final.ie'):
LOG.error('Not Found app/extractor/final.ie, please recheck file')
exit(1)
with xopen('app/extractor/final.ie') as ki:
extractor_ = IvectorExtractor()
extractor_.read(ki.stream(), ki.binary)
LOG.info('IvectorExtractor ready')
except Exception:
    raise
LOG.info('Loading ubm model succeeded')
def make_gmm_pipeline(gmm, fgmm):
def gmm_pipeline(feats, utt, min_post = 0.025):
gselect = gmm.gaussian_selection_matrix(feats, 20)[1]
num_frames = feats.num_rows
utt_ok = True
post = [ [] for i in range(num_frames) ]
tot_loglike = 0
for i in range(num_frames):
frame = SubVector(feats.row(i))
this_gselect = gselect[i]
log_likes = Vector(fgmm.log_likelihoods_preselect(frame, this_gselect))
tot_loglike += log_likes.apply_softmax_()
if(abs(log_likes.sum()-1.0) > 0.01):
utt_ok = False
else:
if min_post != 0:
max_index = log_likes.max_index()[1]
for x in range(log_likes.dim):
if log_likes[x] < min_post:
log_likes[x] = 0.0
if sum(log_likes) == 0:
log_likes[max_index] = 1.0
else:
log_likes.scale_(1.0/sum(log_likes))
for x in range(log_likes.dim):
if log_likes[x] != 0:
post[i].append((this_gselect[x], log_likes[x]))
if not utt_ok:
LOG.warning("Skipping utterance because bad posterior-sum encountered (NaN?)")
return False
else:
LOG.debug('Like/frame for utt {} was {} perframe over {} frames.'.format(utt, tot_loglike/num_frames, num_frames))
return post
return gmm_pipeline
gmm_pipeline = make_gmm_pipeline(gmm, fgmm)
def scale_posterior(scale, post):
if scale == 1.0:
return post
for i in range(len(post)):
if scale == 0.0:
post[i].clear()
else:
            for x in range(len(post[i])):
                # entries are (gaussian-index, weight) tuples, so rebuild each one
                post[i][x] = (post[i][x][0], post[i][x][1] * scale)
return post
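# Illustrative example (made-up posterior entries): halve every posterior weight.
# post_demo = [[(3, 0.7), (9, 0.3)], [(1, 1.0)]]
# post_demo = scale_posterior(0.5, post_demo)  # -> [[(3, 0.35), (9, 0.15)], [(1, 0.5)]]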
tot_auxf_change = 0.0
tot_t = 0.0
need_2nd_order_stats = False
def make_ivector_pipeline(compute_objf_change = True, opts = IvectorEstimationOptions()):
def ivector_pipeline(wav, utt=None):
rate, vec = read(wav)
vec = Vector(vec)
feats = feat_pipeline(vec, rate)
try:
if utt is None:
utt = os.path.basename(wav).split('.')[0]
except Exception:
utt = 'None'
if not feats:
return False
post = gmm_pipeline(feats, utt)
if not post:
return False
global tot_auxf_change
global tot_t
auxf = tot_auxf_change if compute_objf_change else None
this_t = opts.acoustic_weight * total_posterior(post)
max_count_scale = 1.0
if (opts.max_count > 0 and this_t > opts.max_count):
max_count_scale = opts.max_count / this_t
LOG.info("Scaling stats for utterance {} by scale {} due to --max-count={}".format(utt,max_count_scale, opts.max_count))
this_t = opts.max_count
post = scale_posterior(opts.acoustic_weight * max_count_scale, post)
utt_stats = IvectorExtractorUtteranceStats.new_with_params(extractor_.num_gauss(), extractor_.feat_dim(), need_2nd_order_stats)
utt_stats.acc_stats(feats, post)
ivector_ = DoubleVector()
ivector_.resize_(extractor_.ivector_dim())
ivector_[0] = extractor_.prior_offset()
if auxf != None:
old_auxf = extractor_.get_auxf(utt_stats, ivector_)
extractor_.get_ivector_distribution(utt_stats, ivector_, None)
new_auxf = extractor_.get_auxf(utt_stats, ivector_)
auxf_change_ = new_auxf - old_auxf
else:
extractor_.get_ivector_distribution(utt_stats, ivector_, None)
if auxf != None:
T = total_posterior(post)
tot_auxf_change += auxf_change_
LOG.debug("Auxf change for utterance was {} per frame over {} frames (weighted)".format((auxf_change_/T), T))
ivector_[0] -= extractor_.prior_offset()
LOG.debug("Ivector norm for utterance {} was {}".format(utt ,ivector_.norm(2.0)))
tot_t += this_t
LOG.info("Ivector for utterance {} extract done".format(utt))
return ivector_.numpy()
return ivector_pipeline
ivector_pipeline = make_ivector_pipeline()
def thread_run(pair, num):
# pair is (speaker, filename)
_ivectors = []
_spks = []
for spk, filename in pair:
_ivectors.append(ivector_pipeline(filename))
_spks.append(spk)
np.save('ivector/data_{}'.format(num), _ivectors)
np.save('ivector/label_{}'.format(num), _spks)
if __name__ == '__main__':
thread_num = 4
pairs = []
for speaker in os.listdir('waves'):
for wav in os.listdir(os.path.join('waves', speaker)):
if os.path.splitext(wav)[1] == '.wav':
pairs.append((speaker ,os.path.join('waves', speaker, wav)))
processes = []
part = len(pairs) // thread_num
for x in range(1, thread_num+1):
if x == thread_num:
_process = Process(target=thread_run, args=(pairs[(x-1)*part:], x))
else:
_process = Process(target=thread_run, args=(pairs[(x-1)*part:x*part], x))
processes.append(_process)
_process.start()
for x in processes:
x.join()
print("all done")
ivectors =
|
np.array([])
|
numpy.array
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The OrthoSlicer class.
The main purpose of this class is to have auto adjust of axes size to
the data.
"""
import numpy as np
import pylab as pl
import matplotlib as mp
from matplotlib.transforms import Bbox
# Local imports
from .coord_tools import coord_transform, get_bounds, get_mask_bounds
################################################################################
# Bugware to have transparency work OK with MPL < .99.1
if mp.__version__ < '0.99.1':
# We wrap the lut as a callable and replace its evalution to put
# alpha to zero where the mask is true. This is what is done in
# MPL >= .99.1
from matplotlib import colors
class CMapProxy(colors.Colormap):
def __init__(self, lut):
self.__lut = lut
def __call__(self, arr, *args, **kwargs):
results = self.__lut(arr, *args, **kwargs)
if not isinstance(arr, np.ma.MaskedArray):
return results
else:
results[arr.mask, -1] = 0
return results
def __getattr__(self, attr):
# Dark magic: we are delegating any call to the lut instance
# we wrap
return self.__dict__.get(attr, getattr(self.__lut, attr))
################################################################################
# class OrthoSlicer
################################################################################
class OrthoSlicer(object):
""" A class to create 3 linked axes for plotting orthogonal
cuts of 3D maps.
Attributes
----------
    axes: dictionary of axes
The 3 axes used to plot each view.
frame_axes: axes
The axes framing the whole set of views.
Notes
-----
The extent of the different axes are adjusted to fit the data
best in the viewing area.
"""
def __init__(self, cut_coords, axes=None):
""" Create 3 linked axes for plotting orthogonal cuts.
Parameters
----------
cut_coords: 3 tuple of ints
The cut position, in world space.
axes: matplotlib axes object, optional
The axes that will be subdivided in 3.
"""
self._cut_coords = cut_coords
if axes is None:
axes = pl.axes((0., 0., 1., 1.))
axes.axis('off')
self.frame_axes = axes
axes.set_zorder(1)
bb = axes.get_position()
self.rect = (bb.x0, bb.y0, bb.x1, bb.y1)
self._object_bounds = dict()
# Create our axes:
self.axes = dict()
for index, name in enumerate(('x', 'y', 'z')):
ax = pl.axes([0.3*index, 0, .3, 1])
ax.axis('off')
self.axes[name] = ax
ax.set_axes_locator(self._locator)
self._object_bounds[ax] = list()
def _get_object_bounds(self, ax):
""" Return the bounds of the objects on one axes.
"""
xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds[ax]).T
xmax = max(xmaxs.max(), xmins.max())
xmin = min(xmins.min(), xmaxs.min())
ymax = max(ymaxs.max(), ymins.max())
ymin = min(ymins.min(), ymaxs.min())
return xmin, xmax, ymin, ymax
def _locator(self, axes, renderer):
""" The locator function used by matplotlib to position axes.
Here we put the logic used to adjust the size of the axes.
"""
x0, y0, x1, y1 = self.rect
width_dict = dict()
ax_dict = self.axes
x_ax = ax_dict['x']
y_ax = ax_dict['y']
z_ax = ax_dict['z']
for ax in ax_dict.itervalues():
xmin, xmax, ymin, ymax = self._get_object_bounds(ax)
width_dict[ax] = (xmax - xmin)
total_width = float(sum(width_dict.values()))
for ax, width in width_dict.iteritems():
width_dict[ax] = width/total_width*(x1 -x0)
left_dict = dict()
left_dict[x_ax] = x0
left_dict[y_ax] = x0 + width_dict[x_ax]
left_dict[z_ax] = x0 + width_dict[x_ax] + width_dict[y_ax]
return Bbox([[left_dict[axes], 0],
[left_dict[axes] + width_dict[axes], 1]])
def draw_cross(self, cut_coords=None, **kwargs):
""" Draw a crossbar on the plot to show where the cut is
performed.
Parameters
----------
cut_coords: 3-tuple of floats, optional
The position of the cross to draw. If none is passed, the
            ortho_slicer's cut coordinates are used.
kwargs:
Extra keyword arguments are passed to axhline
"""
if cut_coords is None:
cut_coords = self._cut_coords
x, y, z = cut_coords
ax = self.axes['x']
ax.axvline(x, ymin=.05, ymax=.95, **kwargs)
ax.axhline(z, **kwargs)
ax = self.axes['y']
xmin, xmax, ymin, ymax = self._get_object_bounds(ax)
ax.axvline(y, ymin=.05, ymax=.95, **kwargs)
ax.axhline(z, xmax=.95, **kwargs)
ax = self.axes['z']
ax.axvline(x, ymin=.05, ymax=.95, **kwargs)
ax.axhline(y, **kwargs)
def annotate(self, left_right=True, positions=True, size=12, **kwargs):
""" Add annotations to the plot.
Parameters
----------
left_right: boolean, optional
If left_right is True, annotations indicating which side
is left and which side is right are drawn.
positions: boolean, optional
If positions is True, annotations indicating the
positions of the cuts are drawn.
size: integer, optional
The size of the text used.
kwargs:
Extra keyword arguments are passed to matplotlib's text
function.
"""
if left_right:
ax_z = self.axes['z']
ax_z.text(.1, .95, 'L',
transform=ax_z.transAxes,
horizontalalignment='left',
verticalalignment='top',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec="1", fc="1", alpha=.9),
**kwargs)
ax_z.text(.9, .95, 'R',
transform=ax_z.transAxes,
horizontalalignment='right',
verticalalignment='top',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec="1", fc="1", alpha=.9),
**kwargs)
ax_x = self.axes['x']
ax_x.text(.1, .95, 'L',
transform=ax_x.transAxes,
horizontalalignment='left',
verticalalignment='top',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec="1", fc="1", alpha=.9),
**kwargs)
ax_x.text(.9, .95, 'R',
transform=ax_x.transAxes,
horizontalalignment='right',
verticalalignment='top',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec="1", fc="1", alpha=.9),
**kwargs)
        if positions:
            # ax_x / ax_z are only defined above when left_right is True, so
            # fetch them here to avoid a NameError
            ax_x = self.axes['x']
            ax_z = self.axes['z']
            x, y, z = self._cut_coords
            ax_x.text(0, 0, 'y=%i' % y,
transform=ax_x.transAxes,
horizontalalignment='left',
verticalalignment='bottom',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec="1", fc="1", alpha=.9),
**kwargs)
ax_y = self.axes['y']
ax_y.text(0, 0, 'x=%i' % x,
transform=ax_y.transAxes,
horizontalalignment='left',
verticalalignment='bottom',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec="1", fc="1", alpha=.9),
**kwargs)
ax_z.text(0, 0, 'z=%i' % z,
transform=ax_z.transAxes,
horizontalalignment='left',
verticalalignment='bottom',
size=size,
bbox=dict(boxstyle="square,pad=0",
ec="1", fc="1", alpha=.9),
**kwargs)
def title(self, text, x=0.01, y=0.99, size=15, color='w',
bgcolor='k', alpha=.9, **kwargs):
""" Write a title to the view.
Parameters
----------
text: string
The text of the title
x: float, optional
The horizontal position of the title on the frame in
fraction of the frame width.
y: float, optional
The vertical position of the title on the frame in
fraction of the frame height.
size: integer, optional
The size of the title text.
color: matplotlib color specifier, optional
The color of the font of the title.
bgcolor: matplotlib color specifier, optional
The color of the background of the title.
alpha: float, optional
The alpha value for the background.
kwargs:
Extra keyword arguments are passed to matplotlib's text
function.
"""
self.frame_axes.text(x, y, text,
transform=self.frame_axes.transAxes,
horizontalalignment='left',
verticalalignment='top',
size=size, color=color,
bbox=dict(boxstyle="square,pad=.3",
ec=bgcolor, fc=bgcolor, alpha=alpha),
**kwargs)
def plot_map(self, map, affine, **kwargs):
""" Plot a 3D map in all the views.
Parameters
-----------
map: 3D ndarray
The 3D map to be plotted. If it is a masked array, only
the non-masked part will be plotted.
affine: 4x4 ndarray
The affine matrix giving the transformation from voxel
indices to world space.
kwargs:
Extra keyword arguments are passed to imshow.
"""
if mp.__version__ < '0.99.1':
cmap = kwargs.get('cmap',
pl.cm.cmap_d[pl.rcParams['image.cmap']])
kwargs['cmap'] = CMapProxy(cmap)
x, y, z = self._cut_coords
x_map, y_map, z_map = [int(round(c)) for c in
coord_transform(x, y, z, np.linalg.inv(affine))]
(xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(map.shape, affine)
xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \
xmin, xmax, ymin, ymax, zmin, zmax
if hasattr(map, 'mask'):
not_mask = np.logical_not(map.mask)
xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \
get_mask_bounds(not_mask, affine)
if kwargs.get('vmin') is None and kwargs.get('vmax') is None:
# Avoid dealing with masked arrays: they are slow
masked_map = np.asarray(map)[not_mask]
if kwargs.get('vmin') is None:
kwargs['vmin'] = masked_map.min()
                if kwargs.get('vmax') is None:
kwargs['vmax'] = masked_map.max()
else:
if not 'vmin' in kwargs:
kwargs['vmin'] = map.min()
if not 'vmax' in kwargs:
kwargs['vmax'] = map.max()
ax = self.axes['x']
ax.imshow(np.rot90(map[:, y_map, :]),
extent=(xmin, xmax, zmin, zmax),
**kwargs)
self._object_bounds[ax].append((xmin_, xmax_, zmin_, zmax_))
ax.axis(self._get_object_bounds(ax))
ax = self.axes['y']
ax.imshow(np.rot90(map[x_map, :, :]),
extent=(ymin, ymax, zmin, zmax),
**kwargs)
self._object_bounds[ax].append((ymin_, ymax_, zmin_, zmax_))
ax.axis(self._get_object_bounds(ax))
ax = self.axes['z']
ax.imshow(
|
np.rot90(map[:, :, z_map])
|
numpy.rot90
|
#!/usr/bin/env python3
import glob
import os
import sys
import random
import time
import sys
import numpy as np
import cv2
import math
from collections import deque
import tensorflow as tf
# from keras.applications.xception import Xception
from keras.layers import Dense, GlobalAveragePooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from keras.models import Model
from keras.callbacks import TensorBoard
import tensorflow.keras.backend as backend
from threading import Thread
from tqdm import tqdm
import matplotlib.pyplot as plt
"Starting script for any carla programming"
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
IM_WIDTH = 640
IM_HEIGHT = 480
SECONDS_PER_EPISODE = 20
REPLAY_MEMORY_SIZE = 5_000
MIN_REPLAY_MEMORY_SIZE = 1_000
MINIBATCH_SIZE = 16
PREDICTION_BATCH_SIZE = 1
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
UPDATE_TARGET_EVERY = 5 #used to be 10
MODEL_NAME = "Xception"
MEMORY_FRACTION = 0.8
MIN_REWARD = -200
EPISODES = 1000
DISCOUNT = 0.99
epsilon = 1
EPSILON_DECAY = 0.995 ## 0.9975 99975
MIN_EPSILON = 0.001
AGGREGATE_STATS_EVERY = 5 ## checking per 5 episodes
SHOW_PREVIEW = True ## for debugging purpose
class CarEnv:
SHOW_CAM = SHOW_PREVIEW
STEER_AMT = 1.0 ## full turn for every single time
im_width = IM_WIDTH
im_height = IM_HEIGHT
front_camera = None
def __init__(self):
self.client = carla.Client('127.0.0.1', 2000)
self.client.set_timeout(2.0)
# self.actor = carla.Actor
self.world = self.client.load_world('Town04')
self.map = self.world.get_map() ## added for map creating
self.blueprint_library = self.world.get_blueprint_library()
# weather = carla.WeatherParameters(
# cloudyness=10.0,
# precipitation=10.0,
# sun_altitude_angle=90.0)
# self.world.set_weather(weather)
self.model_3 = self.blueprint_library.filter("model3")[0] ## grab tesla model3 from library
def reset(self):
self.collision_hist = []
self.actor_list = []
self.waypoints = self.client.get_world().get_map().generate_waypoints(distance=3.0)
        self.filtered_waypoints = [] ## changed
i = 0
for self.waypoint in self.waypoints:
if(self.waypoint.road_id == 10):
self.filtered_waypoints.append(self.waypoint)
for i in range(len(self.filtered_waypoints)):
self.world.debug.draw_string(self.filtered_waypoints[i].transform.location, 'O', draw_shadow=False,
color=carla.Color(r=0, g=255, b=0), life_time=40,
persistent_lines=True)
i = i+1
self.spawn_point = self.filtered_waypoints[1].transform
self.spawn_point.location.z += 2
self.vehicle = self.world.spawn_actor(self.model_3, self.spawn_point) ## changed for adding waypoints
# self.spawn_points = self.map.get_spawn_points()
# self.vehicle = self.world.spawn_actor(self.model_3, self.spawn_points) ## changed for adding waypoints
# self.waypoint = self.map.get_waypoint(self.vehicle.get_location())
# self.vehicle.set_simulate_physics(False)
# self.world.debug.draw_string(self.waypoint.transform.location, 'O', draw_shadow=False,
# color=carla.Color(r=0, g=255, b=0), life_time=40,
# persistent_lines=True)
# while True:
# # Find next waypoint 2 meters ahead.
# self.waypoint = random.choice(self.waypoint.next(20.0))
# # Teleport the vehicle.
# self.vehicle.set_transform(self.waypoint.transform)
# self.transform = random.choice(self.world.get_map().get_spawn_points())
# self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
self.actor_list.append(self.vehicle)
self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}")
self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}")
self.rgb_cam.set_attribute("fov", f"110") ## fov, field of view
transform = carla.Transform(carla.Location(x=2.5, z=0.7))
self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)
self.actor_list.append(self.sensor)
self.sensor.listen(lambda data: self.process_img(data))
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0)) # initially passing some commands seems to help with time. Not sure why.
time.sleep(4) # sleep to get things started and to not detect a collision when the car spawns/falls from sky.
colsensor = self.world.get_blueprint_library().find('sensor.other.collision')
self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle)
self.actor_list.append(self.colsensor)
self.colsensor.listen(lambda event: self.collision_data(event))
while self.front_camera is None: ## return the observation
time.sleep(0.01)
self.episode_start = time.time()
self.vehicle.apply_control(carla.VehicleControl(brake=0.0, throttle=0.0))
return self.front_camera
def collision_data(self, event):
self.collision_hist.append(event)
def process_img(self, image):
i = np.array(image.raw_data)
#np.save("iout.npy", i)
i2 = i.reshape((self.im_height, self.im_width, 4))
i3 = i2[:, :, :3]
if self.SHOW_CAM:
cv2.imshow("",i3)
cv2.waitKey(1)
self.front_camera = i3 ## remember to scale this down between 0 and 1 for CNN input purpose
def step(self, action):
'''
For now let's just pass steer left, straight, right
0, 1, 2
'''
if action == 0:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0.0 ))
if action == 1:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1.0*self.STEER_AMT))
if action == 2:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1.0*self.STEER_AMT))
v = self.vehicle.get_velocity()
kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
# if len(self.collision_hist) != 0:
# done = True
# reward = -200
# elif kmh < 50:
# done = False
# reward = -1
# elif carla.Location.distance(self, self.waypoint) == 0:
# done = False
# reward = 150
# else:
# done = False
# reward = 10
# if self.episode_start + SECONDS_PER_EPISODE < time.time(): ## when to stop
# done = True
# return self.front_camera, reward, done, None
i = 2
for i in range(2, len(self.filtered_waypoints)):
if len(self.collision_hist) != 0:
done = True
reward = -300
elif kmh < 30:
done = False
reward = -5
elif carla.Location.distance(carla.Actor.get_location(self.actor_list[0]), self.filtered_waypoints[i].transform.location) == 0:
done = False
reward = 25
else:
done = False
reward = 30
i = i + 1
if self.episode_start + SECONDS_PER_EPISODE < time.time(): ## when to stop
done = True
return self.front_camera, reward, done, None
class DQNAgent:
def __init__(self):
        ## replay_memory remembers the most recent REPLAY_MEMORY_SIZE transitions; the model is then fit on random minibatches sampled from this memory
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE) ## batch step
self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/{MODEL_NAME}-{int(time.time())}")
self.target_update_counter = 0 # will track when it's time to update the target model
self.model = self.create_model()
## target model (this is what we .predict against every step)
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
self.terminate = False # Should we quit?
self.last_logged_episode = 0
self.training_initialized = False # waiting for TF to get rolling
def create_model(self):
## input: RGB data, should be normalized when coming into CNN
# base_model = tf.keras.applications.Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3))
# x = base_model.output
# x = GlobalAveragePooling2D()(x)
# x = Flatten()(x)
# predictions = Dense(3, activation="linear")(x) ## output layer include three nuros, representing three actions
# model = Model(inputs=base_model.input, outputs=predictions)
# model.compile(loss="mse", optimizer="Adam", metrics=["accuracy"]) ## changed
# return model
base_model = tf.keras.applications.ResNet50(weights='imagenet', include_top=False, input_shape=(480, 640, 3))
base_model.trainable = False
# Additional Linear Layers
inputs = tf.keras.Input(shape=(480, 640, 3))
x = base_model(inputs, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(units=40, activation='relu')(x)
output = tf.keras.layers.Dense(units=3, activation='linear')(x)
# Compile the Model
model = tf.keras.Model(inputs, output)
model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
        print(model.summary())
return model
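    ## The resulting network maps a single (480, 640, 3) RGB frame to 3 linear
    ## outputs, i.e. one Q-value per discrete steering action.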
## function handler
# Adds step's data to a memory replay array
# (observation space, action, reward, new observation space, done)= (current_state, action, reward, new_state, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
def train(self):
## starting training only if certain number of samples is already saved
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
## if we do have the proper amount of data to train, we need to randomly select the data we want to train off from our memory
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
## get current states from minibatch and then get Q values from NN model
## transition is being defined by this: transition = (current_state, action, reward, new_state, done)
current_states = np.array([transition[0] for transition in minibatch])/255
## This is the crazyly changed model:
current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE) ## changed
## This is normal model
new_current_states = np.array([transition[3] for transition in minibatch])/255
future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)
## image data(normalized RGB data): input
X = []
## action we take(Q values): output
y = []
## calculate Q values for the next step based on Qnew equation
## index = step
for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
current_qs = current_qs_list[index]
current_qs[action] = new_q ## Q for the action that we took is now equal to the new Q value
X.append(current_state) ## image we have
y.append(current_qs) ## Q value we have
## only trying to log per episode, not actual training step, so we're going to use the below to keep track
log_this_step = False
if self.tensorboard.step > self.last_logged_episode:
log_this_step = True
            self.last_logged_episode = self.tensorboard.step
## fit our model
## setting the tensorboard callback, only if log_this_step is true. If it's false, then we'll still fit, we just wont log to TensorBoard.
self.model.fit(np.array(X)/255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)
## updating to determine if we want to update target_model
if log_this_step:
self.target_update_counter += 1
# If counter reaches set value, update target network with weights of main network
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
def get_qs(self, state):
q_out = self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
return q_out
    ## first train on some nonsense data: we just need a quick initial fit because the first training and prediction calls are slow
def train_in_loop(self):
X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)
y =
|
np.random.uniform(size=(1, 3))
|
numpy.random.uniform
|
from gym import Env
from gym import spaces
from gym.utils import seeding
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from datetime import datetime
import json
from pathlib import Path, PurePath
import config
class valueTradingEnv(Env):
"""
Inherits from gym.Env. It is a stock trading Environment
params:
- df (pandas DataFrame): DF with 'date' and 'symbol' set as multilevel index and at least opening-, closing-prices and 0/1 column if stock is tradeable (1 is tradable) as first, second and third column. IMPORTANT: Open, Close and tradeable always has to be as first, second and third columns respectively for taking actions and calculating reward.
- sample (bool): Should the env sample the trading period (episode) out of df in length of yearrange.
- yearrange (int): How many years will one episode take? DF has to be at least this range.
    - cagr (bool): Should reward be calculated as CAGR?
- episodic (bool): Will the Agent get sparse rewards (only on end of episode).
...
returns:
A gym.Env object.
"""
metadata = {'render.modes': "human"}
def __init__(self, df: pd.DataFrame, sample: bool=False, episodic: bool=False,
save_path: Path=None, yearrange: int=4, cagr: bool=False, seeding: bool=None):
# self variables
self.df = df
self.sample = sample
self.seeding = seeding
self.episodic = episodic
self.save_path = save_path
self.yearrange = yearrange
self.cagr = cagr
self.df_dt_filter = self.df.index.get_level_values(level="date")
self.indicators = self.df.columns.tolist()
self.init_time = datetime.now().strftime('%H-%M-%S-%f')
self.num_symbols = len(self.df.index.get_level_values(level="symbol").unique().tolist())
self.num_eps = 0
self.fee = config.TRADE_FEE_PRCT
self.scaling = config.ACTION_SCALING
self.init_cash = config.INIT_CASH
# Vars not yet set
self.cost = None
self.data = None
self.data_dt_filter = None
self.data_dt_unique = None
self.date = None
self.date_idx = None
self.done = None
self.end_date = None
self.episode_totalValues = None
self.info = None
self.new_state = None
self.state = None
# If we want to sample during training, we have to seed the RNG
if self.sample:
assert not isinstance(self.seeding, type(None)), "If you want to sample, you have to set a seeding."
self.seed(seed=self.seeding)
# Spaces
self.action_space = spaces.Box(low = -1, high = 1,shape = (self.num_symbols,))
obs_shape = 1 + self.num_symbols + (len(self.indicators) * self.num_symbols)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (obs_shape,))
# Create env_info save path
if not isinstance(self.save_path, type(None)):
assert isinstance(self.save_path, PurePath), "save_path is no pathlib.Path object."
self.save_path.mkdir(parents=True, exist_ok=True)
def _sell_stock(self, num, index):
# Are we selling less or equal num of stocks we have?
if self.new_state[1+index] >= num:
# get open price of stock to calculate amount
price = self.new_state[1+self.num_symbols+index]
amount = price * num
# calculate cost
self.cost[index] = amount * self.fee
# put the stock from portfolio
self.new_state[1+index] -= num
# recalculate the cash
self.new_state[0] += (amount - self.cost[index])
else:
pass
def _buy_stock(self, num, index):
# get open price of stock
price = self.new_state[1+self.num_symbols+index]
amount = price * num
# calculate cost
self.cost[index] = amount * self.fee
# Check if we have enough cash
if self.new_state[0] >= amount+self.cost[index]:
# call the stock into portfolio
self.new_state[1+index] += num
# update the cash
self.new_state[0] -= (amount + self.cost[index])
else:
pass
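    # Worked example (illustrative numbers): buying num=10 shares at price=20.0
    # with fee=0.001 gives amount=200.0 and cost=0.2, so the order only fills if
    # cash >= 200.2, after which cash is reduced by 200.2.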
def _get_time_range(self):
# get all unique dates in df
dates = self.df_dt_filter.unique()
if self.sample:
# set max end date to 4 years befor max date
sample_end = dates.max() - relativedelta(years=self.yearrange)
sample_begin = dates.min()
# throw away all dates out of begin and end
dates = dates[dates.slice_indexer(sample_begin, sample_end)].tolist()
# sample start date randomly out of possible dates
start_date = self.np_random.choice(dates)
# set end date num-yrs minus 1day relative to start date
end_date = start_date + relativedelta(years=self.yearrange,days=-1)
else: # If we are not in train environment
# Set start date and end date to min and max of df respectively
start_date = dates.min()
end_date = dates.max()
return (start_date, end_date)
def step(self, action):
"""
Run one timestep of the environment's dynamics.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (bool): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
self.cost = [0]*self.num_symbols
# Skale action by item
action = np.array([int(item*self.scaling) for item in action])
# real_action = action.copy() # save for info
# count date one day ahead
self.date_idx += 1
self.date = self.data_dt_unique[self.date_idx]
# Check done conditions
# Is date equal to end_date?
if self.date == self.end_date:
self.done = True
# set up new state based on current state
# We manipulate the portfolios cash and number of shares in new_state when buying and selling
# we need the old portfolio balance to calculate the reward
self.new_state = [self.state[0]] + \
self.state[1:(1+self.num_symbols)] + \
[item for indicator in self.indicators for item in self.data[self.data_dt_filter == self.date][indicator].values.tolist()]
# When tradeable is 0 Set action of stock to 0
# Because it is not tradable at this state
for idx in range(len(action)):
tradeable = self.new_state[1+self.num_symbols*3+idx]
if tradeable == 0:
action[idx] = 0
# return sorted actions indices in sorted order (lowest first)
argsort_actions = np.argsort(action)
# get indices of sell actions
sell_indices = argsort_actions[:np.where(action < 0)[0].shape[0]]
# get indices of buy actions
buy_indices = argsort_actions[::-1][:np.where(action > 0)[0].shape[0]]
# perform each sell action
for idx in sell_indices:
self._sell_stock(int(action[idx]*-1), idx)
# perform each buy action
for idx in buy_indices:
self._buy_stock(int(action[idx]), idx)
# Is the cash lower than x% of init_cash?
# If we set this to 0.0 we allow to don't hold cash anyway
if self.new_state[0] < self.init_cash*0:
self.done = True
### calculate reward
# We calculate the total value from the closing price
self.episode_totalValues[self.date_idx] = self.new_state[0] + \
sum(
|
np.array(self.new_state[1:(1+self.num_symbols)])
|
numpy.array
|
"""
Various helping functions. Plotting, saving/loading results, creating synt
cubes, image processing.
"""
from __future__ import division
from __future__ import print_function
__all__ = ['plot_traindata',
'save_res',
'load_res',
'load_model',
'svd_decomp',
'get_cumexpvar',
'frame_shift',
'inspect_patch_multik',
'normalize_01',
'get_indices_annulus',
'create_synt_cube',
'create_feature_matrix']
import torch
import numpy as np
from keras import models
from skimage.draw import circle
from vip_hci.preproc import cube_derotate, cube_crop_frames
from vip_hci.preproc import frame_shift, frame_crop
from vip_hci.phot import noise_per_annulus, cube_inject_companions
from vip_hci.var import pp_subplots as plots
from vip_hci.var import reshape_matrix, prepare_matrix, frame_center
from vip_hci.pca import pca, svd_wrapper, randomized_svd_gpu
from matplotlib.pyplot import (figure, show, subplot, title)
import cv2
import pickle
from matplotlib.pyplot import hist as histogram
import cupy
def normalize_01(array, mode='slice'):
"""
"""
n1, n2, n3, n4 = array.shape
array = array.copy()
if mode == 'slice':
array_reshaped = array.reshape(n1 * n2, n3 * n4)
elif mode == 'sample':
array_reshaped = array.reshape(n1, n2 * n3 * n4)
else:
raise RuntimeError('Normalization mode not recognized')
minvec = np.abs(np.min(array_reshaped, axis=1))
array_reshaped += minvec[:, np.newaxis]
maxvec = np.max(array_reshaped, axis=1)
array_reshaped /= maxvec[:, np.newaxis]
return array_reshaped.reshape(n1,n2,n3,n4)
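# Illustrative usage (arbitrary shape): rescale every (n3, n4) slice of a 4-d
# cube using the function's per-slice min/max convention.
# patches = np.random.rand(8, 4, 16, 16)
# patches01 = normalize_01(patches, mode='slice')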
def plot_traindata(T, zeroind=None, oneind=None, full_info=False,
plot_pair=True, dpi=100, indices=None, save_plot=False):
"""
"""
xarr = T.x
yarr = T.y
    if 'xnor' in T:
        xarrn = T.xnor
    else:
        xarrn = None
if zeroind is None:
zeroind = np.random.randint(0,xarr.shape[0]/2.)
if oneind is None:
oneind = np.random.randint(xarr.shape[0]/2.,xarr.shape[0])
if full_info:
msg1 = 'N samples : {} | Runtime : {}'
print(msg1.format(T.nsamp, T.runtime))
msg2 = 'FWHM : {} | PLSC : {} | K list : {}'
print(msg2.format(T.fwhm, T.plsc, T.klist))
msg3 = 'In Rad : {} | Out Rad : {} | Patch size : {}'
print(msg3.format(T.inrad, T.outrad, T.sizepatch))
msg4 = 'Collapse func : {} | Scaling : {}'
print(msg4.format(T.collaf.__name__, T.scaling))
msg5 = 'N patches : {} | Perc orig zeros : {}'
print(msg5.format(T.npatches, T.perorigzeros))
msg6 = 'Flux distro : {} | Par1 : {} | Par2 : {}'
print(msg6.format(T.fluxdistro, T.fluxdistrop1, T.fluxdistrop2))
msg7 = 'N injections : {} | Perc aug ones : {}'
print(msg7.format(T.nsamp*0.5*T.peraugones, T.peraugones))
msg8 = 'Aug shifts : {} | Aug range rotat : {}'
print(msg8.format(T.shifts, T.rangerot))
figure(figsize=(12,2))
subplot(1, 3, 1)
_ = histogram(T.fluxes, bins=int(np.sqrt(T.fluxes.shape[0])))
title('Fluxes histogram')
subplot(1, 3, 2)
_ = histogram(np.array(T.dists).flatten(),
bins=int(np.sqrt(T.fluxes.shape[0])))
title('Distances histogram')
subplot(1, 3, 3)
_ = histogram(np.array(T.thetas).flatten(),
bins=int(np.sqrt(T.fluxes.shape[0])))
title('Thetas histogram')
show()
print()
npatches = xarr[zeroind].shape[0]
if plot_pair or save_plot:
if indices is not None:
zerarr = xarr[zeroind][indices]
onearr = xarr[oneind][indices]
if xarrn is not None: zerarrn = xarrn[zeroind][indices]
if xarrn is not None: onearrn = xarrn[oneind][indices]
else:
zerarr = xarr[zeroind]
onearr = xarr[oneind]
if xarrn is not None: zerarrn = xarrn[zeroind]
if xarrn is not None: onearrn = xarrn[oneind]
if save_plot:
print('{} | Sample {}'.format(int(yarr[zeroind]), zeroind))
plots(zerarr, dpi=dpi, axis=False, vmin=xarr[zeroind].min(),
vmax=xarr[zeroind].max(), save='patch_zero.pdf', colorb=False,
maxplots=npatches, horsp=0.1)
if xarrn is not None:
plots(zerarrn, axis=False, dpi=dpi, colorb=False,
save='patch_zero_nor.pdf', maxplots=npatches, horsp=0.1)
print(int(yarr[oneind]),'| Sample', oneind)
plots(onearr, axis=False, vmin=xarr[oneind].min(),
vmax=xarr[oneind].max(), dpi=dpi, save='patch_one.pdf',
colorb=False, maxplots=npatches, horsp=0.1)
if xarrn is not None:
                plots(onearrn, axis=False, dpi=dpi, horsp=0.1,
                      save='patch_one_nor.pdf', colorb=False, maxplots=npatches)
else:
plots(zerarr, title='Unnormalized ZERO multiK patch', dpi=dpi,
axis=False, vmin=xarr[zeroind].min(), vmax=xarr[zeroind].max(),
maxplots=npatches, horsp=0.1)
if xarrn is not None:
plots(zerarrn, title='Normalized ZERO multiK patch',
axis=False, dpi=dpi, maxplots=npatches, horsp=0.1)
plots(onearr, title='Unnormalized ONE multiK patch', axis=False,
vmin=xarr[oneind].min(), vmax=xarr[oneind].max(), dpi=dpi,
maxplots=npatches, horsp=0.1)
if xarrn is not None:
plots(onearrn, title='Normalized ONE multiK patch',
axis=False, dpi=dpi, maxplots=npatches, horsp=0.1)
def save_res(filename, data):
    with open(filename, "wb") as f:
        pickle.dump(data, f)
def load_res(filename):
    with open(filename, "rb") as f:
        return pickle.load(f, encoding='latin1')
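# Usage sketch for the pickle helpers (file name and payload are made up):
#     save_res('results.pkl', {'accuracy': 0.93})
#     results = load_res('results.pkl')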
def load_model(filename):
return models.load_model(filename)
def svd_decomp(cube, angle_list, size_patch, inrad, outrad, sca, k_list,
collapse_func, neg_ang=True, lr_mode='eigen', nproc=1,
interp='nearneig', verbose=False):
"""
"""
frsize = int(cube.shape[1])
n_frames = cube.shape[0]
if n_frames>1000:
ind_for_svd = range(0,n_frames,2)
else:
ind_for_svd = range(0,n_frames)
ann_width = outrad-inrad
cent_ann = inrad + int(np.round(ann_width/2.))
ann_width += size_patch+2
matrix, annind = prepare_matrix(cube, sca, None, mode='annular',
annulus_radius=cent_ann,
annulus_width=ann_width, verbose=False)
matrix_svd, _ = prepare_matrix(cube[ind_for_svd], sca, None, mode='annular',
annulus_radius=cent_ann,
annulus_width=ann_width, verbose=False)
V = svd_wrapper(matrix_svd, lr_mode, k_list[-1], False, False, to_numpy=False)
if verbose:
print("SVD done")
cube_residuals = []
if neg_ang:
cube_residuals2 = []
for k in k_list:
if lr_mode in ['cupy', 'randcupy', 'eigencupy']:
matrix = cupy.array(matrix)
transformed = cupy.dot(V[:k], matrix.T)
reconstructed = cupy.dot(transformed.T, V[:k])
residuals_ann = matrix - reconstructed
residuals_ann = cupy.asnumpy(residuals_ann)
        elif lr_mode in ['pytorch', 'randpytorch', 'eigenpytorch']:
            matrix = matrix.astype('float32')
            matrix_gpu = torch.Tensor.cuda(torch.from_numpy(matrix))
            transformed = torch.mm(V[:k], torch.transpose(matrix_gpu, 0, 1))
            reconstructed = torch.mm(torch.transpose(transformed, 0, 1), V[:k])
            residuals_ann = matrix_gpu - reconstructed
            # move the residuals back to the CPU as a numpy array (as in the cupy branch)
            residuals_ann = residuals_ann.cpu().numpy()
else:
transformed = np.dot(V[:k], matrix.T)
reconstructed = np.dot(transformed.T, V[:k])
residuals_ann = matrix - reconstructed
# This is a bottleneck when nframes grows. The cube derotation is not
# very efficient in parallel.
residual_frames = np.zeros_like(cube)
residual_frames[:,annind[0],annind[1]] = residuals_ann
residual_frames_rot = cube_derotate(residual_frames, angle_list,
nproc=nproc, interpolation=interp)
cube_residuals.append(collapse_func(residual_frames_rot, axis=0))
if neg_ang:
residual_frames_rot_neg = cube_derotate(residual_frames,
-angle_list, nproc=nproc,
interpolation=interp)
cube_residuals2.append(collapse_func(residual_frames_rot_neg, axis=0))
if neg_ang:
return cube_residuals, cube_residuals2
else:
return cube_residuals
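# Usage sketch for svd_decomp (cube, angle_list and all parameter values are placeholders):
#     res_frames = svd_decomp(cube, angle_list, size_patch=11, inrad=10, outrad=14,
#                             sca='temp-standard', k_list=[1, 2, 5, 10],
#                             collapse_func=np.mean, neg_ang=False)
#     # res_frames is a list with one collapsed residual frame per k in k_list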
def get_cumexpvar(cube, expvar_mode, inrad, outrad, size_patch, k_list=None,
verbose=True):
"""
"""
n_frames = cube.shape[0]
if n_frames>1000:
ind_for_svd = range(0,n_frames,2)
else:
ind_for_svd = range(0,n_frames)
ann_width = outrad-inrad
cent_ann = inrad + int(np.round(ann_width/2.))
ann_width += size_patch+2
if expvar_mode=='annular':
matrix_svd, _ = prepare_matrix(cube[ind_for_svd], 'temp-mean', None,
mode=expvar_mode, annulus_radius=cent_ann,
annulus_width=ann_width, verbose=False)
U, S, V = svd_wrapper(matrix_svd, 'lapack',
min(matrix_svd.shape[0], matrix_svd.shape[1]),
False, False, True)
elif expvar_mode=='fullfr':
matrix_svd = prepare_matrix(cube[ind_for_svd], 'temp-mean', None,
mode=expvar_mode, verbose=False)
U,S,V = svd_wrapper(matrix_svd, 'lapack', n_frames, False, False, True)
exp_var = (S ** 2) / (S.shape[0] - 1)
full_var = np.sum(exp_var)
explained_variance_ratio = exp_var / full_var # % of variance explained by each PC
ratio_cumsum = np.cumsum(explained_variance_ratio)
if k_list is not None:
ratio_cumsum_klist = []
for k in k_list:
ratio_cumsum_klist.append(ratio_cumsum[k-1])
if verbose:
print("SVD on input matrix (annulus from cube)")
print(" Number of PCs :\t")
print(" ",k_list)
print(" Cum. explained variance ratios :\t")
print(" ",str(["{0:0.2f}".format(i) for i in ratio_cumsum_klist]).replace("'", ""), "\n")
else:
ratio_cumsum_klist = ratio_cumsum
return ratio_cumsum, ratio_cumsum_klist
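# The explained-variance bookkeeping above, as a standalone numpy sketch
# (S is a placeholder vector of singular values):
#     exp_var = (S ** 2) / (S.shape[0] - 1)
#     ratio_cumsum = np.cumsum(exp_var / np.sum(exp_var))
#     # ratio_cumsum[k - 1] is the fraction of variance captured by the first k PCs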
def frame_shift(array, shift_y, shift_x, interpolation='bicubic'):
""" Shifts an 2d array by shift_y, shift_x.
"""
if not array.ndim == 2:
raise TypeError ('Input array is not a frame or 2d array')
image = array.copy()
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp= cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
else:
raise TypeError('Interpolation method not recognized.')
image = np.float32(image)
y, x = image.shape
M = np.float32([[1,0,shift_x],[0,1,shift_y]])
array_shifted = cv2.warpAffine(image, M, (x,y), flags=intp,
borderMode=cv2.BORDER_REPLICATE)
return array_shifted
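# Usage sketch for frame_shift (frame contents and offsets are made up):
#     frame = np.random.rand(101, 101)
#     shifted = frame_shift(frame, shift_y=0.5, shift_x=-1.25, interpolation='bicubic')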
def inspect_patch_multik(model, cube, angle_list, k_list=[1,2,3,5,10,20,50,100],
inrad=10, outrad=14, size_patch=11, xy=(0,0),
scaling='temp-standard', collapse_func=np.mean,
normalize='slice', plot=True, dpi=70, psf=None):
"""
"""
n_frames = cube.shape[0]
frsize = int(cube.shape[1])
if hasattr(model, 'base_estimator'):
mode = 'rf'
elif hasattr(model, 'name'):
mode = 'nn'
else:
raise RuntimeError('Model not recognized')
im_zeros = np.zeros_like(cube[0])
im_zeros[xy[1],xy[0]] = 1
cube_residuals = svd_decomp(cube, angle_list, size_patch, inrad, outrad,
scaling, k_list, collapse_func, neg_ang=False)
patches_cube = []
y, x = np.where(im_zeros==1)
patch = cube_crop_frames(np.array(cube_residuals), size_patch,
xy=(int(x),int(y)), verbose=False)
patch_reshaped = np.expand_dims(patch, 0)
if normalize is not None:
patch_reshaped = normalize_01(patch_reshaped, normalize)
if mode=='nn':
# adding extra dimension (channel) for keras model
patch_reshaped = np.expand_dims(patch_reshaped, -1)
proba = model.predict(patch_reshaped, verbose=0)
#prediction = model.predict_classes(patch_reshaped, verbose=0)
#proba = model.predict_proba(patch_reshaped, verbose=0)
elif mode=='rf':
if psf is not None:
patch_vector = create_feature_matrix(patch_reshaped, psf)
else:
            # vectorizing the 3d sample into a single-row feature matrix
            patch_vector = patch_reshaped.reshape(1, -1)
proba = model.predict_proba(patch_vector)
if plot:
plots(np.squeeze(patch_reshaped), cmap='viridis', axis=False, dpi=dpi,
maxplots=np.squeeze(patch_reshaped).shape[0], colorb=False)
print('Proba :', proba, '\n')
#return patch, prediction, proba
return patch, proba
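# Usage sketch for inspect_patch_multik (trained_model, cube and xy are placeholders):
#     patch, proba = inspect_patch_multik(trained_model, cube, angle_list,
#                                         k_list=[1, 2, 5, 10], xy=(34, 56), plot=False)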
def get_indices_annulus(shape, inrad, outrad, mask=None, maskrad=None,
verbose=False):
""" mask is a list of tuples X,Y
"""
framemp = np.zeros(shape)
if mask is not None:
if not isinstance(mask, list):
raise TypeError('Mask should be a list of tuples')
if maskrad is None:
raise ValueError('Fwhm not given')
for xy in mask:
# patch_size/2 diameter aperture
cir = circle(xy[1], xy[0], maskrad, shape)
framemp[cir] = 1
annulus_width = outrad - inrad
cy, cx = frame_center(framemp)
yy, xx = np.mgrid[:framemp.shape[0], :framemp.shape[1]]
circ = np.sqrt((xx - cx)**2 + (yy - cy)**2)
donut_mask = (circ <= (inrad + annulus_width)) & (circ >= inrad)
y, x = np.where(donut_mask)
if mask is not None:
npix = y.shape[0]
        ymask, xmask = np.where(framemp)  # masked pixels where == 1
        masked_pixels = set(zip(ymask, xmask))  # zip is a one-shot iterator, so materialize it once
        inds = []
        for i, tup in enumerate(zip(y, x)):
            if tup in masked_pixels:
                inds.append(i)
y = np.delete(y, inds)
x = np.delete(x, inds)
if verbose:
print(y.shape[0], 'pixels in annulus')
return y, x
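# Usage sketch for get_indices_annulus (shape and radii are arbitrary):
#     yy, xx = get_indices_annulus((101, 101), inrad=10, outrad=14)
#     # yy, xx index the annulus pixels, e.g. image[yy, xx] selects them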
def create_synt_cube(cube, psf, ang, plsc, dist=None, theta=None, flux=None,
verbose=False):
"""
"""
centy_fr, centx_fr = frame_center(cube[0])
np.random.seed()
if theta is None:
theta = np.random.randint(0,360)
posy = dist * np.sin(np.deg2rad(theta)) + centy_fr
    posx = dist * np.cos(np.deg2rad(theta)) + centx_fr